author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-16 18:52:38 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-16 18:52:38 -0400 |
commit | 399eb9b6cbf31ff6ef91a6930e2e94c703d74078 (patch) | |
tree | 60c26e51c167efdfec5ab5821111df9ea90cbf7f | |
parent | 2b97c39514a6130f38b14227a36d9cd37e650a9d (diff) | |
parent | 3dc8dcb02fdba3370aec0696727e6adfe8033aa4 (diff) | |
Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
"This contains driver changes that are tightly connected to SoC
specific code. Aside from smaller cleanups and bug fixes, here is a
list of the notable changes.
New device drivers:
- The Turris Mox router has a new "moxtet" bus driver for its
on-board pluggable extension bus. The same platform also gains a
firmware driver.
- The Samsung Exynos family gains a new Chipid driver, exporting chip
identification information through the soc device sysfs interface
- A similar socinfo driver for Qualcomm Snapdragon chips.
- A firmware driver for the NXP i.MX DSP IPC protocol using shared
memory and a mailbox
Other changes:
- The i.MX reset controller driver now supports the NXP i.MX8MM chip
- Amlogic SoC specific drivers gain support for the S905X3 and A311D
chips
- A rework of the TI Davinci framebuffer driver to allow important
cleanups in the platform code
- A couple of device drivers for removed ARM SoC platforms are
removed. Most of the removals were picked up by other maintainers;
this contains whatever was left"
* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (123 commits)
bus: uniphier-system-bus: use devm_platform_ioremap_resource()
soc: ti: ti_sci_pm_domains: Add support for exclusive and shared access
dt-bindings: ti_sci_pm_domains: Add support for exclusive and shared access
firmware: ti_sci: Allow for device shared and exclusive requests
bus: imx-weim: remove incorrect __init annotations
fbdev: remove w90x900/nuc900 platform drivers
spi: remove w90x900 driver
net: remove w90p910-ether driver
net: remove ks8695 driver
firmware: turris-mox-rwtm: Add sysfs documentation
firmware: Add Turris Mox rWTM firmware driver
dt-bindings: firmware: Document cznic,turris-mox-rwtm binding
bus: moxtet: fix unsigned comparison to less than zero
bus: moxtet: remove set but not used variable 'dummy'
ARM: scoop: Use the right include
dt-bindings: power: add Amlogic Everything-Else power domains bindings
soc: amlogic: Add support for Everything-Else power domains controller
fbdev: da8xx: use resource management for dma
fbdev: da8xx-fb: drop a redundant if
fbdev: da8xx-fb: use devm_platform_ioremap_resource()
...
128 files changed, 5409 insertions, 4954 deletions
diff --git a/Documentation/ABI/testing/debugfs-moxtet b/Documentation/ABI/testing/debugfs-moxtet
new file mode 100644
index 000000000000..67b1717794d8
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-moxtet
@@ -0,0 +1,23 @@ | |||
1 | What: /sys/kernel/debug/moxtet/input | ||
2 | Date: March 2019 | ||
3 | KernelVersion: 5.3 | ||
4 | Contact: Marek Behún <marek.behun@nic.cz> | ||
5 | Description: (R) Read input from the shift registers, in hexadecimal. | ||
6 | Returns N+1 bytes, where N is the number of Moxtet connected | ||
7 | modules. The first byte is from the CPU board itself. | ||
8 | Example: 101214 | ||
9 | 10: CPU board with SD card | ||
10 | 12: 2 = PCIe module, 1 = IRQ not active | ||
11 | 14: 4 = Peridot module, 1 = IRQ not active | ||
12 | |||
13 | What: /sys/kernel/debug/moxtet/output | ||
14 | Date: March 2019 | ||
15 | KernelVersion: 5.3 | ||
16 | Contact: Marek Behún <marek.behun@nic.cz> | ||
17 | Description: (RW) Read last written value to the shift registers, in | ||
18 | hexadecimal, or write values to the shift registers, also | ||
19 | in hexadecimal. | ||
20 | Example: 0102 | ||
21 | 01: 01 was last written, or is to be written, to the | ||
22 | first module's shift register | ||
23 | 02: the same for second module | ||
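The input format described above is compact enough to decode by hand. As an illustration only (this helper is not part of the series), a minimal userspace sketch could split the hex string into per-module bytes, taking the module ID from the low nibble and the input/IRQ state from the high nibble:

```c
/* Hypothetical userspace helper, not part of this series: decode the
 * /sys/kernel/debug/moxtet/input string described above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/kernel/debug/moxtet/input", "r");

	if (!f)
		return 1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return 1;
	}

	/* the first byte (two hex digits) describes the CPU board itself */
	for (size_t i = 2; i + 1 < strlen(buf); i += 2) {
		unsigned int byte;

		if (sscanf(&buf[i], "%2x", &byte) != 1)
			break;
		printf("module %zu: id 0x%x, input/IRQ nibble 0x%x\n",
		       i / 2, byte & 0xf, byte >> 4);
	}

	fclose(f);
	return 0;
}
```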
diff --git a/Documentation/ABI/testing/sysfs-bus-moxtet-devices b/Documentation/ABI/testing/sysfs-bus-moxtet-devices
new file mode 100644
index 000000000000..355958527fa3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-moxtet-devices
@@ -0,0 +1,17 @@ | |||
1 | What: /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_description | ||
2 | Date: March 2019 | ||
3 | KernelVersion: 5.3 | ||
4 | Contact: Marek Behún <marek.behun@nic.cz> | ||
5 | Description: (R) Moxtet module description. Format: string | ||
6 | |||
7 | What: /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_id | ||
8 | Date: March 2019 | ||
9 | KernelVersion: 5.3 | ||
10 | Contact: Marek Behún <marek.behun@nic.cz> | ||
11 | Description: (R) Moxtet module ID. Format: %x | ||
12 | |||
13 | What: /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_name | ||
14 | Date: March 2019 | ||
15 | KernelVersion: 5.3 | ||
16 | Contact: Marek Behún <marek.behun@nic.cz> | ||
17 | Description: (R) Moxtet module name. Format: string | ||
diff --git a/Documentation/ABI/testing/sysfs-devices-soc b/Documentation/ABI/testing/sysfs-devices-soc
index 6d9cc253f2b2..ba3a3fac0ee1 100644
--- a/Documentation/ABI/testing/sysfs-devices-soc
+++ b/Documentation/ABI/testing/sysfs-devices-soc
@@ -26,6 +26,13 @@ Description: | |||
26 | Read-only attribute common to all SoCs. Contains SoC family name | 26 | Read-only attribute common to all SoCs. Contains SoC family name |
27 | (e.g. DB8500). | 27 | (e.g. DB8500). |
28 | 28 | ||
29 | What: /sys/devices/socX/serial_number | ||
30 | Date: January 2019 | ||
31 | contact: Bjorn Andersson <bjorn.andersson@linaro.org> | ||
32 | Description: | ||
33 | Read-only attribute supported by most SoCs. Contains the SoC's | ||
34 | serial number, if available. | ||
35 | |||
29 | What: /sys/devices/socX/soc_id | 36 | What: /sys/devices/socX/soc_id |
30 | Date: January 2012 | 37 | Date: January 2012 |
31 | contact: Lee Jones <lee.jones@linaro.org> | 38 | contact: Lee Jones <lee.jones@linaro.org> |
diff --git a/Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm b/Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
new file mode 100644
index 000000000000..15595fab88d1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
@@ -0,0 +1,37 @@ | |||
1 | What: /sys/firmware/turris-mox-rwtm/board_version | ||
2 | Date: August 2019 | ||
3 | KernelVersion: 5.4 | ||
4 | Contact: Marek Behún <marek.behun@nic.cz> | ||
5 | Description: (R) Board version burned into eFuses of this Turris Mox board. | ||
6 | Format: %i | ||
7 | |||
8 | What: /sys/firmware/turris-mox-rwtm/mac_address* | ||
9 | Date: August 2019 | ||
10 | KernelVersion: 5.4 | ||
11 | Contact: Marek Behún <marek.behun@nic.cz> | ||
12 | Description: (R) MAC addresses burned into eFuses of this Turris Mox board. | ||
13 | Format: %pM | ||
14 | |||
15 | What: /sys/firmware/turris-mox-rwtm/pubkey | ||
16 | Date: August 2019 | ||
17 | KernelVersion: 5.4 | ||
18 | Contact: Marek Behún <marek.behun@nic.cz> | ||
19 | Description: (R) ECDSA public key (in pubkey hex compressed form) computed | ||
20 | as pair to the ECDSA private key burned into eFuses of this | ||
21 | Turris Mox Board. | ||
22 | Format: string | ||
23 | |||
24 | What: /sys/firmware/turris-mox-rwtm/ram_size | ||
25 | Date: August 2019 | ||
26 | KernelVersion: 5.4 | ||
27 | Contact: Marek Behún <marek.behun@nic.cz> | ||
28 | Description: (R) RAM size in MiB of this Turris Mox board as was detected | ||
29 | during manufacturing and burned into eFuses. Can be 512 or 1024. | ||
30 | Format: %i | ||
31 | |||
32 | What: /sys/firmware/turris-mox-rwtm/serial_number | ||
33 | Date: August 2019 | ||
34 | KernelVersion: 5.4 | ||
35 | Contact: Marek Behún <marek.behun@nic.cz> | ||
36 | Description: (R) Serial number burned into eFuses of this Turris Mox device. | ||
37 | Format: %016X | ||
diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt
index 317a2fc3667a..083dbf96ee00 100644
--- a/Documentation/devicetree/bindings/arm/arm,scmi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scmi.txt
@@ -73,6 +73,16 @@ Required properties: | |||
73 | as used by the firmware. Refer to platform details | 73 | as used by the firmware. Refer to platform details |
74 | for your implementation for the IDs to use. | 74 | for your implementation for the IDs to use. |
75 | 75 | ||
76 | Reset signal bindings for the reset domains based on SCMI Message Protocol | ||
77 | ------------------------------------------------------------ | ||
78 | |||
79 | This binding for the SCMI reset domain providers uses the generic reset | ||
80 | signal binding[5]. | ||
81 | |||
82 | Required properties: | ||
83 | - #reset-cells : Should be 1. Contains the reset domain ID value used | ||
84 | by SCMI commands. | ||
85 | |||
76 | SRAM and Shared Memory for SCMI | 86 | SRAM and Shared Memory for SCMI |
77 | ------------------------------- | 87 | ------------------------------- |
78 | 88 | ||
@@ -93,6 +103,7 @@ Required sub-node properties: | |||
93 | [2] Documentation/devicetree/bindings/power/power_domain.txt | 103 | [2] Documentation/devicetree/bindings/power/power_domain.txt |
94 | [3] Documentation/devicetree/bindings/thermal/thermal.txt | 104 | [3] Documentation/devicetree/bindings/thermal/thermal.txt |
95 | [4] Documentation/devicetree/bindings/sram/sram.txt | 105 | [4] Documentation/devicetree/bindings/sram/sram.txt |
106 | [5] Documentation/devicetree/bindings/reset/reset.txt | ||
96 | 107 | ||
97 | Example: | 108 | Example: |
98 | 109 | ||
@@ -152,6 +163,11 @@ firmware { | |||
152 | reg = <0x15>; | 163 | reg = <0x15>; |
153 | #thermal-sensor-cells = <1>; | 164 | #thermal-sensor-cells = <1>; |
154 | }; | 165 | }; |
166 | |||
167 | scmi_reset: protocol@16 { | ||
168 | reg = <0x16>; | ||
169 | #reset-cells = <1>; | ||
170 | }; | ||
155 | }; | 171 | }; |
156 | }; | 172 | }; |
157 | 173 | ||
@@ -166,6 +182,7 @@ hdlcd@7ff60000 { | |||
166 | reg = <0 0x7ff60000 0 0x1000>; | 182 | reg = <0 0x7ff60000 0 0x1000>; |
167 | clocks = <&scmi_clk 4>; | 183 | clocks = <&scmi_clk 4>; |
168 | power-domains = <&scmi_devpd 1>; | 184 | power-domains = <&scmi_devpd 1>; |
185 | resets = <&scmi_reset 10>; | ||
169 | }; | 186 | }; |
170 | 187 | ||
171 | thermal-zones { | 188 | thermal-zones { |
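For context, a consumer of the reset domain declared above goes through the generic reset controller framework rather than talking to SCMI directly. Below is a hedged sketch of such a consumer; the driver itself is invented for illustration, and only the `resets = <&scmi_reset 10>` phandle format comes from the binding example.

```c
/*
 * Illustrative sketch only: a platform driver consuming an SCMI-provided
 * reset line through the generic reset API (<linux/reset.h>). The device
 * is an assumption; the "resets" property format is from the example above.
 */
#include <linux/platform_device.h>
#include <linux/reset.h>

static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	/* resolves the first entry of this node's "resets" property */
	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* assert/deassert end up as SCMI reset-domain requests */
	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	return reset_control_deassert(rst);
}
```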
diff --git a/Documentation/devicetree/bindings/bus/moxtet.txt b/Documentation/devicetree/bindings/bus/moxtet.txt
new file mode 100644
index 000000000000..fb50fc865336
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/moxtet.txt
@@ -0,0 +1,46 @@ | |||
1 | Turris Mox module status and configuration bus (over SPI) | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : Should be "cznic,moxtet" | ||
5 | - #address-cells : Has to be 1 | ||
6 | - #size-cells : Has to be 0 | ||
7 | - spi-cpol : Required inverted clock polarity | ||
8 | - spi-cpha : Required shifted clock phase | ||
9 | - interrupts : Must contain reference to the shared interrupt line | ||
10 | - interrupt-controller : Required | ||
11 | - #interrupt-cells : Has to be 1 | ||
12 | |||
13 | For other required and optional properties of SPI slave nodes please refer to | ||
14 | ../spi/spi-bus.txt. | ||
15 | |||
16 | Required properties of subnodes: | ||
17 | - reg : Should be position on the Moxtet bus (how many Moxtet | ||
18 | modules are between this module and CPU module, so | ||
19 | either 0 or a positive integer) | ||
20 | |||
21 | The driver finds the devices connected to the bus by itself, but it may be | ||
22 | needed to reference some of them from other parts of the device tree. In that | ||
23 | case the devices can be defined as subnodes of the moxtet node. | ||
24 | |||
25 | Example: | ||
26 | |||
27 | moxtet@1 { | ||
28 | compatible = "cznic,moxtet"; | ||
29 | #address-cells = <1>; | ||
30 | #size-cells = <0>; | ||
31 | reg = <1>; | ||
32 | spi-max-frequency = <10000000>; | ||
33 | spi-cpol; | ||
34 | spi-cpha; | ||
35 | interrupt-controller; | ||
36 | #interrupt-cells = <1>; | ||
37 | interrupt-parent = <&gpiosb>; | ||
38 | interrupts = <5 IRQ_TYPE_EDGE_FALLING>; | ||
39 | |||
40 | moxtet_sfp: gpio@0 { | ||
41 | compatible = "cznic,moxtet-gpio"; | ||
42 | gpio-controller; | ||
43 | #gpio-cells = <2>; | ||
44 | reg = <0>; | ||
45 | } | ||
46 | }; | ||
diff --git a/Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt b/Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt
new file mode 100644
index 000000000000..338169dea7bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt
@@ -0,0 +1,19 @@ | |||
1 | Turris Mox rWTM firmware driver | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : Should be "cznic,turris-mox-rwtm" | ||
5 | - mboxes : Must contain a reference to associated mailbox | ||
6 | |||
7 | This device tree node should be used on Turris Mox, or potentially another A3700 | ||
8 | compatible device running the Mox's rWTM firmware in the secure processor (for | ||
9 | example it is possible to flash this firmware into EspressoBin). | ||
10 | |||
11 | Example: | ||
12 | |||
13 | firmware { | ||
14 | turris-mox-rwtm { | ||
15 | compatible = "cznic,turris-mox-rwtm"; | ||
16 | mboxes = <&rwtm 0>; | ||
17 | status = "okay"; | ||
18 | }; | ||
19 | }; | ||
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index 41f133a4e2fa..3f29ea04b5fe 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -9,14 +9,16 @@ Required properties: | |||
9 | - compatible: must contain one of the following: | 9 | - compatible: must contain one of the following: |
10 | * "qcom,scm-apq8064" | 10 | * "qcom,scm-apq8064" |
11 | * "qcom,scm-apq8084" | 11 | * "qcom,scm-apq8084" |
12 | * "qcom,scm-ipq4019" | ||
12 | * "qcom,scm-msm8660" | 13 | * "qcom,scm-msm8660" |
13 | * "qcom,scm-msm8916" | 14 | * "qcom,scm-msm8916" |
14 | * "qcom,scm-msm8960" | 15 | * "qcom,scm-msm8960" |
15 | * "qcom,scm-msm8974" | 16 | * "qcom,scm-msm8974" |
16 | * "qcom,scm-msm8996" | 17 | * "qcom,scm-msm8996" |
17 | * "qcom,scm-msm8998" | 18 | * "qcom,scm-msm8998" |
18 | * "qcom,scm-ipq4019" | 19 | * "qcom,scm-sc7180" |
19 | * "qcom,scm-sdm845" | 20 | * "qcom,scm-sdm845" |
21 | * "qcom,scm-sm8150" | ||
20 | and: | 22 | and: |
21 | * "qcom,scm" | 23 | * "qcom,scm" |
22 | - clocks: Specifies clocks needed by the SCM interface, if any: | 24 | - clocks: Specifies clocks needed by the SCM interface, if any: |
diff --git a/Documentation/devicetree/bindings/gpio/gpio-moxtet.txt b/Documentation/devicetree/bindings/gpio/gpio-moxtet.txt
new file mode 100644
index 000000000000..410759de9f09
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-moxtet.txt
@@ -0,0 +1,18 @@ | |||
1 | Turris Mox Moxtet GPIO expander via Moxtet bus | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : Should be "cznic,moxtet-gpio". | ||
5 | - gpio-controller : Marks the device node as a GPIO controller. | ||
6 | - #gpio-cells : Should be two. For consumer use see gpio.txt. | ||
7 | |||
8 | Other properties are required for a Moxtet bus device, please refer to | ||
9 | Documentation/devicetree/bindings/bus/moxtet.txt. | ||
10 | |||
11 | Example: | ||
12 | |||
13 | moxtet_sfp: gpio@0 { | ||
14 | compatible = "cznic,moxtet-gpio"; | ||
15 | gpio-controller; | ||
16 | #gpio-cells = <2>; | ||
17 | reg = <0>; | ||
18 | } | ||
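Since the expander is a regular GPIO controller, consumers use the standard gpiod API. A minimal sketch follows; the consumer device and the "tx-disable" GPIO function name are made up for illustration, as the binding above only defines the controller node.

```c
/*
 * Hedged sketch: a consumer of the Moxtet GPIO expander through the
 * standard gpiod consumer API. The "tx-disable" name is hypothetical and
 * would be resolved from a "tx-disable-gpios = <&moxtet_sfp ...>" property.
 */
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct gpio_desc *tx_disable;

	tx_disable = devm_gpiod_get(&pdev->dev, "tx-disable", GPIOD_OUT_LOW);
	if (IS_ERR(tx_disable))
		return PTR_ERR(tx_disable);

	/* the expander sits behind SPI, so use the _cansleep variant */
	gpiod_set_value_cansleep(tx_disable, 1);
	return 0;
}
```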
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
new file mode 100644
index 000000000000..aab70e8b681e
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
@@ -0,0 +1,93 @@ | |||
1 | # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) | ||
2 | # Copyright 2019 BayLibre, SAS | ||
3 | %YAML 1.2 | ||
4 | --- | ||
5 | $id: "http://devicetree.org/schemas/power/amlogic,meson-ee-pwrc.yaml#" | ||
6 | $schema: "http://devicetree.org/meta-schemas/core.yaml#" | ||
7 | |||
8 | title: Amlogic Meson Everything-Else Power Domains | ||
9 | |||
10 | maintainers: | ||
11 | - Neil Armstrong <narmstrong@baylibre.com> | ||
12 | |||
13 | description: |+ | ||
14 | The Everything-Else Power Domains node should be the child of a syscon | ||
15 | node with the required property: | ||
16 | |||
17 | - compatible: Should be the following: | ||
18 | "amlogic,meson-gx-hhi-sysctrl", "simple-mfd", "syscon" | ||
19 | |||
20 | Refer to the bindings described in | ||
21 | Documentation/devicetree/bindings/mfd/syscon.txt | ||
22 | |||
23 | properties: | ||
24 | compatible: | ||
25 | enum: | ||
26 | - amlogic,meson-g12a-pwrc | ||
27 | - amlogic,meson-sm1-pwrc | ||
28 | |||
29 | clocks: | ||
30 | minItems: 2 | ||
31 | |||
32 | clock-names: | ||
33 | items: | ||
34 | - const: vpu | ||
35 | - const: vapb | ||
36 | |||
37 | resets: | ||
38 | minItems: 11 | ||
39 | |||
40 | reset-names: | ||
41 | items: | ||
42 | - const: viu | ||
43 | - const: venc | ||
44 | - const: vcbus | ||
45 | - const: bt656 | ||
46 | - const: rdma | ||
47 | - const: venci | ||
48 | - const: vencp | ||
49 | - const: vdac | ||
50 | - const: vdi6 | ||
51 | - const: vencl | ||
52 | - const: vid_lock | ||
53 | |||
54 | "#power-domain-cells": | ||
55 | const: 1 | ||
56 | |||
57 | amlogic,ao-sysctrl: | ||
58 | description: phandle to the AO sysctrl node | ||
59 | allOf: | ||
60 | - $ref: /schemas/types.yaml#/definitions/phandle | ||
61 | |||
62 | required: | ||
63 | - compatible | ||
64 | - clocks | ||
65 | - clock-names | ||
66 | - resets | ||
67 | - reset-names | ||
68 | - "#power-domain-cells" | ||
69 | - amlogic,ao-sysctrl | ||
70 | |||
71 | examples: | ||
72 | - | | ||
73 | pwrc: power-controller { | ||
74 | compatible = "amlogic,meson-sm1-pwrc"; | ||
75 | #power-domain-cells = <1>; | ||
76 | amlogic,ao-sysctrl = <&rti>; | ||
77 | resets = <&reset_viu>, | ||
78 | <&reset_venc>, | ||
79 | <&reset_vcbus>, | ||
80 | <&reset_bt656>, | ||
81 | <&reset_rdma>, | ||
82 | <&reset_venci>, | ||
83 | <&reset_vencp>, | ||
84 | <&reset_vdac>, | ||
85 | <&reset_vdi6>, | ||
86 | <&reset_vencl>, | ||
87 | <&reset_vid_lock>; | ||
88 | reset-names = "viu", "venc", "vcbus", "bt656", | ||
89 | "rdma", "venci", "vencp", "vdac", | ||
90 | "vdi6", "vencl", "vid_lock"; | ||
91 | clocks = <&clk_vpu>, <&clk_vapb>; | ||
92 | clock-names = "vpu", "vapb"; | ||
93 | }; | ||
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
index 13e095182db4..c2489e41a801 100644
--- a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
+++ b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
@@ -8,6 +8,7 @@ Required properties: | |||
8 | - compatible: | 8 | - compatible: |
9 | - For i.MX7 SoCs should be "fsl,imx7d-src", "syscon" | 9 | - For i.MX7 SoCs should be "fsl,imx7d-src", "syscon" |
10 | - For i.MX8MQ SoCs should be "fsl,imx8mq-src", "syscon" | 10 | - For i.MX8MQ SoCs should be "fsl,imx8mq-src", "syscon" |
11 | - For i.MX8MM SoCs should be "fsl,imx8mm-src", "fsl,imx8mq-src", "syscon" | ||
11 | - reg: should be register base and length as documented in the | 12 | - reg: should be register base and length as documented in the |
12 | datasheet | 13 | datasheet |
13 | - interrupts: Should contain SRC interrupt | 14 | - interrupts: Should contain SRC interrupt |
@@ -46,5 +47,6 @@ Example: | |||
46 | 47 | ||
47 | 48 | ||
48 | For list of all valid reset indices see | 49 | For list of all valid reset indices see |
49 | <dt-bindings/reset/imx7-reset.h> for i.MX7 and | 50 | <dt-bindings/reset/imx7-reset.h> for i.MX7, |
50 | <dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ | 51 | <dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ and |
52 | <dt-bindings/reset/imx8mq-reset.h> for i.MX8MM | ||
diff --git a/Documentation/devicetree/bindings/reset/snps,dw-reset.txt b/Documentation/devicetree/bindings/reset/snps,dw-reset.txt
new file mode 100644
index 000000000000..f94f911dd98d
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/snps,dw-reset.txt
@@ -0,0 +1,30 @@ | |||
1 | Synopsys DesignWare Reset controller | ||
2 | ======================================= | ||
3 | |||
4 | Please also refer to reset.txt in this directory for common reset | ||
5 | controller binding usage. | ||
6 | |||
7 | Required properties: | ||
8 | |||
9 | - compatible: should be one of the following. | ||
10 | "snps,dw-high-reset" - for active high configuration | ||
11 | "snps,dw-low-reset" - for active low configuration | ||
12 | |||
13 | - reg: physical base address of the controller and length of memory mapped | ||
14 | region. | ||
15 | |||
16 | - #reset-cells: must be 1. | ||
17 | |||
18 | example: | ||
19 | |||
20 | dw_rst_1: reset-controller@0000 { | ||
21 | compatible = "snps,dw-high-reset"; | ||
22 | reg = <0x0000 0x4>; | ||
23 | #reset-cells = <1>; | ||
24 | }; | ||
25 | |||
26 | dw_rst_2: reset-controller@1000 { | ||
27 | compatible = "snps,dw-low-reset"; | ||
28 | reg = <0x1000 0x8>; | ||
29 | #reset-cells = <1>; | ||
30 | }; | ||
diff --git a/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt b/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt
index 6bf6b43f8dd8..3dd563cec794 100644
--- a/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt
+++ b/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt
@@ -11,6 +11,7 @@ Required properties: | |||
11 | "amlogic,meson8b-clk-measure" for Meson8b SoCs | 11 | "amlogic,meson8b-clk-measure" for Meson8b SoCs |
12 | "amlogic,meson-axg-clk-measure" for AXG SoCs | 12 | "amlogic,meson-axg-clk-measure" for AXG SoCs |
13 | "amlogic,meson-g12a-clk-measure" for G12a SoCs | 13 | "amlogic,meson-g12a-clk-measure" for G12a SoCs |
14 | "amlogic,meson-sm1-clk-measure" for SM1 SoCs | ||
14 | - reg: base address and size of the Clock Measurer register space. | 15 | - reg: base address and size of the Clock Measurer register space. |
15 | 16 | ||
16 | Example: | 17 | Example: |
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe.txt
index d7afaff5faff..05ec2a838c54 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe.txt
@@ -18,7 +18,8 @@ Required properties: | |||
18 | - reg : offset and length of the device registers. | 18 | - reg : offset and length of the device registers. |
19 | - bus-frequency : the clock frequency for QUICC Engine. | 19 | - bus-frequency : the clock frequency for QUICC Engine. |
20 | - fsl,qe-num-riscs: define how many RISC engines the QE has. | 20 | - fsl,qe-num-riscs: define how many RISC engines the QE has. |
21 | - fsl,qe-num-snums: define how many serial number(SNUM) the QE can use for the | 21 | - fsl,qe-snums: This property has to be specified as '/bits/ 8' value, |
22 | defining the array of serial number (SNUM) values for the virtual | ||
22 | threads. | 23 | threads. |
23 | 24 | ||
24 | Optional properties: | 25 | Optional properties: |
@@ -34,6 +35,11 @@ Recommended properties | |||
34 | - brg-frequency : the internal clock source frequency for baud-rate | 35 | - brg-frequency : the internal clock source frequency for baud-rate |
35 | generators in Hz. | 36 | generators in Hz. |
36 | 37 | ||
38 | Deprecated properties | ||
39 | - fsl,qe-num-snums: define how many serial numbers (SNUMs) the QE can use | ||
40 | for the threads. Use fsl,qe-snums instead to not only specify the | ||
41 | number of snums, but also their values. | ||
42 | |||
37 | Example: | 43 | Example: |
38 | qe@e0100000 { | 44 | qe@e0100000 { |
39 | #address-cells = <1>; | 45 | #address-cells = <1>; |
@@ -44,6 +50,11 @@ Example: | |||
44 | reg = <e0100000 480>; | 50 | reg = <e0100000 480>; |
45 | brg-frequency = <0>; | 51 | brg-frequency = <0>; |
46 | bus-frequency = <179A7B00>; | 52 | bus-frequency = <179A7B00>; |
53 | fsl,qe-snums = /bits/ 8 < | ||
54 | 0x04 0x05 0x0C 0x0D 0x14 0x15 0x1C 0x1D | ||
55 | 0x24 0x25 0x2C 0x2D 0x34 0x35 0x88 0x89 | ||
56 | 0x98 0x99 0xA8 0xA9 0xB8 0xB9 0xC8 0xC9 | ||
57 | 0xD8 0xD9 0xE8 0xE9>; | ||
47 | } | 58 | } |
48 | 59 | ||
49 | * Multi-User RAM (MURAM) | 60 | * Multi-User RAM (MURAM) |
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
index 954ffee0a9c4..4fc571e78f01 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
@@ -15,7 +15,10 @@ power-domains. | |||
15 | - compatible: | 15 | - compatible: |
16 | Usage: required | 16 | Usage: required |
17 | Value type: <string> | 17 | Value type: <string> |
18 | Definition: must be "qcom,sdm845-aoss-qmp" | 18 | Definition: must be one of: |
19 | "qcom,sc7180-aoss-qmp" | ||
20 | "qcom,sdm845-aoss-qmp" | ||
21 | "qcom,sm8150-aoss-qmp" | ||
19 | 22 | ||
20 | - reg: | 23 | - reg: |
21 | Usage: required | 24 | Usage: required |
diff --git a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
index f7b00a7c0f68..f541d1f776a2 100644
--- a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+++ b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
@@ -19,8 +19,15 @@ child of the pmmc node. | |||
19 | Required Properties: | 19 | Required Properties: |
20 | -------------------- | 20 | -------------------- |
21 | - compatible: should be "ti,sci-pm-domain" | 21 | - compatible: should be "ti,sci-pm-domain" |
22 | - #power-domain-cells: Must be 1 so that an id can be provided in each | 22 | - #power-domain-cells: Can be one of the following: |
23 | device node. | 23 | 1: Containing the device id of each node |
24 | 2: First entry should be device id | ||
25 | Second entry should be one of the following: | ||
26 | TI_SCI_PD_EXCLUSIVE: To allow device to be | ||
27 | exclusively controlled by | ||
28 | the requesting hosts. | ||
29 | TI_SCI_PD_SHARED: To allow device to be shared | ||
30 | by multiple hosts. | ||
24 | 31 | ||
25 | Example (K2G): | 32 | Example (K2G): |
26 | ------------- | 33 | ------------- |
diff --git a/MAINTAINERS b/MAINTAINERS
index 1dc49553238c..5be81cdf9338 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1617,6 +1617,21 @@ F: drivers/clocksource/timer-atlas7.c | |||
1617 | N: [^a-z]sirf | 1617 | N: [^a-z]sirf |
1618 | X: drivers/gnss | 1618 | X: drivers/gnss |
1619 | 1619 | ||
1620 | ARM/CZ.NIC TURRIS MOX SUPPORT | ||
1621 | M: Marek Behun <marek.behun@nic.cz> | ||
1622 | W: http://mox.turris.cz | ||
1623 | S: Maintained | ||
1624 | F: Documentation/ABI/testing/debugfs-moxtet | ||
1625 | F: Documentation/ABI/testing/sysfs-bus-moxtet-devices | ||
1626 | F: Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm | ||
1627 | F: Documentation/devicetree/bindings/bus/moxtet.txt | ||
1628 | F: Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt | ||
1629 | F: Documentation/devicetree/bindings/gpio/gpio-moxtet.txt | ||
1630 | F: include/linux/moxtet.h | ||
1631 | F: drivers/bus/moxtet.c | ||
1632 | F: drivers/firmware/turris-mox-rwtm.c | ||
1633 | F: drivers/gpio/gpio-moxtet.c | ||
1634 | |||
1620 | ARM/EBSA110 MACHINE SUPPORT | 1635 | ARM/EBSA110 MACHINE SUPPORT |
1621 | M: Russell King <linux@armlinux.org.uk> | 1636 | M: Russell King <linux@armlinux.org.uk> |
1622 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1637 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -15530,6 +15545,7 @@ F: drivers/clk/clk-sc[mp]i.c | |||
15530 | F: drivers/cpufreq/sc[mp]i-cpufreq.c | 15545 | F: drivers/cpufreq/sc[mp]i-cpufreq.c |
15531 | F: drivers/firmware/arm_scpi.c | 15546 | F: drivers/firmware/arm_scpi.c |
15532 | F: drivers/firmware/arm_scmi/ | 15547 | F: drivers/firmware/arm_scmi/ |
15548 | F: drivers/reset/reset-scmi.c | ||
15533 | F: include/linux/sc[mp]i_protocol.h | 15549 | F: include/linux/sc[mp]i_protocol.h |
15534 | 15550 | ||
15535 | SYSTEM RESET/SHUTDOWN DRIVERS | 15551 | SYSTEM RESET/SHUTDOWN DRIVERS |
@@ -15838,6 +15854,7 @@ F: drivers/firmware/ti_sci* | |||
15838 | F: include/linux/soc/ti/ti_sci_protocol.h | 15854 | F: include/linux/soc/ti/ti_sci_protocol.h |
15839 | F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt | 15855 | F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt |
15840 | F: drivers/soc/ti/ti_sci_pm_domains.c | 15856 | F: drivers/soc/ti/ti_sci_pm_domains.c |
15857 | F: include/dt-bindings/soc/ti,sci_pm_domain.h | ||
15841 | F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt | 15858 | F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt |
15842 | F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt | 15859 | F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt |
15843 | F: drivers/clk/keystone/sci-clk.c | 15860 | F: drivers/clk/keystone/sci-clk.c |
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index 60130bd7b182..6edb961bd6c1 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -8,7 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
11 | #include <linux/gpio.h> | 11 | #include <linux/gpio/driver.h> |
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 0628e7d7dcf3..5b3549f1236c 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/platform_data/ti-aemif.h> | 36 | #include <linux/platform_data/ti-aemif.h> |
37 | #include <linux/platform_data/spi-davinci.h> | 37 | #include <linux/platform_data/spi-davinci.h> |
38 | #include <linux/platform_data/uio_pruss.h> | 38 | #include <linux/platform_data/uio_pruss.h> |
39 | #include <linux/property.h> | ||
39 | #include <linux/regulator/machine.h> | 40 | #include <linux/regulator/machine.h> |
40 | #include <linux/regulator/tps6507x.h> | 41 | #include <linux/regulator/tps6507x.h> |
41 | #include <linux/regulator/fixed.h> | 42 | #include <linux/regulator/fixed.h> |
@@ -802,37 +803,79 @@ static const short da850_evm_mmcsd0_pins[] __initconst = { | |||
802 | -1 | 803 | -1 |
803 | }; | 804 | }; |
804 | 805 | ||
805 | static void da850_panel_power_ctrl(int val) | 806 | static struct property_entry da850_lcd_backlight_props[] = { |
806 | { | 807 | PROPERTY_ENTRY_BOOL("default-on"), |
807 | /* lcd backlight */ | 808 | { } |
808 | gpio_set_value(DA850_LCD_BL_PIN, val); | 809 | }; |
809 | 810 | ||
810 | /* lcd power */ | 811 | static struct gpiod_lookup_table da850_lcd_backlight_gpio_table = { |
811 | gpio_set_value(DA850_LCD_PWR_PIN, val); | 812 | .dev_id = "gpio-backlight", |
812 | } | 813 | .table = { |
814 | GPIO_LOOKUP("davinci_gpio", DA850_LCD_BL_PIN, NULL, 0), | ||
815 | { } | ||
816 | }, | ||
817 | }; | ||
818 | |||
819 | static const struct platform_device_info da850_lcd_backlight_info = { | ||
820 | .name = "gpio-backlight", | ||
821 | .id = PLATFORM_DEVID_NONE, | ||
822 | .properties = da850_lcd_backlight_props, | ||
823 | }; | ||
824 | |||
825 | static struct regulator_consumer_supply da850_lcd_supplies[] = { | ||
826 | REGULATOR_SUPPLY("lcd", NULL), | ||
827 | }; | ||
828 | |||
829 | static struct regulator_init_data da850_lcd_supply_data = { | ||
830 | .consumer_supplies = da850_lcd_supplies, | ||
831 | .num_consumer_supplies = ARRAY_SIZE(da850_lcd_supplies), | ||
832 | .constraints = { | ||
833 | .valid_ops_mask = REGULATOR_CHANGE_STATUS, | ||
834 | }, | ||
835 | }; | ||
836 | |||
837 | static struct fixed_voltage_config da850_lcd_supply = { | ||
838 | .supply_name = "lcd", | ||
839 | .microvolts = 33000000, | ||
840 | .init_data = &da850_lcd_supply_data, | ||
841 | }; | ||
842 | |||
843 | static struct platform_device da850_lcd_supply_device = { | ||
844 | .name = "reg-fixed-voltage", | ||
845 | .id = 1, /* Dummy fixed regulator is 0 */ | ||
846 | .dev = { | ||
847 | .platform_data = &da850_lcd_supply, | ||
848 | }, | ||
849 | }; | ||
850 | |||
851 | static struct gpiod_lookup_table da850_lcd_supply_gpio_table = { | ||
852 | .dev_id = "reg-fixed-voltage.1", | ||
853 | .table = { | ||
854 | GPIO_LOOKUP("davinci_gpio", DA850_LCD_PWR_PIN, NULL, 0), | ||
855 | { } | ||
856 | }, | ||
857 | }; | ||
858 | |||
859 | static struct gpiod_lookup_table *da850_lcd_gpio_lookups[] = { | ||
860 | &da850_lcd_backlight_gpio_table, | ||
861 | &da850_lcd_supply_gpio_table, | ||
862 | }; | ||
813 | 863 | ||
814 | static int da850_lcd_hw_init(void) | 864 | static int da850_lcd_hw_init(void) |
815 | { | 865 | { |
866 | struct platform_device *backlight; | ||
816 | int status; | 867 | int status; |
817 | 868 | ||
818 | status = gpio_request(DA850_LCD_BL_PIN, "lcd bl"); | 869 | gpiod_add_lookup_tables(da850_lcd_gpio_lookups, |
819 | if (status < 0) | 870 | ARRAY_SIZE(da850_lcd_gpio_lookups)); |
820 | return status; | ||
821 | |||
822 | status = gpio_request(DA850_LCD_PWR_PIN, "lcd pwr"); | ||
823 | if (status < 0) { | ||
824 | gpio_free(DA850_LCD_BL_PIN); | ||
825 | return status; | ||
826 | } | ||
827 | 871 | ||
828 | gpio_direction_output(DA850_LCD_BL_PIN, 0); | 872 | backlight = platform_device_register_full(&da850_lcd_backlight_info); |
829 | gpio_direction_output(DA850_LCD_PWR_PIN, 0); | 873 | if (IS_ERR(backlight)) |
874 | return PTR_ERR(backlight); | ||
830 | 875 | ||
831 | /* Switch off panel power and backlight */ | 876 | status = platform_device_register(&da850_lcd_supply_device); |
832 | da850_panel_power_ctrl(0); | 877 | if (status) |
833 | 878 | return status; | |
834 | /* Switch on panel power and backlight */ | ||
835 | da850_panel_power_ctrl(1); | ||
836 | 879 | ||
837 | return 0; | 880 | return 0; |
838 | } | 881 | } |
@@ -1443,7 +1486,6 @@ static __init void da850_evm_init(void) | |||
1443 | if (ret) | 1486 | if (ret) |
1444 | pr_warn("%s: LCD initialization failed: %d\n", __func__, ret); | 1487 | pr_warn("%s: LCD initialization failed: %d\n", __func__, ret); |
1445 | 1488 | ||
1446 | sharp_lk043t1dg01_pdata.panel_power_ctrl = da850_panel_power_ctrl, | ||
1447 | ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata); | 1489 | ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata); |
1448 | if (ret) | 1490 | if (ret) |
1449 | pr_warn("%s: LCDC registration failed: %d\n", __func__, ret); | 1491 | pr_warn("%s: LCDC registration failed: %d\n", __func__, ret); |
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 10b280f30217..7c0c5ca5953d 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -33,6 +33,7 @@ static struct bus_type soc_bus_type = { | |||
33 | 33 | ||
34 | static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL); | 34 | static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL); |
35 | static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL); | 35 | static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL); |
36 | static DEVICE_ATTR(serial_number, S_IRUGO, soc_info_get, NULL); | ||
36 | static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL); | 37 | static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL); |
37 | static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL); | 38 | static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL); |
38 | 39 | ||
@@ -57,6 +58,9 @@ static umode_t soc_attribute_mode(struct kobject *kobj, | |||
57 | if ((attr == &dev_attr_revision.attr) | 58 | if ((attr == &dev_attr_revision.attr) |
58 | && (soc_dev->attr->revision != NULL)) | 59 | && (soc_dev->attr->revision != NULL)) |
59 | return attr->mode; | 60 | return attr->mode; |
61 | if ((attr == &dev_attr_serial_number.attr) | ||
62 | && (soc_dev->attr->serial_number != NULL)) | ||
63 | return attr->mode; | ||
60 | if ((attr == &dev_attr_soc_id.attr) | 64 | if ((attr == &dev_attr_soc_id.attr) |
61 | && (soc_dev->attr->soc_id != NULL)) | 65 | && (soc_dev->attr->soc_id != NULL)) |
62 | return attr->mode; | 66 | return attr->mode; |
@@ -77,6 +81,8 @@ static ssize_t soc_info_get(struct device *dev, | |||
77 | return sprintf(buf, "%s\n", soc_dev->attr->family); | 81 | return sprintf(buf, "%s\n", soc_dev->attr->family); |
78 | if (attr == &dev_attr_revision) | 82 | if (attr == &dev_attr_revision) |
79 | return sprintf(buf, "%s\n", soc_dev->attr->revision); | 83 | return sprintf(buf, "%s\n", soc_dev->attr->revision); |
84 | if (attr == &dev_attr_serial_number) | ||
85 | return sprintf(buf, "%s\n", soc_dev->attr->serial_number); | ||
80 | if (attr == &dev_attr_soc_id) | 86 | if (attr == &dev_attr_soc_id) |
81 | return sprintf(buf, "%s\n", soc_dev->attr->soc_id); | 87 | return sprintf(buf, "%s\n", soc_dev->attr->soc_id); |
82 | 88 | ||
@@ -87,6 +93,7 @@ static ssize_t soc_info_get(struct device *dev, | |||
87 | static struct attribute *soc_attr[] = { | 93 | static struct attribute *soc_attr[] = { |
88 | &dev_attr_machine.attr, | 94 | &dev_attr_machine.attr, |
89 | &dev_attr_family.attr, | 95 | &dev_attr_family.attr, |
96 | &dev_attr_serial_number.attr, | ||
90 | &dev_attr_soc_id.attr, | 97 | &dev_attr_soc_id.attr, |
91 | &dev_attr_revision.attr, | 98 | &dev_attr_revision.attr, |
92 | NULL, | 99 | NULL, |
@@ -157,6 +164,7 @@ out2: | |||
157 | out1: | 164 | out1: |
158 | return ERR_PTR(ret); | 165 | return ERR_PTR(ret); |
159 | } | 166 | } |
167 | EXPORT_SYMBOL_GPL(soc_device_register); | ||
160 | 168 | ||
161 | /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */ | 169 | /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */ |
162 | void soc_device_unregister(struct soc_device *soc_dev) | 170 | void soc_device_unregister(struct soc_device *soc_dev) |
@@ -166,6 +174,7 @@ void soc_device_unregister(struct soc_device *soc_dev) | |||
166 | device_unregister(&soc_dev->dev); | 174 | device_unregister(&soc_dev->dev); |
167 | early_soc_dev_attr = NULL; | 175 | early_soc_dev_attr = NULL; |
168 | } | 176 | } |
177 | EXPORT_SYMBOL_GPL(soc_device_unregister); | ||
169 | 178 | ||
170 | static int __init soc_bus_register(void) | 179 | static int __init soc_bus_register(void) |
171 | { | 180 | { |
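To show where the new attribute comes from, here is a hedged sketch of how an SoC identification driver might populate it. The names and values are invented, and it assumes the matching serial_number field added to struct soc_device_attribute elsewhere in this series; soc_device_register() itself is the existing entry point from <linux/sys_soc.h>.

```c
/*
 * Hedged sketch (values are made up): filling in the serial_number
 * attribute exposed through /sys/devices/socX/serial_number by the
 * hunks above.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

static struct soc_device *example_soc_register(u64 serial)
{
	struct soc_device_attribute *attr;
	struct soc_device *soc_dev;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return ERR_PTR(-ENOMEM);

	attr->family = "Example SoC Family";
	attr->soc_id = "EXAMPLE1234";
	attr->revision = "1.0";
	/* shows up as /sys/devices/socX/serial_number when non-NULL */
	attr->serial_number = kasprintf(GFP_KERNEL, "%016llx", serial);

	soc_dev = soc_device_register(attr);
	if (IS_ERR(soc_dev)) {
		kfree(attr->serial_number);
		kfree(attr);
	}
	return soc_dev;
}
```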
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1851112ccc29..6b331061d34b 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -29,6 +29,16 @@ config BRCMSTB_GISB_ARB | |||
29 | arbiter. This driver provides timeout and target abort error handling | 29 | arbiter. This driver provides timeout and target abort error handling |
30 | and internal bus master decoding. | 30 | and internal bus master decoding. |
31 | 31 | ||
32 | config MOXTET | ||
33 | tristate "CZ.NIC Turris Mox module configuration bus" | ||
34 | depends on SPI_MASTER && OF | ||
35 | help | ||
36 | Say yes here to add support for the module configuration bus found | ||
37 | on CZ.NIC's Turris Mox. This is needed for the ability to discover | ||
38 | the order in which the modules are connected and to get/set some of | ||
39 | their settings. For example the GPIOs on Mox SFP module are | ||
40 | configured through this bus. | ||
41 | |||
32 | config HISILICON_LPC | 42 | config HISILICON_LPC |
33 | bool "Support for ISA I/O space on HiSilicon Hip06/7" | 43 | bool "Support for ISA I/O space on HiSilicon Hip06/7" |
34 | depends on ARM64 && (ARCH_HISI || COMPILE_TEST) | 44 | depends on ARM64 && (ARCH_HISI || COMPILE_TEST) |
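A hedged sketch of what a client of this bus looks like, built only from the interfaces visible in drivers/bus/moxtet.c below (struct moxtet_driver, __moxtet_register_driver(), moxtet_device_read()). The probe wiring through the embedded device_driver and the TURRIS_MOX_MODULE_PCI constant are assumptions, since include/linux/moxtet.h is not part of this excerpt.

```c
/*
 * Hedged sketch of a Moxtet client driver. The id_table is
 * zero-terminated, matching how moxtet_match() walks it below;
 * TURRIS_MOX_MODULE_PCI is an assumed constant from moxtet.h.
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/moxtet.h>

static int example_moxtet_probe(struct device *dev)
{
	int state;

	/* read this module's nibble from the input shift register chain */
	state = moxtet_device_read(dev);
	if (state < 0)
		return state;

	dev_info(dev, "input shift register nibble: 0x%x\n", state);
	return 0;
}

static const enum turris_mox_module_id example_ids[] = {
	TURRIS_MOX_MODULE_PCI,	/* assumed constant, see moxtet.h */
	0,			/* terminator expected by moxtet_match() */
};

static struct moxtet_driver example_moxtet_driver = {
	.id_table = example_ids,
	.driver = {
		.name	= "example-moxtet",
		.probe	= example_moxtet_probe,
	},
};

static int __init example_init(void)
{
	return __moxtet_register_driver(THIS_MODULE, &example_moxtet_driver);
}
module_init(example_init);

MODULE_LICENSE("GPL");
```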
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ca300b1914ce..16b43d3468c6 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ARM_CCI) += arm-cci.o | |||
8 | 8 | ||
9 | obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o | 9 | obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o |
10 | obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o | 10 | obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o |
11 | obj-$(CONFIG_MOXTET) += moxtet.o | ||
11 | 12 | ||
12 | # DPAA2 fsl-mc bus | 13 | # DPAA2 fsl-mc bus |
13 | obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ | 14 | obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ |
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 8ad77246f322..cc7bb900f524 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -330,7 +330,6 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev) | |||
330 | 330 | ||
331 | fsl_mc_resource_free(resource); | 331 | fsl_mc_resource_free(resource); |
332 | 332 | ||
333 | device_link_del(mc_adev->consumer_link); | ||
334 | mc_adev->consumer_link = NULL; | 333 | mc_adev->consumer_link = NULL; |
335 | } | 334 | } |
336 | EXPORT_SYMBOL_GPL(fsl_mc_object_free); | 335 | EXPORT_SYMBOL_GPL(fsl_mc_object_free); |
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 3ae574a58cce..d9629fc13a15 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -255,7 +255,6 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io) | |||
255 | fsl_destroy_mc_io(mc_io); | 255 | fsl_destroy_mc_io(mc_io); |
256 | fsl_mc_resource_free(resource); | 256 | fsl_mc_resource_free(resource); |
257 | 257 | ||
258 | device_link_del(dpmcp_dev->consumer_link); | ||
259 | dpmcp_dev->consumer_link = NULL; | 258 | dpmcp_dev->consumer_link = NULL; |
260 | } | 259 | } |
261 | EXPORT_SYMBOL_GPL(fsl_mc_portal_free); | 260 | EXPORT_SYMBOL_GPL(fsl_mc_portal_free); |
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index db74334ca5ef..28bb65a5613f 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -19,6 +19,8 @@ struct imx_weim_devtype { | |||
19 | unsigned int cs_count; | 19 | unsigned int cs_count; |
20 | unsigned int cs_regs_count; | 20 | unsigned int cs_regs_count; |
21 | unsigned int cs_stride; | 21 | unsigned int cs_stride; |
22 | unsigned int wcr_offset; | ||
23 | unsigned int wcr_bcm; | ||
22 | }; | 24 | }; |
23 | 25 | ||
24 | static const struct imx_weim_devtype imx1_weim_devtype = { | 26 | static const struct imx_weim_devtype imx1_weim_devtype = { |
@@ -37,6 +39,8 @@ static const struct imx_weim_devtype imx50_weim_devtype = { | |||
37 | .cs_count = 4, | 39 | .cs_count = 4, |
38 | .cs_regs_count = 6, | 40 | .cs_regs_count = 6, |
39 | .cs_stride = 0x18, | 41 | .cs_stride = 0x18, |
42 | .wcr_offset = 0x90, | ||
43 | .wcr_bcm = BIT(0), | ||
40 | }; | 44 | }; |
41 | 45 | ||
42 | static const struct imx_weim_devtype imx51_weim_devtype = { | 46 | static const struct imx_weim_devtype imx51_weim_devtype = { |
@@ -72,7 +76,7 @@ static const struct of_device_id weim_id_table[] = { | |||
72 | }; | 76 | }; |
73 | MODULE_DEVICE_TABLE(of, weim_id_table); | 77 | MODULE_DEVICE_TABLE(of, weim_id_table); |
74 | 78 | ||
75 | static int __init imx_weim_gpr_setup(struct platform_device *pdev) | 79 | static int imx_weim_gpr_setup(struct platform_device *pdev) |
76 | { | 80 | { |
77 | struct device_node *np = pdev->dev.of_node; | 81 | struct device_node *np = pdev->dev.of_node; |
78 | struct property *prop; | 82 | struct property *prop; |
@@ -122,10 +126,10 @@ err: | |||
122 | } | 126 | } |
123 | 127 | ||
124 | /* Parse and set the timing for this device. */ | 128 | /* Parse and set the timing for this device. */ |
125 | static int __init weim_timing_setup(struct device *dev, | 129 | static int weim_timing_setup(struct device *dev, |
126 | struct device_node *np, void __iomem *base, | 130 | struct device_node *np, void __iomem *base, |
127 | const struct imx_weim_devtype *devtype, | 131 | const struct imx_weim_devtype *devtype, |
128 | struct cs_timing_state *ts) | 132 | struct cs_timing_state *ts) |
129 | { | 133 | { |
130 | u32 cs_idx, value[MAX_CS_REGS_COUNT]; | 134 | u32 cs_idx, value[MAX_CS_REGS_COUNT]; |
131 | int i, ret; | 135 | int i, ret; |
@@ -183,8 +187,7 @@ static int __init weim_timing_setup(struct device *dev, | |||
183 | return 0; | 187 | return 0; |
184 | } | 188 | } |
185 | 189 | ||
186 | static int __init weim_parse_dt(struct platform_device *pdev, | 190 | static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) |
187 | void __iomem *base) | ||
188 | { | 191 | { |
189 | const struct of_device_id *of_id = of_match_device(weim_id_table, | 192 | const struct of_device_id *of_id = of_match_device(weim_id_table, |
190 | &pdev->dev); | 193 | &pdev->dev); |
@@ -192,6 +195,7 @@ static int __init weim_parse_dt(struct platform_device *pdev, | |||
192 | struct device_node *child; | 195 | struct device_node *child; |
193 | int ret, have_child = 0; | 196 | int ret, have_child = 0; |
194 | struct cs_timing_state ts = {}; | 197 | struct cs_timing_state ts = {}; |
198 | u32 reg; | ||
195 | 199 | ||
196 | if (devtype == &imx50_weim_devtype) { | 200 | if (devtype == &imx50_weim_devtype) { |
197 | ret = imx_weim_gpr_setup(pdev); | 201 | ret = imx_weim_gpr_setup(pdev); |
@@ -199,6 +203,17 @@ static int __init weim_parse_dt(struct platform_device *pdev, | |||
199 | return ret; | 203 | return ret; |
200 | } | 204 | } |
201 | 205 | ||
206 | if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) { | ||
207 | if (devtype->wcr_bcm) { | ||
208 | reg = readl(base + devtype->wcr_offset); | ||
209 | writel(reg | devtype->wcr_bcm, | ||
210 | base + devtype->wcr_offset); | ||
211 | } else { | ||
212 | dev_err(&pdev->dev, "burst clk mode not supported.\n"); | ||
213 | return -EINVAL; | ||
214 | } | ||
215 | } | ||
216 | |||
202 | for_each_available_child_of_node(pdev->dev.of_node, child) { | 217 | for_each_available_child_of_node(pdev->dev.of_node, child) { |
203 | ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts); | 218 | ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts); |
204 | if (ret) | 219 | if (ret) |
@@ -217,7 +232,7 @@ static int __init weim_parse_dt(struct platform_device *pdev, | |||
217 | return ret; | 232 | return ret; |
218 | } | 233 | } |
219 | 234 | ||
220 | static int __init weim_probe(struct platform_device *pdev) | 235 | static int weim_probe(struct platform_device *pdev) |
221 | { | 236 | { |
222 | struct resource *res; | 237 | struct resource *res; |
223 | struct clk *clk; | 238 | struct clk *clk; |
@@ -254,8 +269,9 @@ static struct platform_driver weim_driver = { | |||
254 | .name = "imx-weim", | 269 | .name = "imx-weim", |
255 | .of_match_table = weim_id_table, | 270 | .of_match_table = weim_id_table, |
256 | }, | 271 | }, |
272 | .probe = weim_probe, | ||
257 | }; | 273 | }; |
258 | module_platform_driver_probe(weim_driver, weim_probe); | 274 | module_platform_driver(weim_driver); |
259 | 275 | ||
260 | MODULE_AUTHOR("Freescale Semiconductor Inc."); | 276 | MODULE_AUTHOR("Freescale Semiconductor Inc."); |
261 | MODULE_DESCRIPTION("i.MX EIM Controller Driver"); | 277 | MODULE_DESCRIPTION("i.MX EIM Controller Driver"); |
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
new file mode 100644
index 000000000000..36cf13eee6b8
--- /dev/null
+++ b/drivers/bus/moxtet.c
@@ -0,0 +1,885 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Turris Mox module configuration bus driver | ||
4 | * | ||
5 | * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> | ||
6 | */ | ||
7 | |||
8 | #include <dt-bindings/bus/moxtet.h> | ||
9 | #include <linux/bitops.h> | ||
10 | #include <linux/debugfs.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/moxtet.h> | ||
14 | #include <linux/mutex.h> | ||
15 | #include <linux/of_device.h> | ||
16 | #include <linux/of_irq.h> | ||
17 | #include <linux/spi/spi.h> | ||
18 | |||
19 | /* | ||
20 | * @name: module name for sysfs | ||
21 | * @hwirq_base: base index for IRQ for this module (-1 if no IRQs) | ||
22 | * @nirqs: how many interrupts does the shift register provide | ||
23 | * @desc: module description for kernel log | ||
24 | */ | ||
25 | static const struct { | ||
26 | const char *name; | ||
27 | int hwirq_base; | ||
28 | int nirqs; | ||
29 | const char *desc; | ||
30 | } mox_module_table[] = { | ||
31 | /* do not change order of this array! */ | ||
32 | { NULL, 0, 0, NULL }, | ||
33 | { "sfp", -1, 0, "MOX D (SFP cage)" }, | ||
34 | { "pci", MOXTET_IRQ_PCI, 1, "MOX B (Mini-PCIe)" }, | ||
35 | { "topaz", MOXTET_IRQ_TOPAZ, 1, "MOX C (4 port switch)" }, | ||
36 | { "peridot", MOXTET_IRQ_PERIDOT(0), 1, "MOX E (8 port switch)" }, | ||
37 | { "usb3", MOXTET_IRQ_USB3, 2, "MOX F (USB 3.0)" }, | ||
38 | { "pci-bridge", -1, 0, "MOX G (Mini-PCIe bridge)" }, | ||
39 | }; | ||
40 | |||
41 | static inline bool mox_module_known(unsigned int id) | ||
42 | { | ||
43 | return id >= TURRIS_MOX_MODULE_FIRST && id <= TURRIS_MOX_MODULE_LAST; | ||
44 | } | ||
45 | |||
46 | static inline const char *mox_module_name(unsigned int id) | ||
47 | { | ||
48 | if (mox_module_known(id)) | ||
49 | return mox_module_table[id].name; | ||
50 | else | ||
51 | return "unknown"; | ||
52 | } | ||
53 | |||
54 | #define DEF_MODULE_ATTR(name, fmt, ...) \ | ||
55 | static ssize_t \ | ||
56 | module_##name##_show(struct device *dev, struct device_attribute *a, \ | ||
57 | char *buf) \ | ||
58 | { \ | ||
59 | struct moxtet_device *mdev = to_moxtet_device(dev); \ | ||
60 | return sprintf(buf, (fmt), __VA_ARGS__); \ | ||
61 | } \ | ||
62 | static DEVICE_ATTR_RO(module_##name) | ||
63 | |||
64 | DEF_MODULE_ATTR(id, "0x%x\n", mdev->id); | ||
65 | DEF_MODULE_ATTR(name, "%s\n", mox_module_name(mdev->id)); | ||
66 | DEF_MODULE_ATTR(description, "%s\n", | ||
67 | mox_module_known(mdev->id) ? mox_module_table[mdev->id].desc | ||
68 | : ""); | ||
69 | |||
70 | static struct attribute *moxtet_dev_attrs[] = { | ||
71 | &dev_attr_module_id.attr, | ||
72 | &dev_attr_module_name.attr, | ||
73 | &dev_attr_module_description.attr, | ||
74 | NULL, | ||
75 | }; | ||
76 | |||
77 | static const struct attribute_group moxtet_dev_group = { | ||
78 | .attrs = moxtet_dev_attrs, | ||
79 | }; | ||
80 | |||
81 | static const struct attribute_group *moxtet_dev_groups[] = { | ||
82 | &moxtet_dev_group, | ||
83 | NULL, | ||
84 | }; | ||
85 | |||
86 | static int moxtet_match(struct device *dev, struct device_driver *drv) | ||
87 | { | ||
88 | struct moxtet_device *mdev = to_moxtet_device(dev); | ||
89 | struct moxtet_driver *tdrv = to_moxtet_driver(drv); | ||
90 | const enum turris_mox_module_id *t; | ||
91 | |||
92 | if (of_driver_match_device(dev, drv)) | ||
93 | return 1; | ||
94 | |||
95 | if (!tdrv->id_table) | ||
96 | return 0; | ||
97 | |||
98 | for (t = tdrv->id_table; *t; ++t) | ||
99 | if (*t == mdev->id) | ||
100 | return 1; | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | struct bus_type moxtet_bus_type = { | ||
106 | .name = "moxtet", | ||
107 | .dev_groups = moxtet_dev_groups, | ||
108 | .match = moxtet_match, | ||
109 | }; | ||
110 | EXPORT_SYMBOL_GPL(moxtet_bus_type); | ||
111 | |||
112 | int __moxtet_register_driver(struct module *owner, | ||
113 | struct moxtet_driver *mdrv) | ||
114 | { | ||
115 | mdrv->driver.owner = owner; | ||
116 | mdrv->driver.bus = &moxtet_bus_type; | ||
117 | return driver_register(&mdrv->driver); | ||
118 | } | ||
119 | EXPORT_SYMBOL_GPL(__moxtet_register_driver); | ||
120 | |||
121 | static int moxtet_dev_check(struct device *dev, void *data) | ||
122 | { | ||
123 | struct moxtet_device *mdev = to_moxtet_device(dev); | ||
124 | struct moxtet_device *new_dev = data; | ||
125 | |||
126 | if (mdev->moxtet == new_dev->moxtet && mdev->id == new_dev->id && | ||
127 | mdev->idx == new_dev->idx) | ||
128 | return -EBUSY; | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static void moxtet_dev_release(struct device *dev) | ||
133 | { | ||
134 | struct moxtet_device *mdev = to_moxtet_device(dev); | ||
135 | |||
136 | put_device(mdev->moxtet->dev); | ||
137 | kfree(mdev); | ||
138 | } | ||
139 | |||
140 | static struct moxtet_device * | ||
141 | moxtet_alloc_device(struct moxtet *moxtet) | ||
142 | { | ||
143 | struct moxtet_device *dev; | ||
144 | |||
145 | if (!get_device(moxtet->dev)) | ||
146 | return NULL; | ||
147 | |||
148 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
149 | if (!dev) { | ||
150 | put_device(moxtet->dev); | ||
151 | return NULL; | ||
152 | } | ||
153 | |||
154 | dev->moxtet = moxtet; | ||
155 | dev->dev.parent = moxtet->dev; | ||
156 | dev->dev.bus = &moxtet_bus_type; | ||
157 | dev->dev.release = moxtet_dev_release; | ||
158 | |||
159 | device_initialize(&dev->dev); | ||
160 | |||
161 | return dev; | ||
162 | } | ||
163 | |||
164 | static int moxtet_add_device(struct moxtet_device *dev) | ||
165 | { | ||
166 | static DEFINE_MUTEX(add_mutex); | ||
167 | int ret; | ||
168 | |||
169 | if (dev->idx >= TURRIS_MOX_MAX_MODULES || dev->id > 0xf) | ||
170 | return -EINVAL; | ||
171 | |||
172 | dev_set_name(&dev->dev, "moxtet-%s.%u", mox_module_name(dev->id), | ||
173 | dev->idx); | ||
174 | |||
175 | mutex_lock(&add_mutex); | ||
176 | |||
177 | ret = bus_for_each_dev(&moxtet_bus_type, NULL, dev, | ||
178 | moxtet_dev_check); | ||
179 | if (ret) | ||
180 | goto done; | ||
181 | |||
182 | ret = device_add(&dev->dev); | ||
183 | if (ret < 0) | ||
184 | dev_err(dev->moxtet->dev, "can't add %s, status %d\n", | ||
185 | dev_name(dev->moxtet->dev), ret); | ||
186 | |||
187 | done: | ||
188 | mutex_unlock(&add_mutex); | ||
189 | return ret; | ||
190 | } | ||
191 | |||
192 | static int __unregister(struct device *dev, void *null) | ||
193 | { | ||
194 | if (dev->of_node) { | ||
195 | of_node_clear_flag(dev->of_node, OF_POPULATED); | ||
196 | of_node_put(dev->of_node); | ||
197 | } | ||
198 | |||
199 | device_unregister(dev); | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static struct moxtet_device * | ||
205 | of_register_moxtet_device(struct moxtet *moxtet, struct device_node *nc) | ||
206 | { | ||
207 | struct moxtet_device *dev; | ||
208 | u32 val; | ||
209 | int ret; | ||
210 | |||
211 | dev = moxtet_alloc_device(moxtet); | ||
212 | if (!dev) { | ||
213 | dev_err(moxtet->dev, | ||
214 | "Moxtet device alloc error for %pOF\n", nc); | ||
215 | return ERR_PTR(-ENOMEM); | ||
216 | } | ||
217 | |||
218 | ret = of_property_read_u32(nc, "reg", &val); | ||
219 | if (ret) { | ||
220 | dev_err(moxtet->dev, "%pOF has no valid 'reg' property (%d)\n", | ||
221 | nc, ret); | ||
222 | goto err_put; | ||
223 | } | ||
224 | |||
225 | dev->idx = val; | ||
226 | |||
227 | if (dev->idx >= TURRIS_MOX_MAX_MODULES) { | ||
228 | dev_err(moxtet->dev, "%pOF Moxtet address 0x%x out of range\n", | ||
229 | nc, dev->idx); | ||
230 | ret = -EINVAL; | ||
231 | goto err_put; | ||
232 | } | ||
233 | |||
234 | dev->id = moxtet->modules[dev->idx]; | ||
235 | |||
236 | if (!dev->id) { | ||
237 | dev_err(moxtet->dev, "%pOF Moxtet address 0x%x is empty\n", nc, | ||
238 | dev->idx); | ||
239 | ret = -ENODEV; | ||
240 | goto err_put; | ||
241 | } | ||
242 | |||
243 | of_node_get(nc); | ||
244 | dev->dev.of_node = nc; | ||
245 | |||
246 | ret = moxtet_add_device(dev); | ||
247 | if (ret) { | ||
248 | dev_err(moxtet->dev, | ||
249 | "Moxtet device register error for %pOF\n", nc); | ||
250 | of_node_put(nc); | ||
251 | goto err_put; | ||
252 | } | ||
253 | |||
254 | return dev; | ||
255 | |||
256 | err_put: | ||
257 | put_device(&dev->dev); | ||
258 | return ERR_PTR(ret); | ||
259 | } | ||
260 | |||
261 | static void of_register_moxtet_devices(struct moxtet *moxtet) | ||
262 | { | ||
263 | struct moxtet_device *dev; | ||
264 | struct device_node *nc; | ||
265 | |||
266 | if (!moxtet->dev->of_node) | ||
267 | return; | ||
268 | |||
269 | for_each_available_child_of_node(moxtet->dev->of_node, nc) { | ||
270 | if (of_node_test_and_set_flag(nc, OF_POPULATED)) | ||
271 | continue; | ||
272 | dev = of_register_moxtet_device(moxtet, nc); | ||
273 | if (IS_ERR(dev)) { | ||
274 | dev_warn(moxtet->dev, | ||
275 | "Failed to create Moxtet device for %pOF\n", | ||
276 | nc); | ||
277 | of_node_clear_flag(nc, OF_POPULATED); | ||
278 | } | ||
279 | } | ||
280 | } | ||
281 | |||
282 | static void | ||
283 | moxtet_register_devices_from_topology(struct moxtet *moxtet) | ||
284 | { | ||
285 | struct moxtet_device *dev; | ||
286 | int i, ret; | ||
287 | |||
288 | for (i = 0; i < moxtet->count; ++i) { | ||
289 | dev = moxtet_alloc_device(moxtet); | ||
290 | if (!dev) { | ||
291 | dev_err(moxtet->dev, "Moxtet device %u alloc error\n", | ||
292 | i); | ||
293 | continue; | ||
294 | } | ||
295 | |||
296 | dev->idx = i; | ||
297 | dev->id = moxtet->modules[i]; | ||
298 | |||
299 | ret = moxtet_add_device(dev); | ||
300 | if (ret && ret != -EBUSY) { | ||
301 | put_device(&dev->dev); | ||
302 | dev_err(moxtet->dev, | ||
303 | "Moxtet device %u register error: %i\n", i, | ||
304 | ret); | ||
305 | } | ||
306 | } | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * @nsame: how many modules with same id are already in moxtet->modules | ||
311 | */ | ||
312 | static int moxtet_set_irq(struct moxtet *moxtet, int idx, int id, int nsame) | ||
313 | { | ||
314 | int i, first; | ||
315 | struct moxtet_irqpos *pos; | ||
316 | |||
317 | first = mox_module_table[id].hwirq_base + | ||
318 | nsame * mox_module_table[id].nirqs; | ||
319 | |||
320 | if (first + mox_module_table[id].nirqs > MOXTET_NIRQS) | ||
321 | return -EINVAL; | ||
322 | |||
323 | for (i = 0; i < mox_module_table[id].nirqs; ++i) { | ||
324 | pos = &moxtet->irq.position[first + i]; | ||
325 | pos->idx = idx; | ||
326 | pos->bit = i; | ||
327 | moxtet->irq.exists |= BIT(first + i); | ||
328 | } | ||
329 | |||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | static int moxtet_find_topology(struct moxtet *moxtet) | ||
334 | { | ||
335 | u8 buf[TURRIS_MOX_MAX_MODULES]; | ||
336 | int cnts[TURRIS_MOX_MODULE_LAST]; | ||
337 | int i, ret; | ||
338 | |||
339 | memset(cnts, 0, sizeof(cnts)); | ||
340 | |||
341 | ret = spi_read(to_spi_device(moxtet->dev), buf, TURRIS_MOX_MAX_MODULES); | ||
342 | if (ret < 0) | ||
343 | return ret; | ||
344 | |||
345 | if (buf[0] == TURRIS_MOX_CPU_ID_EMMC) { | ||
346 | dev_info(moxtet->dev, "Found MOX A (eMMC CPU) module\n"); | ||
347 | } else if (buf[0] == TURRIS_MOX_CPU_ID_SD) { | ||
348 | dev_info(moxtet->dev, "Found MOX A (CPU) module\n"); | ||
349 | } else { | ||
350 | dev_err(moxtet->dev, "Invalid Turris MOX A CPU module 0x%02x\n", | ||
351 | buf[0]); | ||
352 | return -ENODEV; | ||
353 | } | ||
354 | |||
355 | moxtet->count = 0; | ||
356 | |||
357 | for (i = 1; i < TURRIS_MOX_MAX_MODULES; ++i) { | ||
358 | int id; | ||
359 | |||
360 | if (buf[i] == 0xff) | ||
361 | break; | ||
362 | |||
363 | id = buf[i] & 0xf; | ||
364 | |||
365 | moxtet->modules[i-1] = id; | ||
366 | ++moxtet->count; | ||
367 | |||
368 | if (mox_module_known(id)) { | ||
369 | dev_info(moxtet->dev, "Found %s module\n", | ||
370 | mox_module_table[id].desc); | ||
371 | |||
372 | if (moxtet_set_irq(moxtet, i-1, id, cnts[id]++) < 0) | ||
373 | dev_err(moxtet->dev, | ||
374 | " Cannot set IRQ for module %s\n", | ||
375 | mox_module_table[id].desc); | ||
376 | } else { | ||
377 | dev_warn(moxtet->dev, | ||
378 | "Unknown Moxtet module found (ID 0x%02x)\n", | ||
379 | id); | ||
380 | } | ||
381 | } | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static int moxtet_spi_read(struct moxtet *moxtet, u8 *buf) | ||
387 | { | ||
388 | struct spi_transfer xfer = { | ||
389 | .rx_buf = buf, | ||
390 | .tx_buf = moxtet->tx, | ||
391 | .len = moxtet->count + 1 | ||
392 | }; | ||
393 | int ret; | ||
394 | |||
395 | mutex_lock(&moxtet->lock); | ||
396 | |||
397 | ret = spi_sync_transfer(to_spi_device(moxtet->dev), &xfer, 1); | ||
398 | |||
399 | mutex_unlock(&moxtet->lock); | ||
400 | |||
401 | return ret; | ||
402 | } | ||
403 | |||
404 | int moxtet_device_read(struct device *dev) | ||
405 | { | ||
406 | struct moxtet_device *mdev = to_moxtet_device(dev); | ||
407 | struct moxtet *moxtet = mdev->moxtet; | ||
408 | u8 buf[TURRIS_MOX_MAX_MODULES]; | ||
409 | int ret; | ||
410 | |||
411 | if (mdev->idx >= moxtet->count) | ||
412 | return -EINVAL; | ||
413 | |||
414 | ret = moxtet_spi_read(moxtet, buf); | ||
415 | if (ret < 0) | ||
416 | return ret; | ||
417 | |||
418 | return buf[mdev->idx + 1] >> 4; | ||
419 | } | ||
420 | EXPORT_SYMBOL_GPL(moxtet_device_read); | ||
421 | |||
422 | int moxtet_device_write(struct device *dev, u8 val) | ||
423 | { | ||
424 | struct moxtet_device *mdev = to_moxtet_device(dev); | ||
425 | struct moxtet *moxtet = mdev->moxtet; | ||
426 | int ret; | ||
427 | |||
428 | if (mdev->idx >= moxtet->count) | ||
429 | return -EINVAL; | ||
430 | |||
431 | mutex_lock(&moxtet->lock); | ||
432 | |||
433 | moxtet->tx[moxtet->count - mdev->idx] = val; | ||
434 | |||
435 | ret = spi_write(to_spi_device(moxtet->dev), moxtet->tx, | ||
436 | moxtet->count + 1); | ||
437 | |||
438 | mutex_unlock(&moxtet->lock); | ||
439 | |||
440 | return ret; | ||
441 | } | ||
442 | EXPORT_SYMBOL_GPL(moxtet_device_write); | ||
443 | |||
444 | int moxtet_device_written(struct device *dev) | ||
445 | { | ||
446 | struct moxtet_device *mdev = to_moxtet_device(dev); | ||
447 | struct moxtet *moxtet = mdev->moxtet; | ||
448 | |||
449 | if (mdev->idx >= moxtet->count) | ||
450 | return -EINVAL; | ||
451 | |||
452 | return moxtet->tx[moxtet->count - mdev->idx]; | ||
453 | } | ||
454 | EXPORT_SYMBOL_GPL(moxtet_device_written); | ||
455 | |||
456 | #ifdef CONFIG_DEBUG_FS | ||
457 | static int moxtet_debug_open(struct inode *inode, struct file *file) | ||
458 | { | ||
459 | file->private_data = inode->i_private; | ||
460 | |||
461 | return nonseekable_open(inode, file); | ||
462 | } | ||
463 | |||
464 | static ssize_t input_read(struct file *file, char __user *buf, size_t len, | ||
465 | loff_t *ppos) | ||
466 | { | ||
467 | struct moxtet *moxtet = file->private_data; | ||
468 | u8 bin[TURRIS_MOX_MAX_MODULES]; | ||
469 | u8 hex[sizeof(bin) * 2 + 1]; | ||
470 | int ret, n; | ||
471 | |||
472 | ret = moxtet_spi_read(moxtet, bin); | ||
473 | if (ret < 0) | ||
474 | return ret; | ||
475 | |||
476 | n = moxtet->count + 1; | ||
477 | bin2hex(hex, bin, n); | ||
478 | |||
479 | hex[2*n] = '\n'; | ||
480 | |||
481 | return simple_read_from_buffer(buf, len, ppos, hex, 2*n + 1); | ||
482 | } | ||
483 | |||
484 | static const struct file_operations input_fops = { | ||
485 | .owner = THIS_MODULE, | ||
486 | .open = moxtet_debug_open, | ||
487 | .read = input_read, | ||
488 | .llseek = no_llseek, | ||
489 | }; | ||
490 | |||
491 | static ssize_t output_read(struct file *file, char __user *buf, size_t len, | ||
492 | loff_t *ppos) | ||
493 | { | ||
494 | struct moxtet *moxtet = file->private_data; | ||
495 | u8 hex[TURRIS_MOX_MAX_MODULES * 2 + 1]; | ||
496 | u8 *p = hex; | ||
497 | int i; | ||
498 | |||
499 | mutex_lock(&moxtet->lock); | ||
500 | |||
501 | for (i = 0; i < moxtet->count; ++i) | ||
502 | p = hex_byte_pack(p, moxtet->tx[moxtet->count - i]); | ||
503 | |||
504 | mutex_unlock(&moxtet->lock); | ||
505 | |||
506 | *p++ = '\n'; | ||
507 | |||
508 | return simple_read_from_buffer(buf, len, ppos, hex, p - hex); | ||
509 | } | ||
510 | |||
511 | static ssize_t output_write(struct file *file, const char __user *buf, | ||
512 | size_t len, loff_t *ppos) | ||
513 | { | ||
514 | struct moxtet *moxtet = file->private_data; | ||
515 | u8 bin[TURRIS_MOX_MAX_MODULES]; | ||
516 | u8 hex[sizeof(bin) * 2 + 1]; | ||
517 | ssize_t res; | ||
518 | loff_t dummy = 0; | ||
519 | int err, i; | ||
520 | |||
521 | if (len > 2 * moxtet->count + 1 || len < 2 * moxtet->count) | ||
522 | return -EINVAL; | ||
523 | |||
524 | res = simple_write_to_buffer(hex, sizeof(hex), &dummy, buf, len); | ||
525 | if (res < 0) | ||
526 | return res; | ||
527 | |||
528 | if (len % 2 == 1 && hex[len - 1] != '\n') | ||
529 | return -EINVAL; | ||
530 | |||
531 | err = hex2bin(bin, hex, moxtet->count); | ||
532 | if (err < 0) | ||
533 | return -EINVAL; | ||
534 | |||
535 | mutex_lock(&moxtet->lock); | ||
536 | |||
537 | for (i = 0; i < moxtet->count; ++i) | ||
538 | moxtet->tx[moxtet->count - i] = bin[i]; | ||
539 | |||
540 | err = spi_write(to_spi_device(moxtet->dev), moxtet->tx, | ||
541 | moxtet->count + 1); | ||
542 | |||
543 | mutex_unlock(&moxtet->lock); | ||
544 | |||
545 | return err < 0 ? err : len; | ||
546 | } | ||
547 | |||
548 | static const struct file_operations output_fops = { | ||
549 | .owner = THIS_MODULE, | ||
550 | .open = moxtet_debug_open, | ||
551 | .read = output_read, | ||
552 | .write = output_write, | ||
553 | .llseek = no_llseek, | ||
554 | }; | ||
555 | |||
556 | static int moxtet_register_debugfs(struct moxtet *moxtet) | ||
557 | { | ||
558 | struct dentry *root, *entry; | ||
559 | |||
560 | root = debugfs_create_dir("moxtet", NULL); | ||
561 | |||
562 | if (IS_ERR(root)) | ||
563 | return PTR_ERR(root); | ||
564 | |||
565 | entry = debugfs_create_file_unsafe("input", 0444, root, moxtet, | ||
566 | &input_fops); | ||
567 | if (IS_ERR(entry)) | ||
568 | goto err_remove; | ||
569 | |||
570 | entry = debugfs_create_file_unsafe("output", 0644, root, moxtet, | ||
571 | &output_fops); | ||
572 | if (IS_ERR(entry)) | ||
573 | goto err_remove; | ||
574 | |||
575 | moxtet->debugfs_root = root; | ||
576 | |||
577 | return 0; | ||
578 | err_remove: | ||
579 | debugfs_remove_recursive(root); | ||
580 | return PTR_ERR(entry); | ||
581 | } | ||
582 | |||
583 | static void moxtet_unregister_debugfs(struct moxtet *moxtet) | ||
584 | { | ||
585 | debugfs_remove_recursive(moxtet->debugfs_root); | ||
586 | } | ||
587 | #else | ||
588 | static inline int moxtet_register_debugfs(struct moxtet *moxtet) | ||
589 | { | ||
590 | return 0; | ||
591 | } | ||
592 | |||
593 | static inline void moxtet_unregister_debugfs(struct moxtet *moxtet) | ||
594 | { | ||
595 | } | ||
596 | #endif | ||
597 | |||
598 | static int moxtet_irq_domain_map(struct irq_domain *d, unsigned int irq, | ||
599 | irq_hw_number_t hw) | ||
600 | { | ||
601 | struct moxtet *moxtet = d->host_data; | ||
602 | |||
603 | if (hw >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(hw))) { | ||
604 | dev_err(moxtet->dev, "Invalid hw irq number\n"); | ||
605 | return -EINVAL; | ||
606 | } | ||
607 | |||
608 | irq_set_chip_data(irq, d->host_data); | ||
609 | irq_set_chip_and_handler(irq, &moxtet->irq.chip, handle_level_irq); | ||
610 | |||
611 | return 0; | ||
612 | } | ||
613 | |||
614 | static int moxtet_irq_domain_xlate(struct irq_domain *d, | ||
615 | struct device_node *ctrlr, | ||
616 | const u32 *intspec, unsigned int intsize, | ||
617 | unsigned long *out_hwirq, | ||
618 | unsigned int *out_type) | ||
619 | { | ||
620 | struct moxtet *moxtet = d->host_data; | ||
621 | int irq; | ||
622 | |||
623 | if (WARN_ON(intsize < 1)) | ||
624 | return -EINVAL; | ||
625 | |||
626 | irq = intspec[0]; | ||
627 | |||
628 | if (irq >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(irq))) | ||
629 | return -EINVAL; | ||
630 | |||
631 | *out_hwirq = irq; | ||
632 | *out_type = IRQ_TYPE_NONE; | ||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | static const struct irq_domain_ops moxtet_irq_domain = { | ||
637 | .map = moxtet_irq_domain_map, | ||
638 | .xlate = moxtet_irq_domain_xlate, | ||
639 | }; | ||
640 | |||
641 | static void moxtet_irq_mask(struct irq_data *d) | ||
642 | { | ||
643 | struct moxtet *moxtet = irq_data_get_irq_chip_data(d); | ||
644 | |||
645 | moxtet->irq.masked |= BIT(d->hwirq); | ||
646 | } | ||
647 | |||
648 | static void moxtet_irq_unmask(struct irq_data *d) | ||
649 | { | ||
650 | struct moxtet *moxtet = irq_data_get_irq_chip_data(d); | ||
651 | |||
652 | moxtet->irq.masked &= ~BIT(d->hwirq); | ||
653 | } | ||
654 | |||
655 | static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p) | ||
656 | { | ||
657 | struct moxtet *moxtet = irq_data_get_irq_chip_data(d); | ||
658 | struct moxtet_irqpos *pos = &moxtet->irq.position[d->hwirq]; | ||
659 | int id; | ||
660 | |||
661 | id = moxtet->modules[pos->idx]; | ||
662 | |||
663 | seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx, | ||
664 | pos->bit); | ||
665 | } | ||
666 | |||
667 | static const struct irq_chip moxtet_irq_chip = { | ||
668 | .name = "moxtet", | ||
669 | .irq_mask = moxtet_irq_mask, | ||
670 | .irq_unmask = moxtet_irq_unmask, | ||
671 | .irq_print_chip = moxtet_irq_print_chip, | ||
672 | }; | ||
673 | |||
674 | static int moxtet_irq_read(struct moxtet *moxtet, unsigned long *map) | ||
675 | { | ||
676 | struct moxtet_irqpos *pos = moxtet->irq.position; | ||
677 | u8 buf[TURRIS_MOX_MAX_MODULES]; | ||
678 | int i, ret; | ||
679 | |||
680 | ret = moxtet_spi_read(moxtet, buf); | ||
681 | if (ret < 0) | ||
682 | return ret; | ||
683 | |||
684 | *map = 0; | ||
685 | |||
686 | for_each_set_bit(i, &moxtet->irq.exists, MOXTET_NIRQS) { | ||
687 | if (!(buf[pos[i].idx + 1] & BIT(4 + pos[i].bit))) | ||
688 | set_bit(i, map); | ||
689 | } | ||
690 | |||
691 | return 0; | ||
692 | } | ||
693 | |||
694 | static irqreturn_t moxtet_irq_thread_fn(int irq, void *data) | ||
695 | { | ||
696 | struct moxtet *moxtet = data; | ||
697 | unsigned long set; | ||
698 | int nhandled = 0, i, sub_irq, ret; | ||
699 | |||
700 | ret = moxtet_irq_read(moxtet, &set); | ||
701 | if (ret < 0) | ||
702 | goto out; | ||
703 | |||
704 | set &= ~moxtet->irq.masked; | ||
705 | |||
706 | do { | ||
707 | for_each_set_bit(i, &set, MOXTET_NIRQS) { | ||
708 | sub_irq = irq_find_mapping(moxtet->irq.domain, i); | ||
709 | handle_nested_irq(sub_irq); | ||
710 | dev_dbg(moxtet->dev, "%i irq\n", i); | ||
711 | ++nhandled; | ||
712 | } | ||
713 | |||
714 | ret = moxtet_irq_read(moxtet, &set); | ||
715 | if (ret < 0) | ||
716 | goto out; | ||
717 | |||
718 | set &= ~moxtet->irq.masked; | ||
719 | } while (set); | ||
720 | |||
721 | out: | ||
722 | return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); | ||
723 | } | ||
724 | |||
725 | static void moxtet_irq_free(struct moxtet *moxtet) | ||
726 | { | ||
727 | int i, irq; | ||
728 | |||
729 | for (i = 0; i < MOXTET_NIRQS; ++i) { | ||
730 | if (moxtet->irq.exists & BIT(i)) { | ||
731 | irq = irq_find_mapping(moxtet->irq.domain, i); | ||
732 | irq_dispose_mapping(irq); | ||
733 | } | ||
734 | } | ||
735 | |||
736 | irq_domain_remove(moxtet->irq.domain); | ||
737 | } | ||
738 | |||
739 | static int moxtet_irq_setup(struct moxtet *moxtet) | ||
740 | { | ||
741 | int i, ret; | ||
742 | |||
743 | moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node, | ||
744 | MOXTET_NIRQS, 0, | ||
745 | &moxtet_irq_domain, moxtet); | ||
746 | if (moxtet->irq.domain == NULL) { | ||
747 | dev_err(moxtet->dev, "Could not add IRQ domain\n"); | ||
748 | return -ENOMEM; | ||
749 | } | ||
750 | |||
751 | for (i = 0; i < MOXTET_NIRQS; ++i) | ||
752 | if (moxtet->irq.exists & BIT(i)) | ||
753 | irq_create_mapping(moxtet->irq.domain, i); | ||
754 | |||
755 | moxtet->irq.chip = moxtet_irq_chip; | ||
756 | moxtet->irq.masked = ~0; | ||
757 | |||
758 | ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn, | ||
759 | IRQF_ONESHOT, "moxtet", moxtet); | ||
760 | if (ret < 0) | ||
761 | goto err_free; | ||
762 | |||
763 | return 0; | ||
764 | |||
765 | err_free: | ||
766 | moxtet_irq_free(moxtet); | ||
767 | return ret; | ||
768 | } | ||
769 | |||
770 | static int moxtet_probe(struct spi_device *spi) | ||
771 | { | ||
772 | struct moxtet *moxtet; | ||
773 | int ret; | ||
774 | |||
775 | ret = spi_setup(spi); | ||
776 | if (ret < 0) | ||
777 | return ret; | ||
778 | |||
779 | moxtet = devm_kzalloc(&spi->dev, sizeof(struct moxtet), | ||
780 | GFP_KERNEL); | ||
781 | if (!moxtet) | ||
782 | return -ENOMEM; | ||
783 | |||
784 | moxtet->dev = &spi->dev; | ||
785 | spi_set_drvdata(spi, moxtet); | ||
786 | |||
787 | mutex_init(&moxtet->lock); | ||
788 | |||
789 | moxtet->dev_irq = of_irq_get(moxtet->dev->of_node, 0); | ||
790 | if (moxtet->dev_irq == -EPROBE_DEFER) | ||
791 | return -EPROBE_DEFER; | ||
792 | |||
793 | if (moxtet->dev_irq <= 0) { | ||
794 | dev_err(moxtet->dev, "No IRQ resource found\n"); | ||
795 | return -ENXIO; | ||
796 | } | ||
797 | |||
798 | ret = moxtet_find_topology(moxtet); | ||
799 | if (ret < 0) | ||
800 | return ret; | ||
801 | |||
802 | if (moxtet->irq.exists) { | ||
803 | ret = moxtet_irq_setup(moxtet); | ||
804 | if (ret < 0) | ||
805 | return ret; | ||
806 | } | ||
807 | |||
808 | of_register_moxtet_devices(moxtet); | ||
809 | moxtet_register_devices_from_topology(moxtet); | ||
810 | |||
811 | ret = moxtet_register_debugfs(moxtet); | ||
812 | if (ret < 0) | ||
813 | dev_warn(moxtet->dev, "Failed creating debugfs entries: %i\n", | ||
814 | ret); | ||
815 | |||
816 | return 0; | ||
817 | } | ||
818 | |||
819 | static int moxtet_remove(struct spi_device *spi) | ||
820 | { | ||
821 | struct moxtet *moxtet = spi_get_drvdata(spi); | ||
822 | |||
823 | free_irq(moxtet->dev_irq, moxtet); | ||
824 | |||
825 | moxtet_irq_free(moxtet); | ||
826 | |||
827 | moxtet_unregister_debugfs(moxtet); | ||
828 | |||
829 | device_for_each_child(moxtet->dev, NULL, __unregister); | ||
830 | |||
831 | mutex_destroy(&moxtet->lock); | ||
832 | |||
833 | return 0; | ||
834 | } | ||
835 | |||
836 | static const struct of_device_id moxtet_dt_ids[] = { | ||
837 | { .compatible = "cznic,moxtet" }, | ||
838 | {}, | ||
839 | }; | ||
840 | MODULE_DEVICE_TABLE(of, moxtet_dt_ids); | ||
841 | |||
842 | static struct spi_driver moxtet_spi_driver = { | ||
843 | .driver = { | ||
844 | .name = "moxtet", | ||
845 | .of_match_table = moxtet_dt_ids, | ||
846 | }, | ||
847 | .probe = moxtet_probe, | ||
848 | .remove = moxtet_remove, | ||
849 | }; | ||
850 | |||
851 | static int __init moxtet_init(void) | ||
852 | { | ||
853 | int ret; | ||
854 | |||
855 | ret = bus_register(&moxtet_bus_type); | ||
856 | if (ret < 0) { | ||
857 | pr_err("moxtet bus registration failed: %d\n", ret); | ||
858 | goto error; | ||
859 | } | ||
860 | |||
861 | ret = spi_register_driver(&moxtet_spi_driver); | ||
862 | if (ret < 0) { | ||
863 | pr_err("moxtet spi driver registration failed: %d\n", ret); | ||
864 | goto error_bus; | ||
865 | } | ||
866 | |||
867 | return 0; | ||
868 | |||
869 | error_bus: | ||
870 | bus_unregister(&moxtet_bus_type); | ||
871 | error: | ||
872 | return ret; | ||
873 | } | ||
874 | postcore_initcall_sync(moxtet_init); | ||
875 | |||
876 | static void __exit moxtet_exit(void) | ||
877 | { | ||
878 | spi_unregister_driver(&moxtet_spi_driver); | ||
879 | bus_unregister(&moxtet_bus_type); | ||
880 | } | ||
881 | module_exit(moxtet_exit); | ||
882 | |||
883 | MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); | ||
884 | MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus"); | ||
885 | MODULE_LICENSE("GPL v2"); | ||
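For context, the accessors exported above (moxtet_device_read(), moxtet_device_write(), moxtet_device_written()) are what Moxtet module drivers call to shift data to and from their module's position on the bus. Below is a minimal, hypothetical consumer sketch; only the two accessors come from the code above, while the registration helpers (struct moxtet_driver, module_moxtet_driver) and the TURRIS_MOX_MODULE_PERIDOT id constant are assumptions based on the <linux/moxtet.h> header added elsewhere in this series.

/*
 * Hypothetical Moxtet module driver sketch.  moxtet_device_read() and
 * moxtet_device_write() are the exported accessors shown above; the
 * moxtet_driver registration helpers and the module id constant are
 * assumed from the bus header introduced by this series.
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/moxtet.h>

static int example_moxtet_probe(struct device *dev)
{
	int input;

	/* Read the input nibble for this module (see moxtet_device_read()). */
	input = moxtet_device_read(dev);
	if (input < 0)
		return input;

	dev_info(dev, "module input bits: 0x%x\n", input);

	/* Write this module's output byte (see moxtet_device_write()). */
	return moxtet_device_write(dev, 0x0);
}

static const enum turris_mox_module_id example_ids[] = {
	TURRIS_MOX_MODULE_PERIDOT,	/* assumed id constant */
	0,
};

static struct moxtet_driver example_moxtet_driver = {
	.driver = {
		.name	= "moxtet-example",
		.probe	= example_moxtet_probe,
	},
	.id_table = example_ids,
};
module_moxtet_driver(example_moxtet_driver);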
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 1b76d9585902..be79d6c6a4e4 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c | |||
@@ -651,10 +651,8 @@ static int sunxi_rsb_probe(struct platform_device *pdev) | |||
651 | return PTR_ERR(rsb->regs); | 651 | return PTR_ERR(rsb->regs); |
652 | 652 | ||
653 | irq = platform_get_irq(pdev, 0); | 653 | irq = platform_get_irq(pdev, 0); |
654 | if (irq < 0) { | 654 | if (irq < 0) |
655 | dev_err(dev, "failed to retrieve irq: %d\n", irq); | ||
656 | return irq; | 655 | return irq; |
657 | } | ||
658 | 656 | ||
659 | rsb->clk = devm_clk_get(dev, NULL); | 657 | rsb->clk = devm_clk_get(dev, NULL); |
660 | if (IS_ERR(rsb->clk)) { | 658 | if (IS_ERR(rsb->clk)) { |
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c index e845c1a93f21..f70dedace20b 100644 --- a/drivers/bus/uniphier-system-bus.c +++ b/drivers/bus/uniphier-system-bus.c | |||
@@ -176,7 +176,6 @@ static int uniphier_system_bus_probe(struct platform_device *pdev) | |||
176 | { | 176 | { |
177 | struct device *dev = &pdev->dev; | 177 | struct device *dev = &pdev->dev; |
178 | struct uniphier_system_bus_priv *priv; | 178 | struct uniphier_system_bus_priv *priv; |
179 | struct resource *regs; | ||
180 | const __be32 *ranges; | 179 | const __be32 *ranges; |
181 | u32 cells, addr, size; | 180 | u32 cells, addr, size; |
182 | u64 paddr; | 181 | u64 paddr; |
@@ -186,8 +185,7 @@ static int uniphier_system_bus_probe(struct platform_device *pdev) | |||
186 | if (!priv) | 185 | if (!priv) |
187 | return -ENOMEM; | 186 | return -ENOMEM; |
188 | 187 | ||
189 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 188 | priv->membase = devm_platform_ioremap_resource(pdev, 0); |
190 | priv->membase = devm_ioremap_resource(dev, regs); | ||
191 | if (IS_ERR(priv->membase)) | 189 | if (IS_ERR(priv->membase)) |
192 | return PTR_ERR(priv->membase); | 190 | return PTR_ERR(priv->membase); |
193 | 191 | ||
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index a2287c770d5c..886f7c5df51a 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c | |||
@@ -69,7 +69,7 @@ static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate, | |||
69 | { | 69 | { |
70 | struct scmi_clk *clk = to_scmi_clk(hw); | 70 | struct scmi_clk *clk = to_scmi_clk(hw); |
71 | 71 | ||
72 | return clk->handle->clk_ops->rate_set(clk->handle, clk->id, 0, rate); | 72 | return clk->handle->clk_ops->rate_set(clk->handle, clk->id, rate); |
73 | } | 73 | } |
74 | 74 | ||
75 | static int scmi_clk_enable(struct clk_hw *hw) | 75 | static int scmi_clk_enable(struct clk_hw *hw) |
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index ba8d3d0ef32c..c9a827bffe1c 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig | |||
@@ -271,6 +271,20 @@ config TRUSTED_FOUNDATIONS | |||
271 | 271 | ||
272 | Choose N if you don't know what this is about. | 272 | Choose N if you don't know what this is about. |
273 | 273 | ||
274 | config TURRIS_MOX_RWTM | ||
275 | tristate "Turris Mox rWTM secure firmware driver" | ||
276 | depends on ARCH_MVEBU || COMPILE_TEST | ||
277 | depends on HAS_DMA && OF | ||
278 | depends on MAILBOX | ||
279 | select HW_RANDOM | ||
280 | select ARMADA_37XX_RWTM_MBOX | ||
281 | help | ||
282 | This driver communicates with the firmware on the Cortex-M3 secure | ||
283 | processor of the Turris Mox router. Enable it if you are building for | ||
284 | Turris Mox; it lets you read the device serial number and other | ||
285 | manufacturing data, and use the Entropy Bit Generator for hardware | ||
286 | random number generation. | ||
287 | |||
274 | config HAVE_ARM_SMCCC | 288 | config HAVE_ARM_SMCCC |
275 | bool | 289 | bool |
276 | 290 | ||
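As a usage note (an assumption about typical deployment, not part of this patch), a kernel built for Turris Mox would normally enable the new tristate symbol as a module in its defconfig or .config:

CONFIG_TURRIS_MOX_RWTM=m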
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 3fa0b34eb72f..2b6e3a0be595 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile | |||
@@ -22,6 +22,7 @@ obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o | |||
22 | CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a | 22 | CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a |
23 | obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o | 23 | obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o |
24 | obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o | 24 | obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o |
25 | obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o | ||
25 | 26 | ||
26 | obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/ | 27 | obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/ |
27 | obj-y += psci/ | 28 | obj-y += psci/ |
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index c47d28d556b6..5f298f00a82e 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile | |||
@@ -2,5 +2,5 @@ | |||
2 | obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o | 2 | obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o |
3 | scmi-bus-y = bus.o | 3 | scmi-bus-y = bus.o |
4 | scmi-driver-y = driver.o | 4 | scmi-driver-y = driver.o |
5 | scmi-protocols-y = base.o clock.o perf.o power.o sensors.o | 5 | scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o |
6 | obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o | 6 | obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o |
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index 204390297f4b..f804e8af6521 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c | |||
@@ -204,7 +204,7 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle, | |||
204 | if (ret) | 204 | if (ret) |
205 | return ret; | 205 | return ret; |
206 | 206 | ||
207 | *(__le32 *)t->tx.buf = cpu_to_le32(id); | 207 | put_unaligned_le32(id, t->tx.buf); |
208 | 208 | ||
209 | ret = scmi_do_xfer(handle, t); | 209 | ret = scmi_do_xfer(handle, t); |
210 | if (!ret) | 210 | if (!ret) |
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 0a194af92438..32526a793f3a 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c | |||
@@ -56,7 +56,7 @@ struct scmi_msg_resp_clock_describe_rates { | |||
56 | struct scmi_clock_set_rate { | 56 | struct scmi_clock_set_rate { |
57 | __le32 flags; | 57 | __le32 flags; |
58 | #define CLOCK_SET_ASYNC BIT(0) | 58 | #define CLOCK_SET_ASYNC BIT(0) |
59 | #define CLOCK_SET_DELAYED BIT(1) | 59 | #define CLOCK_SET_IGNORE_RESP BIT(1) |
60 | #define CLOCK_SET_ROUND_UP BIT(2) | 60 | #define CLOCK_SET_ROUND_UP BIT(2) |
61 | #define CLOCK_SET_ROUND_AUTO BIT(3) | 61 | #define CLOCK_SET_ROUND_AUTO BIT(3) |
62 | __le32 id; | 62 | __le32 id; |
@@ -67,6 +67,7 @@ struct scmi_clock_set_rate { | |||
67 | struct clock_info { | 67 | struct clock_info { |
68 | int num_clocks; | 68 | int num_clocks; |
69 | int max_async_req; | 69 | int max_async_req; |
70 | atomic_t cur_async_req; | ||
70 | struct scmi_clock_info *clk; | 71 | struct scmi_clock_info *clk; |
71 | }; | 72 | }; |
72 | 73 | ||
@@ -106,7 +107,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle, | |||
106 | if (ret) | 107 | if (ret) |
107 | return ret; | 108 | return ret; |
108 | 109 | ||
109 | *(__le32 *)t->tx.buf = cpu_to_le32(clk_id); | 110 | put_unaligned_le32(clk_id, t->tx.buf); |
110 | attr = t->rx.buf; | 111 | attr = t->rx.buf; |
111 | 112 | ||
112 | ret = scmi_do_xfer(handle, t); | 113 | ret = scmi_do_xfer(handle, t); |
@@ -203,39 +204,47 @@ scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value) | |||
203 | if (ret) | 204 | if (ret) |
204 | return ret; | 205 | return ret; |
205 | 206 | ||
206 | *(__le32 *)t->tx.buf = cpu_to_le32(clk_id); | 207 | put_unaligned_le32(clk_id, t->tx.buf); |
207 | 208 | ||
208 | ret = scmi_do_xfer(handle, t); | 209 | ret = scmi_do_xfer(handle, t); |
209 | if (!ret) { | 210 | if (!ret) |
210 | __le32 *pval = t->rx.buf; | 211 | *value = get_unaligned_le64(t->rx.buf); |
211 | |||
212 | *value = le32_to_cpu(*pval); | ||
213 | *value |= (u64)le32_to_cpu(*(pval + 1)) << 32; | ||
214 | } | ||
215 | 212 | ||
216 | scmi_xfer_put(handle, t); | 213 | scmi_xfer_put(handle, t); |
217 | return ret; | 214 | return ret; |
218 | } | 215 | } |
219 | 216 | ||
220 | static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id, | 217 | static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id, |
221 | u32 config, u64 rate) | 218 | u64 rate) |
222 | { | 219 | { |
223 | int ret; | 220 | int ret; |
221 | u32 flags = 0; | ||
224 | struct scmi_xfer *t; | 222 | struct scmi_xfer *t; |
225 | struct scmi_clock_set_rate *cfg; | 223 | struct scmi_clock_set_rate *cfg; |
224 | struct clock_info *ci = handle->clk_priv; | ||
226 | 225 | ||
227 | ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK, | 226 | ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK, |
228 | sizeof(*cfg), 0, &t); | 227 | sizeof(*cfg), 0, &t); |
229 | if (ret) | 228 | if (ret) |
230 | return ret; | 229 | return ret; |
231 | 230 | ||
231 | if (ci->max_async_req && | ||
232 | atomic_inc_return(&ci->cur_async_req) < ci->max_async_req) | ||
233 | flags |= CLOCK_SET_ASYNC; | ||
234 | |||
232 | cfg = t->tx.buf; | 235 | cfg = t->tx.buf; |
233 | cfg->flags = cpu_to_le32(config); | 236 | cfg->flags = cpu_to_le32(flags); |
234 | cfg->id = cpu_to_le32(clk_id); | 237 | cfg->id = cpu_to_le32(clk_id); |
235 | cfg->value_low = cpu_to_le32(rate & 0xffffffff); | 238 | cfg->value_low = cpu_to_le32(rate & 0xffffffff); |
236 | cfg->value_high = cpu_to_le32(rate >> 32); | 239 | cfg->value_high = cpu_to_le32(rate >> 32); |
237 | 240 | ||
238 | ret = scmi_do_xfer(handle, t); | 241 | if (flags & CLOCK_SET_ASYNC) |
242 | ret = scmi_do_xfer_with_response(handle, t); | ||
243 | else | ||
244 | ret = scmi_do_xfer(handle, t); | ||
245 | |||
246 | if (ci->max_async_req) | ||
247 | atomic_dec(&ci->cur_async_req); | ||
239 | 248 | ||
240 | scmi_xfer_put(handle, t); | 249 | scmi_xfer_put(handle, t); |
241 | return ret; | 250 | return ret; |
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 44fd4f9404a9..5237c2ff79fe 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/scmi_protocol.h> | 15 | #include <linux/scmi_protocol.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | 17 | ||
18 | #include <asm/unaligned.h> | ||
19 | |||
18 | #define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) | 20 | #define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) |
19 | #define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) | 21 | #define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) |
20 | #define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))) | 22 | #define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))) |
@@ -48,11 +50,11 @@ struct scmi_msg_resp_prot_version { | |||
48 | /** | 50 | /** |
49 | * struct scmi_msg_hdr - Message(Tx/Rx) header | 51 | * struct scmi_msg_hdr - Message(Tx/Rx) header |
50 | * | 52 | * |
51 | * @id: The identifier of the command being sent | 53 | * @id: The identifier of the message being sent |
52 | * @protocol_id: The identifier of the protocol used to send @id command | 54 | * @protocol_id: The identifier of the protocol used to send @id message |
53 | * @seq: The token to identify the message. when a message/command returns, | 55 | * @seq: The token to identify the message. When a message returns, the |
54 | * the platform returns the whole message header unmodified including | 56 | * platform returns the whole message header unmodified including the |
55 | * the token | 57 | * token |
56 | * @status: Status of the transfer once it's complete | 58 | * @status: Status of the transfer once it's complete |
57 | * @poll_completion: Indicate if the transfer needs to be polled for | 59 | * @poll_completion: Indicate if the transfer needs to be polled for |
58 | * completion or interrupt mode is used | 60 | * completion or interrupt mode is used |
@@ -84,17 +86,21 @@ struct scmi_msg { | |||
84 | * @rx: Receive message, the buffer should be pre-allocated to store | 86 | * @rx: Receive message, the buffer should be pre-allocated to store |
85 | * message. If request-ACK protocol is used, we can reuse the same | 87 | * message. If request-ACK protocol is used, we can reuse the same |
86 | * buffer for the rx path as we use for the tx path. | 88 | * buffer for the rx path as we use for the tx path. |
87 | * @done: completion event | 89 | * @done: command message transmit completion event |
90 | * @async: pointer to delayed response message received event completion | ||
88 | */ | 91 | */ |
89 | struct scmi_xfer { | 92 | struct scmi_xfer { |
90 | struct scmi_msg_hdr hdr; | 93 | struct scmi_msg_hdr hdr; |
91 | struct scmi_msg tx; | 94 | struct scmi_msg tx; |
92 | struct scmi_msg rx; | 95 | struct scmi_msg rx; |
93 | struct completion done; | 96 | struct completion done; |
97 | struct completion *async_done; | ||
94 | }; | 98 | }; |
95 | 99 | ||
96 | void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer); | 100 | void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer); |
97 | int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer); | 101 | int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer); |
102 | int scmi_do_xfer_with_response(const struct scmi_handle *h, | ||
103 | struct scmi_xfer *xfer); | ||
98 | int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, | 104 | int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, |
99 | size_t tx_size, size_t rx_size, struct scmi_xfer **p); | 105 | size_t tx_size, size_t rx_size, struct scmi_xfer **p); |
100 | int scmi_handle_put(const struct scmi_handle *handle); | 106 | int scmi_handle_put(const struct scmi_handle *handle); |
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index b5bc4c7a8fab..3eb0382491ce 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c | |||
@@ -30,8 +30,14 @@ | |||
30 | #include "common.h" | 30 | #include "common.h" |
31 | 31 | ||
32 | #define MSG_ID_MASK GENMASK(7, 0) | 32 | #define MSG_ID_MASK GENMASK(7, 0) |
33 | #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) | ||
33 | #define MSG_TYPE_MASK GENMASK(9, 8) | 34 | #define MSG_TYPE_MASK GENMASK(9, 8) |
35 | #define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr)) | ||
36 | #define MSG_TYPE_COMMAND 0 | ||
37 | #define MSG_TYPE_DELAYED_RESP 2 | ||
38 | #define MSG_TYPE_NOTIFICATION 3 | ||
34 | #define MSG_PROTOCOL_ID_MASK GENMASK(17, 10) | 39 | #define MSG_PROTOCOL_ID_MASK GENMASK(17, 10) |
40 | #define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr)) | ||
35 | #define MSG_TOKEN_ID_MASK GENMASK(27, 18) | 41 | #define MSG_TOKEN_ID_MASK GENMASK(27, 18) |
36 | #define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) | 42 | #define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) |
37 | #define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) | 43 | #define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) |
@@ -86,7 +92,7 @@ struct scmi_desc { | |||
86 | }; | 92 | }; |
87 | 93 | ||
88 | /** | 94 | /** |
89 | * struct scmi_chan_info - Structure representing a SCMI channel informfation | 95 | * struct scmi_chan_info - Structure representing a SCMI channel information |
90 | * | 96 | * |
91 | * @cl: Mailbox Client | 97 | * @cl: Mailbox Client |
92 | * @chan: Transmit/Receive mailbox channel | 98 | * @chan: Transmit/Receive mailbox channel |
@@ -111,8 +117,9 @@ struct scmi_chan_info { | |||
111 | * @handle: Instance of SCMI handle to send to clients | 117 | * @handle: Instance of SCMI handle to send to clients |
112 | * @version: SCMI revision information containing protocol version, | 118 | * @version: SCMI revision information containing protocol version, |
113 | * implementation version and (sub-)vendor identification. | 119 | * implementation version and (sub-)vendor identification. |
114 | * @minfo: Message info | 120 | * @tx_minfo: Universal Transmit Message management info |
115 | * @tx_idr: IDR object to map protocol id to channel info pointer | 121 | * @tx_idr: IDR object to map protocol id to Tx channel info pointer |
122 | * @rx_idr: IDR object to map protocol id to Rx channel info pointer | ||
116 | * @protocols_imp: List of protocols implemented, currently maximum of | 123 | * @protocols_imp: List of protocols implemented, currently maximum of |
117 | * MAX_PROTOCOLS_IMP elements allocated by the base protocol | 124 | * MAX_PROTOCOLS_IMP elements allocated by the base protocol |
118 | * @node: List head | 125 | * @node: List head |
@@ -123,8 +130,9 @@ struct scmi_info { | |||
123 | const struct scmi_desc *desc; | 130 | const struct scmi_desc *desc; |
124 | struct scmi_revision_info version; | 131 | struct scmi_revision_info version; |
125 | struct scmi_handle handle; | 132 | struct scmi_handle handle; |
126 | struct scmi_xfers_info minfo; | 133 | struct scmi_xfers_info tx_minfo; |
127 | struct idr tx_idr; | 134 | struct idr tx_idr; |
135 | struct idr rx_idr; | ||
128 | u8 *protocols_imp; | 136 | u8 *protocols_imp; |
129 | struct list_head node; | 137 | struct list_head node; |
130 | int users; | 138 | int users; |
@@ -182,7 +190,7 @@ static inline int scmi_to_linux_errno(int errno) | |||
182 | static inline void scmi_dump_header_dbg(struct device *dev, | 190 | static inline void scmi_dump_header_dbg(struct device *dev, |
183 | struct scmi_msg_hdr *hdr) | 191 | struct scmi_msg_hdr *hdr) |
184 | { | 192 | { |
185 | dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n", | 193 | dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n", |
186 | hdr->id, hdr->seq, hdr->protocol_id); | 194 | hdr->id, hdr->seq, hdr->protocol_id); |
187 | } | 195 | } |
188 | 196 | ||
@@ -190,7 +198,7 @@ static void scmi_fetch_response(struct scmi_xfer *xfer, | |||
190 | struct scmi_shared_mem __iomem *mem) | 198 | struct scmi_shared_mem __iomem *mem) |
191 | { | 199 | { |
192 | xfer->hdr.status = ioread32(mem->msg_payload); | 200 | xfer->hdr.status = ioread32(mem->msg_payload); |
193 | /* Skip the length of header and statues in payload area i.e 8 bytes*/ | 201 | /* Skip the length of header and status in payload area i.e 8 bytes */ |
194 | xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8); | 202 | xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8); |
195 | 203 | ||
196 | /* Take a copy to the rx buffer.. */ | 204 | /* Take a copy to the rx buffer.. */ |
@@ -198,56 +206,12 @@ static void scmi_fetch_response(struct scmi_xfer *xfer, | |||
198 | } | 206 | } |
199 | 207 | ||
200 | /** | 208 | /** |
201 | * scmi_rx_callback() - mailbox client callback for receive messages | ||
202 | * | ||
203 | * @cl: client pointer | ||
204 | * @m: mailbox message | ||
205 | * | ||
206 | * Processes one received message to appropriate transfer information and | ||
207 | * signals completion of the transfer. | ||
208 | * | ||
209 | * NOTE: This function will be invoked in IRQ context, hence should be | ||
210 | * as optimal as possible. | ||
211 | */ | ||
212 | static void scmi_rx_callback(struct mbox_client *cl, void *m) | ||
213 | { | ||
214 | u16 xfer_id; | ||
215 | struct scmi_xfer *xfer; | ||
216 | struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); | ||
217 | struct device *dev = cinfo->dev; | ||
218 | struct scmi_info *info = handle_to_scmi_info(cinfo->handle); | ||
219 | struct scmi_xfers_info *minfo = &info->minfo; | ||
220 | struct scmi_shared_mem __iomem *mem = cinfo->payload; | ||
221 | |||
222 | xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header)); | ||
223 | |||
224 | /* Are we even expecting this? */ | ||
225 | if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { | ||
226 | dev_err(dev, "message for %d is not expected!\n", xfer_id); | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | xfer = &minfo->xfer_block[xfer_id]; | ||
231 | |||
232 | scmi_dump_header_dbg(dev, &xfer->hdr); | ||
233 | /* Is the message of valid length? */ | ||
234 | if (xfer->rx.len > info->desc->max_msg_size) { | ||
235 | dev_err(dev, "unable to handle %zu xfer(max %d)\n", | ||
236 | xfer->rx.len, info->desc->max_msg_size); | ||
237 | return; | ||
238 | } | ||
239 | |||
240 | scmi_fetch_response(xfer, mem); | ||
241 | complete(&xfer->done); | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * pack_scmi_header() - packs and returns 32-bit header | 209 | * pack_scmi_header() - packs and returns 32-bit header |
246 | * | 210 | * |
247 | * @hdr: pointer to header containing all the information on message id, | 211 | * @hdr: pointer to header containing all the information on message id, |
248 | * protocol id and sequence id. | 212 | * protocol id and sequence id. |
249 | * | 213 | * |
250 | * Return: 32-bit packed command header to be sent to the platform. | 214 | * Return: 32-bit packed message header to be sent to the platform. |
251 | */ | 215 | */ |
252 | static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) | 216 | static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) |
253 | { | 217 | { |
@@ -257,6 +221,18 @@ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) | |||
257 | } | 221 | } |
258 | 222 | ||
259 | /** | 223 | /** |
224 | * unpack_scmi_header() - unpacks and records message and protocol id | ||
225 | * | ||
226 | * @msg_hdr: 32-bit packed message header sent from the platform | ||
227 | * @hdr: pointer to header to fetch message and protocol id. | ||
228 | */ | ||
229 | static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) | ||
230 | { | ||
231 | hdr->id = MSG_XTRACT_ID(msg_hdr); | ||
232 | hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr); | ||
233 | } | ||
234 | |||
235 | /** | ||
260 | * scmi_tx_prepare() - mailbox client callback to prepare for the transfer | 236 | * scmi_tx_prepare() - mailbox client callback to prepare for the transfer |
261 | * | 237 | * |
262 | * @cl: client pointer | 238 | * @cl: client pointer |
@@ -271,6 +247,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m) | |||
271 | struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); | 247 | struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); |
272 | struct scmi_shared_mem __iomem *mem = cinfo->payload; | 248 | struct scmi_shared_mem __iomem *mem = cinfo->payload; |
273 | 249 | ||
250 | /* | ||
251 | * Ideally the channel should be free by now, unless the OS timed out | ||
252 | * the last request and the platform continued processing it; wait | ||
253 | * until it releases the shared memory, otherwise we may end up | ||
254 | * overwriting its response with a new message payload or vice versa. | ||
255 | */ | ||
256 | spin_until_cond(ioread32(&mem->channel_status) & | ||
257 | SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); | ||
274 | /* Mark channel busy + clear error */ | 258 | /* Mark channel busy + clear error */ |
275 | iowrite32(0x0, &mem->channel_status); | 259 | iowrite32(0x0, &mem->channel_status); |
276 | iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED, | 260 | iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED, |
@@ -285,8 +269,9 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m) | |||
285 | * scmi_xfer_get() - Allocate one message | 269 | * scmi_xfer_get() - Allocate one message |
286 | * | 270 | * |
287 | * @handle: Pointer to SCMI entity handle | 271 | * @handle: Pointer to SCMI entity handle |
272 | * @minfo: Pointer to Tx/Rx Message management info based on channel type | ||
288 | * | 273 | * |
289 | * Helper function which is used by various command functions that are | 274 | * Helper function which is used by various message functions that are |
290 | * exposed to clients of this driver for allocating a message traffic event. | 275 | * exposed to clients of this driver for allocating a message traffic event. |
291 | * | 276 | * |
292 | * This function can sleep depending on pending requests already in the system | 277 | * This function can sleep depending on pending requests already in the system |
@@ -295,13 +280,13 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m) | |||
295 | * | 280 | * |
296 | * Return: 0 if all went fine, else corresponding error. | 281 | * Return: 0 if all went fine, else corresponding error. |
297 | */ | 282 | */ |
298 | static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle) | 283 | static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, |
284 | struct scmi_xfers_info *minfo) | ||
299 | { | 285 | { |
300 | u16 xfer_id; | 286 | u16 xfer_id; |
301 | struct scmi_xfer *xfer; | 287 | struct scmi_xfer *xfer; |
302 | unsigned long flags, bit_pos; | 288 | unsigned long flags, bit_pos; |
303 | struct scmi_info *info = handle_to_scmi_info(handle); | 289 | struct scmi_info *info = handle_to_scmi_info(handle); |
304 | struct scmi_xfers_info *minfo = &info->minfo; | ||
305 | 290 | ||
306 | /* Keep the locked section as small as possible */ | 291 | /* Keep the locked section as small as possible */ |
307 | spin_lock_irqsave(&minfo->xfer_lock, flags); | 292 | spin_lock_irqsave(&minfo->xfer_lock, flags); |
@@ -324,18 +309,17 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle) | |||
324 | } | 309 | } |
325 | 310 | ||
326 | /** | 311 | /** |
327 | * scmi_xfer_put() - Release a message | 312 | * __scmi_xfer_put() - Release a message |
328 | * | 313 | * |
329 | * @handle: Pointer to SCMI entity handle | 314 | * @minfo: Pointer to Tx/Rx Message management info based on channel type |
330 | * @xfer: message that was reserved by scmi_xfer_get | 315 | * @xfer: message that was reserved by scmi_xfer_get |
331 | * | 316 | * |
332 | * This holds a spinlock to maintain integrity of internal data structures. | 317 | * This holds a spinlock to maintain integrity of internal data structures. |
333 | */ | 318 | */ |
334 | void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) | 319 | static void |
320 | __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) | ||
335 | { | 321 | { |
336 | unsigned long flags; | 322 | unsigned long flags; |
337 | struct scmi_info *info = handle_to_scmi_info(handle); | ||
338 | struct scmi_xfers_info *minfo = &info->minfo; | ||
339 | 323 | ||
340 | /* | 324 | /* |
341 | * Keep the locked section as small as possible | 325 | * Keep the locked section as small as possible |
@@ -347,6 +331,68 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) | |||
347 | spin_unlock_irqrestore(&minfo->xfer_lock, flags); | 331 | spin_unlock_irqrestore(&minfo->xfer_lock, flags); |
348 | } | 332 | } |
349 | 333 | ||
334 | /** | ||
335 | * scmi_rx_callback() - mailbox client callback for receive messages | ||
336 | * | ||
337 | * @cl: client pointer | ||
338 | * @m: mailbox message | ||
339 | * | ||
340 | * Processes one received message to appropriate transfer information and | ||
341 | * signals completion of the transfer. | ||
342 | * | ||
343 | * NOTE: This function will be invoked in IRQ context, hence should be | ||
344 | * as optimal as possible. | ||
345 | */ | ||
346 | static void scmi_rx_callback(struct mbox_client *cl, void *m) | ||
347 | { | ||
348 | u8 msg_type; | ||
349 | u32 msg_hdr; | ||
350 | u16 xfer_id; | ||
351 | struct scmi_xfer *xfer; | ||
352 | struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); | ||
353 | struct device *dev = cinfo->dev; | ||
354 | struct scmi_info *info = handle_to_scmi_info(cinfo->handle); | ||
355 | struct scmi_xfers_info *minfo = &info->tx_minfo; | ||
356 | struct scmi_shared_mem __iomem *mem = cinfo->payload; | ||
357 | |||
358 | msg_hdr = ioread32(&mem->msg_header); | ||
359 | msg_type = MSG_XTRACT_TYPE(msg_hdr); | ||
360 | xfer_id = MSG_XTRACT_TOKEN(msg_hdr); | ||
361 | |||
362 | if (msg_type == MSG_TYPE_NOTIFICATION) | ||
363 | return; /* Notifications not yet supported */ | ||
364 | |||
365 | /* Are we even expecting this? */ | ||
366 | if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { | ||
367 | dev_err(dev, "message for %d is not expected!\n", xfer_id); | ||
368 | return; | ||
369 | } | ||
370 | |||
371 | xfer = &minfo->xfer_block[xfer_id]; | ||
372 | |||
373 | scmi_dump_header_dbg(dev, &xfer->hdr); | ||
374 | |||
375 | scmi_fetch_response(xfer, mem); | ||
376 | |||
377 | if (msg_type == MSG_TYPE_DELAYED_RESP) | ||
378 | complete(xfer->async_done); | ||
379 | else | ||
380 | complete(&xfer->done); | ||
381 | } | ||
382 | |||
383 | /** | ||
384 | * scmi_xfer_put() - Release a transmit message | ||
385 | * | ||
386 | * @handle: Pointer to SCMI entity handle | ||
387 | * @xfer: message that was reserved by scmi_xfer_get | ||
388 | */ | ||
389 | void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) | ||
390 | { | ||
391 | struct scmi_info *info = handle_to_scmi_info(handle); | ||
392 | |||
393 | __scmi_xfer_put(&info->tx_minfo, xfer); | ||
394 | } | ||
395 | |||
350 | static bool | 396 | static bool |
351 | scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) | 397 | scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer) |
352 | { | 398 | { |
@@ -435,8 +481,36 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) | |||
435 | return ret; | 481 | return ret; |
436 | } | 482 | } |
437 | 483 | ||
484 | #define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC) | ||
485 | |||
486 | /** | ||
487 | * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed | ||
488 | * response is received | ||
489 | * | ||
490 | * @handle: Pointer to SCMI entity handle | ||
491 | * @xfer: Transfer to initiate and wait for response | ||
492 | * | ||
493 | * Return: -ETIMEDOUT if no delayed response is received, the | ||
494 | * corresponding error on a transmit failure, or 0 if all goes well. | ||
495 | */ | ||
496 | int scmi_do_xfer_with_response(const struct scmi_handle *handle, | ||
497 | struct scmi_xfer *xfer) | ||
498 | { | ||
499 | int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT); | ||
500 | DECLARE_COMPLETION_ONSTACK(async_response); | ||
501 | |||
502 | xfer->async_done = &async_response; | ||
503 | |||
504 | ret = scmi_do_xfer(handle, xfer); | ||
505 | if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout)) | ||
506 | ret = -ETIMEDOUT; | ||
507 | |||
508 | xfer->async_done = NULL; | ||
509 | return ret; | ||
510 | } | ||
511 | |||
438 | /** | 512 | /** |
439 | * scmi_xfer_get_init() - Allocate and initialise one message | 513 | * scmi_xfer_get_init() - Allocate and initialise one message for transmit |
440 | * | 514 | * |
441 | * @handle: Pointer to SCMI entity handle | 515 | * @handle: Pointer to SCMI entity handle |
442 | * @msg_id: Message identifier | 516 | * @msg_id: Message identifier |
@@ -457,6 +531,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id, | |||
457 | int ret; | 531 | int ret; |
458 | struct scmi_xfer *xfer; | 532 | struct scmi_xfer *xfer; |
459 | struct scmi_info *info = handle_to_scmi_info(handle); | 533 | struct scmi_info *info = handle_to_scmi_info(handle); |
534 | struct scmi_xfers_info *minfo = &info->tx_minfo; | ||
460 | struct device *dev = info->dev; | 535 | struct device *dev = info->dev; |
461 | 536 | ||
462 | /* Ensure we have sane transfer sizes */ | 537 | /* Ensure we have sane transfer sizes */ |
@@ -464,7 +539,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id, | |||
464 | tx_size > info->desc->max_msg_size) | 539 | tx_size > info->desc->max_msg_size) |
465 | return -ERANGE; | 540 | return -ERANGE; |
466 | 541 | ||
467 | xfer = scmi_xfer_get(handle); | 542 | xfer = scmi_xfer_get(handle, minfo); |
468 | if (IS_ERR(xfer)) { | 543 | if (IS_ERR(xfer)) { |
469 | ret = PTR_ERR(xfer); | 544 | ret = PTR_ERR(xfer); |
470 | dev_err(dev, "failed to get free message slot(%d)\n", ret); | 545 | dev_err(dev, "failed to get free message slot(%d)\n", ret); |
@@ -597,27 +672,13 @@ int scmi_handle_put(const struct scmi_handle *handle) | |||
597 | return 0; | 672 | return 0; |
598 | } | 673 | } |
599 | 674 | ||
600 | static const struct scmi_desc scmi_generic_desc = { | ||
601 | .max_rx_timeout_ms = 30, /* We may increase this if required */ | ||
602 | .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ | ||
603 | .max_msg_size = 128, | ||
604 | }; | ||
605 | |||
606 | /* Each compatible listed below must have descriptor associated with it */ | ||
607 | static const struct of_device_id scmi_of_match[] = { | ||
608 | { .compatible = "arm,scmi", .data = &scmi_generic_desc }, | ||
609 | { /* Sentinel */ }, | ||
610 | }; | ||
611 | |||
612 | MODULE_DEVICE_TABLE(of, scmi_of_match); | ||
613 | |||
614 | static int scmi_xfer_info_init(struct scmi_info *sinfo) | 675 | static int scmi_xfer_info_init(struct scmi_info *sinfo) |
615 | { | 676 | { |
616 | int i; | 677 | int i; |
617 | struct scmi_xfer *xfer; | 678 | struct scmi_xfer *xfer; |
618 | struct device *dev = sinfo->dev; | 679 | struct device *dev = sinfo->dev; |
619 | const struct scmi_desc *desc = sinfo->desc; | 680 | const struct scmi_desc *desc = sinfo->desc; |
620 | struct scmi_xfers_info *info = &sinfo->minfo; | 681 | struct scmi_xfers_info *info = &sinfo->tx_minfo; |
621 | 682 | ||
622 | /* Pre-allocated messages, no more than what hdr.seq can support */ | 683 | /* Pre-allocated messages, no more than what hdr.seq can support */ |
623 | if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) { | 684 | if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) { |
@@ -652,61 +713,32 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo) | |||
652 | return 0; | 713 | return 0; |
653 | } | 714 | } |
654 | 715 | ||
655 | static int scmi_mailbox_check(struct device_node *np) | 716 | static int scmi_mailbox_check(struct device_node *np, int idx) |
656 | { | 717 | { |
657 | return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL); | 718 | return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", |
719 | idx, NULL); | ||
658 | } | 720 | } |
659 | 721 | ||
660 | static int scmi_mbox_free_channel(int id, void *p, void *data) | 722 | static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, |
723 | int prot_id, bool tx) | ||
661 | { | 724 | { |
662 | struct scmi_chan_info *cinfo = p; | 725 | int ret, idx; |
663 | struct idr *idr = data; | ||
664 | |||
665 | if (!IS_ERR_OR_NULL(cinfo->chan)) { | ||
666 | mbox_free_channel(cinfo->chan); | ||
667 | cinfo->chan = NULL; | ||
668 | } | ||
669 | |||
670 | idr_remove(idr, id); | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | static int scmi_remove(struct platform_device *pdev) | ||
676 | { | ||
677 | int ret = 0; | ||
678 | struct scmi_info *info = platform_get_drvdata(pdev); | ||
679 | struct idr *idr = &info->tx_idr; | ||
680 | |||
681 | mutex_lock(&scmi_list_mutex); | ||
682 | if (info->users) | ||
683 | ret = -EBUSY; | ||
684 | else | ||
685 | list_del(&info->node); | ||
686 | mutex_unlock(&scmi_list_mutex); | ||
687 | |||
688 | if (ret) | ||
689 | return ret; | ||
690 | |||
691 | /* Safe to free channels since no more users */ | ||
692 | ret = idr_for_each(idr, scmi_mbox_free_channel, idr); | ||
693 | idr_destroy(&info->tx_idr); | ||
694 | |||
695 | return ret; | ||
696 | } | ||
697 | |||
698 | static inline int | ||
699 | scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id) | ||
700 | { | ||
701 | int ret; | ||
702 | struct resource res; | 726 | struct resource res; |
703 | resource_size_t size; | 727 | resource_size_t size; |
704 | struct device_node *shmem, *np = dev->of_node; | 728 | struct device_node *shmem, *np = dev->of_node; |
705 | struct scmi_chan_info *cinfo; | 729 | struct scmi_chan_info *cinfo; |
706 | struct mbox_client *cl; | 730 | struct mbox_client *cl; |
731 | struct idr *idr; | ||
732 | const char *desc = tx ? "Tx" : "Rx"; | ||
707 | 733 | ||
708 | if (scmi_mailbox_check(np)) { | 734 | /* Transmit channel is first entry i.e. index 0 */ |
709 | cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE); | 735 | idx = tx ? 0 : 1; |
736 | idr = tx ? &info->tx_idr : &info->rx_idr; | ||
737 | |||
738 | if (scmi_mailbox_check(np, idx)) { | ||
739 | cinfo = idr_find(idr, SCMI_PROTOCOL_BASE); | ||
740 | if (unlikely(!cinfo)) /* Possible only if platform has no Rx */ | ||
741 | return -EINVAL; | ||
710 | goto idr_alloc; | 742 | goto idr_alloc; |
711 | } | 743 | } |
712 | 744 | ||
@@ -719,36 +751,36 @@ scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id) | |||
719 | cl = &cinfo->cl; | 751 | cl = &cinfo->cl; |
720 | cl->dev = dev; | 752 | cl->dev = dev; |
721 | cl->rx_callback = scmi_rx_callback; | 753 | cl->rx_callback = scmi_rx_callback; |
722 | cl->tx_prepare = scmi_tx_prepare; | 754 | cl->tx_prepare = tx ? scmi_tx_prepare : NULL; |
723 | cl->tx_block = false; | 755 | cl->tx_block = false; |
724 | cl->knows_txdone = true; | 756 | cl->knows_txdone = tx; |
725 | 757 | ||
726 | shmem = of_parse_phandle(np, "shmem", 0); | 758 | shmem = of_parse_phandle(np, "shmem", idx); |
727 | ret = of_address_to_resource(shmem, 0, &res); | 759 | ret = of_address_to_resource(shmem, 0, &res); |
728 | of_node_put(shmem); | 760 | of_node_put(shmem); |
729 | if (ret) { | 761 | if (ret) { |
730 | dev_err(dev, "failed to get SCMI Tx payload mem resource\n"); | 762 | dev_err(dev, "failed to get SCMI %s payload memory\n", desc); |
731 | return ret; | 763 | return ret; |
732 | } | 764 | } |
733 | 765 | ||
734 | size = resource_size(&res); | 766 | size = resource_size(&res); |
735 | cinfo->payload = devm_ioremap(info->dev, res.start, size); | 767 | cinfo->payload = devm_ioremap(info->dev, res.start, size); |
736 | if (!cinfo->payload) { | 768 | if (!cinfo->payload) { |
737 | dev_err(dev, "failed to ioremap SCMI Tx payload\n"); | 769 | dev_err(dev, "failed to ioremap SCMI %s payload\n", desc); |
738 | return -EADDRNOTAVAIL; | 770 | return -EADDRNOTAVAIL; |
739 | } | 771 | } |
740 | 772 | ||
741 | /* Transmit channel is first entry i.e. index 0 */ | 773 | cinfo->chan = mbox_request_channel(cl, idx); |
742 | cinfo->chan = mbox_request_channel(cl, 0); | ||
743 | if (IS_ERR(cinfo->chan)) { | 774 | if (IS_ERR(cinfo->chan)) { |
744 | ret = PTR_ERR(cinfo->chan); | 775 | ret = PTR_ERR(cinfo->chan); |
745 | if (ret != -EPROBE_DEFER) | 776 | if (ret != -EPROBE_DEFER) |
746 | dev_err(dev, "failed to request SCMI Tx mailbox\n"); | 777 | dev_err(dev, "failed to request SCMI %s mailbox\n", |
778 | desc); | ||
747 | return ret; | 779 | return ret; |
748 | } | 780 | } |
749 | 781 | ||
750 | idr_alloc: | 782 | idr_alloc: |
751 | ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); | 783 | ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL); |
752 | if (ret != prot_id) { | 784 | if (ret != prot_id) { |
753 | dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret); | 785 | dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret); |
754 | return ret; | 786 | return ret; |
@@ -758,6 +790,17 @@ idr_alloc: | |||
758 | return 0; | 790 | return 0; |
759 | } | 791 | } |
760 | 792 | ||
793 | static inline int | ||
794 | scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) | ||
795 | { | ||
796 | int ret = scmi_mbox_chan_setup(info, dev, prot_id, true); | ||
797 | |||
798 | if (!ret) /* Rx is optional, hence no error check */ | ||
799 | scmi_mbox_chan_setup(info, dev, prot_id, false); | ||
800 | |||
801 | return ret; | ||
802 | } | ||
803 | |||
761 | static inline void | 804 | static inline void |
762 | scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, | 805 | scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, |
763 | int prot_id) | 806 | int prot_id) |
@@ -771,7 +814,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, | |||
771 | return; | 814 | return; |
772 | } | 815 | } |
773 | 816 | ||
774 | if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) { | 817 | if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) { |
775 | dev_err(&sdev->dev, "failed to setup transport\n"); | 818 | dev_err(&sdev->dev, "failed to setup transport\n"); |
776 | scmi_device_destroy(sdev); | 819 | scmi_device_destroy(sdev); |
777 | return; | 820 | return; |
@@ -791,7 +834,7 @@ static int scmi_probe(struct platform_device *pdev) | |||
791 | struct device_node *child, *np = dev->of_node; | 834 | struct device_node *child, *np = dev->of_node; |
792 | 835 | ||
793 | /* Only mailbox method supported, check for the presence of one */ | 836 | /* Only mailbox method supported, check for the presence of one */ |
794 | if (scmi_mailbox_check(np)) { | 837 | if (scmi_mailbox_check(np, 0)) { |
795 | dev_err(dev, "no mailbox found in %pOF\n", np); | 838 | dev_err(dev, "no mailbox found in %pOF\n", np); |
796 | return -EINVAL; | 839 | return -EINVAL; |
797 | } | 840 | } |
@@ -814,12 +857,13 @@ static int scmi_probe(struct platform_device *pdev) | |||
814 | 857 | ||
815 | platform_set_drvdata(pdev, info); | 858 | platform_set_drvdata(pdev, info); |
816 | idr_init(&info->tx_idr); | 859 | idr_init(&info->tx_idr); |
860 | idr_init(&info->rx_idr); | ||
817 | 861 | ||
818 | handle = &info->handle; | 862 | handle = &info->handle; |
819 | handle->dev = info->dev; | 863 | handle->dev = info->dev; |
820 | handle->version = &info->version; | 864 | handle->version = &info->version; |
821 | 865 | ||
822 | ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE); | 866 | ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE); |
823 | if (ret) | 867 | if (ret) |
824 | return ret; | 868 | return ret; |
825 | 869 | ||
@@ -854,6 +898,62 @@ static int scmi_probe(struct platform_device *pdev) | |||
854 | return 0; | 898 | return 0; |
855 | } | 899 | } |
856 | 900 | ||
901 | static int scmi_mbox_free_channel(int id, void *p, void *data) | ||
902 | { | ||
903 | struct scmi_chan_info *cinfo = p; | ||
904 | struct idr *idr = data; | ||
905 | |||
906 | if (!IS_ERR_OR_NULL(cinfo->chan)) { | ||
907 | mbox_free_channel(cinfo->chan); | ||
908 | cinfo->chan = NULL; | ||
909 | } | ||
910 | |||
911 | idr_remove(idr, id); | ||
912 | |||
913 | return 0; | ||
914 | } | ||
915 | |||
916 | static int scmi_remove(struct platform_device *pdev) | ||
917 | { | ||
918 | int ret = 0; | ||
919 | struct scmi_info *info = platform_get_drvdata(pdev); | ||
920 | struct idr *idr = &info->tx_idr; | ||
921 | |||
922 | mutex_lock(&scmi_list_mutex); | ||
923 | if (info->users) | ||
924 | ret = -EBUSY; | ||
925 | else | ||
926 | list_del(&info->node); | ||
927 | mutex_unlock(&scmi_list_mutex); | ||
928 | |||
929 | if (ret) | ||
930 | return ret; | ||
931 | |||
932 | /* Safe to free channels since no more users */ | ||
933 | ret = idr_for_each(idr, scmi_mbox_free_channel, idr); | ||
934 | idr_destroy(&info->tx_idr); | ||
935 | |||
936 | idr = &info->rx_idr; | ||
937 | ret = idr_for_each(idr, scmi_mbox_free_channel, idr); | ||
938 | idr_destroy(&info->rx_idr); | ||
939 | |||
940 | return ret; | ||
941 | } | ||
942 | |||
943 | static const struct scmi_desc scmi_generic_desc = { | ||
944 | .max_rx_timeout_ms = 30, /* We may increase this if required */ | ||
945 | .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ | ||
946 | .max_msg_size = 128, | ||
947 | }; | ||
948 | |||
949 | /* Each compatible listed below must have a descriptor associated with it */ | ||
950 | static const struct of_device_id scmi_of_match[] = { | ||
951 | { .compatible = "arm,scmi", .data = &scmi_generic_desc }, | ||
952 | { /* Sentinel */ }, | ||
953 | }; | ||
954 | |||
955 | MODULE_DEVICE_TABLE(of, scmi_of_match); | ||
956 | |||
857 | static struct platform_driver scmi_driver = { | 957 | static struct platform_driver scmi_driver = { |
858 | .driver = { | 958 | .driver = { |
859 | .name = "arm-scmi", | 959 | .name = "arm-scmi", |
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 3c8ae7cc35de..4a8012e3cb8c 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c | |||
@@ -5,7 +5,10 @@ | |||
5 | * Copyright (C) 2018 ARM Ltd. | 5 | * Copyright (C) 2018 ARM Ltd. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/bits.h> | ||
8 | #include <linux/of.h> | 9 | #include <linux/of.h> |
10 | #include <linux/io.h> | ||
11 | #include <linux/io-64-nonatomic-hi-lo.h> | ||
9 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
10 | #include <linux/pm_opp.h> | 13 | #include <linux/pm_opp.h> |
11 | #include <linux/sort.h> | 14 | #include <linux/sort.h> |
@@ -21,6 +24,7 @@ enum scmi_performance_protocol_cmd { | |||
21 | PERF_LEVEL_GET = 0x8, | 24 | PERF_LEVEL_GET = 0x8, |
22 | PERF_NOTIFY_LIMITS = 0x9, | 25 | PERF_NOTIFY_LIMITS = 0x9, |
23 | PERF_NOTIFY_LEVEL = 0xa, | 26 | PERF_NOTIFY_LEVEL = 0xa, |
27 | PERF_DESCRIBE_FASTCHANNEL = 0xb, | ||
24 | }; | 28 | }; |
25 | 29 | ||
26 | struct scmi_opp { | 30 | struct scmi_opp { |
@@ -44,6 +48,7 @@ struct scmi_msg_resp_perf_domain_attributes { | |||
44 | #define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30)) | 48 | #define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30)) |
45 | #define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29)) | 49 | #define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29)) |
46 | #define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28)) | 50 | #define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28)) |
51 | #define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27)) | ||
47 | __le32 rate_limit_us; | 52 | __le32 rate_limit_us; |
48 | __le32 sustained_freq_khz; | 53 | __le32 sustained_freq_khz; |
49 | __le32 sustained_perf_level; | 54 | __le32 sustained_perf_level; |
@@ -87,17 +92,56 @@ struct scmi_msg_resp_perf_describe_levels { | |||
87 | } opp[0]; | 92 | } opp[0]; |
88 | }; | 93 | }; |
89 | 94 | ||
95 | struct scmi_perf_get_fc_info { | ||
96 | __le32 domain; | ||
97 | __le32 message_id; | ||
98 | }; | ||
99 | |||
100 | struct scmi_msg_resp_perf_desc_fc { | ||
101 | __le32 attr; | ||
102 | #define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) | ||
103 | #define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) | ||
104 | __le32 rate_limit; | ||
105 | __le32 chan_addr_low; | ||
106 | __le32 chan_addr_high; | ||
107 | __le32 chan_size; | ||
108 | __le32 db_addr_low; | ||
109 | __le32 db_addr_high; | ||
110 | __le32 db_set_lmask; | ||
111 | __le32 db_set_hmask; | ||
112 | __le32 db_preserve_lmask; | ||
113 | __le32 db_preserve_hmask; | ||
114 | }; | ||
115 | |||
116 | struct scmi_fc_db_info { | ||
117 | int width; | ||
118 | u64 set; | ||
119 | u64 mask; | ||
120 | void __iomem *addr; | ||
121 | }; | ||
122 | |||
123 | struct scmi_fc_info { | ||
124 | void __iomem *level_set_addr; | ||
125 | void __iomem *limit_set_addr; | ||
126 | void __iomem *level_get_addr; | ||
127 | void __iomem *limit_get_addr; | ||
128 | struct scmi_fc_db_info *level_set_db; | ||
129 | struct scmi_fc_db_info *limit_set_db; | ||
130 | }; | ||
131 | |||
90 | struct perf_dom_info { | 132 | struct perf_dom_info { |
91 | bool set_limits; | 133 | bool set_limits; |
92 | bool set_perf; | 134 | bool set_perf; |
93 | bool perf_limit_notify; | 135 | bool perf_limit_notify; |
94 | bool perf_level_notify; | 136 | bool perf_level_notify; |
137 | bool perf_fastchannels; | ||
95 | u32 opp_count; | 138 | u32 opp_count; |
96 | u32 sustained_freq_khz; | 139 | u32 sustained_freq_khz; |
97 | u32 sustained_perf_level; | 140 | u32 sustained_perf_level; |
98 | u32 mult_factor; | 141 | u32 mult_factor; |
99 | char name[SCMI_MAX_STR_SIZE]; | 142 | char name[SCMI_MAX_STR_SIZE]; |
100 | struct scmi_opp opp[MAX_OPPS]; | 143 | struct scmi_opp opp[MAX_OPPS]; |
144 | struct scmi_fc_info *fc_info; | ||
101 | }; | 145 | }; |
102 | 146 | ||
103 | struct scmi_perf_info { | 147 | struct scmi_perf_info { |
@@ -151,7 +195,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain, | |||
151 | if (ret) | 195 | if (ret) |
152 | return ret; | 196 | return ret; |
153 | 197 | ||
154 | *(__le32 *)t->tx.buf = cpu_to_le32(domain); | 198 | put_unaligned_le32(domain, t->tx.buf); |
155 | attr = t->rx.buf; | 199 | attr = t->rx.buf; |
156 | 200 | ||
157 | ret = scmi_do_xfer(handle, t); | 201 | ret = scmi_do_xfer(handle, t); |
@@ -162,6 +206,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain, | |||
162 | dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags); | 206 | dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags); |
163 | dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags); | 207 | dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags); |
164 | dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags); | 208 | dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags); |
209 | dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags); | ||
165 | dom_info->sustained_freq_khz = | 210 | dom_info->sustained_freq_khz = |
166 | le32_to_cpu(attr->sustained_freq_khz); | 211 | le32_to_cpu(attr->sustained_freq_khz); |
167 | dom_info->sustained_perf_level = | 212 | dom_info->sustained_perf_level = |
@@ -249,8 +294,42 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain, | |||
249 | return ret; | 294 | return ret; |
250 | } | 295 | } |
251 | 296 | ||
252 | static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain, | 297 | #define SCMI_PERF_FC_RING_DB(w) \ |
253 | u32 max_perf, u32 min_perf) | 298 | do { \ |
299 | u##w val = 0; \ | ||
300 | \ | ||
301 | if (db->mask) \ | ||
302 | val = ioread##w(db->addr) & db->mask; \ | ||
303 | iowrite##w((u##w)db->set | val, db->addr); \ | ||
304 | } while (0) | ||
305 | |||
306 | static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db) | ||
307 | { | ||
308 | if (!db || !db->addr) | ||
309 | return; | ||
310 | |||
311 | if (db->width == 1) | ||
312 | SCMI_PERF_FC_RING_DB(8); | ||
313 | else if (db->width == 2) | ||
314 | SCMI_PERF_FC_RING_DB(16); | ||
315 | else if (db->width == 4) | ||
316 | SCMI_PERF_FC_RING_DB(32); | ||
317 | else /* db->width == 8 */ | ||
318 | #ifdef CONFIG_64BIT | ||
319 | SCMI_PERF_FC_RING_DB(64); | ||
320 | #else | ||
321 | { | ||
322 | u64 val = 0; | ||
323 | |||
324 | if (db->mask) | ||
325 | val = ioread64_hi_lo(db->addr) & db->mask; | ||
326 | iowrite64_hi_lo(db->set, db->addr); | ||
327 | } | ||
328 | #endif | ||
329 | } | ||
330 | |||
331 | static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain, | ||
332 | u32 max_perf, u32 min_perf) | ||
254 | { | 333 | { |
255 | int ret; | 334 | int ret; |
256 | struct scmi_xfer *t; | 335 | struct scmi_xfer *t; |
@@ -272,8 +351,24 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain, | |||
272 | return ret; | 351 | return ret; |
273 | } | 352 | } |
274 | 353 | ||
275 | static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain, | 354 | static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain, |
276 | u32 *max_perf, u32 *min_perf) | 355 | u32 max_perf, u32 min_perf) |
356 | { | ||
357 | struct scmi_perf_info *pi = handle->perf_priv; | ||
358 | struct perf_dom_info *dom = pi->dom_info + domain; | ||
359 | |||
360 | if (dom->fc_info && dom->fc_info->limit_set_addr) { | ||
361 | iowrite32(max_perf, dom->fc_info->limit_set_addr); | ||
362 | iowrite32(min_perf, dom->fc_info->limit_set_addr + 4); | ||
363 | scmi_perf_fc_ring_db(dom->fc_info->limit_set_db); | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf); | ||
368 | } | ||
369 | |||
370 | static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain, | ||
371 | u32 *max_perf, u32 *min_perf) | ||
277 | { | 372 | { |
278 | int ret; | 373 | int ret; |
279 | struct scmi_xfer *t; | 374 | struct scmi_xfer *t; |
@@ -284,7 +379,7 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain, | |||
284 | if (ret) | 379 | if (ret) |
285 | return ret; | 380 | return ret; |
286 | 381 | ||
287 | *(__le32 *)t->tx.buf = cpu_to_le32(domain); | 382 | put_unaligned_le32(domain, t->tx.buf); |
288 | 383 | ||
289 | ret = scmi_do_xfer(handle, t); | 384 | ret = scmi_do_xfer(handle, t); |
290 | if (!ret) { | 385 | if (!ret) { |
@@ -298,8 +393,23 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain, | |||
298 | return ret; | 393 | return ret; |
299 | } | 394 | } |
300 | 395 | ||
301 | static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain, | 396 | static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain, |
302 | u32 level, bool poll) | 397 | u32 *max_perf, u32 *min_perf) |
398 | { | ||
399 | struct scmi_perf_info *pi = handle->perf_priv; | ||
400 | struct perf_dom_info *dom = pi->dom_info + domain; | ||
401 | |||
402 | if (dom->fc_info && dom->fc_info->limit_get_addr) { | ||
403 | *max_perf = ioread32(dom->fc_info->limit_get_addr); | ||
404 | *min_perf = ioread32(dom->fc_info->limit_get_addr + 4); | ||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf); | ||
409 | } | ||
410 | |||
411 | static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain, | ||
412 | u32 level, bool poll) | ||
303 | { | 413 | { |
304 | int ret; | 414 | int ret; |
305 | struct scmi_xfer *t; | 415 | struct scmi_xfer *t; |
@@ -321,8 +431,23 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain, | |||
321 | return ret; | 431 | return ret; |
322 | } | 432 | } |
323 | 433 | ||
324 | static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain, | 434 | static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain, |
325 | u32 *level, bool poll) | 435 | u32 level, bool poll) |
436 | { | ||
437 | struct scmi_perf_info *pi = handle->perf_priv; | ||
438 | struct perf_dom_info *dom = pi->dom_info + domain; | ||
439 | |||
440 | if (dom->fc_info && dom->fc_info->level_set_addr) { | ||
441 | iowrite32(level, dom->fc_info->level_set_addr); | ||
442 | scmi_perf_fc_ring_db(dom->fc_info->level_set_db); | ||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | return scmi_perf_mb_level_set(handle, domain, level, poll); | ||
447 | } | ||
448 | |||
449 | static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain, | ||
450 | u32 *level, bool poll) | ||
326 | { | 451 | { |
327 | int ret; | 452 | int ret; |
328 | struct scmi_xfer *t; | 453 | struct scmi_xfer *t; |
@@ -333,16 +458,128 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain, | |||
333 | return ret; | 458 | return ret; |
334 | 459 | ||
335 | t->hdr.poll_completion = poll; | 460 | t->hdr.poll_completion = poll; |
336 | *(__le32 *)t->tx.buf = cpu_to_le32(domain); | 461 | put_unaligned_le32(domain, t->tx.buf); |
337 | 462 | ||
338 | ret = scmi_do_xfer(handle, t); | 463 | ret = scmi_do_xfer(handle, t); |
339 | if (!ret) | 464 | if (!ret) |
340 | *level = le32_to_cpu(*(__le32 *)t->rx.buf); | 465 | *level = get_unaligned_le32(t->rx.buf); |
341 | 466 | ||
342 | scmi_xfer_put(handle, t); | 467 | scmi_xfer_put(handle, t); |
343 | return ret; | 468 | return ret; |
344 | } | 469 | } |
345 | 470 | ||
471 | static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain, | ||
472 | u32 *level, bool poll) | ||
473 | { | ||
474 | struct scmi_perf_info *pi = handle->perf_priv; | ||
475 | struct perf_dom_info *dom = pi->dom_info + domain; | ||
476 | |||
477 | if (dom->fc_info && dom->fc_info->level_get_addr) { | ||
478 | *level = ioread32(dom->fc_info->level_get_addr); | ||
479 | return 0; | ||
480 | } | ||
481 | |||
482 | return scmi_perf_mb_level_get(handle, domain, level, poll); | ||
483 | } | ||
484 | |||
485 | static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size) | ||
486 | { | ||
487 | if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4) | ||
488 | return true; | ||
489 | if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8) | ||
490 | return true; | ||
491 | return false; | ||
492 | } | ||
493 | |||
494 | static void | ||
495 | scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain, | ||
496 | u32 message_id, void __iomem **p_addr, | ||
497 | struct scmi_fc_db_info **p_db) | ||
498 | { | ||
499 | int ret; | ||
500 | u32 flags; | ||
501 | u64 phys_addr; | ||
502 | u8 size; | ||
503 | void __iomem *addr; | ||
504 | struct scmi_xfer *t; | ||
505 | struct scmi_fc_db_info *db; | ||
506 | struct scmi_perf_get_fc_info *info; | ||
507 | struct scmi_msg_resp_perf_desc_fc *resp; | ||
508 | |||
509 | if (!p_addr) | ||
510 | return; | ||
511 | |||
512 | ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL, | ||
513 | SCMI_PROTOCOL_PERF, | ||
514 | sizeof(*info), sizeof(*resp), &t); | ||
515 | if (ret) | ||
516 | return; | ||
517 | |||
518 | info = t->tx.buf; | ||
519 | info->domain = cpu_to_le32(domain); | ||
520 | info->message_id = cpu_to_le32(message_id); | ||
521 | |||
522 | ret = scmi_do_xfer(handle, t); | ||
523 | if (ret) | ||
524 | goto err_xfer; | ||
525 | |||
526 | resp = t->rx.buf; | ||
527 | flags = le32_to_cpu(resp->attr); | ||
528 | size = le32_to_cpu(resp->chan_size); | ||
529 | if (!scmi_perf_fc_size_is_valid(message_id, size)) | ||
530 | goto err_xfer; | ||
531 | |||
532 | phys_addr = le32_to_cpu(resp->chan_addr_low); | ||
533 | phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; | ||
534 | addr = devm_ioremap(handle->dev, phys_addr, size); | ||
535 | if (!addr) | ||
536 | goto err_xfer; | ||
537 | *p_addr = addr; | ||
538 | |||
539 | if (p_db && SUPPORTS_DOORBELL(flags)) { | ||
540 | db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL); | ||
541 | if (!db) | ||
542 | goto err_xfer; | ||
543 | |||
544 | size = 1 << DOORBELL_REG_WIDTH(flags); | ||
545 | phys_addr = le32_to_cpu(resp->db_addr_low); | ||
546 | phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; | ||
547 | addr = devm_ioremap(handle->dev, phys_addr, size); | ||
548 | if (!addr) | ||
549 | goto err_xfer; | ||
550 | |||
551 | db->addr = addr; | ||
552 | db->width = size; | ||
553 | db->set = le32_to_cpu(resp->db_set_lmask); | ||
554 | db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; | ||
555 | db->mask = le32_to_cpu(resp->db_preserve_lmask); | ||
556 | db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; | ||
557 | *p_db = db; | ||
558 | } | ||
559 | err_xfer: | ||
560 | scmi_xfer_put(handle, t); | ||
561 | } | ||
562 | |||
563 | static void scmi_perf_domain_init_fc(const struct scmi_handle *handle, | ||
564 | u32 domain, struct scmi_fc_info **p_fc) | ||
565 | { | ||
566 | struct scmi_fc_info *fc; | ||
567 | |||
568 | fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL); | ||
569 | if (!fc) | ||
570 | return; | ||
571 | |||
572 | scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET, | ||
573 | &fc->level_set_addr, &fc->level_set_db); | ||
574 | scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET, | ||
575 | &fc->level_get_addr, NULL); | ||
576 | scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET, | ||
577 | &fc->limit_set_addr, &fc->limit_set_db); | ||
578 | scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET, | ||
579 | &fc->limit_get_addr, NULL); | ||
580 | *p_fc = fc; | ||
581 | } | ||
582 | |||
346 | /* Device specific ops */ | 583 | /* Device specific ops */ |
347 | static int scmi_dev_domain_id(struct device *dev) | 584 | static int scmi_dev_domain_id(struct device *dev) |
348 | { | 585 | { |
@@ -494,6 +731,9 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle) | |||
494 | 731 | ||
495 | scmi_perf_domain_attributes_get(handle, domain, dom); | 732 | scmi_perf_domain_attributes_get(handle, domain, dom); |
496 | scmi_perf_describe_levels_get(handle, domain, dom); | 733 | scmi_perf_describe_levels_get(handle, domain, dom); |
734 | |||
735 | if (dom->perf_fastchannels) | ||
736 | scmi_perf_domain_init_fc(handle, domain, &dom->fc_info); | ||
497 | } | 737 | } |
498 | 738 | ||
499 | handle->perf_ops = &perf_ops; | 739 | handle->perf_ops = &perf_ops; |
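The SCMI_PERF_FC_RING_DB helper above encodes the fastchannel doorbell rule: bits named in the preserve mask must be read back and kept, while the set mask is OR-ed in. A stripped-down, 32-bit-only sketch of that read-modify-write (the struct and function names here are invented for illustration, not part of the driver) would be:

#include <linux/io.h>
#include <linux/types.h>

struct example_fc_db {
	void __iomem *addr;	/* doorbell register described by PERF_DESCRIBE_FASTCHANNEL */
	u32 set;		/* bits to write in order to ring the doorbell */
	u32 mask;		/* bits the agent must preserve across the write */
};

static void example_ring_db32(struct example_fc_db *db)
{
	u32 val = 0;

	if (db->mask)				/* keep the platform-owned bits */
		val = ioread32(db->addr) & db->mask;
	iowrite32(db->set | val, db->addr);	/* ring with the preserved bits intact */
}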
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index 62f3401a1f01..5abef7079c0a 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c | |||
@@ -96,7 +96,7 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain, | |||
96 | if (ret) | 96 | if (ret) |
97 | return ret; | 97 | return ret; |
98 | 98 | ||
99 | *(__le32 *)t->tx.buf = cpu_to_le32(domain); | 99 | put_unaligned_le32(domain, t->tx.buf); |
100 | attr = t->rx.buf; | 100 | attr = t->rx.buf; |
101 | 101 | ||
102 | ret = scmi_do_xfer(handle, t); | 102 | ret = scmi_do_xfer(handle, t); |
@@ -147,11 +147,11 @@ scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state) | |||
147 | if (ret) | 147 | if (ret) |
148 | return ret; | 148 | return ret; |
149 | 149 | ||
150 | *(__le32 *)t->tx.buf = cpu_to_le32(domain); | 150 | put_unaligned_le32(domain, t->tx.buf); |
151 | 151 | ||
152 | ret = scmi_do_xfer(handle, t); | 152 | ret = scmi_do_xfer(handle, t); |
153 | if (!ret) | 153 | if (!ret) |
154 | *state = le32_to_cpu(*(__le32 *)t->rx.buf); | 154 | *state = get_unaligned_le32(t->rx.buf); |
155 | 155 | ||
156 | scmi_xfer_put(handle, t); | 156 | scmi_xfer_put(handle, t); |
157 | return ret; | 157 | return ret; |
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c new file mode 100644 index 000000000000..64cc81915581 --- /dev/null +++ b/drivers/firmware/arm_scmi/reset.c | |||
@@ -0,0 +1,231 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * System Control and Management Interface (SCMI) Reset Protocol | ||
4 | * | ||
5 | * Copyright (C) 2019 ARM Ltd. | ||
6 | */ | ||
7 | |||
8 | #include "common.h" | ||
9 | |||
10 | enum scmi_reset_protocol_cmd { | ||
11 | RESET_DOMAIN_ATTRIBUTES = 0x3, | ||
12 | RESET = 0x4, | ||
13 | RESET_NOTIFY = 0x5, | ||
14 | }; | ||
15 | |||
16 | enum scmi_reset_protocol_notify { | ||
17 | RESET_ISSUED = 0x0, | ||
18 | }; | ||
19 | |||
20 | #define NUM_RESET_DOMAIN_MASK 0xffff | ||
21 | #define RESET_NOTIFY_ENABLE BIT(0) | ||
22 | |||
23 | struct scmi_msg_resp_reset_domain_attributes { | ||
24 | __le32 attributes; | ||
25 | #define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31)) | ||
26 | #define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30)) | ||
27 | __le32 latency; | ||
28 | u8 name[SCMI_MAX_STR_SIZE]; | ||
29 | }; | ||
30 | |||
31 | struct scmi_msg_reset_domain_reset { | ||
32 | __le32 domain_id; | ||
33 | __le32 flags; | ||
34 | #define AUTONOMOUS_RESET BIT(0) | ||
35 | #define EXPLICIT_RESET_ASSERT BIT(1) | ||
36 | #define ASYNCHRONOUS_RESET BIT(2) | ||
37 | __le32 reset_state; | ||
38 | #define ARCH_RESET_TYPE BIT(31) | ||
39 | #define COLD_RESET_STATE BIT(0) | ||
40 | #define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE) | ||
41 | }; | ||
42 | |||
43 | struct reset_dom_info { | ||
44 | bool async_reset; | ||
45 | bool reset_notify; | ||
46 | u32 latency_us; | ||
47 | char name[SCMI_MAX_STR_SIZE]; | ||
48 | }; | ||
49 | |||
50 | struct scmi_reset_info { | ||
51 | int num_domains; | ||
52 | struct reset_dom_info *dom_info; | ||
53 | }; | ||
54 | |||
55 | static int scmi_reset_attributes_get(const struct scmi_handle *handle, | ||
56 | struct scmi_reset_info *pi) | ||
57 | { | ||
58 | int ret; | ||
59 | struct scmi_xfer *t; | ||
60 | u32 attr; | ||
61 | |||
62 | ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES, | ||
63 | SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t); | ||
64 | if (ret) | ||
65 | return ret; | ||
66 | |||
67 | ret = scmi_do_xfer(handle, t); | ||
68 | if (!ret) { | ||
69 | attr = get_unaligned_le32(t->rx.buf); | ||
70 | pi->num_domains = attr & NUM_RESET_DOMAIN_MASK; | ||
71 | } | ||
72 | |||
73 | scmi_xfer_put(handle, t); | ||
74 | return ret; | ||
75 | } | ||
76 | |||
77 | static int | ||
78 | scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain, | ||
79 | struct reset_dom_info *dom_info) | ||
80 | { | ||
81 | int ret; | ||
82 | struct scmi_xfer *t; | ||
83 | struct scmi_msg_resp_reset_domain_attributes *attr; | ||
84 | |||
85 | ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES, | ||
86 | SCMI_PROTOCOL_RESET, sizeof(domain), | ||
87 | sizeof(*attr), &t); | ||
88 | if (ret) | ||
89 | return ret; | ||
90 | |||
91 | put_unaligned_le32(domain, t->tx.buf); | ||
92 | attr = t->rx.buf; | ||
93 | |||
94 | ret = scmi_do_xfer(handle, t); | ||
95 | if (!ret) { | ||
96 | u32 attributes = le32_to_cpu(attr->attributes); | ||
97 | |||
98 | dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes); | ||
99 | dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes); | ||
100 | dom_info->latency_us = le32_to_cpu(attr->latency); | ||
101 | if (dom_info->latency_us == U32_MAX) | ||
102 | dom_info->latency_us = 0; | ||
103 | strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); | ||
104 | } | ||
105 | |||
106 | scmi_xfer_put(handle, t); | ||
107 | return ret; | ||
108 | } | ||
109 | |||
110 | static int scmi_reset_num_domains_get(const struct scmi_handle *handle) | ||
111 | { | ||
112 | struct scmi_reset_info *pi = handle->reset_priv; | ||
113 | |||
114 | return pi->num_domains; | ||
115 | } | ||
116 | |||
117 | static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain) | ||
118 | { | ||
119 | struct scmi_reset_info *pi = handle->reset_priv; | ||
120 | struct reset_dom_info *dom = pi->dom_info + domain; | ||
121 | |||
122 | return dom->name; | ||
123 | } | ||
124 | |||
125 | static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain) | ||
126 | { | ||
127 | struct scmi_reset_info *pi = handle->reset_priv; | ||
128 | struct reset_dom_info *dom = pi->dom_info + domain; | ||
129 | |||
130 | return dom->latency_us; | ||
131 | } | ||
132 | |||
133 | static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain, | ||
134 | u32 flags, u32 state) | ||
135 | { | ||
136 | int ret; | ||
137 | struct scmi_xfer *t; | ||
138 | struct scmi_msg_reset_domain_reset *dom; | ||
139 | struct scmi_reset_info *pi = handle->reset_priv; | ||
140 | struct reset_dom_info *rdom = pi->dom_info + domain; | ||
141 | |||
142 | if (rdom->async_reset) | ||
143 | flags |= ASYNCHRONOUS_RESET; | ||
144 | |||
145 | ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET, | ||
146 | sizeof(*dom), 0, &t); | ||
147 | if (ret) | ||
148 | return ret; | ||
149 | |||
150 | dom = t->tx.buf; | ||
151 | dom->domain_id = cpu_to_le32(domain); | ||
152 | dom->flags = cpu_to_le32(flags); | ||
153 | dom->reset_state = cpu_to_le32(state); | ||
154 | |||
155 | if (rdom->async_reset) | ||
156 | ret = scmi_do_xfer_with_response(handle, t); | ||
157 | else | ||
158 | ret = scmi_do_xfer(handle, t); | ||
159 | |||
160 | scmi_xfer_put(handle, t); | ||
161 | return ret; | ||
162 | } | ||
163 | |||
164 | static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain) | ||
165 | { | ||
166 | return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET, | ||
167 | ARCH_COLD_RESET); | ||
168 | } | ||
169 | |||
170 | static int | ||
171 | scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain) | ||
172 | { | ||
173 | return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT, | ||
174 | ARCH_COLD_RESET); | ||
175 | } | ||
176 | |||
177 | static int | ||
178 | scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain) | ||
179 | { | ||
180 | return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET); | ||
181 | } | ||
182 | |||
183 | static struct scmi_reset_ops reset_ops = { | ||
184 | .num_domains_get = scmi_reset_num_domains_get, | ||
185 | .name_get = scmi_reset_name_get, | ||
186 | .latency_get = scmi_reset_latency_get, | ||
187 | .reset = scmi_reset_domain_reset, | ||
188 | .assert = scmi_reset_domain_assert, | ||
189 | .deassert = scmi_reset_domain_deassert, | ||
190 | }; | ||
191 | |||
192 | static int scmi_reset_protocol_init(struct scmi_handle *handle) | ||
193 | { | ||
194 | int domain; | ||
195 | u32 version; | ||
196 | struct scmi_reset_info *pinfo; | ||
197 | |||
198 | scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version); | ||
199 | |||
200 | dev_dbg(handle->dev, "Reset Version %d.%d\n", | ||
201 | PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); | ||
202 | |||
203 | pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL); | ||
204 | if (!pinfo) | ||
205 | return -ENOMEM; | ||
206 | |||
207 | scmi_reset_attributes_get(handle, pinfo); | ||
208 | |||
209 | pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains, | ||
210 | sizeof(*pinfo->dom_info), GFP_KERNEL); | ||
211 | if (!pinfo->dom_info) | ||
212 | return -ENOMEM; | ||
213 | |||
214 | for (domain = 0; domain < pinfo->num_domains; domain++) { | ||
215 | struct reset_dom_info *dom = pinfo->dom_info + domain; | ||
216 | |||
217 | scmi_reset_domain_attributes_get(handle, domain, dom); | ||
218 | } | ||
219 | |||
220 | handle->reset_ops = &reset_ops; | ||
221 | handle->reset_priv = pinfo; | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static int __init scmi_reset_init(void) | ||
227 | { | ||
228 | return scmi_protocol_register(SCMI_PROTOCOL_RESET, | ||
229 | &scmi_reset_protocol_init); | ||
230 | } | ||
231 | subsys_initcall(scmi_reset_init); | ||
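Client drivers reach this protocol through handle->reset_ops. A minimal usage sketch, assuming the handle was obtained from an SCMI device probe and that <linux/delay.h> is available (the helper name and the sleep slack are illustrative only):

#include <linux/delay.h>

static int example_cycle_reset(const struct scmi_handle *handle, u32 domain)
{
	int latency = handle->reset_ops->latency_get(handle, domain);
	int err;

	err = handle->reset_ops->assert(handle, domain);	/* EXPLICIT_RESET_ASSERT */
	if (err)
		return err;

	if (latency)
		usleep_range(latency, latency + 10);		/* honour the advertised latency */

	return handle->reset_ops->deassert(handle, domain);	/* release the reset line */
}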
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 0e94ab56f679..a400ea805fc2 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c | |||
@@ -9,8 +9,8 @@ | |||
9 | 9 | ||
10 | enum scmi_sensor_protocol_cmd { | 10 | enum scmi_sensor_protocol_cmd { |
11 | SENSOR_DESCRIPTION_GET = 0x3, | 11 | SENSOR_DESCRIPTION_GET = 0x3, |
12 | SENSOR_CONFIG_SET = 0x4, | 12 | SENSOR_TRIP_POINT_NOTIFY = 0x4, |
13 | SENSOR_TRIP_POINT_SET = 0x5, | 13 | SENSOR_TRIP_POINT_CONFIG = 0x5, |
14 | SENSOR_READING_GET = 0x6, | 14 | SENSOR_READING_GET = 0x6, |
15 | }; | 15 | }; |
16 | 16 | ||
@@ -42,9 +42,10 @@ struct scmi_msg_resp_sensor_description { | |||
42 | } desc[0]; | 42 | } desc[0]; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | struct scmi_msg_set_sensor_config { | 45 | struct scmi_msg_sensor_trip_point_notify { |
46 | __le32 id; | 46 | __le32 id; |
47 | __le32 event_control; | 47 | __le32 event_control; |
48 | #define SENSOR_TP_NOTIFY_ALL BIT(0) | ||
48 | }; | 49 | }; |
49 | 50 | ||
50 | struct scmi_msg_set_sensor_trip_point { | 51 | struct scmi_msg_set_sensor_trip_point { |
@@ -119,7 +120,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle, | |||
119 | 120 | ||
120 | do { | 121 | do { |
121 | /* Set the number of sensors to be skipped/already read */ | 122 | /* Set the number of sensors to be skipped/already read */ |
122 | *(__le32 *)t->tx.buf = cpu_to_le32(desc_index); | 123 | put_unaligned_le32(desc_index, t->tx.buf); |
123 | 124 | ||
124 | ret = scmi_do_xfer(handle, t); | 125 | ret = scmi_do_xfer(handle, t); |
125 | if (ret) | 126 | if (ret) |
@@ -135,9 +136,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle, | |||
135 | } | 136 | } |
136 | 137 | ||
137 | for (cnt = 0; cnt < num_returned; cnt++) { | 138 | for (cnt = 0; cnt < num_returned; cnt++) { |
138 | u32 attrh; | 139 | u32 attrh, attrl; |
139 | struct scmi_sensor_info *s; | 140 | struct scmi_sensor_info *s; |
140 | 141 | ||
142 | attrl = le32_to_cpu(buf->desc[cnt].attributes_low); | ||
141 | attrh = le32_to_cpu(buf->desc[cnt].attributes_high); | 143 | attrh = le32_to_cpu(buf->desc[cnt].attributes_high); |
142 | s = &si->sensors[desc_index + cnt]; | 144 | s = &si->sensors[desc_index + cnt]; |
143 | s->id = le32_to_cpu(buf->desc[cnt].id); | 145 | s->id = le32_to_cpu(buf->desc[cnt].id); |
@@ -146,6 +148,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle, | |||
146 | /* Sign extend to a full s8 */ | 148 | /* Sign extend to a full s8 */ |
147 | if (s->scale & SENSOR_SCALE_SIGN) | 149 | if (s->scale & SENSOR_SCALE_SIGN) |
148 | s->scale |= SENSOR_SCALE_EXTEND; | 150 | s->scale |= SENSOR_SCALE_EXTEND; |
151 | s->async = SUPPORTS_ASYNC_READ(attrl); | ||
152 | s->num_trip_points = NUM_TRIP_POINTS(attrl); | ||
149 | strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE); | 153 | strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE); |
150 | } | 154 | } |
151 | 155 | ||
@@ -160,15 +164,15 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle, | |||
160 | return ret; | 164 | return ret; |
161 | } | 165 | } |
162 | 166 | ||
163 | static int | 167 | static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle, |
164 | scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id) | 168 | u32 sensor_id, bool enable) |
165 | { | 169 | { |
166 | int ret; | 170 | int ret; |
167 | u32 evt_cntl = BIT(0); | 171 | u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0; |
168 | struct scmi_xfer *t; | 172 | struct scmi_xfer *t; |
169 | struct scmi_msg_set_sensor_config *cfg; | 173 | struct scmi_msg_sensor_trip_point_notify *cfg; |
170 | 174 | ||
171 | ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET, | 175 | ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY, |
172 | SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t); | 176 | SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t); |
173 | if (ret) | 177 | if (ret) |
174 | return ret; | 178 | return ret; |
@@ -183,15 +187,16 @@ scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id) | |||
183 | return ret; | 187 | return ret; |
184 | } | 188 | } |
185 | 189 | ||
186 | static int scmi_sensor_trip_point_set(const struct scmi_handle *handle, | 190 | static int |
187 | u32 sensor_id, u8 trip_id, u64 trip_value) | 191 | scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id, |
192 | u8 trip_id, u64 trip_value) | ||
188 | { | 193 | { |
189 | int ret; | 194 | int ret; |
190 | u32 evt_cntl = SENSOR_TP_BOTH; | 195 | u32 evt_cntl = SENSOR_TP_BOTH; |
191 | struct scmi_xfer *t; | 196 | struct scmi_xfer *t; |
192 | struct scmi_msg_set_sensor_trip_point *trip; | 197 | struct scmi_msg_set_sensor_trip_point *trip; |
193 | 198 | ||
194 | ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET, | 199 | ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_CONFIG, |
195 | SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t); | 200 | SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t); |
196 | if (ret) | 201 | if (ret) |
197 | return ret; | 202 | return ret; |
@@ -209,11 +214,13 @@ static int scmi_sensor_trip_point_set(const struct scmi_handle *handle, | |||
209 | } | 214 | } |
210 | 215 | ||
211 | static int scmi_sensor_reading_get(const struct scmi_handle *handle, | 216 | static int scmi_sensor_reading_get(const struct scmi_handle *handle, |
212 | u32 sensor_id, bool async, u64 *value) | 217 | u32 sensor_id, u64 *value) |
213 | { | 218 | { |
214 | int ret; | 219 | int ret; |
215 | struct scmi_xfer *t; | 220 | struct scmi_xfer *t; |
216 | struct scmi_msg_sensor_reading_get *sensor; | 221 | struct scmi_msg_sensor_reading_get *sensor; |
222 | struct sensors_info *si = handle->sensor_priv; | ||
223 | struct scmi_sensor_info *s = si->sensors + sensor_id; | ||
217 | 224 | ||
218 | ret = scmi_xfer_get_init(handle, SENSOR_READING_GET, | 225 | ret = scmi_xfer_get_init(handle, SENSOR_READING_GET, |
219 | SCMI_PROTOCOL_SENSOR, sizeof(*sensor), | 226 | SCMI_PROTOCOL_SENSOR, sizeof(*sensor), |
@@ -223,14 +230,18 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle, | |||
223 | 230 | ||
224 | sensor = t->tx.buf; | 231 | sensor = t->tx.buf; |
225 | sensor->id = cpu_to_le32(sensor_id); | 232 | sensor->id = cpu_to_le32(sensor_id); |
226 | sensor->flags = cpu_to_le32(async ? SENSOR_READ_ASYNC : 0); | ||
227 | 233 | ||
228 | ret = scmi_do_xfer(handle, t); | 234 | if (s->async) { |
229 | if (!ret) { | 235 | sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC); |
230 | __le32 *pval = t->rx.buf; | 236 | ret = scmi_do_xfer_with_response(handle, t); |
231 | 237 | if (!ret) | |
232 | *value = le32_to_cpu(*pval); | 238 | *value = get_unaligned_le64((void *) |
233 | *value |= (u64)le32_to_cpu(*(pval + 1)) << 32; | 239 | ((__le32 *)t->rx.buf + 1)); |
240 | } else { | ||
241 | sensor->flags = cpu_to_le32(0); | ||
242 | ret = scmi_do_xfer(handle, t); | ||
243 | if (!ret) | ||
244 | *value = get_unaligned_le64(t->rx.buf); | ||
234 | } | 245 | } |
235 | 246 | ||
236 | scmi_xfer_put(handle, t); | 247 | scmi_xfer_put(handle, t); |
@@ -255,8 +266,8 @@ static int scmi_sensor_count_get(const struct scmi_handle *handle) | |||
255 | static struct scmi_sensor_ops sensor_ops = { | 266 | static struct scmi_sensor_ops sensor_ops = { |
256 | .count_get = scmi_sensor_count_get, | 267 | .count_get = scmi_sensor_count_get, |
257 | .info_get = scmi_sensor_info_get, | 268 | .info_get = scmi_sensor_info_get, |
258 | .configuration_set = scmi_sensor_configuration_set, | 269 | .trip_point_notify = scmi_sensor_trip_point_notify, |
259 | .trip_point_set = scmi_sensor_trip_point_set, | 270 | .trip_point_config = scmi_sensor_trip_point_config, |
260 | .reading_get = scmi_sensor_reading_get, | 271 | .reading_get = scmi_sensor_reading_get, |
261 | }; | 272 | }; |
262 | 273 | ||
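The reworked scmi_sensor_reading_get() handles two reply layouts for SENSOR_READING_GET: the synchronous reply starts directly with the 64-bit reading, while the delayed (asynchronous) reply is prefixed by the sensor identifier, which is why the async path reads from rx.buf offset by one __le32. A sketch of the two layouts, with illustrative struct names only:

struct example_reading_sync {		/* immediate reply */
	__le32 value_low;
	__le32 value_high;
};

struct example_reading_async {		/* delayed reply, requested with SENSOR_READ_ASYNC */
	__le32 id;			/* sensor that completed the read */
	__le32 value_low;
	__le32 value_high;
};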
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 42b566f8903f..0dbee32da4c6 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig | |||
@@ -1,4 +1,15 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | 1 | # SPDX-License-Identifier: GPL-2.0-only |
2 | config IMX_DSP | ||
3 | bool "IMX DSP Protocol driver" | ||
4 | depends on IMX_MBOX | ||
5 | help | ||
6 | This enables the DSP IPC protocol between the host AP (Linux) | ||
7 | and the firmware running on the DSP. | ||
8 | A DSP exists on some i.MX8 processors (e.g. i.MX8QM, i.MX8QXP). | ||
9 | |||
10 | It acts like a doorbell. Clients may use shared memory to | ||
11 | exchange information with the DSP side. | ||
12 | |||
2 | config IMX_SCU | 13 | config IMX_SCU |
3 | bool "IMX SCU Protocol driver" | 14 | bool "IMX SCU Protocol driver" |
4 | depends on IMX_MBOX | 15 | depends on IMX_MBOX |
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile index 802c4ad8e8f9..08bc9ddfbdfb 100644 --- a/drivers/firmware/imx/Makefile +++ b/drivers/firmware/imx/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | obj-$(CONFIG_IMX_DSP) += imx-dsp.o | ||
2 | obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o | 3 | obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o |
3 | obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o | 4 | obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o |
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c new file mode 100644 index 000000000000..a43d2db5cbdb --- /dev/null +++ b/drivers/firmware/imx/imx-dsp.c | |||
@@ -0,0 +1,155 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
2 | /* | ||
3 | * Copyright 2019 NXP | ||
4 | * Author: Daniel Baluta <daniel.baluta@nxp.com> | ||
5 | * | ||
6 | * Implementation of the DSP IPC interface (host side) | ||
7 | */ | ||
8 | |||
9 | #include <linux/firmware/imx/dsp.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/mailbox_client.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/of_platform.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/slab.h> | ||
16 | |||
17 | /* | ||
18 | * imx_dsp_ring_doorbell - triggers an interrupt on the other side (DSP) | ||
19 | * | ||
20 | * @ipc: DSP IPC handle | ||
21 | * @idx: index of the channel on which to trigger the interrupt | ||
22 | * | ||
23 | * Returns 0 on success, a negative error code on failure | ||
24 | */ | ||
25 | int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, unsigned int idx) | ||
26 | { | ||
27 | int ret; | ||
28 | struct imx_dsp_chan *dsp_chan; | ||
29 | |||
30 | if (idx >= DSP_MU_CHAN_NUM) | ||
31 | return -EINVAL; | ||
32 | |||
33 | dsp_chan = &ipc->chans[idx]; | ||
34 | ret = mbox_send_message(dsp_chan->ch, NULL); | ||
35 | if (ret < 0) | ||
36 | return ret; | ||
37 | |||
38 | return 0; | ||
39 | } | ||
40 | EXPORT_SYMBOL(imx_dsp_ring_doorbell); | ||
41 | |||
42 | /* | ||
43 | * imx_dsp_handle_rx - rx callback used by imx mailbox | ||
44 | * | ||
45 | * @c: mbox client | ||
46 | * @msg: message received | ||
47 | * | ||
48 | * Users of DSP IPC will need to provide handle_reply and handle_request | ||
49 | * callbacks. | ||
50 | */ | ||
51 | static void imx_dsp_handle_rx(struct mbox_client *c, void *msg) | ||
52 | { | ||
53 | struct imx_dsp_chan *chan = container_of(c, struct imx_dsp_chan, cl); | ||
54 | |||
55 | if (chan->idx == 0) { | ||
56 | chan->ipc->ops->handle_reply(chan->ipc); | ||
57 | } else { | ||
58 | chan->ipc->ops->handle_request(chan->ipc); | ||
59 | imx_dsp_ring_doorbell(chan->ipc, 1); | ||
60 | } | ||
61 | } | ||
62 | |||
63 | static int imx_dsp_probe(struct platform_device *pdev) | ||
64 | { | ||
65 | struct device *dev = &pdev->dev; | ||
66 | struct imx_dsp_ipc *dsp_ipc; | ||
67 | struct imx_dsp_chan *dsp_chan; | ||
68 | struct mbox_client *cl; | ||
69 | char *chan_name; | ||
70 | int ret; | ||
71 | int i, j; | ||
72 | |||
73 | device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent); | ||
74 | |||
75 | dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL); | ||
76 | if (!dsp_ipc) | ||
77 | return -ENOMEM; | ||
78 | |||
79 | for (i = 0; i < DSP_MU_CHAN_NUM; i++) { | ||
80 | if (i < 2) | ||
81 | chan_name = kasprintf(GFP_KERNEL, "txdb%d", i); | ||
82 | else | ||
83 | chan_name = kasprintf(GFP_KERNEL, "rxdb%d", i - 2); | ||
84 | |||
85 | if (!chan_name) | ||
86 | return -ENOMEM; | ||
87 | |||
88 | dsp_chan = &dsp_ipc->chans[i]; | ||
89 | cl = &dsp_chan->cl; | ||
90 | cl->dev = dev; | ||
91 | cl->tx_block = false; | ||
92 | cl->knows_txdone = true; | ||
93 | cl->rx_callback = imx_dsp_handle_rx; | ||
94 | |||
95 | dsp_chan->ipc = dsp_ipc; | ||
96 | dsp_chan->idx = i % 2; | ||
97 | dsp_chan->ch = mbox_request_channel_byname(cl, chan_name); | ||
98 | if (IS_ERR(dsp_chan->ch)) { | ||
99 | ret = PTR_ERR(dsp_chan->ch); | ||
100 | if (ret != -EPROBE_DEFER) | ||
101 | dev_err(dev, "Failed to request mbox chan %s ret %d\n", | ||
102 | chan_name, ret); | ||
103 | goto out; | ||
104 | } | ||
105 | |||
106 | dev_dbg(dev, "request mbox chan %s\n", chan_name); | ||
107 | /* chan_name is no longer used by the mailbox framework */ | ||
108 | kfree(chan_name); | ||
109 | } | ||
110 | |||
111 | dsp_ipc->dev = dev; | ||
112 | |||
113 | dev_set_drvdata(dev, dsp_ipc); | ||
114 | |||
115 | dev_info(dev, "NXP i.MX DSP IPC initialized\n"); | ||
116 | |||
117 | return devm_of_platform_populate(dev); | ||
118 | out: | ||
119 | kfree(chan_name); | ||
120 | for (j = 0; j < i; j++) { | ||
121 | dsp_chan = &dsp_ipc->chans[j]; | ||
122 | mbox_free_channel(dsp_chan->ch); | ||
123 | } | ||
124 | |||
125 | return ret; | ||
126 | } | ||
127 | |||
128 | static int imx_dsp_remove(struct platform_device *pdev) | ||
129 | { | ||
130 | struct imx_dsp_chan *dsp_chan; | ||
131 | struct imx_dsp_ipc *dsp_ipc; | ||
132 | int i; | ||
133 | |||
134 | dsp_ipc = dev_get_drvdata(&pdev->dev); | ||
135 | |||
136 | for (i = 0; i < DSP_MU_CHAN_NUM; i++) { | ||
137 | dsp_chan = &dsp_ipc->chans[i]; | ||
138 | mbox_free_channel(dsp_chan->ch); | ||
139 | } | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static struct platform_driver imx_dsp_driver = { | ||
145 | .driver = { | ||
146 | .name = "imx-dsp", | ||
147 | }, | ||
148 | .probe = imx_dsp_probe, | ||
149 | .remove = imx_dsp_remove, | ||
150 | }; | ||
151 | builtin_platform_driver(imx_dsp_driver); | ||
152 | |||
153 | MODULE_AUTHOR("Daniel Baluta <daniel.baluta@nxp.com>"); | ||
154 | MODULE_DESCRIPTION("IMX DSP IPC protocol driver"); | ||
155 | MODULE_LICENSE("GPL v2"); | ||
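A hypothetical client of this driver might look like the sketch below. It assumes the ops structure exported by <linux/firmware/imx/dsp.h> is named struct imx_dsp_ops with the handle_reply/handle_request callbacks used above, and that the client reaches the IPC handle via the drvdata set in imx_dsp_probe(); everything prefixed example_ is invented for illustration:

static void example_handle_reply(struct imx_dsp_ipc *ipc)
{
	/* a reply to our request; the payload would be read from shared memory */
}

static void example_handle_request(struct imx_dsp_ipc *ipc)
{
	/* DSP-initiated request; the driver above acks it on channel 1 */
}

static struct imx_dsp_ops example_dsp_ops = {
	.handle_reply	= example_handle_reply,
	.handle_request	= example_handle_request,
};

static int example_attach(struct device *ipc_dev)
{
	struct imx_dsp_ipc *ipc = dev_get_drvdata(ipc_dev);

	if (!ipc)
		return -EPROBE_DEFER;

	ipc->ops = &example_dsp_ops;

	/* place a command in shared memory first, then notify the DSP */
	return imx_dsp_ring_doorbell(ipc, 0);
}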
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index 480cec69e2c9..b556612207e5 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c | |||
@@ -92,7 +92,8 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { | |||
92 | { "gpt", IMX_SC_R_GPT_0, 5, true, 0 }, | 92 | { "gpt", IMX_SC_R_GPT_0, 5, true, 0 }, |
93 | { "kpp", IMX_SC_R_KPP, 1, false, 0 }, | 93 | { "kpp", IMX_SC_R_KPP, 1, false, 0 }, |
94 | { "fspi", IMX_SC_R_FSPI_0, 2, true, 0 }, | 94 | { "fspi", IMX_SC_R_FSPI_0, 2, true, 0 }, |
95 | { "mu", IMX_SC_R_MU_0A, 14, true, 0 }, | 95 | { "mu_a", IMX_SC_R_MU_0A, 14, true, 0 }, |
96 | { "mu_b", IMX_SC_R_MU_13B, 1, true, 13 }, | ||
96 | 97 | ||
97 | /* CONN SS */ | 98 | /* CONN SS */ |
98 | { "usb", IMX_SC_R_USB_0, 2, true, 0 }, | 99 | { "usb", IMX_SC_R_USB_0, 2, true, 0 }, |
@@ -130,6 +131,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = { | |||
130 | { "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 }, | 131 | { "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 }, |
131 | { "lpuart", IMX_SC_R_UART_0, 4, true, 0 }, | 132 | { "lpuart", IMX_SC_R_UART_0, 4, true, 0 }, |
132 | { "lpspi", IMX_SC_R_SPI_0, 4, true, 0 }, | 133 | { "lpspi", IMX_SC_R_SPI_0, 4, true, 0 }, |
134 | { "irqstr_dsp", IMX_SC_R_IRQSTR_DSP, 1, false, 0 }, | ||
133 | 135 | ||
134 | /* VPU SS */ | 136 | /* VPU SS */ |
135 | { "vpu", IMX_SC_R_VPU, 1, false, 0 }, | 137 | { "vpu", IMX_SC_R_VPU, 1, false, 0 }, |
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 2ddc118dba1b..4802ab170fe5 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | #include <linux/cpumask.h> | 10 | #include <linux/cpumask.h> |
11 | #include <linux/export.h> | 11 | #include <linux/export.h> |
12 | #include <linux/dma-direct.h> | ||
12 | #include <linux/dma-mapping.h> | 13 | #include <linux/dma-mapping.h> |
13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
14 | #include <linux/types.h> | 15 | #include <linux/types.h> |
@@ -425,21 +426,23 @@ EXPORT_SYMBOL(qcom_scm_set_remote_state); | |||
425 | * @mem_sz: size of the region. | 426 | * @mem_sz: size of the region. |
426 | * @srcvm: vmid for current set of owners, each set bit in | 427 | * @srcvm: vmid for current set of owners, each set bit in |
427 | * flag indicates a unique owner | 428 | * flag indicates a unique owner |
428 | * @newvm: array having new owners and corrsponding permission | 429 | * @newvm: array having new owners and corresponding permission |
429 | * flags | 430 | * flags |
430 | * @dest_cnt: number of owners in next set. | 431 | * @dest_cnt: number of owners in next set. |
431 | * | 432 | * |
432 | * Return negative errno on failure, 0 on success, with @srcvm updated. | 433 | * Return negative errno on failure or 0 on success with @srcvm updated. |
433 | */ | 434 | */ |
434 | int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, | 435 | int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, |
435 | unsigned int *srcvm, | 436 | unsigned int *srcvm, |
436 | struct qcom_scm_vmperm *newvm, int dest_cnt) | 437 | const struct qcom_scm_vmperm *newvm, |
438 | unsigned int dest_cnt) | ||
437 | { | 439 | { |
438 | struct qcom_scm_current_perm_info *destvm; | 440 | struct qcom_scm_current_perm_info *destvm; |
439 | struct qcom_scm_mem_map_info *mem_to_map; | 441 | struct qcom_scm_mem_map_info *mem_to_map; |
440 | phys_addr_t mem_to_map_phys; | 442 | phys_addr_t mem_to_map_phys; |
441 | phys_addr_t dest_phys; | 443 | phys_addr_t dest_phys; |
442 | phys_addr_t ptr_phys; | 444 | phys_addr_t ptr_phys; |
445 | dma_addr_t ptr_dma; | ||
443 | size_t mem_to_map_sz; | 446 | size_t mem_to_map_sz; |
444 | size_t dest_sz; | 447 | size_t dest_sz; |
445 | size_t src_sz; | 448 | size_t src_sz; |
@@ -447,52 +450,50 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, | |||
447 | int next_vm; | 450 | int next_vm; |
448 | __le32 *src; | 451 | __le32 *src; |
449 | void *ptr; | 452 | void *ptr; |
450 | int ret; | 453 | int ret, i, b; |
451 | int len; | 454 | unsigned long srcvm_bits = *srcvm; |
452 | int i; | ||
453 | 455 | ||
454 | src_sz = hweight_long(*srcvm) * sizeof(*src); | 456 | src_sz = hweight_long(srcvm_bits) * sizeof(*src); |
455 | mem_to_map_sz = sizeof(*mem_to_map); | 457 | mem_to_map_sz = sizeof(*mem_to_map); |
456 | dest_sz = dest_cnt * sizeof(*destvm); | 458 | dest_sz = dest_cnt * sizeof(*destvm); |
457 | ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + | 459 | ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + |
458 | ALIGN(dest_sz, SZ_64); | 460 | ALIGN(dest_sz, SZ_64); |
459 | 461 | ||
460 | ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL); | 462 | ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL); |
461 | if (!ptr) | 463 | if (!ptr) |
462 | return -ENOMEM; | 464 | return -ENOMEM; |
465 | ptr_phys = dma_to_phys(__scm->dev, ptr_dma); | ||
463 | 466 | ||
464 | /* Fill source vmid detail */ | 467 | /* Fill source vmid detail */ |
465 | src = ptr; | 468 | src = ptr; |
466 | len = hweight_long(*srcvm); | 469 | i = 0; |
467 | for (i = 0; i < len; i++) { | 470 | for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG) |
468 | src[i] = cpu_to_le32(ffs(*srcvm) - 1); | 471 | src[i++] = cpu_to_le32(b); |
469 | *srcvm ^= 1 << (ffs(*srcvm) - 1); | ||
470 | } | ||
471 | 472 | ||
472 | /* Fill details of mem buff to map */ | 473 | /* Fill details of mem buff to map */ |
473 | mem_to_map = ptr + ALIGN(src_sz, SZ_64); | 474 | mem_to_map = ptr + ALIGN(src_sz, SZ_64); |
474 | mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64); | 475 | mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64); |
475 | mem_to_map[0].mem_addr = cpu_to_le64(mem_addr); | 476 | mem_to_map->mem_addr = cpu_to_le64(mem_addr); |
476 | mem_to_map[0].mem_size = cpu_to_le64(mem_sz); | 477 | mem_to_map->mem_size = cpu_to_le64(mem_sz); |
477 | 478 | ||
478 | next_vm = 0; | 479 | next_vm = 0; |
479 | /* Fill details of next vmid detail */ | 480 | /* Fill details of next vmid detail */ |
480 | destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); | 481 | destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); |
481 | dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); | 482 | dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64); |
482 | for (i = 0; i < dest_cnt; i++) { | 483 | for (i = 0; i < dest_cnt; i++, destvm++, newvm++) { |
483 | destvm[i].vmid = cpu_to_le32(newvm[i].vmid); | 484 | destvm->vmid = cpu_to_le32(newvm->vmid); |
484 | destvm[i].perm = cpu_to_le32(newvm[i].perm); | 485 | destvm->perm = cpu_to_le32(newvm->perm); |
485 | destvm[i].ctx = 0; | 486 | destvm->ctx = 0; |
486 | destvm[i].ctx_size = 0; | 487 | destvm->ctx_size = 0; |
487 | next_vm |= BIT(newvm[i].vmid); | 488 | next_vm |= BIT(newvm->vmid); |
488 | } | 489 | } |
489 | 490 | ||
490 | ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, | 491 | ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz, |
491 | ptr_phys, src_sz, dest_phys, dest_sz); | 492 | ptr_phys, src_sz, dest_phys, dest_sz); |
492 | dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys); | 493 | dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma); |
493 | if (ret) { | 494 | if (ret) { |
494 | dev_err(__scm->dev, | 495 | dev_err(__scm->dev, |
495 | "Assign memory protection call failed %d.\n", ret); | 496 | "Assign memory protection call failed %d\n", ret); |
496 | return -EINVAL; | 497 | return -EINVAL; |
497 | } | 498 | } |
498 | 499 | ||
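For reference, a caller of the reworked qcom_scm_assign_mem() passes the current owners as a VMID bitmap and the new owners as a qcom_scm_vmperm array. A sketch using the VMID and permission macros from <linux/qcom_scm.h> (the helper name and the particular VMIDs chosen here are illustrative):

#include <linux/qcom_scm.h>

static int example_share_region(phys_addr_t addr, size_t size)
{
	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);	/* currently owned by HLOS */
	struct qcom_scm_vmperm newvm[] = {
		{ QCOM_SCM_VMID_HLOS,    QCOM_SCM_PERM_READ },	/* HLOS keeps read access */
		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },	/* modem gets read/write */
	};

	/* on success, srcvm is updated to the new owner bitmap */
	return qcom_scm_assign_mem(addr, size, &srcvm, newvm, ARRAY_SIZE(newvm));
}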
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index cdee0b45943d..4126be9e3216 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c | |||
@@ -635,6 +635,7 @@ fail: | |||
635 | 635 | ||
636 | /** | 636 | /** |
637 | * ti_sci_cmd_get_device() - command to request for device managed by TISCI | 637 | * ti_sci_cmd_get_device() - command to request for device managed by TISCI |
638 | * that can be shared with other hosts. | ||
638 | * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle | 639 | * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle |
639 | * @id: Device Identifier | 640 | * @id: Device Identifier |
640 | * | 641 | * |
@@ -642,12 +643,30 @@ fail: | |||
642 | * usage count by balancing get_device with put_device. No refcounting is | 643 | * usage count by balancing get_device with put_device. No refcounting is |
643 | * managed by driver for that purpose. | 644 | * managed by driver for that purpose. |
644 | * | 645 | * |
645 | * NOTE: The request is for exclusive access for the processor. | ||
646 | * | ||
647 | * Return: 0 if all went fine, else return appropriate error. | 646 | * Return: 0 if all went fine, else return appropriate error. |
648 | */ | 647 | */ |
649 | static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id) | 648 | static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id) |
650 | { | 649 | { |
650 | return ti_sci_set_device_state(handle, id, 0, | ||
651 | MSG_DEVICE_SW_STATE_ON); | ||
652 | } | ||
653 | |||
654 | /** | ||
655 | * ti_sci_cmd_get_device_exclusive() - command to request for device managed by | ||
656 | * TISCI that is exclusively owned by the | ||
657 | * requesting host. | ||
658 | * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle | ||
659 | * @id: Device Identifier | ||
660 | * | ||
661 | * Request for the device - NOTE: the client MUST maintain integrity of | ||
662 | * usage count by balancing get_device with put_device. No refcounting is | ||
663 | * managed by driver for that purpose. | ||
664 | * | ||
665 | * Return: 0 if all went fine, else return appropriate error. | ||
666 | */ | ||
667 | static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle, | ||
668 | u32 id) | ||
669 | { | ||
651 | return ti_sci_set_device_state(handle, id, | 670 | return ti_sci_set_device_state(handle, id, |
652 | MSG_FLAG_DEVICE_EXCLUSIVE, | 671 | MSG_FLAG_DEVICE_EXCLUSIVE, |
653 | MSG_DEVICE_SW_STATE_ON); | 672 | MSG_DEVICE_SW_STATE_ON); |
@@ -666,6 +685,26 @@ static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id) | |||
666 | */ | 685 | */ |
667 | static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id) | 686 | static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id) |
668 | { | 687 | { |
688 | return ti_sci_set_device_state(handle, id, 0, | ||
689 | MSG_DEVICE_SW_STATE_RETENTION); | ||
690 | } | ||
691 | |||
692 | /** | ||
693 | * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by | ||
694 | * TISCI that is exclusively owned by | ||
695 | * the requesting host. | ||
696 | * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle | ||
697 | * @id: Device Identifier | ||
698 | * | ||
699 | * Request for the device - NOTE: the client MUST maintain integrity of | ||
700 | * usage count by balancing get_device with put_device. No refcounting is | ||
701 | * managed by driver for that purpose. | ||
702 | * | ||
703 | * Return: 0 if all went fine, else return appropriate error. | ||
704 | */ | ||
705 | static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle, | ||
706 | u32 id) | ||
707 | { | ||
669 | return ti_sci_set_device_state(handle, id, | 708 | return ti_sci_set_device_state(handle, id, |
670 | MSG_FLAG_DEVICE_EXCLUSIVE, | 709 | MSG_FLAG_DEVICE_EXCLUSIVE, |
671 | MSG_DEVICE_SW_STATE_RETENTION); | 710 | MSG_DEVICE_SW_STATE_RETENTION); |
@@ -2894,7 +2933,9 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) | |||
2894 | core_ops->reboot_device = ti_sci_cmd_core_reboot; | 2933 | core_ops->reboot_device = ti_sci_cmd_core_reboot; |
2895 | 2934 | ||
2896 | dops->get_device = ti_sci_cmd_get_device; | 2935 | dops->get_device = ti_sci_cmd_get_device; |
2936 | dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive; | ||
2897 | dops->idle_device = ti_sci_cmd_idle_device; | 2937 | dops->idle_device = ti_sci_cmd_idle_device; |
2938 | dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive; | ||
2898 | dops->put_device = ti_sci_cmd_put_device; | 2939 | dops->put_device = ti_sci_cmd_put_device; |
2899 | 2940 | ||
2900 | dops->is_valid = ti_sci_cmd_dev_is_valid; | 2941 | dops->is_valid = ti_sci_cmd_dev_is_valid; |
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c new file mode 100644 index 000000000000..72be58960e54 --- /dev/null +++ b/drivers/firmware/turris-mox-rwtm.c | |||
@@ -0,0 +1,384 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Turris Mox rWTM firmware driver | ||
4 | * | ||
5 | * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> | ||
6 | */ | ||
7 | |||
8 | #include <linux/armada-37xx-rwtm-mailbox.h> | ||
9 | #include <linux/completion.h> | ||
10 | #include <linux/dma-mapping.h> | ||
11 | #include <linux/hw_random.h> | ||
12 | #include <linux/mailbox_client.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/mutex.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/slab.h> | ||
18 | |||
19 | #define DRIVER_NAME "turris-mox-rwtm" | ||
20 | |||
21 | /* | ||
22 | * The macros and constants below come from Turris Mox's rWTM firmware code. | ||
23 | * This firmware is open source and its sources can be found at | ||
24 | * https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi. | ||
25 | */ | ||
26 | |||
27 | #define MBOX_STS_SUCCESS (0 << 30) | ||
28 | #define MBOX_STS_FAIL (1 << 30) | ||
29 | #define MBOX_STS_BADCMD (2 << 30) | ||
30 | #define MBOX_STS_ERROR(s) ((s) & (3 << 30)) | ||
31 | #define MBOX_STS_VALUE(s) (((s) >> 10) & 0xfffff) | ||
32 | #define MBOX_STS_CMD(s) ((s) & 0x3ff) | ||
33 | |||
34 | enum mbox_cmd { | ||
35 | MBOX_CMD_GET_RANDOM = 1, | ||
36 | MBOX_CMD_BOARD_INFO = 2, | ||
37 | MBOX_CMD_ECDSA_PUB_KEY = 3, | ||
38 | MBOX_CMD_HASH = 4, | ||
39 | MBOX_CMD_SIGN = 5, | ||
40 | MBOX_CMD_VERIFY = 6, | ||
41 | |||
42 | MBOX_CMD_OTP_READ = 7, | ||
43 | MBOX_CMD_OTP_WRITE = 8, | ||
44 | }; | ||
45 | |||
46 | struct mox_kobject; | ||
47 | |||
48 | struct mox_rwtm { | ||
49 | struct device *dev; | ||
50 | struct mbox_client mbox_client; | ||
51 | struct mbox_chan *mbox; | ||
52 | struct mox_kobject *kobj; | ||
53 | struct hwrng hwrng; | ||
54 | |||
55 | struct armada_37xx_rwtm_rx_msg reply; | ||
56 | |||
57 | void *buf; | ||
58 | dma_addr_t buf_phys; | ||
59 | |||
60 | struct mutex busy; | ||
61 | struct completion cmd_done; | ||
62 | |||
63 | /* board information */ | ||
64 | int has_board_info; | ||
65 | u64 serial_number; | ||
66 | int board_version, ram_size; | ||
67 | u8 mac_address1[6], mac_address2[6]; | ||
68 | |||
69 | /* public key burned in eFuse */ | ||
70 | int has_pubkey; | ||
71 | u8 pubkey[135]; | ||
72 | }; | ||
73 | |||
74 | struct mox_kobject { | ||
75 | struct kobject kobj; | ||
76 | struct mox_rwtm *rwtm; | ||
77 | }; | ||
78 | |||
79 | static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm) | ||
80 | { | ||
81 | return &rwtm->kobj->kobj; | ||
82 | } | ||
83 | |||
84 | static inline struct mox_rwtm *to_rwtm(struct kobject *kobj) | ||
85 | { | ||
86 | return container_of(kobj, struct mox_kobject, kobj)->rwtm; | ||
87 | } | ||
88 | |||
89 | static void mox_kobj_release(struct kobject *kobj) | ||
90 | { | ||
91 | kfree(to_rwtm(kobj)->kobj); | ||
92 | } | ||
93 | |||
94 | static struct kobj_type mox_kobj_ktype = { | ||
95 | .release = mox_kobj_release, | ||
96 | .sysfs_ops = &kobj_sysfs_ops, | ||
97 | }; | ||
98 | |||
99 | static int mox_kobj_create(struct mox_rwtm *rwtm) | ||
100 | { | ||
101 | rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL); | ||
102 | if (!rwtm->kobj) | ||
103 | return -ENOMEM; | ||
104 | |||
105 | kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype); | ||
106 | if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) { | ||
107 | kobject_put(rwtm_to_kobj(rwtm)); | ||
108 | return -ENXIO; | ||
109 | } | ||
110 | |||
111 | rwtm->kobj->rwtm = rwtm; | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | #define MOX_ATTR_RO(name, format, cat) \ | ||
117 | static ssize_t \ | ||
118 | name##_show(struct kobject *kobj, struct kobj_attribute *a, \ | ||
119 | char *buf) \ | ||
120 | { \ | ||
121 | struct mox_rwtm *rwtm = to_rwtm(kobj); \ | ||
122 | if (!rwtm->has_##cat) \ | ||
123 | return -ENODATA; \ | ||
124 | return sprintf(buf, format, rwtm->name); \ | ||
125 | } \ | ||
126 | static struct kobj_attribute mox_attr_##name = __ATTR_RO(name) | ||
127 | |||
128 | MOX_ATTR_RO(serial_number, "%016llX\n", board_info); | ||
129 | MOX_ATTR_RO(board_version, "%i\n", board_info); | ||
130 | MOX_ATTR_RO(ram_size, "%i\n", board_info); | ||
131 | MOX_ATTR_RO(mac_address1, "%pM\n", board_info); | ||
132 | MOX_ATTR_RO(mac_address2, "%pM\n", board_info); | ||
133 | MOX_ATTR_RO(pubkey, "%s\n", pubkey); | ||
134 | |||
135 | static int mox_get_status(enum mbox_cmd cmd, u32 retval) | ||
136 | { | ||
137 | if (MBOX_STS_CMD(retval) != cmd) | ||
138 | return -EIO; | ||
139 | if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL) | ||
140 | return -(int)MBOX_STS_VALUE(retval); | ||
141 | if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS) | ||
142 | return -EIO; | ||
143 | return MBOX_STS_VALUE(retval); | ||
144 | } | ||
145 | |||
146 | static const struct attribute *mox_rwtm_attrs[] = { | ||
147 | &mox_attr_serial_number.attr, | ||
148 | &mox_attr_board_version.attr, | ||
149 | &mox_attr_ram_size.attr, | ||
150 | &mox_attr_mac_address1.attr, | ||
151 | &mox_attr_mac_address2.attr, | ||
152 | &mox_attr_pubkey.attr, | ||
153 | NULL | ||
154 | }; | ||
155 | |||
156 | static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data) | ||
157 | { | ||
158 | struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev); | ||
159 | struct armada_37xx_rwtm_rx_msg *msg = data; | ||
160 | |||
161 | rwtm->reply = *msg; | ||
162 | complete(&rwtm->cmd_done); | ||
163 | } | ||
164 | |||
165 | static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2) | ||
166 | { | ||
167 | mac[0] = t1 >> 8; | ||
168 | mac[1] = t1; | ||
169 | mac[2] = t2 >> 24; | ||
170 | mac[3] = t2 >> 16; | ||
171 | mac[4] = t2 >> 8; | ||
172 | mac[5] = t2; | ||
173 | } | ||
174 | |||
175 | static int mox_get_board_info(struct mox_rwtm *rwtm) | ||
176 | { | ||
177 | struct armada_37xx_rwtm_tx_msg msg; | ||
178 | struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply; | ||
179 | int ret; | ||
180 | |||
181 | msg.command = MBOX_CMD_BOARD_INFO; | ||
182 | ret = mbox_send_message(rwtm->mbox, &msg); | ||
183 | if (ret < 0) | ||
184 | return ret; | ||
185 | |||
186 | ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); | ||
187 | if (ret == 0) | ||
188 | return -ETIMEDOUT; | ||
189 | |||
190 | ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval); | ||
191 | if (ret < 0 && ret != -ENODATA) { | ||
192 | return ret; | ||
193 | } else if (ret == -ENODATA) { | ||
194 | dev_warn(rwtm->dev, | ||
195 | "Board does not have manufacturing information burned!\n"); | ||
196 | } else { | ||
197 | rwtm->serial_number = reply->status[1]; | ||
198 | rwtm->serial_number <<= 32; | ||
199 | rwtm->serial_number |= reply->status[0]; | ||
200 | rwtm->board_version = reply->status[2]; | ||
201 | rwtm->ram_size = reply->status[3]; | ||
202 | reply_to_mac_addr(rwtm->mac_address1, reply->status[4], | ||
203 | reply->status[5]); | ||
204 | reply_to_mac_addr(rwtm->mac_address2, reply->status[6], | ||
205 | reply->status[7]); | ||
206 | rwtm->has_board_info = 1; | ||
207 | |||
208 | pr_info("Turris Mox serial number %016llX\n", | ||
209 | rwtm->serial_number); | ||
210 | pr_info(" board version %i\n", rwtm->board_version); | ||
211 | pr_info(" burned RAM size %i MiB\n", rwtm->ram_size); | ||
212 | } | ||
213 | |||
214 | msg.command = MBOX_CMD_ECDSA_PUB_KEY; | ||
215 | ret = mbox_send_message(rwtm->mbox, &msg); | ||
216 | if (ret < 0) | ||
217 | return ret; | ||
218 | |||
219 | ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2); | ||
220 | if (ret == 0) | ||
221 | return -ETIMEDOUT; | ||
222 | |||
223 | ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval); | ||
224 | if (ret < 0 && ret != -ENODATA) { | ||
225 | return ret; | ||
226 | } else if (ret == -ENODATA) { | ||
227 | dev_warn(rwtm->dev, "Board has no public key burned!\n"); | ||
228 | } else { | ||
229 | u32 *s = reply->status; | ||
230 | |||
231 | rwtm->has_pubkey = 1; | ||
232 | sprintf(rwtm->pubkey, | ||
233 | "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x", | ||
234 | ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], | ||
235 | s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]); | ||
236 | } | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) | ||
242 | { | ||
243 | struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv; | ||
244 | struct armada_37xx_rwtm_tx_msg msg; | ||
245 | int ret; | ||
246 | |||
247 | if (max > 4096) | ||
248 | max = 4096; | ||
249 | |||
250 | msg.command = MBOX_CMD_GET_RANDOM; | ||
251 | msg.args[0] = 1; | ||
252 | msg.args[1] = rwtm->buf_phys; | ||
253 | msg.args[2] = (max + 3) & ~3; | ||
254 | |||
255 | if (!wait) { | ||
256 | if (!mutex_trylock(&rwtm->busy)) | ||
257 | return -EBUSY; | ||
258 | } else { | ||
259 | mutex_lock(&rwtm->busy); | ||
260 | } | ||
261 | |||
262 | ret = mbox_send_message(rwtm->mbox, &msg); | ||
263 | if (ret < 0) | ||
264 | goto unlock_mutex; | ||
265 | |||
266 | ret = wait_for_completion_interruptible(&rwtm->cmd_done); | ||
267 | if (ret < 0) | ||
268 | goto unlock_mutex; | ||
269 | |||
270 | ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval); | ||
271 | if (ret < 0) | ||
272 | goto unlock_mutex; | ||
273 | |||
274 | memcpy(data, rwtm->buf, max); | ||
275 | ret = max; | ||
276 | |||
277 | unlock_mutex: | ||
278 | mutex_unlock(&rwtm->busy); | ||
279 | return ret; | ||
280 | } | ||
281 | |||
282 | static int turris_mox_rwtm_probe(struct platform_device *pdev) | ||
283 | { | ||
284 | struct mox_rwtm *rwtm; | ||
285 | struct device *dev = &pdev->dev; | ||
286 | int ret; | ||
287 | |||
288 | rwtm = devm_kzalloc(dev, sizeof(*rwtm), GFP_KERNEL); | ||
289 | if (!rwtm) | ||
290 | return -ENOMEM; | ||
291 | |||
292 | rwtm->dev = dev; | ||
293 | rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys, | ||
294 | GFP_KERNEL); | ||
295 | if (!rwtm->buf) | ||
296 | return -ENOMEM; | ||
297 | |||
298 | ret = mox_kobj_create(rwtm); | ||
299 | if (ret < 0) { | ||
300 | dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n"); | ||
301 | return ret; | ||
302 | } | ||
303 | |||
304 | ret = sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); | ||
305 | if (ret < 0) { | ||
306 | dev_err(dev, "Cannot create sysfs files!\n"); | ||
307 | goto put_kobj; | ||
308 | } | ||
309 | |||
310 | platform_set_drvdata(pdev, rwtm); | ||
311 | |||
312 | mutex_init(&rwtm->busy); | ||
313 | |||
314 | rwtm->mbox_client.dev = dev; | ||
315 | rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback; | ||
316 | |||
317 | rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0); | ||
318 | if (IS_ERR(rwtm->mbox)) { | ||
319 | ret = PTR_ERR(rwtm->mbox); | ||
320 | if (ret != -EPROBE_DEFER) | ||
321 | dev_err(dev, "Cannot request mailbox channel: %i\n", | ||
322 | ret); | ||
323 | goto remove_files; | ||
324 | } | ||
325 | |||
326 | init_completion(&rwtm->cmd_done); | ||
327 | |||
328 | ret = mox_get_board_info(rwtm); | ||
329 | if (ret < 0) | ||
330 | dev_warn(dev, "Cannot read board information: %i\n", ret); | ||
331 | |||
332 | rwtm->hwrng.name = DRIVER_NAME "_hwrng"; | ||
333 | rwtm->hwrng.read = mox_hwrng_read; | ||
334 | rwtm->hwrng.priv = (unsigned long) rwtm; | ||
335 | rwtm->hwrng.quality = 1024; | ||
336 | |||
337 | ret = devm_hwrng_register(dev, &rwtm->hwrng); | ||
338 | if (ret < 0) { | ||
339 | dev_err(dev, "Cannot register HWRNG: %i\n", ret); | ||
340 | goto free_channel; | ||
341 | } | ||
342 | |||
343 | return 0; | ||
344 | |||
345 | free_channel: | ||
346 | mbox_free_channel(rwtm->mbox); | ||
347 | remove_files: | ||
348 | sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); | ||
349 | put_kobj: | ||
350 | kobject_put(rwtm_to_kobj(rwtm)); | ||
351 | return ret; | ||
352 | } | ||
353 | |||
354 | static int turris_mox_rwtm_remove(struct platform_device *pdev) | ||
355 | { | ||
356 | struct mox_rwtm *rwtm = platform_get_drvdata(pdev); | ||
357 | |||
358 | sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); | ||
359 | kobject_put(rwtm_to_kobj(rwtm)); | ||
360 | mbox_free_channel(rwtm->mbox); | ||
361 | |||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | static const struct of_device_id turris_mox_rwtm_match[] = { | ||
366 | { .compatible = "cznic,turris-mox-rwtm", }, | ||
367 | { }, | ||
368 | }; | ||
369 | |||
370 | MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match); | ||
371 | |||
372 | static struct platform_driver turris_mox_rwtm_driver = { | ||
373 | .probe = turris_mox_rwtm_probe, | ||
374 | .remove = turris_mox_rwtm_remove, | ||
375 | .driver = { | ||
376 | .name = DRIVER_NAME, | ||
377 | .of_match_table = turris_mox_rwtm_match, | ||
378 | }, | ||
379 | }; | ||
380 | module_platform_driver(turris_mox_rwtm_driver); | ||
381 | |||
382 | MODULE_LICENSE("GPL v2"); | ||
383 | MODULE_DESCRIPTION("Turris Mox rWTM firmware driver"); | ||
384 | MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); | ||
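For reference, the status word returned by the rWTM firmware packs three fields: bits 31:30 carry the error class, bits 29:10 a value, and bits 9:0 echo the command. The sketch below decodes a word with the same bit layout; it is a standalone illustration in plain C with renamed macros, not code from this patch.

#include <stdint.h>
#include <stdio.h>

#define STS_SUCCESS	(0u << 30)
#define STS_FAIL	(1u << 30)
#define STS_ERROR(s)	((s) & (3u << 30))
#define STS_VALUE(s)	(((s) >> 10) & 0xfffff)
#define STS_CMD(s)	((s) & 0x3ff)

int main(void)
{
	/* Example word: command 1 (GET_RANDOM), value 16, success status. */
	uint32_t retval = STS_SUCCESS | (16u << 10) | 1u;

	if (STS_ERROR(retval) == STS_FAIL)
		printf("cmd %u failed with code %u\n",
		       (unsigned)STS_CMD(retval), (unsigned)STS_VALUE(retval));
	else if (STS_ERROR(retval) != STS_SUCCESS)
		printf("cmd %u: unexpected status\n", (unsigned)STS_CMD(retval));
	else
		printf("cmd %u succeeded, value %u\n",
		       (unsigned)STS_CMD(retval), (unsigned)STS_VALUE(retval));
	return 0;
}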
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 305b47ed4532..38e096e6925f 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -1453,6 +1453,15 @@ config GPIO_XRA1403 | |||
1453 | help | 1453 | help |
1454 | GPIO driver for EXAR XRA1403 16-bit SPI-based GPIO expander. | 1454 | GPIO driver for EXAR XRA1403 16-bit SPI-based GPIO expander. |
1455 | 1455 | ||
1456 | config GPIO_MOXTET | ||
1457 | tristate "Turris Mox Moxtet bus GPIO expander" | ||
1458 | depends on MOXTET | ||
1459 | help | ||
1460 | Say yes here if you are building for the Turris Mox router. | ||
1461 | This is the driver needed for configuring the GPIOs via the Moxtet | ||
1462 | bus. For example, the Mox module with an SFP cage needs this driver | ||
1463 | so that phylink can use the corresponding GPIOs. | ||
1464 | |||
1456 | endmenu | 1465 | endmenu |
1457 | 1466 | ||
1458 | menu "USB GPIO expanders" | 1467 | menu "USB GPIO expanders" |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index f3e051fb50e6..d2fd19c15bae 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -92,6 +92,7 @@ obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o | |||
92 | obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o | 92 | obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o |
93 | obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o | 93 | obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o |
94 | obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o | 94 | obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o |
95 | obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o | ||
95 | obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o | 96 | obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o |
96 | obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o | 97 | obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o |
97 | obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o | 98 | obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o |
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c new file mode 100644 index 000000000000..3fd729994a38 --- /dev/null +++ b/drivers/gpio/gpio-moxtet.c | |||
@@ -0,0 +1,179 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Turris Mox Moxtet GPIO expander | ||
4 | * | ||
5 | * Copyright (C) 2018 Marek Behun <marek.behun@nic.cz> | ||
6 | */ | ||
7 | |||
8 | #include <linux/bitops.h> | ||
9 | #include <linux/gpio/driver.h> | ||
10 | #include <linux/moxtet.h> | ||
11 | #include <linux/module.h> | ||
12 | |||
13 | #define MOXTET_GPIO_NGPIOS 12 | ||
14 | #define MOXTET_GPIO_INPUTS 4 | ||
15 | |||
16 | struct moxtet_gpio_desc { | ||
17 | u16 in_mask; | ||
18 | u16 out_mask; | ||
19 | }; | ||
20 | |||
21 | static const struct moxtet_gpio_desc descs[] = { | ||
22 | [TURRIS_MOX_MODULE_SFP] = { | ||
23 | .in_mask = GENMASK(2, 0), | ||
24 | .out_mask = GENMASK(5, 4), | ||
25 | }, | ||
26 | }; | ||
27 | |||
28 | struct moxtet_gpio_chip { | ||
29 | struct device *dev; | ||
30 | struct gpio_chip gpio_chip; | ||
31 | const struct moxtet_gpio_desc *desc; | ||
32 | }; | ||
33 | |||
34 | static int moxtet_gpio_get_value(struct gpio_chip *gc, unsigned int offset) | ||
35 | { | ||
36 | struct moxtet_gpio_chip *chip = gpiochip_get_data(gc); | ||
37 | int ret; | ||
38 | |||
39 | if (chip->desc->in_mask & BIT(offset)) { | ||
40 | ret = moxtet_device_read(chip->dev); | ||
41 | } else if (chip->desc->out_mask & BIT(offset)) { | ||
42 | ret = moxtet_device_written(chip->dev); | ||
43 | if (ret >= 0) | ||
44 | ret <<= MOXTET_GPIO_INPUTS; | ||
45 | } else { | ||
46 | return -EINVAL; | ||
47 | } | ||
48 | |||
49 | if (ret < 0) | ||
50 | return ret; | ||
51 | |||
52 | return !!(ret & BIT(offset)); | ||
53 | } | ||
54 | |||
55 | static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset, | ||
56 | int val) | ||
57 | { | ||
58 | struct moxtet_gpio_chip *chip = gpiochip_get_data(gc); | ||
59 | int state; | ||
60 | |||
61 | state = moxtet_device_written(chip->dev); | ||
62 | if (state < 0) | ||
63 | return; | ||
64 | |||
65 | offset -= MOXTET_GPIO_INPUTS; | ||
66 | |||
67 | if (val) | ||
68 | state |= BIT(offset); | ||
69 | else | ||
70 | state &= ~BIT(offset); | ||
71 | |||
72 | moxtet_device_write(chip->dev, state); | ||
73 | } | ||
74 | |||
75 | static int moxtet_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) | ||
76 | { | ||
77 | struct moxtet_gpio_chip *chip = gpiochip_get_data(gc); | ||
78 | |||
79 | /* All lines are hard wired to be either input or output, not both. */ | ||
80 | if (chip->desc->in_mask & BIT(offset)) | ||
81 | return 1; | ||
82 | else if (chip->desc->out_mask & BIT(offset)) | ||
83 | return 0; | ||
84 | else | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | |||
88 | static int moxtet_gpio_direction_input(struct gpio_chip *gc, | ||
89 | unsigned int offset) | ||
90 | { | ||
91 | struct moxtet_gpio_chip *chip = gpiochip_get_data(gc); | ||
92 | |||
93 | if (chip->desc->in_mask & BIT(offset)) | ||
94 | return 0; | ||
95 | else if (chip->desc->out_mask & BIT(offset)) | ||
96 | return -ENOTSUPP; | ||
97 | else | ||
98 | return -EINVAL; | ||
99 | } | ||
100 | |||
101 | static int moxtet_gpio_direction_output(struct gpio_chip *gc, | ||
102 | unsigned int offset, int val) | ||
103 | { | ||
104 | struct moxtet_gpio_chip *chip = gpiochip_get_data(gc); | ||
105 | |||
106 | if (chip->desc->out_mask & BIT(offset)) | ||
107 | moxtet_gpio_set_value(gc, offset, val); | ||
108 | else if (chip->desc->in_mask & BIT(offset)) | ||
109 | return -ENOTSUPP; | ||
110 | else | ||
111 | return -EINVAL; | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static int moxtet_gpio_probe(struct device *dev) | ||
117 | { | ||
118 | struct moxtet_gpio_chip *chip; | ||
119 | struct device_node *nc = dev->of_node; | ||
120 | int id; | ||
121 | |||
122 | id = to_moxtet_device(dev)->id; | ||
123 | |||
124 | if (id >= ARRAY_SIZE(descs)) { | ||
125 | dev_err(dev, "%pOF Moxtet device id 0x%x is not supported by gpio-moxtet driver\n", | ||
126 | nc, id); | ||
127 | return -ENOTSUPP; | ||
128 | } | ||
129 | |||
130 | chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); | ||
131 | if (!chip) | ||
132 | return -ENOMEM; | ||
133 | |||
134 | chip->dev = dev; | ||
135 | chip->gpio_chip.parent = dev; | ||
136 | chip->desc = &descs[id]; | ||
137 | |||
138 | dev_set_drvdata(dev, chip); | ||
139 | |||
140 | chip->gpio_chip.label = dev_name(dev); | ||
141 | chip->gpio_chip.get_direction = moxtet_gpio_get_direction; | ||
142 | chip->gpio_chip.direction_input = moxtet_gpio_direction_input; | ||
143 | chip->gpio_chip.direction_output = moxtet_gpio_direction_output; | ||
144 | chip->gpio_chip.get = moxtet_gpio_get_value; | ||
145 | chip->gpio_chip.set = moxtet_gpio_set_value; | ||
146 | chip->gpio_chip.base = -1; | ||
147 | |||
148 | chip->gpio_chip.ngpio = MOXTET_GPIO_NGPIOS; | ||
149 | |||
150 | chip->gpio_chip.can_sleep = true; | ||
151 | chip->gpio_chip.owner = THIS_MODULE; | ||
152 | |||
153 | return devm_gpiochip_add_data(dev, &chip->gpio_chip, chip); | ||
154 | } | ||
155 | |||
156 | static const struct of_device_id moxtet_gpio_dt_ids[] = { | ||
157 | { .compatible = "cznic,moxtet-gpio", }, | ||
158 | {}, | ||
159 | }; | ||
160 | MODULE_DEVICE_TABLE(of, moxtet_gpio_dt_ids); | ||
161 | |||
162 | static const enum turris_mox_module_id moxtet_gpio_module_table[] = { | ||
163 | TURRIS_MOX_MODULE_SFP, | ||
164 | 0, | ||
165 | }; | ||
166 | |||
167 | static struct moxtet_driver moxtet_gpio_driver = { | ||
168 | .driver = { | ||
169 | .name = "moxtet-gpio", | ||
170 | .of_match_table = moxtet_gpio_dt_ids, | ||
171 | .probe = moxtet_gpio_probe, | ||
172 | }, | ||
173 | .id_table = moxtet_gpio_module_table, | ||
174 | }; | ||
175 | module_moxtet_driver(moxtet_gpio_driver); | ||
176 | |||
177 | MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>"); | ||
178 | MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander"); | ||
179 | MODULE_LICENSE("GPL v2"); | ||
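The descriptor table above is what fixes each line's direction: a bit set in in_mask marks an input read through the Moxtet shift registers, a bit in out_mask an output, and the SFP module uses lines 0-2 as inputs and 4-5 as outputs. A standalone sketch of that mapping follows (plain C, mask values copied from the SFP descriptor; an illustration, not kernel code).

#include <stdio.h>

#define SFP_IN_MASK	0x07	/* GENMASK(2, 0): lines 0-2 are inputs */
#define SFP_OUT_MASK	0x30	/* GENMASK(5, 4): lines 4-5 are outputs */

/* 1 = input, 0 = output, -1 = not a usable line on this module */
static int sfp_gpio_direction(unsigned int offset)
{
	if (SFP_IN_MASK & (1u << offset))
		return 1;
	if (SFP_OUT_MASK & (1u << offset))
		return 0;
	return -1;
}

int main(void)
{
	unsigned int offset;

	for (offset = 0; offset < 12; offset++)
		printf("line %2u: %s\n", offset,
		       sfp_gpio_direction(offset) == 1 ? "input" :
		       sfp_gpio_direction(offset) == 0 ? "output" : "unused");
	return 0;
}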
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index 0c93fc5ca762..8a7732c0bef3 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c | |||
@@ -72,7 +72,7 @@ static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, | |||
72 | const struct scmi_handle *h = scmi_sensors->handle; | 72 | const struct scmi_handle *h = scmi_sensors->handle; |
73 | 73 | ||
74 | sensor = *(scmi_sensors->info[type] + channel); | 74 | sensor = *(scmi_sensors->info[type] + channel); |
75 | ret = h->sensor_ops->reading_get(h, sensor->id, false, &value); | 75 | ret = h->sensor_ops->reading_get(h, sensor->id, &value); |
76 | if (ret) | 76 | if (ret) |
77 | return ret; | 77 | return ret; |
78 | 78 | ||
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 93a2d4deb27c..dc9dee55976b 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig | |||
@@ -151,7 +151,6 @@ config NET_NETX | |||
151 | To compile this driver as a module, choose M here. The module | 151 | To compile this driver as a module, choose M here. The module |
152 | will be called netx-eth. | 152 | will be called netx-eth. |
153 | 153 | ||
154 | source "drivers/net/ethernet/nuvoton/Kconfig" | ||
155 | source "drivers/net/ethernet/nvidia/Kconfig" | 154 | source "drivers/net/ethernet/nvidia/Kconfig" |
156 | source "drivers/net/ethernet/nxp/Kconfig" | 155 | source "drivers/net/ethernet/nxp/Kconfig" |
157 | source "drivers/net/ethernet/oki-semi/Kconfig" | 156 | source "drivers/net/ethernet/oki-semi/Kconfig" |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index fb9155cffcff..4bc3c95562bf 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile | |||
@@ -65,7 +65,6 @@ obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/ | |||
65 | obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/ | 65 | obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/ |
66 | obj-$(CONFIG_NET_VENDOR_NI) += ni/ | 66 | obj-$(CONFIG_NET_VENDOR_NI) += ni/ |
67 | obj-$(CONFIG_NET_NETX) += netx-eth.o | 67 | obj-$(CONFIG_NET_NETX) += netx-eth.o |
68 | obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/ | ||
69 | obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ | 68 | obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ |
70 | obj-$(CONFIG_LPC_ENET) += nxp/ | 69 | obj-$(CONFIG_LPC_ENET) += nxp/ |
71 | obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ | 70 | obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ |
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index 90a8c6bead56..b9c4d48e28e4 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig | |||
@@ -6,8 +6,7 @@ | |||
6 | config NET_VENDOR_MICREL | 6 | config NET_VENDOR_MICREL |
7 | bool "Micrel devices" | 7 | bool "Micrel devices" |
8 | default y | 8 | default y |
9 | depends on (HAS_IOMEM && DMA_ENGINE) || SPI || PCI || HAS_IOMEM || \ | 9 | depends on (HAS_IOMEM && DMA_ENGINE) || SPI || PCI || HAS_IOMEM |
10 | (ARM && ARCH_KS8695) | ||
11 | ---help--- | 10 | ---help--- |
12 | If you have a network (Ethernet) card belonging to this class, say Y. | 11 | If you have a network (Ethernet) card belonging to this class, say Y. |
13 | 12 | ||
@@ -18,14 +17,6 @@ config NET_VENDOR_MICREL | |||
18 | 17 | ||
19 | if NET_VENDOR_MICREL | 18 | if NET_VENDOR_MICREL |
20 | 19 | ||
21 | config ARM_KS8695_ETHER | ||
22 | tristate "KS8695 Ethernet support" | ||
23 | depends on ARM && ARCH_KS8695 | ||
24 | select MII | ||
25 | ---help--- | ||
26 | If you wish to compile a kernel for the KS8695 and want to | ||
27 | use the internal ethernet then you should answer Y to this. | ||
28 | |||
29 | config KS8842 | 20 | config KS8842 |
30 | tristate "Micrel KSZ8841/42 with generic bus interface" | 21 | tristate "Micrel KSZ8841/42 with generic bus interface" |
31 | depends on HAS_IOMEM && DMA_ENGINE | 22 | depends on HAS_IOMEM && DMA_ENGINE |
diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile index 848fc1c5a5dc..6d8ac5527aef 100644 --- a/drivers/net/ethernet/micrel/Makefile +++ b/drivers/net/ethernet/micrel/Makefile | |||
@@ -3,7 +3,6 @@ | |||
3 | # Makefile for the Micrel network device drivers. | 3 | # Makefile for the Micrel network device drivers. |
4 | # | 4 | # |
5 | 5 | ||
6 | obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o | ||
7 | obj-$(CONFIG_KS8842) += ks8842.o | 6 | obj-$(CONFIG_KS8842) += ks8842.o |
8 | obj-$(CONFIG_KS8851) += ks8851.o | 7 | obj-$(CONFIG_KS8851) += ks8851.o |
9 | obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o | 8 | obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o |
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c deleted file mode 100644 index 1390ef5323a2..000000000000 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ /dev/null | |||
@@ -1,1632 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Micrel KS8695 (Centaur) Ethernet. | ||
4 | * | ||
5 | * Copyright 2008 Simtec Electronics | ||
6 | * Daniel Silverstone <dsilvers@simtec.co.uk> | ||
7 | * Vincent Sanders <vince@simtec.co.uk> | ||
8 | */ | ||
9 | |||
10 | #include <linux/dma-mapping.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/ioport.h> | ||
13 | #include <linux/netdevice.h> | ||
14 | #include <linux/etherdevice.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/skbuff.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/crc32.h> | ||
19 | #include <linux/mii.h> | ||
20 | #include <linux/ethtool.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/irq.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/slab.h> | ||
26 | |||
27 | #include <asm/irq.h> | ||
28 | |||
29 | #include <mach/regs-switch.h> | ||
30 | #include <mach/regs-misc.h> | ||
31 | #include <asm/mach/irq.h> | ||
32 | #include <mach/regs-irq.h> | ||
33 | |||
34 | #include "ks8695net.h" | ||
35 | |||
36 | #define MODULENAME "ks8695_ether" | ||
37 | #define MODULEVERSION "1.02" | ||
38 | |||
39 | /* | ||
40 | * Transmit and device reset timeout, default 5 seconds. | ||
41 | */ | ||
42 | static int watchdog = 5000; | ||
43 | |||
44 | /* Hardware structures */ | ||
45 | |||
46 | /** | ||
47 | * struct rx_ring_desc - Receive descriptor ring element | ||
48 | * @status: The status of the descriptor element (E.g. who owns it) | ||
49 | * @length: The number of bytes in the block pointed to by data_ptr | ||
50 | * @data_ptr: The physical address of the data block to receive into | ||
51 | * @next_desc: The physical address of the next descriptor element. | ||
52 | */ | ||
53 | struct rx_ring_desc { | ||
54 | __le32 status; | ||
55 | __le32 length; | ||
56 | __le32 data_ptr; | ||
57 | __le32 next_desc; | ||
58 | }; | ||
59 | |||
60 | /** | ||
61 | * struct tx_ring_desc - Transmit descriptor ring element | ||
62 | * @owner: Who owns the descriptor | ||
63 | * @status: The number of bytes in the block pointed to by data_ptr | ||
64 | * @data_ptr: The physical address of the data block to receive into | ||
65 | * @next_desc: The physical address of the next descriptor element. | ||
66 | */ | ||
67 | struct tx_ring_desc { | ||
68 | __le32 owner; | ||
69 | __le32 status; | ||
70 | __le32 data_ptr; | ||
71 | __le32 next_desc; | ||
72 | }; | ||
73 | |||
74 | /** | ||
75 | * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings. | ||
76 | * @skb: The buffer in the ring | ||
77 | * @dma_ptr: The mapped DMA pointer of the buffer | ||
78 | * @length: The number of bytes mapped to dma_ptr | ||
79 | */ | ||
80 | struct ks8695_skbuff { | ||
81 | struct sk_buff *skb; | ||
82 | dma_addr_t dma_ptr; | ||
83 | u32 length; | ||
84 | }; | ||
85 | |||
86 | /* Private device structure */ | ||
87 | |||
88 | #define MAX_TX_DESC 8 | ||
89 | #define MAX_TX_DESC_MASK 0x7 | ||
90 | #define MAX_RX_DESC 16 | ||
91 | #define MAX_RX_DESC_MASK 0xf | ||
92 | |||
93 | /* napi_weight should preferably exceed the number of RX DMA buffers */ | ||
94 | #define NAPI_WEIGHT 64 | ||
95 | |||
96 | #define MAX_RXBUF_SIZE 0x700 | ||
97 | |||
98 | #define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC) | ||
99 | #define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC) | ||
100 | #define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE) | ||
101 | |||
102 | /** | ||
103 | * enum ks8695_dtype - Device type | ||
104 | * @KS8695_DTYPE_WAN: This device is a WAN interface | ||
105 | * @KS8695_DTYPE_LAN: This device is a LAN interface | ||
106 | * @KS8695_DTYPE_HPNA: This device is an HPNA interface | ||
107 | */ | ||
108 | enum ks8695_dtype { | ||
109 | KS8695_DTYPE_WAN, | ||
110 | KS8695_DTYPE_LAN, | ||
111 | KS8695_DTYPE_HPNA, | ||
112 | }; | ||
113 | |||
114 | /** | ||
115 | * struct ks8695_priv - Private data for the KS8695 Ethernet | ||
116 | * @in_suspend: Flag to indicate if we're suspending/resuming | ||
117 | * @ndev: The net_device for this interface | ||
118 | * @dev: The platform device object for this interface | ||
119 | * @dtype: The type of this device | ||
120 | * @io_regs: The ioremapped registers for this interface | ||
121 | * @napi: NAPI context used for RX processing | ||
122 | * @rx_irq_name: The textual name of the RX IRQ from the platform data | ||
123 | * @tx_irq_name: The textual name of the TX IRQ from the platform data | ||
124 | * @link_irq_name: The textual name of the link IRQ from the | ||
125 | * platform data if available | ||
126 | * @rx_irq: The IRQ number for the RX IRQ | ||
127 | * @tx_irq: The IRQ number for the TX IRQ | ||
128 | * @link_irq: The IRQ number for the link IRQ if available | ||
129 | * @regs_req: The resource request for the registers region | ||
130 | * @phyiface_req: The resource request for the phy/switch region | ||
131 | * if available | ||
132 | * @phyiface_regs: The ioremapped registers for the phy/switch if available | ||
133 | * @ring_base: The base pointer of the dma coherent memory for the rings | ||
134 | * @ring_base_dma: The DMA mapped equivalent of ring_base | ||
135 | * @tx_ring: The pointer in ring_base of the TX ring | ||
136 | * @tx_ring_used: The number of slots in the TX ring which are occupied | ||
137 | * @tx_ring_next_slot: The next slot to fill in the TX ring | ||
138 | * @tx_ring_dma: The DMA mapped equivalent of tx_ring | ||
139 | * @tx_buffers: The sk_buff mappings for the TX ring | ||
140 | * @txq_lock: A lock to protect the tx_buffers tx_ring_used etc variables | ||
141 | * @rx_ring: The pointer in ring_base of the RX ring | ||
142 | * @rx_ring_dma: The DMA mapped equivalent of rx_ring | ||
143 | * @rx_buffers: The sk_buff mappings for the RX ring | ||
144 | * @next_rx_desc_read: The next RX descriptor to read from on IRQ | ||
145 | * @rx_lock: A lock to protect Rx irq function | ||
146 | * @msg_enable: The flags for which messages to emit | ||
147 | */ | ||
148 | struct ks8695_priv { | ||
149 | int in_suspend; | ||
150 | struct net_device *ndev; | ||
151 | struct device *dev; | ||
152 | enum ks8695_dtype dtype; | ||
153 | void __iomem *io_regs; | ||
154 | |||
155 | struct napi_struct napi; | ||
156 | |||
157 | const char *rx_irq_name, *tx_irq_name, *link_irq_name; | ||
158 | int rx_irq, tx_irq, link_irq; | ||
159 | |||
160 | struct resource *regs_req, *phyiface_req; | ||
161 | void __iomem *phyiface_regs; | ||
162 | |||
163 | void *ring_base; | ||
164 | dma_addr_t ring_base_dma; | ||
165 | |||
166 | struct tx_ring_desc *tx_ring; | ||
167 | int tx_ring_used; | ||
168 | int tx_ring_next_slot; | ||
169 | dma_addr_t tx_ring_dma; | ||
170 | struct ks8695_skbuff tx_buffers[MAX_TX_DESC]; | ||
171 | spinlock_t txq_lock; | ||
172 | |||
173 | struct rx_ring_desc *rx_ring; | ||
174 | dma_addr_t rx_ring_dma; | ||
175 | struct ks8695_skbuff rx_buffers[MAX_RX_DESC]; | ||
176 | int next_rx_desc_read; | ||
177 | spinlock_t rx_lock; | ||
178 | |||
179 | int msg_enable; | ||
180 | }; | ||
181 | |||
182 | /* Register access */ | ||
183 | |||
184 | /** | ||
185 | * ks8695_readreg - Read from a KS8695 ethernet register | ||
186 | * @ksp: The device to read from | ||
187 | * @reg: The register to read | ||
188 | */ | ||
189 | static inline u32 | ||
190 | ks8695_readreg(struct ks8695_priv *ksp, int reg) | ||
191 | { | ||
192 | return readl(ksp->io_regs + reg); | ||
193 | } | ||
194 | |||
195 | /** | ||
196 | * ks8695_writereg - Write to a KS8695 ethernet register | ||
197 | * @ksp: The device to write to | ||
198 | * @reg: The register to write | ||
199 | * @value: The value to write to the register | ||
200 | */ | ||
201 | static inline void | ||
202 | ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value) | ||
203 | { | ||
204 | writel(value, ksp->io_regs + reg); | ||
205 | } | ||
206 | |||
207 | /* Utility functions */ | ||
208 | |||
209 | /** | ||
210 | * ks8695_port_type - Retrieve port-type as user-friendly string | ||
211 | * @ksp: The device to return the type for | ||
212 | * | ||
213 | * Returns a string indicating which of the WAN, LAN or HPNA | ||
214 | * ports this device is likely to represent. | ||
215 | */ | ||
216 | static const char * | ||
217 | ks8695_port_type(struct ks8695_priv *ksp) | ||
218 | { | ||
219 | switch (ksp->dtype) { | ||
220 | case KS8695_DTYPE_LAN: | ||
221 | return "LAN"; | ||
222 | case KS8695_DTYPE_WAN: | ||
223 | return "WAN"; | ||
224 | case KS8695_DTYPE_HPNA: | ||
225 | return "HPNA"; | ||
226 | } | ||
227 | |||
228 | return "UNKNOWN"; | ||
229 | } | ||
230 | |||
231 | /** | ||
232 | * ks8695_update_mac - Update the MAC registers in the device | ||
233 | * @ksp: The device to update | ||
234 | * | ||
235 | * Updates the MAC registers in the KS8695 device from the address in the | ||
236 | * net_device structure associated with this interface. | ||
237 | */ | ||
238 | static void | ||
239 | ks8695_update_mac(struct ks8695_priv *ksp) | ||
240 | { | ||
241 | /* Update the HW with the MAC from the net_device */ | ||
242 | struct net_device *ndev = ksp->ndev; | ||
243 | u32 machigh, maclow; | ||
244 | |||
245 | maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) | | ||
246 | (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0)); | ||
247 | machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0)); | ||
248 | |||
249 | ks8695_writereg(ksp, KS8695_MAL, maclow); | ||
250 | ks8695_writereg(ksp, KS8695_MAH, machigh); | ||
251 | |||
252 | } | ||
253 | |||
254 | /** | ||
255 | * ks8695_refill_rxbuffers - Re-fill the RX buffer ring | ||
256 | * @ksp: The device to refill | ||
257 | * | ||
258 | * Iterates the RX ring of the device looking for empty slots. | ||
259 | * For each empty slot, we allocate and map a new SKB and give it | ||
260 | * to the hardware. | ||
261 | * This can be called from interrupt context safely. | ||
262 | */ | ||
263 | static void | ||
264 | ks8695_refill_rxbuffers(struct ks8695_priv *ksp) | ||
265 | { | ||
266 | /* Run around the RX ring, filling in any missing sk_buff's */ | ||
267 | int buff_n; | ||
268 | |||
269 | for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) { | ||
270 | if (!ksp->rx_buffers[buff_n].skb) { | ||
271 | struct sk_buff *skb = | ||
272 | netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE); | ||
273 | dma_addr_t mapping; | ||
274 | |||
275 | ksp->rx_buffers[buff_n].skb = skb; | ||
276 | if (skb == NULL) { | ||
277 | /* Failed to allocate one, perhaps | ||
278 | * we'll try again later. | ||
279 | */ | ||
280 | break; | ||
281 | } | ||
282 | |||
283 | mapping = dma_map_single(ksp->dev, skb->data, | ||
284 | MAX_RXBUF_SIZE, | ||
285 | DMA_FROM_DEVICE); | ||
286 | if (unlikely(dma_mapping_error(ksp->dev, mapping))) { | ||
287 | /* Failed to DMA map this SKB, try later */ | ||
288 | dev_kfree_skb_irq(skb); | ||
289 | ksp->rx_buffers[buff_n].skb = NULL; | ||
290 | break; | ||
291 | } | ||
292 | ksp->rx_buffers[buff_n].dma_ptr = mapping; | ||
293 | ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE; | ||
294 | |||
295 | /* Record this into the DMA ring */ | ||
296 | ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping); | ||
297 | ksp->rx_ring[buff_n].length = | ||
298 | cpu_to_le32(MAX_RXBUF_SIZE); | ||
299 | |||
300 | wmb(); | ||
301 | |||
302 | /* And give ownership over to the hardware */ | ||
303 | ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); | ||
304 | } | ||
305 | } | ||
306 | } | ||
307 | |||
308 | /* Maximum number of multicast addresses which the KS8695 HW supports */ | ||
309 | #define KS8695_NR_ADDRESSES 16 | ||
310 | |||
311 | /** | ||
312 | * ks8695_init_partial_multicast - Init the mcast addr registers | ||
313 | * @ksp: The device to initialise | ||
314 | * @ndev: The net_device whose multicast address list | ||
315 | * is to be programmed into the hardware | ||
316 | * | ||
317 | * This routine is a helper for ks8695_set_multicast - it writes | ||
318 | * the additional-address registers in the KS8695 ethernet device | ||
319 | * and cleans up any others left behind. | ||
320 | */ | ||
321 | static void | ||
322 | ks8695_init_partial_multicast(struct ks8695_priv *ksp, | ||
323 | struct net_device *ndev) | ||
324 | { | ||
325 | u32 low, high; | ||
326 | int i; | ||
327 | struct netdev_hw_addr *ha; | ||
328 | |||
329 | i = 0; | ||
330 | netdev_for_each_mc_addr(ha, ndev) { | ||
331 | /* Ran out of space in chip? */ | ||
332 | BUG_ON(i == KS8695_NR_ADDRESSES); | ||
333 | |||
334 | low = (ha->addr[2] << 24) | (ha->addr[3] << 16) | | ||
335 | (ha->addr[4] << 8) | (ha->addr[5]); | ||
336 | high = (ha->addr[0] << 8) | (ha->addr[1]); | ||
337 | |||
338 | ks8695_writereg(ksp, KS8695_AAL_(i), low); | ||
339 | ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high); | ||
340 | i++; | ||
341 | } | ||
342 | |||
343 | /* Clear the remaining Additional Station Addresses */ | ||
344 | for (; i < KS8695_NR_ADDRESSES; i++) { | ||
345 | ks8695_writereg(ksp, KS8695_AAL_(i), 0); | ||
346 | ks8695_writereg(ksp, KS8695_AAH_(i), 0); | ||
347 | } | ||
348 | } | ||
349 | |||
350 | /* Interrupt handling */ | ||
351 | |||
352 | /** | ||
353 | * ks8695_tx_irq - Transmit IRQ handler | ||
354 | * @irq: The IRQ which went off (ignored) | ||
355 | * @dev_id: The net_device for the interrupt | ||
356 | * | ||
357 | * Process the TX ring, clearing out any transmitted slots. | ||
358 | * Allows the net_device to pass us new packets once slots are | ||
359 | * freed. | ||
360 | */ | ||
361 | static irqreturn_t | ||
362 | ks8695_tx_irq(int irq, void *dev_id) | ||
363 | { | ||
364 | struct net_device *ndev = (struct net_device *)dev_id; | ||
365 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
366 | int buff_n; | ||
367 | |||
368 | for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) { | ||
369 | if (ksp->tx_buffers[buff_n].skb && | ||
370 | !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) { | ||
371 | rmb(); | ||
372 | /* An SKB which is not owned by HW is present */ | ||
373 | /* Update the stats for the net_device */ | ||
374 | ndev->stats.tx_packets++; | ||
375 | ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length; | ||
376 | |||
377 | /* Free the packet from the ring */ | ||
378 | ksp->tx_ring[buff_n].data_ptr = 0; | ||
379 | |||
380 | /* Free the sk_buff */ | ||
381 | dma_unmap_single(ksp->dev, | ||
382 | ksp->tx_buffers[buff_n].dma_ptr, | ||
383 | ksp->tx_buffers[buff_n].length, | ||
384 | DMA_TO_DEVICE); | ||
385 | dev_consume_skb_irq(ksp->tx_buffers[buff_n].skb); | ||
386 | ksp->tx_buffers[buff_n].skb = NULL; | ||
387 | ksp->tx_ring_used--; | ||
388 | } | ||
389 | } | ||
390 | |||
391 | netif_wake_queue(ndev); | ||
392 | |||
393 | return IRQ_HANDLED; | ||
394 | } | ||
395 | |||
396 | /** | ||
397 | * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit | ||
398 | * @ksp: Private data for the KS8695 Ethernet | ||
399 | * | ||
400 | * Per the KS8695 documentation: | ||
401 | * Interrupt Enable Register (offset 0xE204) | ||
402 | * Bit29 : WAN MAC Receive Interrupt Enable | ||
403 | * Bit16 : LAN MAC Receive Interrupt Enable | ||
404 | * Interrupt Status Register (Offset 0xF208) | ||
405 | * Bit29: WAN MAC Receive Status | ||
406 | * Bit16: LAN MAC Receive Status | ||
407 | * So this RX interrupt enable/status bit number is equal | ||
408 | * to the RX IRQ number. | ||
409 | */ | ||
410 | static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp) | ||
411 | { | ||
412 | return ksp->rx_irq; | ||
413 | } | ||
414 | |||
415 | /** | ||
416 | * ks8695_rx_irq - Receive IRQ handler | ||
417 | * @irq: The IRQ which went off (ignored) | ||
418 | * @dev_id: The net_device for the interrupt | ||
419 | * | ||
420 | * Inform NAPI that packet reception needs to be scheduled | ||
421 | */ | ||
422 | |||
423 | static irqreturn_t | ||
424 | ks8695_rx_irq(int irq, void *dev_id) | ||
425 | { | ||
426 | struct net_device *ndev = (struct net_device *)dev_id; | ||
427 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
428 | |||
429 | spin_lock(&ksp->rx_lock); | ||
430 | |||
431 | if (napi_schedule_prep(&ksp->napi)) { | ||
432 | unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN); | ||
433 | unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); | ||
434 | /*disable rx interrupt*/ | ||
435 | status &= ~mask_bit; | ||
436 | writel(status , KS8695_IRQ_VA + KS8695_INTEN); | ||
437 | __napi_schedule(&ksp->napi); | ||
438 | } | ||
439 | |||
440 | spin_unlock(&ksp->rx_lock); | ||
441 | return IRQ_HANDLED; | ||
442 | } | ||
443 | |||
444 | /** | ||
445 | * ks8695_rx - Receive packets called by NAPI poll method | ||
446 | * @ksp: Private data for the KS8695 Ethernet | ||
447 | * @budget: Number of packets allowed to process | ||
448 | */ | ||
449 | static int ks8695_rx(struct ks8695_priv *ksp, int budget) | ||
450 | { | ||
451 | struct net_device *ndev = ksp->ndev; | ||
452 | struct sk_buff *skb; | ||
453 | int buff_n; | ||
454 | u32 flags; | ||
455 | int pktlen; | ||
456 | int received = 0; | ||
457 | |||
458 | buff_n = ksp->next_rx_desc_read; | ||
459 | while (received < budget | ||
460 | && ksp->rx_buffers[buff_n].skb | ||
461 | && (!(ksp->rx_ring[buff_n].status & | ||
462 | cpu_to_le32(RDES_OWN)))) { | ||
463 | rmb(); | ||
464 | flags = le32_to_cpu(ksp->rx_ring[buff_n].status); | ||
465 | |||
466 | /* Found an SKB which we own, this means we | ||
467 | * received a packet | ||
468 | */ | ||
469 | if ((flags & (RDES_FS | RDES_LS)) != | ||
470 | (RDES_FS | RDES_LS)) { | ||
471 | /* This packet is not both the first and | ||
472 | * the last segment. Therefore it is | ||
473 | * a "spanning" packet and we can't | ||
474 | * handle it | ||
475 | */ | ||
476 | goto rx_failure; | ||
477 | } | ||
478 | |||
479 | if (flags & (RDES_ES | RDES_RE)) { | ||
480 | /* It's an error packet */ | ||
481 | ndev->stats.rx_errors++; | ||
482 | if (flags & RDES_TL) | ||
483 | ndev->stats.rx_length_errors++; | ||
484 | if (flags & RDES_RF) | ||
485 | ndev->stats.rx_length_errors++; | ||
486 | if (flags & RDES_CE) | ||
487 | ndev->stats.rx_crc_errors++; | ||
488 | if (flags & RDES_RE) | ||
489 | ndev->stats.rx_missed_errors++; | ||
490 | |||
491 | goto rx_failure; | ||
492 | } | ||
493 | |||
494 | pktlen = flags & RDES_FLEN; | ||
495 | pktlen -= 4; /* Drop the CRC */ | ||
496 | |||
497 | /* Retrieve the sk_buff */ | ||
498 | skb = ksp->rx_buffers[buff_n].skb; | ||
499 | |||
500 | /* Clear it from the ring */ | ||
501 | ksp->rx_buffers[buff_n].skb = NULL; | ||
502 | ksp->rx_ring[buff_n].data_ptr = 0; | ||
503 | |||
504 | /* Unmap the SKB */ | ||
505 | dma_unmap_single(ksp->dev, | ||
506 | ksp->rx_buffers[buff_n].dma_ptr, | ||
507 | ksp->rx_buffers[buff_n].length, | ||
508 | DMA_FROM_DEVICE); | ||
509 | |||
510 | /* Relinquish the SKB to the network layer */ | ||
511 | skb_put(skb, pktlen); | ||
512 | skb->protocol = eth_type_trans(skb, ndev); | ||
513 | napi_gro_receive(&ksp->napi, skb); | ||
514 | |||
515 | /* Record stats */ | ||
516 | ndev->stats.rx_packets++; | ||
517 | ndev->stats.rx_bytes += pktlen; | ||
518 | goto rx_finished; | ||
519 | |||
520 | rx_failure: | ||
521 | /* This ring entry is an error, but we can | ||
522 | * re-use the skb | ||
523 | */ | ||
524 | /* Give the ring entry back to the hardware */ | ||
525 | ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); | ||
526 | rx_finished: | ||
527 | received++; | ||
528 | buff_n = (buff_n + 1) & MAX_RX_DESC_MASK; | ||
529 | } | ||
530 | |||
531 | /* And note which RX descriptor we last did */ | ||
532 | ksp->next_rx_desc_read = buff_n; | ||
533 | |||
534 | /* And refill the buffers */ | ||
535 | ks8695_refill_rxbuffers(ksp); | ||
536 | |||
537 | /* Kick the RX DMA engine, in case it became suspended */ | ||
538 | ks8695_writereg(ksp, KS8695_DRSC, 0); | ||
539 | |||
540 | return received; | ||
541 | } | ||
542 | |||
543 | |||
544 | /** | ||
545 | * ks8695_poll - Receive packets via the NAPI poll method | ||
546 | * @napi: The NAPI context for this interface | ||
547 | * @budget: The remaining number of packets the network subsystem allows | ||
548 | * | ||
549 | * Invoked by the network core when it requests new | ||
550 | * packets from the driver | ||
551 | */ | ||
552 | static int ks8695_poll(struct napi_struct *napi, int budget) | ||
553 | { | ||
554 | struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi); | ||
555 | unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN); | ||
556 | unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); | ||
557 | int work_done; | ||
558 | |||
559 | work_done = ks8695_rx(ksp, budget); | ||
560 | |||
561 | if (work_done < budget && napi_complete_done(napi, work_done)) { | ||
562 | unsigned long flags; | ||
563 | |||
564 | spin_lock_irqsave(&ksp->rx_lock, flags); | ||
565 | /* enable rx interrupt */ | ||
566 | writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN); | ||
567 | spin_unlock_irqrestore(&ksp->rx_lock, flags); | ||
568 | } | ||
569 | return work_done; | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * ks8695_link_irq - Link change IRQ handler | ||
574 | * @irq: The IRQ which went off (ignored) | ||
575 | * @dev_id: The net_device for the interrupt | ||
576 | * | ||
577 | * The WAN interface can generate an IRQ when the link changes, | ||
578 | * report this to the net layer and the user. | ||
579 | */ | ||
580 | static irqreturn_t | ||
581 | ks8695_link_irq(int irq, void *dev_id) | ||
582 | { | ||
583 | struct net_device *ndev = (struct net_device *)dev_id; | ||
584 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
585 | u32 ctrl; | ||
586 | |||
587 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
588 | if (ctrl & WMC_WLS) { | ||
589 | netif_carrier_on(ndev); | ||
590 | if (netif_msg_link(ksp)) | ||
591 | dev_info(ksp->dev, | ||
592 | "%s: Link is now up (10%sMbps/%s-duplex)\n", | ||
593 | ndev->name, | ||
594 | (ctrl & WMC_WSS) ? "0" : "", | ||
595 | (ctrl & WMC_WDS) ? "Full" : "Half"); | ||
596 | } else { | ||
597 | netif_carrier_off(ndev); | ||
598 | if (netif_msg_link(ksp)) | ||
599 | dev_info(ksp->dev, "%s: Link is now down.\n", | ||
600 | ndev->name); | ||
601 | } | ||
602 | |||
603 | return IRQ_HANDLED; | ||
604 | } | ||
605 | |||
606 | |||
607 | /* KS8695 Device functions */ | ||
608 | |||
609 | /** | ||
610 | * ks8695_reset - Reset a KS8695 ethernet interface | ||
611 | * @ksp: The interface to reset | ||
612 | * | ||
613 | * Perform an engine reset of the interface and re-program it | ||
614 | * with sensible defaults. | ||
615 | */ | ||
616 | static void | ||
617 | ks8695_reset(struct ks8695_priv *ksp) | ||
618 | { | ||
619 | int reset_timeout = watchdog; | ||
620 | /* Issue the reset via the TX DMA control register */ | ||
621 | ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST); | ||
622 | while (reset_timeout--) { | ||
623 | if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST)) | ||
624 | break; | ||
625 | msleep(1); | ||
626 | } | ||
627 | |||
628 | if (reset_timeout < 0) { | ||
629 | dev_crit(ksp->dev, | ||
630 | "Timeout waiting for DMA engines to reset\n"); | ||
631 | /* And blithely carry on */ | ||
632 | } | ||
633 | |||
634 | /* Definitely wait long enough before attempting to program | ||
635 | * the engines | ||
636 | */ | ||
637 | msleep(10); | ||
638 | |||
639 | /* RX: unicast and broadcast */ | ||
640 | ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB); | ||
641 | /* TX: pad and add CRC */ | ||
642 | ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC); | ||
643 | } | ||
644 | |||
645 | /** | ||
646 | * ks8695_shutdown - Shut down a KS8695 ethernet interface | ||
647 | * @ksp: The interface to shut down | ||
648 | * | ||
649 | * This disables packet RX/TX, cleans up IRQs, drains the rings, | ||
650 | * and basically places the interface into a clean shutdown | ||
651 | * state. | ||
652 | */ | ||
653 | static void | ||
654 | ks8695_shutdown(struct ks8695_priv *ksp) | ||
655 | { | ||
656 | u32 ctrl; | ||
657 | int buff_n; | ||
658 | |||
659 | /* Disable packet transmission */ | ||
660 | ctrl = ks8695_readreg(ksp, KS8695_DTXC); | ||
661 | ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE); | ||
662 | |||
663 | /* Disable packet reception */ | ||
664 | ctrl = ks8695_readreg(ksp, KS8695_DRXC); | ||
665 | ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE); | ||
666 | |||
667 | /* Release the IRQs */ | ||
668 | free_irq(ksp->rx_irq, ksp->ndev); | ||
669 | free_irq(ksp->tx_irq, ksp->ndev); | ||
670 | if (ksp->link_irq != -1) | ||
671 | free_irq(ksp->link_irq, ksp->ndev); | ||
672 | |||
673 | /* Throw away any pending TX packets */ | ||
674 | for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) { | ||
675 | if (ksp->tx_buffers[buff_n].skb) { | ||
676 | /* Remove this SKB from the TX ring */ | ||
677 | ksp->tx_ring[buff_n].owner = 0; | ||
678 | ksp->tx_ring[buff_n].status = 0; | ||
679 | ksp->tx_ring[buff_n].data_ptr = 0; | ||
680 | |||
681 | /* Unmap and bin this SKB */ | ||
682 | dma_unmap_single(ksp->dev, | ||
683 | ksp->tx_buffers[buff_n].dma_ptr, | ||
684 | ksp->tx_buffers[buff_n].length, | ||
685 | DMA_TO_DEVICE); | ||
686 | dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb); | ||
687 | ksp->tx_buffers[buff_n].skb = NULL; | ||
688 | } | ||
689 | } | ||
690 | |||
691 | /* Purge the RX buffers */ | ||
692 | for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) { | ||
693 | if (ksp->rx_buffers[buff_n].skb) { | ||
694 | /* Remove the SKB from the RX ring */ | ||
695 | ksp->rx_ring[buff_n].status = 0; | ||
696 | ksp->rx_ring[buff_n].data_ptr = 0; | ||
697 | |||
698 | /* Unmap and bin the SKB */ | ||
699 | dma_unmap_single(ksp->dev, | ||
700 | ksp->rx_buffers[buff_n].dma_ptr, | ||
701 | ksp->rx_buffers[buff_n].length, | ||
702 | DMA_FROM_DEVICE); | ||
703 | dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb); | ||
704 | ksp->rx_buffers[buff_n].skb = NULL; | ||
705 | } | ||
706 | } | ||
707 | } | ||
708 | |||
709 | |||
710 | /** | ||
711 | * ks8695_setup_irq - IRQ setup helper function | ||
712 | * @irq: The IRQ number to claim | ||
713 | * @irq_name: The name to give the IRQ claimant | ||
714 | * @handler: The function to call to handle the IRQ | ||
715 | * @ndev: The net_device to pass in as the dev_id argument to the handler | ||
716 | * | ||
717 | * Return 0 on success. | ||
718 | */ | ||
719 | static int | ||
720 | ks8695_setup_irq(int irq, const char *irq_name, | ||
721 | irq_handler_t handler, struct net_device *ndev) | ||
722 | { | ||
723 | int ret; | ||
724 | |||
725 | ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev); | ||
726 | |||
727 | if (ret) { | ||
728 | dev_err(&ndev->dev, "failure to request IRQ %d\n", irq); | ||
729 | return ret; | ||
730 | } | ||
731 | |||
732 | return 0; | ||
733 | } | ||
734 | |||
735 | /** | ||
736 | * ks8695_init_net - Initialise a KS8695 ethernet interface | ||
737 | * @ksp: The interface to initialise | ||
738 | * | ||
739 | * This routine fills the RX ring, initialises the DMA engines, | ||
740 | * allocates the IRQs and then starts the packet TX and RX | ||
741 | * engines. | ||
742 | */ | ||
743 | static int | ||
744 | ks8695_init_net(struct ks8695_priv *ksp) | ||
745 | { | ||
746 | int ret; | ||
747 | u32 ctrl; | ||
748 | |||
749 | ks8695_refill_rxbuffers(ksp); | ||
750 | |||
751 | /* Initialise the DMA engines */ | ||
752 | ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma); | ||
753 | ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma); | ||
754 | |||
755 | /* Request the IRQs */ | ||
756 | ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name, | ||
757 | ks8695_rx_irq, ksp->ndev); | ||
758 | if (ret) | ||
759 | return ret; | ||
760 | ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name, | ||
761 | ks8695_tx_irq, ksp->ndev); | ||
762 | if (ret) | ||
763 | return ret; | ||
764 | if (ksp->link_irq != -1) { | ||
765 | ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name, | ||
766 | ks8695_link_irq, ksp->ndev); | ||
767 | if (ret) | ||
768 | return ret; | ||
769 | } | ||
770 | |||
771 | /* Set up the ring indices */ | ||
772 | ksp->next_rx_desc_read = 0; | ||
773 | ksp->tx_ring_next_slot = 0; | ||
774 | ksp->tx_ring_used = 0; | ||
775 | |||
776 | /* Bring up transmission */ | ||
777 | ctrl = ks8695_readreg(ksp, KS8695_DTXC); | ||
778 | /* Enable packet transmission */ | ||
779 | ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE); | ||
780 | |||
781 | /* Bring up the reception */ | ||
782 | ctrl = ks8695_readreg(ksp, KS8695_DRXC); | ||
783 | /* Enable packet reception */ | ||
784 | ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE); | ||
785 | /* And start the DMA engine */ | ||
786 | ks8695_writereg(ksp, KS8695_DRSC, 0); | ||
787 | |||
788 | /* All done */ | ||
789 | return 0; | ||
790 | } | ||
791 | |||
792 | /** | ||
793 | * ks8695_release_device - HW resource release for KS8695 e-net | ||
794 | * @ksp: The device to be freed | ||
795 | * | ||
796 | * This unallocates io memory regions, dma-coherent regions etc | ||
797 | * which were allocated in ks8695_probe. | ||
798 | */ | ||
799 | static void | ||
800 | ks8695_release_device(struct ks8695_priv *ksp) | ||
801 | { | ||
802 | /* Unmap the registers */ | ||
803 | iounmap(ksp->io_regs); | ||
804 | if (ksp->phyiface_regs) | ||
805 | iounmap(ksp->phyiface_regs); | ||
806 | |||
807 | /* And release the request */ | ||
808 | release_resource(ksp->regs_req); | ||
809 | kfree(ksp->regs_req); | ||
810 | if (ksp->phyiface_req) { | ||
811 | release_resource(ksp->phyiface_req); | ||
812 | kfree(ksp->phyiface_req); | ||
813 | } | ||
814 | |||
815 | /* Free the ring buffers */ | ||
816 | dma_free_coherent(ksp->dev, RING_DMA_SIZE, | ||
817 | ksp->ring_base, ksp->ring_base_dma); | ||
818 | } | ||
819 | |||
820 | /* Ethtool support */ | ||
821 | |||
822 | /** | ||
823 | * ks8695_get_msglevel - Get the messages enabled for emission | ||
824 | * @ndev: The network device to read from | ||
825 | */ | ||
826 | static u32 | ||
827 | ks8695_get_msglevel(struct net_device *ndev) | ||
828 | { | ||
829 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
830 | |||
831 | return ksp->msg_enable; | ||
832 | } | ||
833 | |||
834 | /** | ||
835 | * ks8695_set_msglevel - Set the messages enabled for emission | ||
836 | * @ndev: The network device to configure | ||
837 | * @value: The messages to set for emission | ||
838 | */ | ||
839 | static void | ||
840 | ks8695_set_msglevel(struct net_device *ndev, u32 value) | ||
841 | { | ||
842 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
843 | |||
844 | ksp->msg_enable = value; | ||
845 | } | ||
846 | |||
847 | /** | ||
848 | * ks8695_wan_get_link_ksettings - Get device-specific settings. | ||
849 | * @ndev: The network device to read settings from | ||
850 | * @cmd: The ethtool structure to read into | ||
851 | */ | ||
852 | static int | ||
853 | ks8695_wan_get_link_ksettings(struct net_device *ndev, | ||
854 | struct ethtool_link_ksettings *cmd) | ||
855 | { | ||
856 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
857 | u32 ctrl; | ||
858 | u32 supported, advertising; | ||
859 | |||
860 | /* All ports on the KS8695 support these... */ | ||
861 | supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | ||
862 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | | ||
863 | SUPPORTED_TP | SUPPORTED_MII); | ||
864 | |||
865 | advertising = ADVERTISED_TP | ADVERTISED_MII; | ||
866 | cmd->base.port = PORT_MII; | ||
867 | supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); | ||
868 | cmd->base.phy_address = 0; | ||
869 | |||
870 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
871 | if ((ctrl & WMC_WAND) == 0) { | ||
872 | /* auto-negotiation is enabled */ | ||
873 | advertising |= ADVERTISED_Autoneg; | ||
874 | if (ctrl & WMC_WANA100F) | ||
875 | advertising |= ADVERTISED_100baseT_Full; | ||
876 | if (ctrl & WMC_WANA100H) | ||
877 | advertising |= ADVERTISED_100baseT_Half; | ||
878 | if (ctrl & WMC_WANA10F) | ||
879 | advertising |= ADVERTISED_10baseT_Full; | ||
880 | if (ctrl & WMC_WANA10H) | ||
881 | advertising |= ADVERTISED_10baseT_Half; | ||
882 | if (ctrl & WMC_WANAP) | ||
883 | advertising |= ADVERTISED_Pause; | ||
884 | cmd->base.autoneg = AUTONEG_ENABLE; | ||
885 | |||
886 | cmd->base.speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; | ||
887 | cmd->base.duplex = (ctrl & WMC_WDS) ? | ||
888 | DUPLEX_FULL : DUPLEX_HALF; | ||
889 | } else { | ||
890 | /* auto-negotiation is disabled */ | ||
891 | cmd->base.autoneg = AUTONEG_DISABLE; | ||
892 | |||
893 | cmd->base.speed = (ctrl & WMC_WANF100) ? | ||
894 | SPEED_100 : SPEED_10; | ||
895 | cmd->base.duplex = (ctrl & WMC_WANFF) ? | ||
896 | DUPLEX_FULL : DUPLEX_HALF; | ||
897 | } | ||
898 | |||
899 | ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, | ||
900 | supported); | ||
901 | ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, | ||
902 | advertising); | ||
903 | |||
904 | return 0; | ||
905 | } | ||
906 | |||
907 | /** | ||
908 | * ks8695_wan_set_link_ksettings - Set device-specific settings. | ||
909 | * @ndev: The network device to configure | ||
910 | * @cmd: The settings to configure | ||
911 | */ | ||
912 | static int | ||
913 | ks8695_wan_set_link_ksettings(struct net_device *ndev, | ||
914 | const struct ethtool_link_ksettings *cmd) | ||
915 | { | ||
916 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
917 | u32 ctrl; | ||
918 | u32 advertising; | ||
919 | |||
920 | ethtool_convert_link_mode_to_legacy_u32(&advertising, | ||
921 | cmd->link_modes.advertising); | ||
922 | |||
923 | if ((cmd->base.speed != SPEED_10) && (cmd->base.speed != SPEED_100)) | ||
924 | return -EINVAL; | ||
925 | if ((cmd->base.duplex != DUPLEX_HALF) && | ||
926 | (cmd->base.duplex != DUPLEX_FULL)) | ||
927 | return -EINVAL; | ||
928 | if (cmd->base.port != PORT_MII) | ||
929 | return -EINVAL; | ||
930 | if ((cmd->base.autoneg != AUTONEG_DISABLE) && | ||
931 | (cmd->base.autoneg != AUTONEG_ENABLE)) | ||
932 | return -EINVAL; | ||
933 | |||
934 | if (cmd->base.autoneg == AUTONEG_ENABLE) { | ||
935 | if ((advertising & (ADVERTISED_10baseT_Half | | ||
936 | ADVERTISED_10baseT_Full | | ||
937 | ADVERTISED_100baseT_Half | | ||
938 | ADVERTISED_100baseT_Full)) == 0) | ||
939 | return -EINVAL; | ||
940 | |||
941 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
942 | |||
943 | ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | | ||
944 | WMC_WANA10F | WMC_WANA10H); | ||
945 | if (advertising & ADVERTISED_100baseT_Full) | ||
946 | ctrl |= WMC_WANA100F; | ||
947 | if (advertising & ADVERTISED_100baseT_Half) | ||
948 | ctrl |= WMC_WANA100H; | ||
949 | if (advertising & ADVERTISED_10baseT_Full) | ||
950 | ctrl |= WMC_WANA10F; | ||
951 | if (advertising & ADVERTISED_10baseT_Half) | ||
952 | ctrl |= WMC_WANA10H; | ||
953 | |||
954 | /* force a re-negotiation */ | ||
955 | ctrl |= WMC_WANR; | ||
956 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | ||
957 | } else { | ||
958 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
959 | |||
960 | /* disable auto-negotiation */ | ||
961 | ctrl |= WMC_WAND; | ||
962 | ctrl &= ~(WMC_WANF100 | WMC_WANFF); | ||
963 | |||
964 | if (cmd->base.speed == SPEED_100) | ||
965 | ctrl |= WMC_WANF100; | ||
966 | if (cmd->base.duplex == DUPLEX_FULL) | ||
967 | ctrl |= WMC_WANFF; | ||
968 | |||
969 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | ||
970 | } | ||
971 | |||
972 | return 0; | ||
973 | } | ||
974 | |||
975 | /** | ||
976 | * ks8695_wan_nwayreset - Restart the autonegotiation on the port. | ||
977 | * @ndev: The network device to restart autonegotiation on | ||
978 | */ | ||
979 | static int | ||
980 | ks8695_wan_nwayreset(struct net_device *ndev) | ||
981 | { | ||
982 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
983 | u32 ctrl; | ||
984 | |||
985 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
986 | |||
987 | if ((ctrl & WMC_WAND) == 0) | ||
988 | writel(ctrl | WMC_WANR, | ||
989 | ksp->phyiface_regs + KS8695_WMC); | ||
990 | else | ||
991 | /* auto-negotiation not enabled */ | ||
992 | return -EINVAL; | ||
993 | |||
994 | return 0; | ||
995 | } | ||
996 | |||
997 | /** | ||
998 | * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising | ||
999 | * @ndev: The device to retrieve settings from | ||
1000 | * @param: The structure to fill out with the information | ||
1001 | */ | ||
1002 | static void | ||
1003 | ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) | ||
1004 | { | ||
1005 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1006 | u32 ctrl; | ||
1007 | |||
1008 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
1009 | |||
1010 | /* advertise Pause */ | ||
1011 | param->autoneg = (ctrl & WMC_WANAP); | ||
1012 | |||
1013 | /* current Rx Flow-control */ | ||
1014 | ctrl = ks8695_readreg(ksp, KS8695_DRXC); | ||
1015 | param->rx_pause = (ctrl & DRXC_RFCE); | ||
1016 | |||
1017 | /* current Tx Flow-control */ | ||
1018 | ctrl = ks8695_readreg(ksp, KS8695_DTXC); | ||
1019 | param->tx_pause = (ctrl & DTXC_TFCE); | ||
1020 | } | ||
1021 | |||
1022 | /** | ||
1023 | * ks8695_get_drvinfo - Retrieve driver information | ||
1024 | * @ndev: The network device to retrieve info about | ||
1025 | * @info: The info structure to fill out. | ||
1026 | */ | ||
1027 | static void | ||
1028 | ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) | ||
1029 | { | ||
1030 | strlcpy(info->driver, MODULENAME, sizeof(info->driver)); | ||
1031 | strlcpy(info->version, MODULEVERSION, sizeof(info->version)); | ||
1032 | strlcpy(info->bus_info, dev_name(ndev->dev.parent), | ||
1033 | sizeof(info->bus_info)); | ||
1034 | } | ||
1035 | |||
1036 | static const struct ethtool_ops ks8695_ethtool_ops = { | ||
1037 | .get_msglevel = ks8695_get_msglevel, | ||
1038 | .set_msglevel = ks8695_set_msglevel, | ||
1039 | .get_drvinfo = ks8695_get_drvinfo, | ||
1040 | }; | ||
1041 | |||
1042 | static const struct ethtool_ops ks8695_wan_ethtool_ops = { | ||
1043 | .get_msglevel = ks8695_get_msglevel, | ||
1044 | .set_msglevel = ks8695_set_msglevel, | ||
1045 | .nway_reset = ks8695_wan_nwayreset, | ||
1046 | .get_link = ethtool_op_get_link, | ||
1047 | .get_pauseparam = ks8695_wan_get_pause, | ||
1048 | .get_drvinfo = ks8695_get_drvinfo, | ||
1049 | .get_link_ksettings = ks8695_wan_get_link_ksettings, | ||
1050 | .set_link_ksettings = ks8695_wan_set_link_ksettings, | ||
1051 | }; | ||
1052 | |||
1053 | /* Network device interface functions */ | ||
1054 | |||
1055 | /** | ||
1056 | * ks8695_set_mac - Update MAC in net dev and HW | ||
1057 | * @ndev: The network device to update | ||
1058 | * @addr: The new MAC address to set | ||
1059 | */ | ||
1060 | static int | ||
1061 | ks8695_set_mac(struct net_device *ndev, void *addr) | ||
1062 | { | ||
1063 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1064 | struct sockaddr *address = addr; | ||
1065 | |||
1066 | if (!is_valid_ether_addr(address->sa_data)) | ||
1067 | return -EADDRNOTAVAIL; | ||
1068 | |||
1069 | memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len); | ||
1070 | |||
1071 | ks8695_update_mac(ksp); | ||
1072 | |||
1073 | dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n", | ||
1074 | ndev->name, ndev->dev_addr); | ||
1075 | |||
1076 | return 0; | ||
1077 | } | ||
1078 | |||
1079 | /** | ||
1080 | * ks8695_set_multicast - Set up the multicast behaviour of the interface | ||
1081 | * @ndev: The net_device to configure | ||
1082 | * | ||
1083 | * This routine, called by the net layer, configures promiscuity | ||
1084 | * and multicast reception behaviour for the interface. | ||
1085 | */ | ||
1086 | static void | ||
1087 | ks8695_set_multicast(struct net_device *ndev) | ||
1088 | { | ||
1089 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1090 | u32 ctrl; | ||
1091 | |||
1092 | ctrl = ks8695_readreg(ksp, KS8695_DRXC); | ||
1093 | |||
1094 | if (ndev->flags & IFF_PROMISC) { | ||
1095 | /* enable promiscuous mode */ | ||
1096 | ctrl |= DRXC_RA; | ||
1097 | } else if (ndev->flags & ~IFF_PROMISC) { | ||
1098 | /* disable promiscuous mode */ | ||
1099 | ctrl &= ~DRXC_RA; | ||
1100 | } | ||
1101 | |||
1102 | if (ndev->flags & IFF_ALLMULTI) { | ||
1103 | /* enable all multicast mode */ | ||
1104 | ctrl |= DRXC_RM; | ||
1105 | } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) { | ||
1106 | /* more specific multicast addresses than can be | ||
1107 | * handled in hardware | ||
1108 | */ | ||
1109 | ctrl |= DRXC_RM; | ||
1110 | } else { | ||
1111 | /* enable specific multicasts */ | ||
1112 | ctrl &= ~DRXC_RM; | ||
1113 | ks8695_init_partial_multicast(ksp, ndev); | ||
1114 | } | ||
1115 | |||
1116 | ks8695_writereg(ksp, KS8695_DRXC, ctrl); | ||
1117 | } | ||
1118 | |||
1119 | /** | ||
1120 | * ks8695_timeout - Handle a network tx/rx timeout. | ||
1121 | * @ndev: The net_device which timed out. | ||
1122 | * | ||
1123 | * A network transaction timed out, reset the device. | ||
1124 | */ | ||
1125 | static void | ||
1126 | ks8695_timeout(struct net_device *ndev) | ||
1127 | { | ||
1128 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1129 | |||
1130 | netif_stop_queue(ndev); | ||
1131 | ks8695_shutdown(ksp); | ||
1132 | |||
1133 | ks8695_reset(ksp); | ||
1134 | |||
1135 | ks8695_update_mac(ksp); | ||
1136 | |||
1137 | /* We ignore the return from this; since it managed to init | ||
1138 | * before, it will probably be okay to init again. | ||
1139 | */ | ||
1140 | ks8695_init_net(ksp); | ||
1141 | |||
1142 | /* Reconfigure promiscuity etc */ | ||
1143 | ks8695_set_multicast(ndev); | ||
1144 | |||
1145 | /* And start the TX queue once more */ | ||
1146 | netif_start_queue(ndev); | ||
1147 | } | ||
1148 | |||
1149 | /** | ||
1150 | * ks8695_start_xmit - Start a packet transmission | ||
1151 | * @skb: The packet to transmit | ||
1152 | * @ndev: The network device to send the packet on | ||
1153 | * | ||
1154 | * This routine, called by the net layer, takes ownership of the | ||
1155 | * sk_buff and adds it to the TX ring. It then kicks the TX DMA | ||
1156 | * engine to ensure transmission begins. | ||
1157 | */ | ||
1158 | static netdev_tx_t | ||
1159 | ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev) | ||
1160 | { | ||
1161 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1162 | int buff_n; | ||
1163 | dma_addr_t dmap; | ||
1164 | |||
1165 | spin_lock_irq(&ksp->txq_lock); | ||
1166 | |||
1167 | if (ksp->tx_ring_used == MAX_TX_DESC) { | ||
1168 | /* Somehow we got entered when we have no room */ | ||
1169 | spin_unlock_irq(&ksp->txq_lock); | ||
1170 | return NETDEV_TX_BUSY; | ||
1171 | } | ||
1172 | |||
1173 | buff_n = ksp->tx_ring_next_slot; | ||
1174 | |||
1175 | BUG_ON(ksp->tx_buffers[buff_n].skb); | ||
1176 | |||
1177 | dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE); | ||
1178 | if (unlikely(dma_mapping_error(ksp->dev, dmap))) { | ||
1179 | /* Failed to DMA map this SKB, give it back for now */ | ||
1180 | spin_unlock_irq(&ksp->txq_lock); | ||
1181 | dev_dbg(ksp->dev, "%s: Could not map DMA memory for " | ||
1182 | "transmission, trying later\n", ndev->name); | ||
1183 | return NETDEV_TX_BUSY; | ||
1184 | } | ||
1185 | |||
1186 | ksp->tx_buffers[buff_n].dma_ptr = dmap; | ||
1187 | /* Mapped okay, store the buffer pointer and length for later */ | ||
1188 | ksp->tx_buffers[buff_n].skb = skb; | ||
1189 | ksp->tx_buffers[buff_n].length = skb->len; | ||
1190 | |||
1191 | /* Fill out the TX descriptor */ | ||
1192 | ksp->tx_ring[buff_n].data_ptr = | ||
1193 | cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr); | ||
1194 | ksp->tx_ring[buff_n].status = | ||
1195 | cpu_to_le32(TDES_IC | TDES_FS | TDES_LS | | ||
1196 | (skb->len & TDES_TBS)); | ||
1197 | |||
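	/* Ensure the descriptor fields written above are visible to the
	 * hardware before the ownership bit is handed over below.
	 */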
1198 | wmb(); | ||
1199 | |||
1200 | /* Hand it over to the hardware */ | ||
1201 | ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN); | ||
1202 | |||
1203 | if (++ksp->tx_ring_used == MAX_TX_DESC) | ||
1204 | netif_stop_queue(ndev); | ||
1205 | |||
1206 | /* Kick the TX DMA in case it decided to go IDLE */ | ||
1207 | ks8695_writereg(ksp, KS8695_DTSC, 0); | ||
1208 | |||
1209 | /* And update the next ring slot */ | ||
1210 | ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK; | ||
1211 | |||
1212 | spin_unlock_irq(&ksp->txq_lock); | ||
1213 | return NETDEV_TX_OK; | ||
1214 | } | ||
1215 | |||
1216 | /** | ||
1217 | * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface | ||
1218 | * @ndev: The net_device to stop | ||
1219 | * | ||
1220 | * This disables the TX queue and cleans up a KS8695 ethernet | ||
1221 | * device. | ||
1222 | */ | ||
1223 | static int | ||
1224 | ks8695_stop(struct net_device *ndev) | ||
1225 | { | ||
1226 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1227 | |||
1228 | netif_stop_queue(ndev); | ||
1229 | napi_disable(&ksp->napi); | ||
1230 | |||
1231 | ks8695_shutdown(ksp); | ||
1232 | |||
1233 | return 0; | ||
1234 | } | ||
1235 | |||
1236 | /** | ||
1237 | * ks8695_open - Open (bring up) a KS8695 ethernet interface | ||
1238 | * @ndev: The net_device to open | ||
1239 | * | ||
1240 | * This resets, configures the MAC, initialises the RX ring and | ||
1241 | * DMA engines and starts the TX queue for a KS8695 ethernet | ||
1242 | * device. | ||
1243 | */ | ||
1244 | static int | ||
1245 | ks8695_open(struct net_device *ndev) | ||
1246 | { | ||
1247 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1248 | int ret; | ||
1249 | |||
1250 | ks8695_reset(ksp); | ||
1251 | |||
1252 | ks8695_update_mac(ksp); | ||
1253 | |||
1254 | ret = ks8695_init_net(ksp); | ||
1255 | if (ret) { | ||
1256 | ks8695_shutdown(ksp); | ||
1257 | return ret; | ||
1258 | } | ||
1259 | |||
1260 | napi_enable(&ksp->napi); | ||
1261 | netif_start_queue(ndev); | ||
1262 | |||
1263 | return 0; | ||
1264 | } | ||
1265 | |||
1266 | /* Platform device driver */ | ||
1267 | |||
1268 | /** | ||
1269 | * ks8695_init_switch - Init LAN switch to known good defaults. | ||
1270 | * @ksp: The device to initialise | ||
1271 | * | ||
1272 | * This initialises the LAN switch in the KS8695 to a known-good | ||
1273 | * set of defaults. | ||
1274 | */ | ||
1275 | static void | ||
1276 | ks8695_init_switch(struct ks8695_priv *ksp) | ||
1277 | { | ||
1278 | u32 ctrl; | ||
1279 | |||
1280 | /* Default value for SEC0 according to datasheet */ | ||
1281 | ctrl = 0x40819e00; | ||
1282 | |||
1283 | /* LED0 = Speed LED1 = Link/Activity */ | ||
1284 | ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S); | ||
1285 | ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY); | ||
1286 | |||
1287 | /* Enable Switch */ | ||
1288 | ctrl |= SEC0_ENABLE; | ||
1289 | |||
1290 | writel(ctrl, ksp->phyiface_regs + KS8695_SEC0); | ||
1291 | |||
1292 | /* Defaults for SEC1 */ | ||
1293 | writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1); | ||
1294 | } | ||
1295 | |||
1296 | /** | ||
1297 | * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults | ||
1298 | * @ksp: The device to initialise | ||
1299 | * | ||
1300 | * This initialises a KS8695's WAN PHY to sensible values for | ||
1301 | * autonegotiation etc. | ||
1302 | */ | ||
1303 | static void | ||
1304 | ks8695_init_wan_phy(struct ks8695_priv *ksp) | ||
1305 | { | ||
1306 | u32 ctrl; | ||
1307 | |||
1308 | /* Support auto-negotiation */ | ||
1309 | ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H | | ||
1310 | WMC_WANA10F | WMC_WANA10H); | ||
1311 | |||
1312 | /* LED0 = Activity , LED1 = Link */ | ||
1313 | ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK); | ||
1314 | |||
1315 | /* Restart Auto-negotiation */ | ||
1316 | ctrl |= WMC_WANR; | ||
1317 | |||
1318 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | ||
1319 | |||
1320 | writel(0, ksp->phyiface_regs + KS8695_WPPM); | ||
1321 | writel(0, ksp->phyiface_regs + KS8695_PPS); | ||
1322 | } | ||
1323 | |||
1324 | static const struct net_device_ops ks8695_netdev_ops = { | ||
1325 | .ndo_open = ks8695_open, | ||
1326 | .ndo_stop = ks8695_stop, | ||
1327 | .ndo_start_xmit = ks8695_start_xmit, | ||
1328 | .ndo_tx_timeout = ks8695_timeout, | ||
1329 | .ndo_set_mac_address = ks8695_set_mac, | ||
1330 | .ndo_validate_addr = eth_validate_addr, | ||
1331 | .ndo_set_rx_mode = ks8695_set_multicast, | ||
1332 | }; | ||
1333 | |||
1334 | /** | ||
1335 | * ks8695_probe - Probe and initialise a KS8695 ethernet interface | ||
1336 | * @pdev: The platform device to probe | ||
1337 | * | ||
1338 | * Initialise a KS8695 ethernet device from platform data. | ||
1339 | * | ||
1340 | * This driver requires at least one IORESOURCE_MEM for the | ||
1341 | * registers and two IORESOURCE_IRQ for the RX and TX IRQs | ||
1342 | * respectively. It can optionally take an additional | ||
1343 | * IORESOURCE_MEM for the switch or PHY in the case of the LAN or | ||
1344 | * WAN ports, and an IORESOURCE_IRQ for the link-state interrupt on | ||
1345 | * the WAN port. | ||
1346 | */ | ||
1347 | static int | ||
1348 | ks8695_probe(struct platform_device *pdev) | ||
1349 | { | ||
1350 | struct ks8695_priv *ksp; | ||
1351 | struct net_device *ndev; | ||
1352 | struct resource *regs_res, *phyiface_res; | ||
1353 | struct resource *rxirq_res, *txirq_res, *linkirq_res; | ||
1354 | int ret = 0; | ||
1355 | int buff_n; | ||
1356 | bool inv_mac_addr = false; | ||
1357 | u32 machigh, maclow; | ||
1358 | |||
1359 | /* Initialise a net_device */ | ||
1360 | ndev = alloc_etherdev(sizeof(struct ks8695_priv)); | ||
1361 | if (!ndev) | ||
1362 | return -ENOMEM; | ||
1363 | |||
1364 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
1365 | |||
1366 | dev_dbg(&pdev->dev, "ks8695_probe() called\n"); | ||
1367 | |||
1368 | /* Configure our private structure a little */ | ||
1369 | ksp = netdev_priv(ndev); | ||
1370 | |||
1371 | ksp->dev = &pdev->dev; | ||
1372 | ksp->ndev = ndev; | ||
1373 | ksp->msg_enable = NETIF_MSG_LINK; | ||
1374 | |||
1375 | /* Retrieve resources */ | ||
1376 | regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1377 | phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1378 | |||
1379 | rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
1380 | txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | ||
1381 | linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2); | ||
1382 | |||
1383 | if (!(regs_res && rxirq_res && txirq_res)) { | ||
1384 | dev_err(ksp->dev, "insufficient resources\n"); | ||
1385 | ret = -ENOENT; | ||
1386 | goto failure; | ||
1387 | } | ||
1388 | |||
1389 | ksp->regs_req = request_mem_region(regs_res->start, | ||
1390 | resource_size(regs_res), | ||
1391 | pdev->name); | ||
1392 | |||
1393 | if (!ksp->regs_req) { | ||
1394 | dev_err(ksp->dev, "cannot claim register space\n"); | ||
1395 | ret = -EIO; | ||
1396 | goto failure; | ||
1397 | } | ||
1398 | |||
1399 | ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res)); | ||
1400 | |||
1401 | if (!ksp->io_regs) { | ||
1402 | dev_err(ksp->dev, "failed to ioremap registers\n"); | ||
1403 | ret = -EINVAL; | ||
1404 | goto failure; | ||
1405 | } | ||
1406 | |||
1407 | if (phyiface_res) { | ||
1408 | ksp->phyiface_req = | ||
1409 | request_mem_region(phyiface_res->start, | ||
1410 | resource_size(phyiface_res), | ||
1411 | phyiface_res->name); | ||
1412 | |||
1413 | if (!ksp->phyiface_req) { | ||
1414 | dev_err(ksp->dev, | ||
1415 | "cannot claim switch register space\n"); | ||
1416 | ret = -EIO; | ||
1417 | goto failure; | ||
1418 | } | ||
1419 | |||
1420 | ksp->phyiface_regs = ioremap(phyiface_res->start, | ||
1421 | resource_size(phyiface_res)); | ||
1422 | |||
1423 | if (!ksp->phyiface_regs) { | ||
1424 | dev_err(ksp->dev, | ||
1425 | "failed to ioremap switch registers\n"); | ||
1426 | ret = -EINVAL; | ||
1427 | goto failure; | ||
1428 | } | ||
1429 | } | ||
1430 | |||
1431 | ksp->rx_irq = rxirq_res->start; | ||
1432 | ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX"; | ||
1433 | ksp->tx_irq = txirq_res->start; | ||
1434 | ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX"; | ||
1435 | ksp->link_irq = (linkirq_res ? linkirq_res->start : -1); | ||
1436 | ksp->link_irq_name = (linkirq_res && linkirq_res->name) ? | ||
1437 | linkirq_res->name : "Ethernet Link"; | ||
1438 | |||
1439 | /* driver system setup */ | ||
1440 | ndev->netdev_ops = &ks8695_netdev_ops; | ||
1441 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); | ||
1442 | |||
1443 | netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); | ||
1444 | |||
1445 | /* Retrieve the default MAC addr from the chip. */ | ||
1446 | /* The bootloader should have left it in there for us. */ | ||
1447 | |||
1448 | machigh = ks8695_readreg(ksp, KS8695_MAH); | ||
1449 | maclow = ks8695_readreg(ksp, KS8695_MAL); | ||
1450 | |||
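	/* KS8695_MAH holds the two most significant bytes of the MAC
	 * address; KS8695_MAL holds the remaining four.
	 */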
1451 | ndev->dev_addr[0] = (machigh >> 8) & 0xFF; | ||
1452 | ndev->dev_addr[1] = machigh & 0xFF; | ||
1453 | ndev->dev_addr[2] = (maclow >> 24) & 0xFF; | ||
1454 | ndev->dev_addr[3] = (maclow >> 16) & 0xFF; | ||
1455 | ndev->dev_addr[4] = (maclow >> 8) & 0xFF; | ||
1456 | ndev->dev_addr[5] = maclow & 0xFF; | ||
1457 | |||
1458 | if (!is_valid_ether_addr(ndev->dev_addr)) | ||
1459 | inv_mac_addr = true; | ||
1460 | |||
1461 | /* In order to be efficient memory-wise, we allocate both | ||
1462 | * rings in one go. | ||
1463 | */ | ||
1464 | ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE, | ||
1465 | &ksp->ring_base_dma, GFP_KERNEL); | ||
1466 | if (!ksp->ring_base) { | ||
1467 | ret = -ENOMEM; | ||
1468 | goto failure; | ||
1469 | } | ||
1470 | |||
1471 | /* Specify the TX DMA ring buffer */ | ||
1472 | ksp->tx_ring = ksp->ring_base; | ||
1473 | ksp->tx_ring_dma = ksp->ring_base_dma; | ||
1474 | |||
1475 | /* And initialise the queue's lock */ | ||
1476 | spin_lock_init(&ksp->txq_lock); | ||
1477 | spin_lock_init(&ksp->rx_lock); | ||
1478 | |||
1479 | /* Specify the RX DMA ring buffer */ | ||
1480 | ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE; | ||
1481 | ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE; | ||
1482 | |||
1483 | /* Zero the descriptor rings */ | ||
1484 | memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE); | ||
1485 | memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE); | ||
1486 | |||
1487 | /* Build the rings */ | ||
1488 | for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) { | ||
1489 | ksp->tx_ring[buff_n].next_desc = | ||
1490 | cpu_to_le32(ksp->tx_ring_dma + | ||
1491 | (sizeof(struct tx_ring_desc) * | ||
1492 | ((buff_n + 1) & MAX_TX_DESC_MASK))); | ||
1493 | } | ||
1494 | |||
1495 | for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) { | ||
1496 | ksp->rx_ring[buff_n].next_desc = | ||
1497 | cpu_to_le32(ksp->rx_ring_dma + | ||
1498 | (sizeof(struct rx_ring_desc) * | ||
1499 | ((buff_n + 1) & MAX_RX_DESC_MASK))); | ||
1500 | } | ||
1501 | |||
1502 | /* Initialise the port (physically) */ | ||
1503 | if (ksp->phyiface_regs && ksp->link_irq == -1) { | ||
1504 | ks8695_init_switch(ksp); | ||
1505 | ksp->dtype = KS8695_DTYPE_LAN; | ||
1506 | ndev->ethtool_ops = &ks8695_ethtool_ops; | ||
1507 | } else if (ksp->phyiface_regs && ksp->link_irq != -1) { | ||
1508 | ks8695_init_wan_phy(ksp); | ||
1509 | ksp->dtype = KS8695_DTYPE_WAN; | ||
1510 | ndev->ethtool_ops = &ks8695_wan_ethtool_ops; | ||
1511 | } else { | ||
1512 | /* No initialisation since HPNA does not have a PHY */ | ||
1513 | ksp->dtype = KS8695_DTYPE_HPNA; | ||
1514 | ndev->ethtool_ops = &ks8695_ethtool_ops; | ||
1515 | } | ||
1516 | |||
1517 | /* And bring up the net_device with the net core */ | ||
1518 | platform_set_drvdata(pdev, ndev); | ||
1519 | ret = register_netdev(ndev); | ||
1520 | |||
1521 | if (ret == 0) { | ||
1522 | if (inv_mac_addr) | ||
1523 | dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n", | ||
1524 | ndev->name); | ||
1525 | dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n", | ||
1526 | ks8695_port_type(ksp), ndev->dev_addr); | ||
1527 | } else { | ||
1528 | /* Report the failure to register the net_device */ | ||
1529 | dev_err(ksp->dev, "ks8695net: failed to register netdev.\n"); | ||
1530 | goto failure; | ||
1531 | } | ||
1532 | |||
1533 | /* All is well */ | ||
1534 | return 0; | ||
1535 | |||
1536 | /* Error exit path */ | ||
1537 | failure: | ||
1538 | ks8695_release_device(ksp); | ||
1539 | free_netdev(ndev); | ||
1540 | |||
1541 | return ret; | ||
1542 | } | ||
1543 | |||
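/* A minimal, hypothetical sketch of the platform resources that
 * ks8695_probe() expects, as described in its kernel-doc above: one
 * mandatory register region and RX/TX IRQs, plus an optional switch/PHY
 * region and link IRQ for the WAN port. The addresses, IRQ numbers and
 * device name below are placeholders, not taken from this driver; a real
 * board file supplies its own values and must use the driver's MODULENAME
 * (defined earlier in the file, not shown in this hunk) as the device name.
 */
#include <linux/ioport.h>		/* assumed: struct resource */
#include <linux/platform_device.h>	/* assumed: struct platform_device */

static struct resource ks8695_wan_resources[] = {
	{
		/* MAC register block: mandatory IORESOURCE_MEM index 0 */
		.start	= 0x03fe0000,		/* placeholder address */
		.end	= 0x03fe3fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* Switch/PHY interface registers: optional IORESOURCE_MEM index 1 */
		.start	= 0x03ffe000,		/* placeholder address */
		.end	= 0x03ffefff,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* RX interrupt: mandatory IORESOURCE_IRQ index 0 */
		.name	= "Ethernet RX",
		.start	= 29,			/* placeholder IRQ */
		.end	= 29,
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* TX interrupt: mandatory IORESOURCE_IRQ index 1 */
		.name	= "Ethernet TX",
		.start	= 30,			/* placeholder IRQ */
		.end	= 30,
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* Link-state interrupt: optional IORESOURCE_IRQ index 2 (WAN only) */
		.name	= "Ethernet Link",
		.start	= 31,			/* placeholder IRQ */
		.end	= 31,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device ks8695_wan_device = {
	.name		= "ks8695-wan",		/* placeholder; must match MODULENAME */
	.id		= 0,
	.resource	= ks8695_wan_resources,
	.num_resources	= ARRAY_SIZE(ks8695_wan_resources),
};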
1544 | /** | ||
1545 | * ks8695_drv_suspend - Suspend a KS8695 ethernet platform device. | ||
1546 | * @pdev: The device to suspend | ||
1547 | * @state: The suspend state | ||
1548 | * | ||
1549 | * This routine detaches and shuts down a KS8695 ethernet device. | ||
1550 | */ | ||
1551 | static int | ||
1552 | ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state) | ||
1553 | { | ||
1554 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1555 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1556 | |||
1557 | ksp->in_suspend = 1; | ||
1558 | |||
1559 | if (netif_running(ndev)) { | ||
1560 | netif_device_detach(ndev); | ||
1561 | ks8695_shutdown(ksp); | ||
1562 | } | ||
1563 | |||
1564 | return 0; | ||
1565 | } | ||
1566 | |||
1567 | /** | ||
1568 | * ks8695_drv_resume - Resume a KS8695 ethernet platform device. | ||
1569 | * @pdev: The device to resume | ||
1570 | * | ||
1571 | * This routine re-initialises and re-attaches a KS8695 ethernet | ||
1572 | * device. | ||
1573 | */ | ||
1574 | static int | ||
1575 | ks8695_drv_resume(struct platform_device *pdev) | ||
1576 | { | ||
1577 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1578 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1579 | |||
1580 | if (netif_running(ndev)) { | ||
1581 | ks8695_reset(ksp); | ||
1582 | ks8695_init_net(ksp); | ||
1583 | ks8695_set_multicast(ndev); | ||
1584 | netif_device_attach(ndev); | ||
1585 | } | ||
1586 | |||
1587 | ksp->in_suspend = 0; | ||
1588 | |||
1589 | return 0; | ||
1590 | } | ||
1591 | |||
1592 | /** | ||
1593 | * ks8695_drv_remove - Remove a KS8695 net device on driver unload. | ||
1594 | * @pdev: The platform device to remove | ||
1595 | * | ||
1596 | * This unregisters and releases a KS8695 ethernet device. | ||
1597 | */ | ||
1598 | static int | ||
1599 | ks8695_drv_remove(struct platform_device *pdev) | ||
1600 | { | ||
1601 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1602 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1603 | |||
1604 | netif_napi_del(&ksp->napi); | ||
1605 | |||
1606 | unregister_netdev(ndev); | ||
1607 | ks8695_release_device(ksp); | ||
1608 | free_netdev(ndev); | ||
1609 | |||
1610 | dev_dbg(&pdev->dev, "released and freed device\n"); | ||
1611 | return 0; | ||
1612 | } | ||
1613 | |||
1614 | static struct platform_driver ks8695_driver = { | ||
1615 | .driver = { | ||
1616 | .name = MODULENAME, | ||
1617 | }, | ||
1618 | .probe = ks8695_probe, | ||
1619 | .remove = ks8695_drv_remove, | ||
1620 | .suspend = ks8695_drv_suspend, | ||
1621 | .resume = ks8695_drv_resume, | ||
1622 | }; | ||
1623 | |||
1624 | module_platform_driver(ks8695_driver); | ||
1625 | |||
1626 | MODULE_AUTHOR("Simtec Electronics"); | ||
1627 | MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver"); | ||
1628 | MODULE_LICENSE("GPL"); | ||
1629 | MODULE_ALIAS("platform:" MODULENAME); | ||
1630 | |||
1631 | module_param(watchdog, int, 0400); | ||
1632 | MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); | ||
diff --git a/drivers/net/ethernet/micrel/ks8695net.h b/drivers/net/ethernet/micrel/ks8695net.h deleted file mode 100644 index b18fad4ad5fd..000000000000 --- a/drivers/net/ethernet/micrel/ks8695net.h +++ /dev/null | |||
@@ -1,108 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Micrel KS8695 (Centaur) Ethernet. | ||
4 | * | ||
5 | * Copyright 2008 Simtec Electronics | ||
6 | * Daniel Silverstone <dsilvers@simtec.co.uk> | ||
7 | * Vincent Sanders <vince@simtec.co.uk> | ||
8 | */ | ||
9 | |||
10 | #ifndef KS8695NET_H | ||
11 | #define KS8695NET_H | ||
12 | |||
13 | /* Receive descriptor flags */ | ||
14 | #define RDES_OWN (1 << 31) /* Ownership */ | ||
15 | #define RDES_FS (1 << 30) /* First Descriptor */ | ||
16 | #define RDES_LS (1 << 29) /* Last Descriptor */ | ||
17 | #define RDES_IPE (1 << 28) /* IP Checksum error */ | ||
18 | #define RDES_TCPE (1 << 27) /* TCP Checksum error */ | ||
19 | #define RDES_UDPE (1 << 26) /* UDP Checksum error */ | ||
20 | #define RDES_ES (1 << 25) /* Error summary */ | ||
21 | #define RDES_MF (1 << 24) /* Multicast Frame */ | ||
22 | #define RDES_RE (1 << 19) /* MII Error reported */ | ||
23 | #define RDES_TL (1 << 18) /* Frame too Long */ | ||
24 | #define RDES_RF (1 << 17) /* Runt Frame */ | ||
25 | #define RDES_CE (1 << 16) /* CRC error */ | ||
26 | #define RDES_FT (1 << 15) /* Frame Type */ | ||
27 | #define RDES_FLEN (0x7ff) /* Frame Length */ | ||
28 | |||
29 | #define RDES_RER (1 << 25) /* Receive End of Ring */ | ||
30 | #define RDES_RBS (0x7ff) /* Receive Buffer Size */ | ||
31 | |||
32 | /* Transmit descriptor flags */ | ||
33 | |||
34 | #define TDES_OWN (1 << 31) /* Ownership */ | ||
35 | |||
36 | #define TDES_IC (1 << 31) /* Interrupt on Completion */ | ||
37 | #define TDES_FS (1 << 30) /* First Segment */ | ||
38 | #define TDES_LS (1 << 29) /* Last Segment */ | ||
39 | #define TDES_IPCKG (1 << 28) /* IP Checksum generate */ | ||
40 | #define TDES_TCPCKG (1 << 27) /* TCP Checksum generate */ | ||
41 | #define TDES_UDPCKG (1 << 26) /* UDP Checksum generate */ | ||
42 | #define TDES_TER (1 << 25) /* Transmit End of Ring */ | ||
43 | #define TDES_TBS (0x7ff) /* Transmit Buffer Size */ | ||
44 | |||
45 | /* | ||
46 | * Network controller register offsets | ||
47 | */ | ||
48 | #define KS8695_DTXC (0x00) /* DMA Transmit Control */ | ||
49 | #define KS8695_DRXC (0x04) /* DMA Receive Control */ | ||
50 | #define KS8695_DTSC (0x08) /* DMA Transmit Start Command */ | ||
51 | #define KS8695_DRSC (0x0c) /* DMA Receive Start Command */ | ||
52 | #define KS8695_TDLB (0x10) /* Transmit Descriptor List | ||
53 | * Base Address | ||
54 | */ | ||
55 | #define KS8695_RDLB (0x14) /* Receive Descriptor List | ||
56 | * Base Address | ||
57 | */ | ||
58 | #define KS8695_MAL (0x18) /* MAC Station Address Low */ | ||
59 | #define KS8695_MAH (0x1c) /* MAC Station Address High */ | ||
60 | #define KS8695_AAL_(n) (0x80 + ((n)*8)) /* MAC Additional | ||
61 | * Station Address | ||
62 | * (0..15) Low | ||
63 | */ | ||
64 | #define KS8695_AAH_(n) (0x84 + ((n)*8)) /* MAC Additional | ||
65 | * Station Address | ||
66 | * (0..15) High | ||
67 | */ | ||
68 | |||
69 | |||
70 | /* DMA Transmit Control Register */ | ||
71 | #define DTXC_TRST (1 << 31) /* Soft Reset */ | ||
72 | #define DTXC_TBS (0x3f << 24) /* Transmit Burst Size */ | ||
73 | #define DTXC_TUCG (1 << 18) /* Transmit UDP | ||
74 | * Checksum Generate | ||
75 | */ | ||
76 | #define DTXC_TTCG (1 << 17) /* Transmit TCP | ||
77 | * Checksum Generate | ||
78 | */ | ||
79 | #define DTXC_TICG (1 << 16) /* Transmit IP | ||
80 | * Checksum Generate | ||
81 | */ | ||
82 | #define DTXC_TFCE (1 << 9) /* Transmit Flow | ||
83 | * Control Enable | ||
84 | */ | ||
85 | #define DTXC_TLB (1 << 8) /* Loopback mode */ | ||
86 | #define DTXC_TEP (1 << 2) /* Transmit Enable Padding */ | ||
87 | #define DTXC_TAC (1 << 1) /* Transmit Add CRC */ | ||
88 | #define DTXC_TE (1 << 0) /* TX Enable */ | ||
89 | |||
90 | /* DMA Receive Control Register */ | ||
91 | #define DRXC_RBS (0x3f << 24) /* Receive Burst Size */ | ||
92 | #define DRXC_RUCC (1 << 18) /* Receive UDP Checksum check */ | ||
93 | #define DRXC_RTCG (1 << 17) /* Receive TCP Checksum check */ | ||
94 | #define DRXC_RICG (1 << 16) /* Receive IP Checksum check */ | ||
95 | #define DRXC_RFCE (1 << 9) /* Receive Flow Control | ||
96 | * Enable | ||
97 | */ | ||
98 | #define DRXC_RB (1 << 6) /* Receive Broadcast */ | ||
99 | #define DRXC_RM (1 << 5) /* Receive Multicast */ | ||
100 | #define DRXC_RU (1 << 4) /* Receive Unicast */ | ||
101 | #define DRXC_RERR (1 << 3) /* Receive Error Frame */ | ||
102 | #define DRXC_RA (1 << 2) /* Receive All */ | ||
103 | #define DRXC_RE (1 << 0) /* RX Enable */ | ||
104 | |||
105 | /* Additional Station Address High */ | ||
106 | #define AAH_E (1 << 31) /* Address Enabled */ | ||
107 | |||
108 | #endif /* KS8695NET_H */ | ||
diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig deleted file mode 100644 index 325e26c549f8..000000000000 --- a/drivers/net/ethernet/nuvoton/Kconfig +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | ||
2 | # | ||
3 | # Nuvoton network device configuration | ||
4 | # | ||
5 | |||
6 | config NET_VENDOR_NUVOTON | ||
7 | bool "Nuvoton devices" | ||
8 | default y | ||
9 | depends on ARM && ARCH_W90X900 | ||
10 | ---help--- | ||
11 | If you have a network (Ethernet) card belonging to this class, say Y. | ||
12 | |||
13 | Note that the answer to this question doesn't directly affect the | ||
14 | kernel: saying N will just cause the configurator to skip all | ||
15 | the questions about Nuvoton cards. If you say Y, you will be asked | ||
16 | for your specific card in the following questions. | ||
17 | |||
18 | if NET_VENDOR_NUVOTON | ||
19 | |||
20 | config W90P910_ETH | ||
21 | tristate "Nuvoton w90p910 Ethernet support" | ||
22 | depends on ARM && ARCH_W90X900 | ||
23 | select PHYLIB | ||
24 | select MII | ||
25 | ---help--- | ||
26 | Say Y here if you want to use built-in Ethernet ports | ||
27 | on w90p910 processor. | ||
28 | |||
29 | endif # NET_VENDOR_NUVOTON | ||
diff --git a/drivers/net/ethernet/nuvoton/Makefile b/drivers/net/ethernet/nuvoton/Makefile deleted file mode 100644 index 66f6e728d54b..000000000000 --- a/drivers/net/ethernet/nuvoton/Makefile +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | ||
2 | # | ||
3 | # Makefile for the Nuvoton network device drivers. | ||
4 | # | ||
5 | |||
6 | obj-$(CONFIG_W90P910_ETH) += w90p910_ether.o | ||
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c deleted file mode 100644 index 3d73970b3a2e..000000000000 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ /dev/null | |||
@@ -1,1082 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * Copyright (c) 2008-2009 Nuvoton technology corporation. | ||
4 | * | ||
5 | * Wan ZongShun <mcuos.com@gmail.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/mii.h> | ||
12 | #include <linux/netdevice.h> | ||
13 | #include <linux/etherdevice.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/ethtool.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/clk.h> | ||
18 | #include <linux/gfp.h> | ||
19 | |||
20 | #define DRV_MODULE_NAME "w90p910-emc" | ||
21 | #define DRV_MODULE_VERSION "0.1" | ||
22 | |||
23 | /* Ethernet MAC Registers */ | ||
24 | #define REG_CAMCMR 0x00 | ||
25 | #define REG_CAMEN 0x04 | ||
26 | #define REG_CAMM_BASE 0x08 | ||
27 | #define REG_CAML_BASE 0x0c | ||
28 | #define REG_TXDLSA 0x88 | ||
29 | #define REG_RXDLSA 0x8C | ||
30 | #define REG_MCMDR 0x90 | ||
31 | #define REG_MIID 0x94 | ||
32 | #define REG_MIIDA 0x98 | ||
33 | #define REG_FFTCR 0x9C | ||
34 | #define REG_TSDR 0xa0 | ||
35 | #define REG_RSDR 0xa4 | ||
36 | #define REG_DMARFC 0xa8 | ||
37 | #define REG_MIEN 0xac | ||
38 | #define REG_MISTA 0xb0 | ||
39 | #define REG_CTXDSA 0xcc | ||
40 | #define REG_CTXBSA 0xd0 | ||
41 | #define REG_CRXDSA 0xd4 | ||
42 | #define REG_CRXBSA 0xd8 | ||
43 | |||
44 | /* mac controller bit */ | ||
45 | #define MCMDR_RXON 0x01 | ||
46 | #define MCMDR_ACP (0x01 << 3) | ||
47 | #define MCMDR_SPCRC (0x01 << 5) | ||
48 | #define MCMDR_TXON (0x01 << 8) | ||
49 | #define MCMDR_FDUP (0x01 << 18) | ||
50 | #define MCMDR_ENMDC (0x01 << 19) | ||
51 | #define MCMDR_OPMOD (0x01 << 20) | ||
52 | #define SWR (0x01 << 24) | ||
53 | |||
54 | /* cam command register */ | ||
55 | #define CAMCMR_AUP 0x01 | ||
56 | #define CAMCMR_AMP (0x01 << 1) | ||
57 | #define CAMCMR_ABP (0x01 << 2) | ||
58 | #define CAMCMR_CCAM (0x01 << 3) | ||
59 | #define CAMCMR_ECMP (0x01 << 4) | ||
60 | #define CAM0EN 0x01 | ||
61 | |||
62 | /* mac mii controller bit */ | ||
63 | #define MDCCR (0x0a << 20) | ||
64 | #define PHYAD (0x01 << 8) | ||
65 | #define PHYWR (0x01 << 16) | ||
66 | #define PHYBUSY (0x01 << 17) | ||
67 | #define PHYPRESP (0x01 << 18) | ||
68 | #define CAM_ENTRY_SIZE 0x08 | ||
69 | |||
70 | /* rx and tx status */ | ||
71 | #define TXDS_TXCP (0x01 << 19) | ||
72 | #define RXDS_CRCE (0x01 << 17) | ||
73 | #define RXDS_PTLE (0x01 << 19) | ||
74 | #define RXDS_RXGD (0x01 << 20) | ||
75 | #define RXDS_ALIE (0x01 << 21) | ||
76 | #define RXDS_RP (0x01 << 22) | ||
77 | |||
78 | /* mac interrupt status*/ | ||
79 | #define MISTA_EXDEF (0x01 << 19) | ||
80 | #define MISTA_TXBERR (0x01 << 24) | ||
81 | #define MISTA_TDU (0x01 << 23) | ||
82 | #define MISTA_RDU (0x01 << 10) | ||
83 | #define MISTA_RXBERR (0x01 << 11) | ||
84 | |||
85 | #define ENSTART 0x01 | ||
86 | #define ENRXINTR 0x01 | ||
87 | #define ENRXGD (0x01 << 4) | ||
88 | #define ENRXBERR (0x01 << 11) | ||
89 | #define ENTXINTR (0x01 << 16) | ||
90 | #define ENTXCP (0x01 << 18) | ||
91 | #define ENTXABT (0x01 << 21) | ||
92 | #define ENTXBERR (0x01 << 24) | ||
93 | #define ENMDC (0x01 << 19) | ||
94 | #define PHYBUSY (0x01 << 17) | ||
95 | #define MDCCR_VAL 0xa00000 | ||
96 | |||
97 | /* rx and tx owner bit */ | ||
98 | #define RX_OWEN_DMA (0x01 << 31) | ||
99 | #define RX_OWEN_CPU (~(0x03 << 30)) | ||
100 | #define TX_OWEN_DMA (0x01 << 31) | ||
101 | #define TX_OWEN_CPU (~(0x01 << 31)) | ||
102 | |||
103 | /* tx frame desc controller bit */ | ||
104 | #define MACTXINTEN 0x04 | ||
105 | #define CRCMODE 0x02 | ||
106 | #define PADDINGMODE 0x01 | ||
107 | |||
108 | /* fftcr controller bit */ | ||
109 | #define TXTHD (0x03 << 8) | ||
110 | #define BLENGTH (0x01 << 20) | ||
111 | |||
112 | /* global setting for driver */ | ||
113 | #define RX_DESC_SIZE 50 | ||
114 | #define TX_DESC_SIZE 10 | ||
115 | #define MAX_RBUFF_SZ 0x600 | ||
116 | #define MAX_TBUFF_SZ 0x600 | ||
117 | #define TX_TIMEOUT (HZ/2) | ||
118 | #define DELAY 1000 | ||
119 | #define CAM0 0x0 | ||
120 | |||
121 | static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg); | ||
122 | |||
123 | struct w90p910_rxbd { | ||
124 | unsigned int sl; | ||
125 | unsigned int buffer; | ||
126 | unsigned int reserved; | ||
127 | unsigned int next; | ||
128 | }; | ||
129 | |||
130 | struct w90p910_txbd { | ||
131 | unsigned int mode; | ||
132 | unsigned int buffer; | ||
133 | unsigned int sl; | ||
134 | unsigned int next; | ||
135 | }; | ||
136 | |||
137 | struct recv_pdesc { | ||
138 | struct w90p910_rxbd desclist[RX_DESC_SIZE]; | ||
139 | char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ]; | ||
140 | }; | ||
141 | |||
142 | struct tran_pdesc { | ||
143 | struct w90p910_txbd desclist[TX_DESC_SIZE]; | ||
144 | char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ]; | ||
145 | }; | ||
146 | |||
147 | struct w90p910_ether { | ||
148 | struct recv_pdesc *rdesc; | ||
149 | struct tran_pdesc *tdesc; | ||
150 | dma_addr_t rdesc_phys; | ||
151 | dma_addr_t tdesc_phys; | ||
152 | struct platform_device *pdev; | ||
153 | struct resource *res; | ||
154 | struct sk_buff *skb; | ||
155 | struct clk *clk; | ||
156 | struct clk *rmiiclk; | ||
157 | struct mii_if_info mii; | ||
158 | struct timer_list check_timer; | ||
159 | void __iomem *reg; | ||
160 | int rxirq; | ||
161 | int txirq; | ||
162 | unsigned int cur_tx; | ||
163 | unsigned int cur_rx; | ||
164 | unsigned int finish_tx; | ||
165 | unsigned int rx_packets; | ||
166 | unsigned int rx_bytes; | ||
167 | unsigned int start_tx_ptr; | ||
168 | unsigned int start_rx_ptr; | ||
169 | unsigned int linkflag; | ||
170 | }; | ||
171 | |||
172 | static void update_linkspeed_register(struct net_device *dev, | ||
173 | unsigned int speed, unsigned int duplex) | ||
174 | { | ||
175 | struct w90p910_ether *ether = netdev_priv(dev); | ||
176 | unsigned int val; | ||
177 | |||
178 | val = __raw_readl(ether->reg + REG_MCMDR); | ||
179 | |||
180 | if (speed == SPEED_100) { | ||
181 | /* 100 full/half duplex */ | ||
182 | if (duplex == DUPLEX_FULL) { | ||
183 | val |= (MCMDR_OPMOD | MCMDR_FDUP); | ||
184 | } else { | ||
185 | val |= MCMDR_OPMOD; | ||
186 | val &= ~MCMDR_FDUP; | ||
187 | } | ||
188 | } else { | ||
189 | /* 10 full/half duplex */ | ||
190 | if (duplex == DUPLEX_FULL) { | ||
191 | val |= MCMDR_FDUP; | ||
192 | val &= ~MCMDR_OPMOD; | ||
193 | } else { | ||
194 | val &= ~(MCMDR_FDUP | MCMDR_OPMOD); | ||
195 | } | ||
196 | } | ||
197 | |||
198 | __raw_writel(val, ether->reg + REG_MCMDR); | ||
199 | } | ||
200 | |||
201 | static void update_linkspeed(struct net_device *dev) | ||
202 | { | ||
203 | struct w90p910_ether *ether = netdev_priv(dev); | ||
204 | struct platform_device *pdev; | ||
205 | unsigned int bmsr, bmcr, lpa, speed, duplex; | ||
206 | |||
207 | pdev = ether->pdev; | ||
208 | |||
209 | if (!mii_link_ok(ðer->mii)) { | ||
210 | ether->linkflag = 0x0; | ||
211 | netif_carrier_off(dev); | ||
212 | dev_warn(&pdev->dev, "%s: Link down.\n", dev->name); | ||
213 | return; | ||
214 | } | ||
215 | |||
216 | if (ether->linkflag == 1) | ||
217 | return; | ||
218 | |||
219 | bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR); | ||
220 | bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR); | ||
221 | |||
222 | if (bmcr & BMCR_ANENABLE) { | ||
223 | if (!(bmsr & BMSR_ANEGCOMPLETE)) | ||
224 | return; | ||
225 | |||
226 | lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA); | ||
227 | |||
228 | if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) | ||
229 | speed = SPEED_100; | ||
230 | else | ||
231 | speed = SPEED_10; | ||
232 | |||
233 | if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) | ||
234 | duplex = DUPLEX_FULL; | ||
235 | else | ||
236 | duplex = DUPLEX_HALF; | ||
237 | |||
238 | } else { | ||
239 | speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; | ||
240 | duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; | ||
241 | } | ||
242 | |||
243 | update_linkspeed_register(dev, speed, duplex); | ||
244 | |||
245 | dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed, | ||
246 | (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex"); | ||
247 | ether->linkflag = 0x01; | ||
248 | |||
249 | netif_carrier_on(dev); | ||
250 | } | ||
251 | |||
252 | static void w90p910_check_link(struct timer_list *t) | ||
253 | { | ||
254 | struct w90p910_ether *ether = from_timer(ether, t, check_timer); | ||
255 | struct net_device *dev = ether->mii.dev; | ||
256 | |||
257 | update_linkspeed(dev); | ||
258 | mod_timer(ðer->check_timer, jiffies + msecs_to_jiffies(1000)); | ||
259 | } | ||
260 | |||
261 | static void w90p910_write_cam(struct net_device *dev, | ||
262 | unsigned int x, unsigned char *pval) | ||
263 | { | ||
264 | struct w90p910_ether *ether = netdev_priv(dev); | ||
265 | unsigned int msw, lsw; | ||
266 | |||
267 | msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3]; | ||
268 | |||
269 | lsw = (pval[4] << 24) | (pval[5] << 16); | ||
270 | |||
271 | __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE); | ||
272 | __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE); | ||
273 | } | ||
274 | |||
275 | static int w90p910_init_desc(struct net_device *dev) | ||
276 | { | ||
277 | struct w90p910_ether *ether; | ||
278 | struct w90p910_txbd *tdesc; | ||
279 | struct w90p910_rxbd *rdesc; | ||
280 | struct platform_device *pdev; | ||
281 | unsigned int i; | ||
282 | |||
283 | ether = netdev_priv(dev); | ||
284 | pdev = ether->pdev; | ||
285 | |||
286 | ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc), | ||
287 | ðer->tdesc_phys, GFP_KERNEL); | ||
288 | if (!ether->tdesc) | ||
289 | return -ENOMEM; | ||
290 | |||
291 | ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc), | ||
292 | ðer->rdesc_phys, GFP_KERNEL); | ||
293 | if (!ether->rdesc) { | ||
294 | dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), | ||
295 | ether->tdesc, ether->tdesc_phys); | ||
296 | return -ENOMEM; | ||
297 | } | ||
298 | |||
299 | for (i = 0; i < TX_DESC_SIZE; i++) { | ||
300 | unsigned int offset; | ||
301 | |||
302 | tdesc = &(ether->tdesc->desclist[i]); | ||
303 | |||
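		/* Chain the descriptors into a ring: the last entry points
		 * back to the first.
		 */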
304 | if (i == TX_DESC_SIZE - 1) | ||
305 | offset = offsetof(struct tran_pdesc, desclist[0]); | ||
306 | else | ||
307 | offset = offsetof(struct tran_pdesc, desclist[i + 1]); | ||
308 | |||
309 | tdesc->next = ether->tdesc_phys + offset; | ||
310 | tdesc->buffer = ether->tdesc_phys + | ||
311 | offsetof(struct tran_pdesc, tran_buf[i]); | ||
312 | tdesc->sl = 0; | ||
313 | tdesc->mode = 0; | ||
314 | } | ||
315 | |||
316 | ether->start_tx_ptr = ether->tdesc_phys; | ||
317 | |||
318 | for (i = 0; i < RX_DESC_SIZE; i++) { | ||
319 | unsigned int offset; | ||
320 | |||
321 | rdesc = &(ether->rdesc->desclist[i]); | ||
322 | |||
323 | if (i == RX_DESC_SIZE - 1) | ||
324 | offset = offsetof(struct recv_pdesc, desclist[0]); | ||
325 | else | ||
326 | offset = offsetof(struct recv_pdesc, desclist[i + 1]); | ||
327 | |||
328 | rdesc->next = ether->rdesc_phys + offset; | ||
329 | rdesc->sl = RX_OWEN_DMA; | ||
330 | rdesc->buffer = ether->rdesc_phys + | ||
331 | offsetof(struct recv_pdesc, recv_buf[i]); | ||
332 | } | ||
333 | |||
334 | ether->start_rx_ptr = ether->rdesc_phys; | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | static void w90p910_set_fifo_threshold(struct net_device *dev) | ||
340 | { | ||
341 | struct w90p910_ether *ether = netdev_priv(dev); | ||
342 | unsigned int val; | ||
343 | |||
344 | val = TXTHD | BLENGTH; | ||
345 | __raw_writel(val, ether->reg + REG_FFTCR); | ||
346 | } | ||
347 | |||
348 | static void w90p910_return_default_idle(struct net_device *dev) | ||
349 | { | ||
350 | struct w90p910_ether *ether = netdev_priv(dev); | ||
351 | unsigned int val; | ||
352 | |||
353 | val = __raw_readl(ether->reg + REG_MCMDR); | ||
354 | val |= SWR; | ||
355 | __raw_writel(val, ether->reg + REG_MCMDR); | ||
356 | } | ||
357 | |||
358 | static void w90p910_trigger_rx(struct net_device *dev) | ||
359 | { | ||
360 | struct w90p910_ether *ether = netdev_priv(dev); | ||
361 | |||
362 | __raw_writel(ENSTART, ether->reg + REG_RSDR); | ||
363 | } | ||
364 | |||
365 | static void w90p910_trigger_tx(struct net_device *dev) | ||
366 | { | ||
367 | struct w90p910_ether *ether = netdev_priv(dev); | ||
368 | |||
369 | __raw_writel(ENSTART, ether->reg + REG_TSDR); | ||
370 | } | ||
371 | |||
372 | static void w90p910_enable_mac_interrupt(struct net_device *dev) | ||
373 | { | ||
374 | struct w90p910_ether *ether = netdev_priv(dev); | ||
375 | unsigned int val; | ||
376 | |||
377 | val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP; | ||
378 | val |= ENTXBERR | ENRXBERR | ENTXABT; | ||
379 | |||
380 | __raw_writel(val, ether->reg + REG_MIEN); | ||
381 | } | ||
382 | |||
383 | static void w90p910_get_and_clear_int(struct net_device *dev, | ||
384 | unsigned int *val) | ||
385 | { | ||
386 | struct w90p910_ether *ether = netdev_priv(dev); | ||
387 | |||
388 | *val = __raw_readl(ether->reg + REG_MISTA); | ||
389 | __raw_writel(*val, ether->reg + REG_MISTA); | ||
390 | } | ||
391 | |||
392 | static void w90p910_set_global_maccmd(struct net_device *dev) | ||
393 | { | ||
394 | struct w90p910_ether *ether = netdev_priv(dev); | ||
395 | unsigned int val; | ||
396 | |||
397 | val = __raw_readl(ether->reg + REG_MCMDR); | ||
398 | val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC; | ||
399 | __raw_writel(val, ether->reg + REG_MCMDR); | ||
400 | } | ||
401 | |||
402 | static void w90p910_enable_cam(struct net_device *dev) | ||
403 | { | ||
404 | struct w90p910_ether *ether = netdev_priv(dev); | ||
405 | unsigned int val; | ||
406 | |||
407 | w90p910_write_cam(dev, CAM0, dev->dev_addr); | ||
408 | |||
409 | val = __raw_readl(ether->reg + REG_CAMEN); | ||
410 | val |= CAM0EN; | ||
411 | __raw_writel(val, ether->reg + REG_CAMEN); | ||
412 | } | ||
413 | |||
414 | static void w90p910_enable_cam_command(struct net_device *dev) | ||
415 | { | ||
416 | struct w90p910_ether *ether = netdev_priv(dev); | ||
417 | unsigned int val; | ||
418 | |||
419 | val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP; | ||
420 | __raw_writel(val, ether->reg + REG_CAMCMR); | ||
421 | } | ||
422 | |||
423 | static void w90p910_enable_tx(struct net_device *dev, unsigned int enable) | ||
424 | { | ||
425 | struct w90p910_ether *ether = netdev_priv(dev); | ||
426 | unsigned int val; | ||
427 | |||
428 | val = __raw_readl(ether->reg + REG_MCMDR); | ||
429 | |||
430 | if (enable) | ||
431 | val |= MCMDR_TXON; | ||
432 | else | ||
433 | val &= ~MCMDR_TXON; | ||
434 | |||
435 | __raw_writel(val, ether->reg + REG_MCMDR); | ||
436 | } | ||
437 | |||
438 | static void w90p910_enable_rx(struct net_device *dev, unsigned int enable) | ||
439 | { | ||
440 | struct w90p910_ether *ether = netdev_priv(dev); | ||
441 | unsigned int val; | ||
442 | |||
443 | val = __raw_readl(ether->reg + REG_MCMDR); | ||
444 | |||
445 | if (enable) | ||
446 | val |= MCMDR_RXON; | ||
447 | else | ||
448 | val &= ~MCMDR_RXON; | ||
449 | |||
450 | __raw_writel(val, ether->reg + REG_MCMDR); | ||
451 | } | ||
452 | |||
453 | static void w90p910_set_curdest(struct net_device *dev) | ||
454 | { | ||
455 | struct w90p910_ether *ether = netdev_priv(dev); | ||
456 | |||
457 | __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA); | ||
458 | __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA); | ||
459 | } | ||
460 | |||
461 | static void w90p910_reset_mac(struct net_device *dev) | ||
462 | { | ||
463 | struct w90p910_ether *ether = netdev_priv(dev); | ||
464 | |||
465 | w90p910_enable_tx(dev, 0); | ||
466 | w90p910_enable_rx(dev, 0); | ||
467 | w90p910_set_fifo_threshold(dev); | ||
468 | w90p910_return_default_idle(dev); | ||
469 | |||
470 | if (!netif_queue_stopped(dev)) | ||
471 | netif_stop_queue(dev); | ||
472 | |||
473 | w90p910_init_desc(dev); | ||
474 | |||
475 | netif_trans_update(dev); /* prevent tx timeout */ | ||
476 | ether->cur_tx = 0x0; | ||
477 | ether->finish_tx = 0x0; | ||
478 | ether->cur_rx = 0x0; | ||
479 | |||
480 | w90p910_set_curdest(dev); | ||
481 | w90p910_enable_cam(dev); | ||
482 | w90p910_enable_cam_command(dev); | ||
483 | w90p910_enable_mac_interrupt(dev); | ||
484 | w90p910_enable_tx(dev, 1); | ||
485 | w90p910_enable_rx(dev, 1); | ||
486 | w90p910_trigger_tx(dev); | ||
487 | w90p910_trigger_rx(dev); | ||
488 | |||
489 | netif_trans_update(dev); /* prevent tx timeout */ | ||
490 | |||
491 | if (netif_queue_stopped(dev)) | ||
492 | netif_wake_queue(dev); | ||
493 | } | ||
494 | |||
495 | static void w90p910_mdio_write(struct net_device *dev, | ||
496 | int phy_id, int reg, int data) | ||
497 | { | ||
498 | struct w90p910_ether *ether = netdev_priv(dev); | ||
499 | struct platform_device *pdev; | ||
500 | unsigned int val, i; | ||
501 | |||
502 | pdev = ether->pdev; | ||
503 | |||
504 | __raw_writel(data, ether->reg + REG_MIID); | ||
505 | |||
506 | val = (phy_id << 0x08) | reg; | ||
507 | val |= PHYBUSY | PHYWR | MDCCR_VAL; | ||
508 | __raw_writel(val, ether->reg + REG_MIIDA); | ||
509 | |||
510 | for (i = 0; i < DELAY; i++) { | ||
511 | if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) | ||
512 | break; | ||
513 | } | ||
514 | |||
515 | if (i == DELAY) | ||
516 | dev_warn(&pdev->dev, "mdio write timed out\n"); | ||
517 | } | ||
518 | |||
519 | static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg) | ||
520 | { | ||
521 | struct w90p910_ether *ether = netdev_priv(dev); | ||
522 | struct platform_device *pdev; | ||
523 | unsigned int val, i, data; | ||
524 | |||
525 | pdev = ether->pdev; | ||
526 | |||
527 | val = (phy_id << 0x08) | reg; | ||
528 | val |= PHYBUSY | MDCCR_VAL; | ||
529 | __raw_writel(val, ether->reg + REG_MIIDA); | ||
530 | |||
531 | for (i = 0; i < DELAY; i++) { | ||
532 | if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) | ||
533 | break; | ||
534 | } | ||
535 | |||
536 | if (i == DELAY) { | ||
537 | dev_warn(&pdev->dev, "mdio read timed out\n"); | ||
538 | data = 0xffff; | ||
539 | } else { | ||
540 | data = __raw_readl(ether->reg + REG_MIID); | ||
541 | } | ||
542 | |||
543 | return data; | ||
544 | } | ||
545 | |||
546 | static int w90p910_set_mac_address(struct net_device *dev, void *addr) | ||
547 | { | ||
548 | struct sockaddr *address = addr; | ||
549 | |||
550 | if (!is_valid_ether_addr(address->sa_data)) | ||
551 | return -EADDRNOTAVAIL; | ||
552 | |||
553 | memcpy(dev->dev_addr, address->sa_data, dev->addr_len); | ||
554 | w90p910_write_cam(dev, CAM0, dev->dev_addr); | ||
555 | |||
556 | return 0; | ||
557 | } | ||
558 | |||
559 | static int w90p910_ether_close(struct net_device *dev) | ||
560 | { | ||
561 | struct w90p910_ether *ether = netdev_priv(dev); | ||
562 | struct platform_device *pdev; | ||
563 | |||
564 | pdev = ether->pdev; | ||
565 | |||
566 | dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc), | ||
567 | ether->rdesc, ether->rdesc_phys); | ||
568 | dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), | ||
569 | ether->tdesc, ether->tdesc_phys); | ||
570 | |||
571 | netif_stop_queue(dev); | ||
572 | |||
573 | del_timer_sync(ðer->check_timer); | ||
574 | clk_disable(ether->rmiiclk); | ||
575 | clk_disable(ether->clk); | ||
576 | |||
577 | free_irq(ether->txirq, dev); | ||
578 | free_irq(ether->rxirq, dev); | ||
579 | |||
580 | return 0; | ||
581 | } | ||
582 | |||
583 | static int w90p910_send_frame(struct net_device *dev, | ||
584 | unsigned char *data, int length) | ||
585 | { | ||
586 | struct w90p910_ether *ether; | ||
587 | struct w90p910_txbd *txbd; | ||
588 | struct platform_device *pdev; | ||
589 | unsigned char *buffer; | ||
590 | |||
591 | ether = netdev_priv(dev); | ||
592 | pdev = ether->pdev; | ||
593 | |||
594 | txbd = ðer->tdesc->desclist[ether->cur_tx]; | ||
595 | buffer = ether->tdesc->tran_buf[ether->cur_tx]; | ||
596 | |||
597 | if (length > 1514) { | ||
598 | dev_err(&pdev->dev, "frame too long (%d bytes), truncating to 1514\n", length); | ||
599 | length = 1514; | ||
600 | } | ||
601 | |||
602 | txbd->sl = length & 0xFFFF; | ||
603 | |||
604 | memcpy(buffer, data, length); | ||
605 | |||
606 | txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN; | ||
607 | |||
608 | w90p910_enable_tx(dev, 1); | ||
609 | |||
610 | w90p910_trigger_tx(dev); | ||
611 | |||
612 | if (++ether->cur_tx >= TX_DESC_SIZE) | ||
613 | ether->cur_tx = 0; | ||
614 | |||
615 | txbd = ðer->tdesc->desclist[ether->cur_tx]; | ||
616 | |||
617 | if (txbd->mode & TX_OWEN_DMA) | ||
618 | netif_stop_queue(dev); | ||
619 | |||
620 | return 0; | ||
621 | } | ||
622 | |||
623 | static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
624 | { | ||
625 | struct w90p910_ether *ether = netdev_priv(dev); | ||
626 | |||
627 | if (!(w90p910_send_frame(dev, skb->data, skb->len))) { | ||
628 | ether->skb = skb; | ||
629 | dev_consume_skb_irq(skb); | ||
630 | return 0; | ||
631 | } | ||
632 | return -EAGAIN; | ||
633 | } | ||
634 | |||
635 | static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id) | ||
636 | { | ||
637 | struct w90p910_ether *ether; | ||
638 | struct w90p910_txbd *txbd; | ||
639 | struct platform_device *pdev; | ||
640 | struct net_device *dev; | ||
641 | unsigned int cur_entry, entry, status; | ||
642 | |||
643 | dev = dev_id; | ||
644 | ether = netdev_priv(dev); | ||
645 | pdev = ether->pdev; | ||
646 | |||
647 | w90p910_get_and_clear_int(dev, &status); | ||
648 | |||
649 | cur_entry = __raw_readl(ether->reg + REG_CTXDSA); | ||
650 | |||
651 | entry = ether->tdesc_phys + | ||
652 | offsetof(struct tran_pdesc, desclist[ether->finish_tx]); | ||
653 | |||
654 | while (entry != cur_entry) { | ||
655 | txbd = ðer->tdesc->desclist[ether->finish_tx]; | ||
656 | |||
657 | if (++ether->finish_tx >= TX_DESC_SIZE) | ||
658 | ether->finish_tx = 0; | ||
659 | |||
660 | if (txbd->sl & TXDS_TXCP) { | ||
661 | dev->stats.tx_packets++; | ||
662 | dev->stats.tx_bytes += txbd->sl & 0xFFFF; | ||
663 | } else { | ||
664 | dev->stats.tx_errors++; | ||
665 | } | ||
666 | |||
667 | txbd->sl = 0x0; | ||
668 | txbd->mode = 0x0; | ||
669 | |||
670 | if (netif_queue_stopped(dev)) | ||
671 | netif_wake_queue(dev); | ||
672 | |||
673 | entry = ether->tdesc_phys + | ||
674 | offsetof(struct tran_pdesc, desclist[ether->finish_tx]); | ||
675 | } | ||
676 | |||
677 | if (status & MISTA_EXDEF) { | ||
678 | dev_err(&pdev->dev, "emc defer exceed interrupt\n"); | ||
679 | } else if (status & MISTA_TXBERR) { | ||
680 | dev_err(&pdev->dev, "emc bus error interrupt\n"); | ||
681 | w90p910_reset_mac(dev); | ||
682 | } else if (status & MISTA_TDU) { | ||
683 | if (netif_queue_stopped(dev)) | ||
684 | netif_wake_queue(dev); | ||
685 | } | ||
686 | |||
687 | return IRQ_HANDLED; | ||
688 | } | ||
689 | |||
690 | static void netdev_rx(struct net_device *dev) | ||
691 | { | ||
692 | struct w90p910_ether *ether; | ||
693 | struct w90p910_rxbd *rxbd; | ||
694 | struct platform_device *pdev; | ||
695 | struct sk_buff *skb; | ||
696 | unsigned char *data; | ||
697 | unsigned int length, status, val, entry; | ||
698 | |||
699 | ether = netdev_priv(dev); | ||
700 | pdev = ether->pdev; | ||
701 | |||
702 | rxbd = ðer->rdesc->desclist[ether->cur_rx]; | ||
703 | |||
704 | do { | ||
705 | val = __raw_readl(ether->reg + REG_CRXDSA); | ||
706 | |||
707 | entry = ether->rdesc_phys + | ||
708 | offsetof(struct recv_pdesc, desclist[ether->cur_rx]); | ||
709 | |||
710 | if (val == entry) | ||
711 | break; | ||
712 | |||
713 | status = rxbd->sl; | ||
714 | length = status & 0xFFFF; | ||
715 | |||
716 | if (status & RXDS_RXGD) { | ||
717 | data = ether->rdesc->recv_buf[ether->cur_rx]; | ||
718 | skb = netdev_alloc_skb(dev, length + 2); | ||
719 | if (!skb) { | ||
720 | dev->stats.rx_dropped++; | ||
721 | return; | ||
722 | } | ||
723 | |||
724 | skb_reserve(skb, 2); | ||
725 | skb_put(skb, length); | ||
726 | skb_copy_to_linear_data(skb, data, length); | ||
727 | skb->protocol = eth_type_trans(skb, dev); | ||
728 | dev->stats.rx_packets++; | ||
729 | dev->stats.rx_bytes += length; | ||
730 | netif_rx(skb); | ||
731 | } else { | ||
732 | dev->stats.rx_errors++; | ||
733 | |||
734 | if (status & RXDS_RP) { | ||
735 | dev_err(&pdev->dev, "rx runt err\n"); | ||
736 | dev->stats.rx_length_errors++; | ||
737 | } else if (status & RXDS_CRCE) { | ||
738 | dev_err(&pdev->dev, "rx crc err\n"); | ||
739 | dev->stats.rx_crc_errors++; | ||
740 | } else if (status & RXDS_ALIE) { | ||
741 | dev_err(&pdev->dev, "rx alignment err\n"); | ||
742 | dev->stats.rx_frame_errors++; | ||
743 | } else if (status & RXDS_PTLE) { | ||
744 | dev_err(&pdev->dev, "rx longer err\n"); | ||
745 | dev->stats.rx_over_errors++; | ||
746 | } | ||
747 | } | ||
748 | |||
749 | rxbd->sl = RX_OWEN_DMA; | ||
750 | rxbd->reserved = 0x0; | ||
751 | |||
752 | if (++ether->cur_rx >= RX_DESC_SIZE) | ||
753 | ether->cur_rx = 0; | ||
754 | |||
755 | rxbd = ðer->rdesc->desclist[ether->cur_rx]; | ||
756 | |||
757 | } while (1); | ||
758 | } | ||
759 | |||
760 | static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id) | ||
761 | { | ||
762 | struct net_device *dev; | ||
763 | struct w90p910_ether *ether; | ||
764 | struct platform_device *pdev; | ||
765 | unsigned int status; | ||
766 | |||
767 | dev = dev_id; | ||
768 | ether = netdev_priv(dev); | ||
769 | pdev = ether->pdev; | ||
770 | |||
771 | w90p910_get_and_clear_int(dev, &status); | ||
772 | |||
773 | if (status & MISTA_RDU) { | ||
774 | netdev_rx(dev); | ||
775 | w90p910_trigger_rx(dev); | ||
776 | |||
777 | return IRQ_HANDLED; | ||
778 | } else if (status & MISTA_RXBERR) { | ||
779 | dev_err(&pdev->dev, "emc rx bus error\n"); | ||
780 | w90p910_reset_mac(dev); | ||
781 | } | ||
782 | |||
783 | netdev_rx(dev); | ||
784 | return IRQ_HANDLED; | ||
785 | } | ||
786 | |||
787 | static int w90p910_ether_open(struct net_device *dev) | ||
788 | { | ||
789 | struct w90p910_ether *ether; | ||
790 | struct platform_device *pdev; | ||
791 | |||
792 | ether = netdev_priv(dev); | ||
793 | pdev = ether->pdev; | ||
794 | |||
795 | w90p910_reset_mac(dev); | ||
796 | w90p910_set_fifo_threshold(dev); | ||
797 | w90p910_set_curdest(dev); | ||
798 | w90p910_enable_cam(dev); | ||
799 | w90p910_enable_cam_command(dev); | ||
800 | w90p910_enable_mac_interrupt(dev); | ||
801 | w90p910_set_global_maccmd(dev); | ||
802 | w90p910_enable_rx(dev, 1); | ||
803 | |||
804 | clk_enable(ether->rmiiclk); | ||
805 | clk_enable(ether->clk); | ||
806 | |||
807 | ether->rx_packets = 0x0; | ||
808 | ether->rx_bytes = 0x0; | ||
809 | |||
810 | if (request_irq(ether->txirq, w90p910_tx_interrupt, | ||
811 | 0x0, pdev->name, dev)) { | ||
812 | dev_err(&pdev->dev, "register irq tx failed\n"); | ||
813 | return -EAGAIN; | ||
814 | } | ||
815 | |||
816 | if (request_irq(ether->rxirq, w90p910_rx_interrupt, | ||
817 | 0x0, pdev->name, dev)) { | ||
818 | dev_err(&pdev->dev, "register irq rx failed\n"); | ||
819 | free_irq(ether->txirq, dev); | ||
820 | return -EAGAIN; | ||
821 | } | ||
822 | |||
823 | mod_timer(ðer->check_timer, jiffies + msecs_to_jiffies(1000)); | ||
824 | netif_start_queue(dev); | ||
825 | w90p910_trigger_rx(dev); | ||
826 | |||
827 | dev_info(&pdev->dev, "%s is OPENED\n", dev->name); | ||
828 | |||
829 | return 0; | ||
830 | } | ||
831 | |||
832 | static void w90p910_ether_set_multicast_list(struct net_device *dev) | ||
833 | { | ||
834 | struct w90p910_ether *ether; | ||
835 | unsigned int rx_mode; | ||
836 | |||
837 | ether = netdev_priv(dev); | ||
838 | |||
839 | if (dev->flags & IFF_PROMISC) | ||
840 | rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; | ||
841 | else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) | ||
842 | rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; | ||
843 | else | ||
844 | rx_mode = CAMCMR_ECMP | CAMCMR_ABP; | ||
845 | __raw_writel(rx_mode, ether->reg + REG_CAMCMR); | ||
846 | } | ||
847 | |||
848 | static int w90p910_ether_ioctl(struct net_device *dev, | ||
849 | struct ifreq *ifr, int cmd) | ||
850 | { | ||
851 | struct w90p910_ether *ether = netdev_priv(dev); | ||
852 | struct mii_ioctl_data *data = if_mii(ifr); | ||
853 | |||
854 | return generic_mii_ioctl(ðer->mii, data, cmd, NULL); | ||
855 | } | ||
856 | |||
857 | static void w90p910_get_drvinfo(struct net_device *dev, | ||
858 | struct ethtool_drvinfo *info) | ||
859 | { | ||
860 | strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); | ||
861 | strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); | ||
862 | } | ||
863 | |||
864 | static int w90p910_get_link_ksettings(struct net_device *dev, | ||
865 | struct ethtool_link_ksettings *cmd) | ||
866 | { | ||
867 | struct w90p910_ether *ether = netdev_priv(dev); | ||
868 | |||
869 | mii_ethtool_get_link_ksettings(ðer->mii, cmd); | ||
870 | |||
871 | return 0; | ||
872 | } | ||
873 | |||
874 | static int w90p910_set_link_ksettings(struct net_device *dev, | ||
875 | const struct ethtool_link_ksettings *cmd) | ||
876 | { | ||
877 | struct w90p910_ether *ether = netdev_priv(dev); | ||
878 | return mii_ethtool_set_link_ksettings(ðer->mii, cmd); | ||
879 | } | ||
880 | |||
881 | static int w90p910_nway_reset(struct net_device *dev) | ||
882 | { | ||
883 | struct w90p910_ether *ether = netdev_priv(dev); | ||
884 | return mii_nway_restart(ðer->mii); | ||
885 | } | ||
886 | |||
887 | static u32 w90p910_get_link(struct net_device *dev) | ||
888 | { | ||
889 | struct w90p910_ether *ether = netdev_priv(dev); | ||
890 | return mii_link_ok(ðer->mii); | ||
891 | } | ||
892 | |||
893 | static const struct ethtool_ops w90p910_ether_ethtool_ops = { | ||
894 | .get_drvinfo = w90p910_get_drvinfo, | ||
895 | .nway_reset = w90p910_nway_reset, | ||
896 | .get_link = w90p910_get_link, | ||
897 | .get_link_ksettings = w90p910_get_link_ksettings, | ||
898 | .set_link_ksettings = w90p910_set_link_ksettings, | ||
899 | }; | ||
900 | |||
901 | static const struct net_device_ops w90p910_ether_netdev_ops = { | ||
902 | .ndo_open = w90p910_ether_open, | ||
903 | .ndo_stop = w90p910_ether_close, | ||
904 | .ndo_start_xmit = w90p910_ether_start_xmit, | ||
905 | .ndo_set_rx_mode = w90p910_ether_set_multicast_list, | ||
906 | .ndo_set_mac_address = w90p910_set_mac_address, | ||
907 | .ndo_do_ioctl = w90p910_ether_ioctl, | ||
908 | .ndo_validate_addr = eth_validate_addr, | ||
909 | }; | ||
910 | |||
911 | static void get_mac_address(struct net_device *dev) | ||
912 | { | ||
913 | struct w90p910_ether *ether = netdev_priv(dev); | ||
914 | struct platform_device *pdev; | ||
915 | char addr[ETH_ALEN]; | ||
916 | |||
917 | pdev = ether->pdev; | ||
918 | |||
919 | addr[0] = 0x00; | ||
920 | addr[1] = 0x02; | ||
921 | addr[2] = 0xac; | ||
922 | addr[3] = 0x55; | ||
923 | addr[4] = 0x88; | ||
924 | addr[5] = 0xa8; | ||
925 | |||
926 | if (is_valid_ether_addr(addr)) | ||
927 | memcpy(dev->dev_addr, &addr, ETH_ALEN); | ||
928 | else | ||
929 | dev_err(&pdev->dev, "invalid mac address\n"); | ||
930 | } | ||
931 | |||
932 | static int w90p910_ether_setup(struct net_device *dev) | ||
933 | { | ||
934 | struct w90p910_ether *ether = netdev_priv(dev); | ||
935 | |||
936 | dev->netdev_ops = &w90p910_ether_netdev_ops; | ||
937 | dev->ethtool_ops = &w90p910_ether_ethtool_ops; | ||
938 | |||
939 | dev->tx_queue_len = 16; | ||
940 | dev->dma = 0x0; | ||
941 | dev->watchdog_timeo = TX_TIMEOUT; | ||
942 | |||
943 | get_mac_address(dev); | ||
944 | |||
945 | ether->cur_tx = 0x0; | ||
946 | ether->cur_rx = 0x0; | ||
947 | ether->finish_tx = 0x0; | ||
948 | ether->linkflag = 0x0; | ||
949 | ether->mii.phy_id = 0x01; | ||
950 | ether->mii.phy_id_mask = 0x1f; | ||
951 | ether->mii.reg_num_mask = 0x1f; | ||
952 | ether->mii.dev = dev; | ||
953 | ether->mii.mdio_read = w90p910_mdio_read; | ||
954 | ether->mii.mdio_write = w90p910_mdio_write; | ||
955 | |||
956 | timer_setup(ðer->check_timer, w90p910_check_link, 0); | ||
957 | |||
958 | return 0; | ||
959 | } | ||
960 | |||
961 | static int w90p910_ether_probe(struct platform_device *pdev) | ||
962 | { | ||
963 | struct w90p910_ether *ether; | ||
964 | struct net_device *dev; | ||
965 | int error; | ||
966 | |||
967 | dev = alloc_etherdev(sizeof(struct w90p910_ether)); | ||
968 | if (!dev) | ||
969 | return -ENOMEM; | ||
970 | |||
971 | ether = netdev_priv(dev); | ||
972 | |||
973 | ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
974 | if (ether->res == NULL) { | ||
975 | dev_err(&pdev->dev, "failed to get I/O memory\n"); | ||
976 | error = -ENXIO; | ||
977 | goto failed_free; | ||
978 | } | ||
979 | |||
980 | if (!request_mem_region(ether->res->start, | ||
981 | resource_size(ether->res), pdev->name)) { | ||
982 | dev_err(&pdev->dev, "failed to request I/O memory\n"); | ||
983 | error = -EBUSY; | ||
984 | goto failed_free; | ||
985 | } | ||
986 | |||
987 | ether->reg = ioremap(ether->res->start, resource_size(ether->res)); | ||
988 | if (ether->reg == NULL) { | ||
989 | dev_err(&pdev->dev, "failed to remap I/O memory\n"); | ||
990 | error = -ENXIO; | ||
991 | goto failed_free_mem; | ||
992 | } | ||
993 | |||
994 | ether->txirq = platform_get_irq(pdev, 0); | ||
995 | if (ether->txirq < 0) { | ||
996 | dev_err(&pdev->dev, "failed to get ether tx irq\n"); | ||
997 | error = -ENXIO; | ||
998 | goto failed_free_io; | ||
999 | } | ||
1000 | |||
1001 | ether->rxirq = platform_get_irq(pdev, 1); | ||
1002 | if (ether->rxirq < 0) { | ||
1003 | dev_err(&pdev->dev, "failed to get ether rx irq\n"); | ||
1004 | error = -ENXIO; | ||
1005 | goto failed_free_io; | ||
1006 | } | ||
1007 | |||
1008 | platform_set_drvdata(pdev, dev); | ||
1009 | |||
1010 | ether->clk = clk_get(&pdev->dev, NULL); | ||
1011 | if (IS_ERR(ether->clk)) { | ||
1012 | dev_err(&pdev->dev, "failed to get ether clock\n"); | ||
1013 | error = PTR_ERR(ether->clk); | ||
1014 | goto failed_free_io; | ||
1015 | } | ||
1016 | |||
1017 | ether->rmiiclk = clk_get(&pdev->dev, "RMII"); | ||
1018 | if (IS_ERR(ether->rmiiclk)) { | ||
1019 | dev_err(&pdev->dev, "failed to get ether clock\n"); | ||
1020 | error = PTR_ERR(ether->rmiiclk); | ||
1021 | goto failed_put_clk; | ||
1022 | } | ||
1023 | |||
1024 | ether->pdev = pdev; | ||
1025 | |||
1026 | w90p910_ether_setup(dev); | ||
1027 | |||
1028 | error = register_netdev(dev); | ||
1029 | if (error != 0) { | ||
1030 | dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n"); | ||
1031 | error = -ENODEV; | ||
1032 | goto failed_put_rmiiclk; | ||
1033 | } | ||
1034 | |||
1035 | return 0; | ||
1036 | failed_put_rmiiclk: | ||
1037 | clk_put(ether->rmiiclk); | ||
1038 | failed_put_clk: | ||
1039 | clk_put(ether->clk); | ||
1040 | failed_free_io: | ||
1041 | iounmap(ether->reg); | ||
1042 | failed_free_mem: | ||
1043 | release_mem_region(ether->res->start, resource_size(ether->res)); | ||
1044 | failed_free: | ||
1045 | free_netdev(dev); | ||
1046 | return error; | ||
1047 | } | ||
1048 | |||
1049 | static int w90p910_ether_remove(struct platform_device *pdev) | ||
1050 | { | ||
1051 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1052 | struct w90p910_ether *ether = netdev_priv(dev); | ||
1053 | |||
1054 | unregister_netdev(dev); | ||
1055 | |||
1056 | clk_put(ether->rmiiclk); | ||
1057 | clk_put(ether->clk); | ||
1058 | |||
1059 | iounmap(ether->reg); | ||
1060 | release_mem_region(ether->res->start, resource_size(ether->res)); | ||
1061 | |||
1062 | del_timer_sync(ðer->check_timer); | ||
1063 | |||
1064 | free_netdev(dev); | ||
1065 | return 0; | ||
1066 | } | ||
1067 | |||
1068 | static struct platform_driver w90p910_ether_driver = { | ||
1069 | .probe = w90p910_ether_probe, | ||
1070 | .remove = w90p910_ether_remove, | ||
1071 | .driver = { | ||
1072 | .name = "nuc900-emc", | ||
1073 | }, | ||
1074 | }; | ||
1075 | |||
1076 | module_platform_driver(w90p910_ether_driver); | ||
1077 | |||
1078 | MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); | ||
1079 | MODULE_DESCRIPTION("w90p910 MAC driver!"); | ||
1080 | MODULE_LICENSE("GPL"); | ||
1081 | MODULE_ALIAS("platform:nuc900-emc"); | ||
1082 | |||
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 21efb7d39d62..7b07281aa0ae 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig | |||
@@ -116,9 +116,20 @@ config RESET_QCOM_PDC | |||
116 | to control reset signals provided by PDC for Modem, Compute, | 116 | to control reset signals provided by PDC for Modem, Compute, |
117 | Display, GPU, Debug, AOP, Sensors, Audio, SP and APPS. | 117 | Display, GPU, Debug, AOP, Sensors, Audio, SP and APPS. |
118 | 118 | ||
119 | config RESET_SCMI | ||
120 | tristate "Reset driver controlled via ARM SCMI interface" | ||
121 | depends on ARM_SCMI_PROTOCOL || COMPILE_TEST | ||
122 | default ARM_SCMI_PROTOCOL | ||
123 | help | ||
124 | This driver provides support for reset signal/domains that are | ||
125 | controlled by firmware that implements the SCMI interface. | ||
126 | |||
127 | This driver uses SCMI Message Protocol to interact with the | ||
128 | firmware controlling all the reset signals. | ||
129 | |||
119 | config RESET_SIMPLE | 130 | config RESET_SIMPLE |
120 | bool "Simple Reset Controller Driver" if COMPILE_TEST | 131 | bool "Simple Reset Controller Driver" if COMPILE_TEST |
121 | default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED || ARCH_BITMAIN | 132 | default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED || ARCH_BITMAIN || ARC |
122 | help | 133 | help |
123 | This enables a simple reset controller driver for reset lines that | 134 | This enables a simple reset controller driver for reset lines that |
124 | can be asserted and deasserted by toggling bits in a contiguous, | 135 | can be asserted and deasserted by toggling bits in a contiguous, |
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 61456b8f659c..cf60ce526064 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile | |||
@@ -18,6 +18,7 @@ obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o | |||
18 | obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o | 18 | obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o |
19 | obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o | 19 | obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o |
20 | obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o | 20 | obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o |
21 | obj-$(CONFIG_RESET_SCMI) += reset-scmi.o | ||
21 | obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o | 22 | obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o |
22 | obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o | 23 | obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o |
23 | obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o | 24 | obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o |
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c index 3ecd770f910b..1443a55a0c29 100644 --- a/drivers/reset/reset-imx7.c +++ b/drivers/reset/reset-imx7.c | |||
@@ -169,9 +169,9 @@ static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = { | |||
169 | [IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) }, | 169 | [IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) }, |
170 | [IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) }, | 170 | [IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) }, |
171 | [IMX8MQ_RESET_MIPI_DSI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(2) }, | 171 | [IMX8MQ_RESET_MIPI_DSI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(2) }, |
172 | [IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) }, | 172 | [IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) }, |
173 | [IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) }, | 173 | [IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) }, |
174 | [IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) }, | 174 | [IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) }, |
175 | [IMX8MQ_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, | 175 | [IMX8MQ_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, |
176 | BIT(2) | BIT(1) }, | 176 | BIT(2) | BIT(1) }, |
177 | [IMX8MQ_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) }, | 177 | [IMX8MQ_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) }, |
@@ -220,9 +220,9 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev, | |||
220 | 220 | ||
221 | case IMX8MQ_RESET_PCIE_CTRL_APPS_EN: | 221 | case IMX8MQ_RESET_PCIE_CTRL_APPS_EN: |
222 | case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */ | 222 | case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */ |
223 | case IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N: /* fallthrough */ | 223 | case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N: /* fallthrough */ |
224 | case IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N: /* fallthrough */ | 224 | case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N: /* fallthrough */ |
225 | case IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N: /* fallthrough */ | 225 | case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N: /* fallthrough */ |
226 | case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */ | 226 | case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */ |
227 | case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */ | 227 | case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */ |
228 | value = assert ? 0 : bit; | 228 | value = assert ? 0 : bit; |
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c index 5242e0679df7..7d05d766e1ea 100644 --- a/drivers/reset/reset-meson.c +++ b/drivers/reset/reset-meson.c | |||
@@ -1,58 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause | ||
1 | /* | 2 | /* |
2 | * Amlogic Meson Reset Controller driver | 3 | * Amlogic Meson Reset Controller driver |
3 | * | 4 | * |
4 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
5 | * redistributing this file, you may do so under either license. | ||
6 | * | ||
7 | * GPL LICENSE SUMMARY | ||
8 | * | ||
9 | * Copyright (c) 2016 BayLibre, SAS. | ||
10 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of version 2 of the GNU General Public License as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, but | ||
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
19 | * General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
23 | * The full GNU General Public License is included in this distribution | ||
24 | * in the file called COPYING. | ||
25 | * | ||
26 | * BSD LICENSE | ||
27 | * | ||
28 | * Copyright (c) 2016 BayLibre, SAS. | 5 | * Copyright (c) 2016 BayLibre, SAS. |
29 | * Author: Neil Armstrong <narmstrong@baylibre.com> | 6 | * Author: Neil Armstrong <narmstrong@baylibre.com> |
30 | * | ||
31 | * Redistribution and use in source and binary forms, with or without | ||
32 | * modification, are permitted provided that the following conditions | ||
33 | * are met: | ||
34 | * | ||
35 | * * Redistributions of source code must retain the above copyright | ||
36 | * notice, this list of conditions and the following disclaimer. | ||
37 | * * Redistributions in binary form must reproduce the above copyright | ||
38 | * notice, this list of conditions and the following disclaimer in | ||
39 | * the documentation and/or other materials provided with the | ||
40 | * distribution. | ||
41 | * * Neither the name of Intel Corporation nor the names of its | ||
42 | * contributors may be used to endorse or promote products derived | ||
43 | * from this software without specific prior written permission. | ||
44 | * | ||
45 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
46 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
47 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
48 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
49 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
50 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
51 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
52 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
53 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
54 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
55 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
56 | */ | 7 | */ |
57 | #include <linux/err.h> | 8 | #include <linux/err.h> |
58 | #include <linux/init.h> | 9 | #include <linux/init.h> |
diff --git a/drivers/reset/reset-scmi.c b/drivers/reset/reset-scmi.c new file mode 100644 index 000000000000..c6d3c8427f14 --- /dev/null +++ b/drivers/reset/reset-scmi.c | |||
@@ -0,0 +1,124 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * ARM System Control and Management Interface (ARM SCMI) reset driver | ||
4 | * | ||
5 | * Copyright (C) 2019 ARM Ltd. | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/of.h> | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/reset-controller.h> | ||
12 | #include <linux/scmi_protocol.h> | ||
13 | |||
14 | /** | ||
15 | * struct scmi_reset_data - reset controller information structure | ||
16 | * @rcdev: reset controller entity | ||
17 | * @handle: ARM SCMI handle used for communication with system controller | ||
18 | */ | ||
19 | struct scmi_reset_data { | ||
20 | struct reset_controller_dev rcdev; | ||
21 | const struct scmi_handle *handle; | ||
22 | }; | ||
23 | |||
24 | #define to_scmi_reset_data(p) container_of((p), struct scmi_reset_data, rcdev) | ||
25 | #define to_scmi_handle(p) (to_scmi_reset_data(p)->handle) | ||
26 | |||
27 | /** | ||
28 | * scmi_reset_assert() - assert device reset | ||
29 | * @rcdev: reset controller entity | ||
30 | * @id: ID of the reset to be asserted | ||
31 | * | ||
32 | * This function implements the reset driver op to assert a device's reset | ||
33 | * using the ARM SCMI protocol. | ||
34 | * | ||
35 | * Return: 0 for successful request, else a corresponding error value | ||
36 | */ | ||
37 | static int | ||
38 | scmi_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) | ||
39 | { | ||
40 | const struct scmi_handle *handle = to_scmi_handle(rcdev); | ||
41 | |||
42 | return handle->reset_ops->assert(handle, id); | ||
43 | } | ||
44 | |||
45 | /** | ||
46 | * scmi_reset_deassert() - deassert device reset | ||
47 | * @rcdev: reset controller entity | ||
48 | * @id: ID of the reset to be deasserted | ||
49 | * | ||
50 | * This function implements the reset driver op to deassert a device's reset | ||
51 | * using the ARM SCMI protocol. | ||
52 | * | ||
53 | * Return: 0 for successful request, else a corresponding error value | ||
54 | */ | ||
55 | static int | ||
56 | scmi_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) | ||
57 | { | ||
58 | const struct scmi_handle *handle = to_scmi_handle(rcdev); | ||
59 | |||
60 | return handle->reset_ops->deassert(handle, id); | ||
61 | } | ||
62 | |||
63 | /** | ||
64 | * scmi_reset_reset() - reset the device | ||
65 | * @rcdev: reset controller entity | ||
66 | * @id: ID of the reset signal to be reset (assert + deassert) | ||
67 | * | ||
68 | * This function implements the reset driver op to trigger a device's | ||
69 | * reset signal using the ARM SCMI protocol. | ||
70 | * | ||
71 | * Return: 0 for successful request, else a corresponding error value | ||
72 | */ | ||
73 | static int | ||
74 | scmi_reset_reset(struct reset_controller_dev *rcdev, unsigned long id) | ||
75 | { | ||
76 | const struct scmi_handle *handle = to_scmi_handle(rcdev); | ||
77 | |||
78 | return handle->reset_ops->reset(handle, id); | ||
79 | } | ||
80 | |||
81 | static const struct reset_control_ops scmi_reset_ops = { | ||
82 | .assert = scmi_reset_assert, | ||
83 | .deassert = scmi_reset_deassert, | ||
84 | .reset = scmi_reset_reset, | ||
85 | }; | ||
86 | |||
87 | static int scmi_reset_probe(struct scmi_device *sdev) | ||
88 | { | ||
89 | struct scmi_reset_data *data; | ||
90 | struct device *dev = &sdev->dev; | ||
91 | struct device_node *np = dev->of_node; | ||
92 | const struct scmi_handle *handle = sdev->handle; | ||
93 | |||
94 | if (!handle || !handle->reset_ops) | ||
95 | return -ENODEV; | ||
96 | |||
97 | data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); | ||
98 | if (!data) | ||
99 | return -ENOMEM; | ||
100 | |||
101 | data->rcdev.ops = &scmi_reset_ops; | ||
102 | data->rcdev.owner = THIS_MODULE; | ||
103 | data->rcdev.of_node = np; | ||
104 | data->rcdev.nr_resets = handle->reset_ops->num_domains_get(handle); | ||
105 | |||
106 | return devm_reset_controller_register(dev, &data->rcdev); | ||
107 | } | ||
108 | |||
109 | static const struct scmi_device_id scmi_id_table[] = { | ||
110 | { SCMI_PROTOCOL_RESET }, | ||
111 | { }, | ||
112 | }; | ||
113 | MODULE_DEVICE_TABLE(scmi, scmi_id_table); | ||
114 | |||
115 | static struct scmi_driver scmi_reset_driver = { | ||
116 | .name = "scmi-reset", | ||
117 | .probe = scmi_reset_probe, | ||
118 | .id_table = scmi_id_table, | ||
119 | }; | ||
120 | module_scmi_driver(scmi_reset_driver); | ||
121 | |||
122 | MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>"); | ||
123 | MODULE_DESCRIPTION("ARM SCMI reset controller driver"); | ||
124 | MODULE_LICENSE("GPL v2"); | ||
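For context, a consumer never calls the SCMI handle directly; it reaches this controller through the generic reset framework once its device tree node carries a resets phandle into the SCMI reset protocol node. A minimal sketch of such a consumer, assuming a hypothetical platform driver (the foo_* names are made up; only the reset API calls are real kernel interfaces):

#include <linux/platform_device.h>
#include <linux/reset.h>

static int foo_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	/* Reset line described by the "resets" property of this device node */
	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Assert + deassert pulse; ends up in scmi_reset_reset() above */
	ret = reset_control_reset(rst);
	if (ret)
		return ret;

	/* ... probe the now-reset hardware ... */
	return 0;
}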
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c index 1154f7b1f4dd..067e7e7b34f1 100644 --- a/drivers/reset/reset-simple.c +++ b/drivers/reset/reset-simple.c | |||
@@ -127,6 +127,9 @@ static const struct of_device_id reset_simple_dt_ids[] = { | |||
127 | { .compatible = "aspeed,ast2500-lpc-reset" }, | 127 | { .compatible = "aspeed,ast2500-lpc-reset" }, |
128 | { .compatible = "bitmain,bm1880-reset", | 128 | { .compatible = "bitmain,bm1880-reset", |
129 | .data = &reset_simple_active_low }, | 129 | .data = &reset_simple_active_low }, |
130 | { .compatible = "snps,dw-high-reset" }, | ||
131 | { .compatible = "snps,dw-low-reset", | ||
132 | .data = &reset_simple_active_low }, | ||
130 | { /* sentinel */ }, | 133 | { /* sentinel */ }, |
131 | }; | 134 | }; |
132 | 135 | ||
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig index 23bfb8ef4fdb..bc2c912949bd 100644 --- a/drivers/soc/amlogic/Kconfig +++ b/drivers/soc/amlogic/Kconfig | |||
@@ -37,6 +37,17 @@ config MESON_GX_PM_DOMAINS | |||
37 | Say yes to expose Amlogic Meson GX Power Domains as | 37 | Say yes to expose Amlogic Meson GX Power Domains as |
38 | Generic Power Domains. | 38 | Generic Power Domains. |
39 | 39 | ||
40 | config MESON_EE_PM_DOMAINS | ||
41 | bool "Amlogic Meson Everything-Else Power Domains driver" | ||
42 | depends on ARCH_MESON || COMPILE_TEST | ||
43 | depends on PM && OF | ||
44 | default ARCH_MESON | ||
45 | select PM_GENERIC_DOMAINS | ||
46 | select PM_GENERIC_DOMAINS_OF | ||
47 | help | ||
48 | Say yes to expose Amlogic Meson Everything-Else Power Domains as | ||
49 | Generic Power Domains. | ||
50 | |||
40 | config MESON_MX_SOCINFO | 51 | config MESON_MX_SOCINFO |
41 | bool "Amlogic Meson MX SoC Information driver" | 52 | bool "Amlogic Meson MX SoC Information driver" |
42 | depends on ARCH_MESON || COMPILE_TEST | 53 | depends on ARCH_MESON || COMPILE_TEST |
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile index f2e4ed171297..de79d044b545 100644 --- a/drivers/soc/amlogic/Makefile +++ b/drivers/soc/amlogic/Makefile | |||
@@ -4,3 +4,4 @@ obj-$(CONFIG_MESON_CLK_MEASURE) += meson-clk-measure.o | |||
4 | obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o | 4 | obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o |
5 | obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o | 5 | obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o |
6 | obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o | 6 | obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o |
7 | obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o | ||
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c index 19d4cbc93a17..0fa47d77577d 100644 --- a/drivers/soc/amlogic/meson-clk-measure.c +++ b/drivers/soc/amlogic/meson-clk-measure.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/debugfs.h> | 11 | #include <linux/debugfs.h> |
12 | #include <linux/regmap.h> | 12 | #include <linux/regmap.h> |
13 | 13 | ||
14 | static DEFINE_MUTEX(measure_lock); | ||
15 | |||
14 | #define MSR_CLK_DUTY 0x0 | 16 | #define MSR_CLK_DUTY 0x0 |
15 | #define MSR_CLK_REG0 0x4 | 17 | #define MSR_CLK_REG0 0x4 |
16 | #define MSR_CLK_REG1 0x8 | 18 | #define MSR_CLK_REG1 0x8 |
@@ -322,6 +324,8 @@ static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = { | |||
322 | CLK_MSR_ID(84, "co_tx"), | 324 | CLK_MSR_ID(84, "co_tx"), |
323 | CLK_MSR_ID(89, "hdmi_todig"), | 325 | CLK_MSR_ID(89, "hdmi_todig"), |
324 | CLK_MSR_ID(90, "hdmitx_sys"), | 326 | CLK_MSR_ID(90, "hdmitx_sys"), |
327 | CLK_MSR_ID(91, "sys_cpub_div16"), | ||
328 | CLK_MSR_ID(92, "sys_pll_cpub_div16"), | ||
325 | CLK_MSR_ID(94, "eth_phy_rx"), | 329 | CLK_MSR_ID(94, "eth_phy_rx"), |
326 | CLK_MSR_ID(95, "eth_phy_pll"), | 330 | CLK_MSR_ID(95, "eth_phy_pll"), |
327 | CLK_MSR_ID(96, "vpu_b"), | 331 | CLK_MSR_ID(96, "vpu_b"), |
@@ -353,6 +357,136 @@ static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = { | |||
353 | CLK_MSR_ID(122, "audio_pdm_dclk"), | 357 | CLK_MSR_ID(122, "audio_pdm_dclk"), |
354 | }; | 358 | }; |
355 | 359 | ||
360 | static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = { | ||
361 | CLK_MSR_ID(0, "ring_osc_out_ee_0"), | ||
362 | CLK_MSR_ID(1, "ring_osc_out_ee_1"), | ||
363 | CLK_MSR_ID(2, "ring_osc_out_ee_2"), | ||
364 | CLK_MSR_ID(3, "ring_osc_out_ee_3"), | ||
365 | CLK_MSR_ID(4, "gp0_pll"), | ||
366 | CLK_MSR_ID(5, "gp1_pll"), | ||
367 | CLK_MSR_ID(6, "enci"), | ||
368 | CLK_MSR_ID(7, "clk81"), | ||
369 | CLK_MSR_ID(8, "encp"), | ||
370 | CLK_MSR_ID(9, "encl"), | ||
371 | CLK_MSR_ID(10, "vdac"), | ||
372 | CLK_MSR_ID(11, "eth_tx"), | ||
373 | CLK_MSR_ID(12, "hifi_pll"), | ||
374 | CLK_MSR_ID(13, "mod_tcon"), | ||
375 | CLK_MSR_ID(14, "fec_0"), | ||
376 | CLK_MSR_ID(15, "fec_1"), | ||
377 | CLK_MSR_ID(16, "fec_2"), | ||
378 | CLK_MSR_ID(17, "sys_pll_div16"), | ||
379 | CLK_MSR_ID(18, "sys_cpu_div16"), | ||
380 | CLK_MSR_ID(19, "lcd_an_ph2"), | ||
381 | CLK_MSR_ID(20, "rtc_osc_out"), | ||
382 | CLK_MSR_ID(21, "lcd_an_ph3"), | ||
383 | CLK_MSR_ID(22, "eth_phy_ref"), | ||
384 | CLK_MSR_ID(23, "mpll_50m"), | ||
385 | CLK_MSR_ID(24, "eth_125m"), | ||
386 | CLK_MSR_ID(25, "eth_rmii"), | ||
387 | CLK_MSR_ID(26, "sc_int"), | ||
388 | CLK_MSR_ID(27, "in_mac"), | ||
389 | CLK_MSR_ID(28, "sar_adc"), | ||
390 | CLK_MSR_ID(29, "pcie_inp"), | ||
391 | CLK_MSR_ID(30, "pcie_inn"), | ||
392 | CLK_MSR_ID(31, "mpll_test_out"), | ||
393 | CLK_MSR_ID(32, "vdec"), | ||
394 | CLK_MSR_ID(34, "eth_mpll_50m"), | ||
395 | CLK_MSR_ID(35, "mali"), | ||
396 | CLK_MSR_ID(36, "hdmi_tx_pixel"), | ||
397 | CLK_MSR_ID(37, "cdac"), | ||
398 | CLK_MSR_ID(38, "vdin_meas"), | ||
399 | CLK_MSR_ID(39, "bt656"), | ||
400 | CLK_MSR_ID(40, "arm_ring_osc_out_4"), | ||
401 | CLK_MSR_ID(41, "eth_rx_or_rmii"), | ||
402 | CLK_MSR_ID(42, "mp0_out"), | ||
403 | CLK_MSR_ID(43, "fclk_div5"), | ||
404 | CLK_MSR_ID(44, "pwm_b"), | ||
405 | CLK_MSR_ID(45, "pwm_a"), | ||
406 | CLK_MSR_ID(46, "vpu"), | ||
407 | CLK_MSR_ID(47, "ddr_dpll_pt"), | ||
408 | CLK_MSR_ID(48, "mp1_out"), | ||
409 | CLK_MSR_ID(49, "mp2_out"), | ||
410 | CLK_MSR_ID(50, "mp3_out"), | ||
411 | CLK_MSR_ID(51, "sd_emmc_c"), | ||
412 | CLK_MSR_ID(52, "sd_emmc_b"), | ||
413 | CLK_MSR_ID(53, "sd_emmc_a"), | ||
414 | CLK_MSR_ID(54, "vpu_clkc"), | ||
415 | CLK_MSR_ID(55, "vid_pll_div_out"), | ||
416 | CLK_MSR_ID(56, "wave420l_a"), | ||
417 | CLK_MSR_ID(57, "wave420l_c"), | ||
418 | CLK_MSR_ID(58, "wave420l_b"), | ||
419 | CLK_MSR_ID(59, "hcodec"), | ||
420 | CLK_MSR_ID(60, "arm_ring_osc_out_5"), | ||
421 | CLK_MSR_ID(61, "gpio_msr"), | ||
422 | CLK_MSR_ID(62, "hevcb"), | ||
423 | CLK_MSR_ID(63, "dsi_meas"), | ||
424 | CLK_MSR_ID(64, "spicc_1"), | ||
425 | CLK_MSR_ID(65, "spicc_0"), | ||
426 | CLK_MSR_ID(66, "vid_lock"), | ||
427 | CLK_MSR_ID(67, "dsi_phy"), | ||
428 | CLK_MSR_ID(68, "hdcp22_esm"), | ||
429 | CLK_MSR_ID(69, "hdcp22_skp"), | ||
430 | CLK_MSR_ID(70, "pwm_f"), | ||
431 | CLK_MSR_ID(71, "pwm_e"), | ||
432 | CLK_MSR_ID(72, "pwm_d"), | ||
433 | CLK_MSR_ID(73, "pwm_c"), | ||
434 | CLK_MSR_ID(74, "arm_ring_osc_out_6"), | ||
435 | CLK_MSR_ID(75, "hevcf"), | ||
436 | CLK_MSR_ID(76, "arm_ring_osc_out_7"), | ||
437 | CLK_MSR_ID(77, "rng_ring_osc_0"), | ||
438 | CLK_MSR_ID(78, "rng_ring_osc_1"), | ||
439 | CLK_MSR_ID(79, "rng_ring_osc_2"), | ||
440 | CLK_MSR_ID(80, "rng_ring_osc_3"), | ||
441 | CLK_MSR_ID(81, "vapb"), | ||
442 | CLK_MSR_ID(82, "ge2d"), | ||
443 | CLK_MSR_ID(83, "co_rx"), | ||
444 | CLK_MSR_ID(84, "co_tx"), | ||
445 | CLK_MSR_ID(85, "arm_ring_osc_out_8"), | ||
446 | CLK_MSR_ID(86, "arm_ring_osc_out_9"), | ||
447 | CLK_MSR_ID(87, "mipi_dsi_phy"), | ||
448 | CLK_MSR_ID(88, "cis2_adapt"), | ||
449 | CLK_MSR_ID(89, "hdmi_todig"), | ||
450 | CLK_MSR_ID(90, "hdmitx_sys"), | ||
451 | CLK_MSR_ID(91, "nna_core"), | ||
452 | CLK_MSR_ID(92, "nna_axi"), | ||
453 | CLK_MSR_ID(93, "vad"), | ||
454 | CLK_MSR_ID(94, "eth_phy_rx"), | ||
455 | CLK_MSR_ID(95, "eth_phy_pll"), | ||
456 | CLK_MSR_ID(96, "vpu_b"), | ||
457 | CLK_MSR_ID(97, "cpu_b_tmp"), | ||
458 | CLK_MSR_ID(98, "ts"), | ||
459 | CLK_MSR_ID(99, "arm_ring_osc_out_10"), | ||
460 | CLK_MSR_ID(100, "arm_ring_osc_out_11"), | ||
461 | CLK_MSR_ID(101, "arm_ring_osc_out_12"), | ||
462 | CLK_MSR_ID(102, "arm_ring_osc_out_13"), | ||
463 | CLK_MSR_ID(103, "arm_ring_osc_out_14"), | ||
464 | CLK_MSR_ID(104, "arm_ring_osc_out_15"), | ||
465 | CLK_MSR_ID(105, "arm_ring_osc_out_16"), | ||
466 | CLK_MSR_ID(106, "ephy_test"), | ||
467 | CLK_MSR_ID(107, "au_dac_g128x"), | ||
468 | CLK_MSR_ID(108, "audio_locker_out"), | ||
469 | CLK_MSR_ID(109, "audio_locker_in"), | ||
470 | CLK_MSR_ID(110, "audio_tdmout_c_sclk"), | ||
471 | CLK_MSR_ID(111, "audio_tdmout_b_sclk"), | ||
472 | CLK_MSR_ID(112, "audio_tdmout_a_sclk"), | ||
473 | CLK_MSR_ID(113, "audio_tdmin_lb_sclk"), | ||
474 | CLK_MSR_ID(114, "audio_tdmin_c_sclk"), | ||
475 | CLK_MSR_ID(115, "audio_tdmin_b_sclk"), | ||
476 | CLK_MSR_ID(116, "audio_tdmin_a_sclk"), | ||
477 | CLK_MSR_ID(117, "audio_resample"), | ||
478 | CLK_MSR_ID(118, "audio_pdm_sys"), | ||
479 | CLK_MSR_ID(119, "audio_spdifout_b"), | ||
480 | CLK_MSR_ID(120, "audio_spdifout"), | ||
481 | CLK_MSR_ID(121, "audio_spdifin"), | ||
482 | CLK_MSR_ID(122, "audio_pdm_dclk"), | ||
483 | CLK_MSR_ID(123, "audio_resampled"), | ||
484 | CLK_MSR_ID(124, "earcrx_pll"), | ||
485 | CLK_MSR_ID(125, "earcrx_pll_test"), | ||
486 | CLK_MSR_ID(126, "csi_phy0"), | ||
487 | CLK_MSR_ID(127, "csi2_data"), | ||
488 | }; | ||
489 | |||
356 | static int meson_measure_id(struct meson_msr_id *clk_msr_id, | 490 | static int meson_measure_id(struct meson_msr_id *clk_msr_id, |
357 | unsigned int duration) | 491 | unsigned int duration) |
358 | { | 492 | { |
@@ -360,6 +494,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id, | |||
360 | unsigned int val; | 494 | unsigned int val; |
361 | int ret; | 495 | int ret; |
362 | 496 | ||
497 | ret = mutex_lock_interruptible(&measure_lock); | ||
498 | if (ret) | ||
499 | return ret; | ||
500 | |||
363 | regmap_write(priv->regmap, MSR_CLK_REG0, 0); | 501 | regmap_write(priv->regmap, MSR_CLK_REG0, 0); |
364 | 502 | ||
365 | /* Set measurement duration */ | 503 | /* Set measurement duration */ |
@@ -377,8 +515,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id, | |||
377 | 515 | ||
378 | ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0, | 516 | ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0, |
379 | val, !(val & MSR_BUSY), 10, 10000); | 517 | val, !(val & MSR_BUSY), 10, 10000); |
380 | if (ret) | 518 | if (ret) { |
519 | mutex_unlock(&measure_lock); | ||
381 | return ret; | 520 | return ret; |
521 | } | ||
382 | 522 | ||
383 | /* Disable */ | 523 | /* Disable */ |
384 | regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0); | 524 | regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0); |
@@ -386,6 +526,8 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id, | |||
386 | /* Get the value in multiple of gate time counts */ | 526 | /* Get the value in multiple of gate time counts */ |
387 | regmap_read(priv->regmap, MSR_CLK_REG2, &val); | 527 | regmap_read(priv->regmap, MSR_CLK_REG2, &val); |
388 | 528 | ||
529 | mutex_unlock(&measure_lock); | ||
530 | |||
389 | if (val >= MSR_VAL_MASK) | 531 | if (val >= MSR_VAL_MASK) |
390 | return -EINVAL; | 532 | return -EINVAL; |
391 | 533 | ||
@@ -533,6 +675,10 @@ static const struct of_device_id meson_msr_match_table[] = { | |||
533 | .compatible = "amlogic,meson-g12a-clk-measure", | 675 | .compatible = "amlogic,meson-g12a-clk-measure", |
534 | .data = (void *)clk_msr_g12a, | 676 | .data = (void *)clk_msr_g12a, |
535 | }, | 677 | }, |
678 | { | ||
679 | .compatible = "amlogic,meson-sm1-clk-measure", | ||
680 | .data = (void *)clk_msr_sm1, | ||
681 | }, | ||
536 | { /* sentinel */ } | 682 | { /* sentinel */ } |
537 | }; | 683 | }; |
538 | 684 | ||
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c new file mode 100644 index 000000000000..5823f5b67d16 --- /dev/null +++ b/drivers/soc/amlogic/meson-ee-pwrc.c | |||
@@ -0,0 +1,492 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
2 | /* | ||
3 | * Copyright (c) 2019 BayLibre, SAS | ||
4 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/of_address.h> | ||
8 | #include <linux/platform_device.h> | ||
9 | #include <linux/pm_domain.h> | ||
10 | #include <linux/bitfield.h> | ||
11 | #include <linux/regmap.h> | ||
12 | #include <linux/mfd/syscon.h> | ||
13 | #include <linux/of_device.h> | ||
14 | #include <linux/reset-controller.h> | ||
15 | #include <linux/reset.h> | ||
16 | #include <linux/clk.h> | ||
17 | #include <dt-bindings/power/meson-g12a-power.h> | ||
18 | #include <dt-bindings/power/meson-sm1-power.h> | ||
19 | |||
20 | /* AO Offsets */ | ||
21 | |||
22 | #define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2) | ||
23 | #define AO_RTI_GEN_PWR_ISO0 (0x3b << 2) | ||
24 | |||
25 | /* HHI Offsets */ | ||
26 | |||
27 | #define HHI_MEM_PD_REG0 (0x40 << 2) | ||
28 | #define HHI_VPU_MEM_PD_REG0 (0x41 << 2) | ||
29 | #define HHI_VPU_MEM_PD_REG1 (0x42 << 2) | ||
30 | #define HHI_VPU_MEM_PD_REG3 (0x43 << 2) | ||
31 | #define HHI_VPU_MEM_PD_REG4 (0x44 << 2) | ||
32 | #define HHI_AUDIO_MEM_PD_REG0 (0x45 << 2) | ||
33 | #define HHI_NANOQ_MEM_PD_REG0 (0x46 << 2) | ||
34 | #define HHI_NANOQ_MEM_PD_REG1 (0x47 << 2) | ||
35 | #define HHI_VPU_MEM_PD_REG2 (0x4d << 2) | ||
36 | |||
37 | struct meson_ee_pwrc; | ||
38 | struct meson_ee_pwrc_domain; | ||
39 | |||
40 | struct meson_ee_pwrc_mem_domain { | ||
41 | unsigned int reg; | ||
42 | unsigned int mask; | ||
43 | }; | ||
44 | |||
45 | struct meson_ee_pwrc_top_domain { | ||
46 | unsigned int sleep_reg; | ||
47 | unsigned int sleep_mask; | ||
48 | unsigned int iso_reg; | ||
49 | unsigned int iso_mask; | ||
50 | }; | ||
51 | |||
52 | struct meson_ee_pwrc_domain_desc { | ||
53 | char *name; | ||
54 | unsigned int reset_names_count; | ||
55 | unsigned int clk_names_count; | ||
56 | struct meson_ee_pwrc_top_domain *top_pd; | ||
57 | unsigned int mem_pd_count; | ||
58 | struct meson_ee_pwrc_mem_domain *mem_pd; | ||
59 | bool (*get_power)(struct meson_ee_pwrc_domain *pwrc_domain); | ||
60 | }; | ||
61 | |||
62 | struct meson_ee_pwrc_domain_data { | ||
63 | unsigned int count; | ||
64 | struct meson_ee_pwrc_domain_desc *domains; | ||
65 | }; | ||
66 | |||
67 | /* TOP Power Domains */ | ||
68 | |||
69 | static struct meson_ee_pwrc_top_domain g12a_pwrc_vpu = { | ||
70 | .sleep_reg = AO_RTI_GEN_PWR_SLEEP0, | ||
71 | .sleep_mask = BIT(8), | ||
72 | .iso_reg = AO_RTI_GEN_PWR_SLEEP0, | ||
73 | .iso_mask = BIT(9), | ||
74 | }; | ||
75 | |||
76 | #define SM1_EE_PD(__bit) \ | ||
77 | { \ | ||
78 | .sleep_reg = AO_RTI_GEN_PWR_SLEEP0, \ | ||
79 | .sleep_mask = BIT(__bit), \ | ||
80 | .iso_reg = AO_RTI_GEN_PWR_ISO0, \ | ||
81 | .iso_mask = BIT(__bit), \ | ||
82 | } | ||
83 | |||
84 | static struct meson_ee_pwrc_top_domain sm1_pwrc_vpu = SM1_EE_PD(8); | ||
85 | static struct meson_ee_pwrc_top_domain sm1_pwrc_nna = SM1_EE_PD(16); | ||
86 | static struct meson_ee_pwrc_top_domain sm1_pwrc_usb = SM1_EE_PD(17); | ||
87 | static struct meson_ee_pwrc_top_domain sm1_pwrc_pci = SM1_EE_PD(18); | ||
88 | static struct meson_ee_pwrc_top_domain sm1_pwrc_ge2d = SM1_EE_PD(19); | ||
89 | |||
90 | /* Memory PD Domains */ | ||
91 | |||
92 | #define VPU_MEMPD(__reg) \ | ||
93 | { __reg, GENMASK(1, 0) }, \ | ||
94 | { __reg, GENMASK(3, 2) }, \ | ||
95 | { __reg, GENMASK(5, 4) }, \ | ||
96 | { __reg, GENMASK(7, 6) }, \ | ||
97 | { __reg, GENMASK(9, 8) }, \ | ||
98 | { __reg, GENMASK(11, 10) }, \ | ||
99 | { __reg, GENMASK(13, 12) }, \ | ||
100 | { __reg, GENMASK(15, 14) }, \ | ||
101 | { __reg, GENMASK(17, 16) }, \ | ||
102 | { __reg, GENMASK(19, 18) }, \ | ||
103 | { __reg, GENMASK(21, 20) }, \ | ||
104 | { __reg, GENMASK(23, 22) }, \ | ||
105 | { __reg, GENMASK(25, 24) }, \ | ||
106 | { __reg, GENMASK(27, 26) }, \ | ||
107 | { __reg, GENMASK(29, 28) }, \ | ||
108 | { __reg, GENMASK(31, 30) } | ||
109 | |||
110 | #define VPU_HHI_MEMPD(__reg) \ | ||
111 | { __reg, BIT(8) }, \ | ||
112 | { __reg, BIT(9) }, \ | ||
113 | { __reg, BIT(10) }, \ | ||
114 | { __reg, BIT(11) }, \ | ||
115 | { __reg, BIT(12) }, \ | ||
116 | { __reg, BIT(13) }, \ | ||
117 | { __reg, BIT(14) }, \ | ||
118 | { __reg, BIT(15) } | ||
119 | |||
120 | static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_vpu[] = { | ||
121 | VPU_MEMPD(HHI_VPU_MEM_PD_REG0), | ||
122 | VPU_MEMPD(HHI_VPU_MEM_PD_REG1), | ||
123 | VPU_MEMPD(HHI_VPU_MEM_PD_REG2), | ||
124 | VPU_HHI_MEMPD(HHI_MEM_PD_REG0), | ||
125 | }; | ||
126 | |||
127 | static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_eth[] = { | ||
128 | { HHI_MEM_PD_REG0, GENMASK(3, 2) }, | ||
129 | }; | ||
130 | |||
131 | static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_vpu[] = { | ||
132 | VPU_MEMPD(HHI_VPU_MEM_PD_REG0), | ||
133 | VPU_MEMPD(HHI_VPU_MEM_PD_REG1), | ||
134 | VPU_MEMPD(HHI_VPU_MEM_PD_REG2), | ||
135 | VPU_MEMPD(HHI_VPU_MEM_PD_REG3), | ||
136 | { HHI_VPU_MEM_PD_REG4, GENMASK(1, 0) }, | ||
137 | { HHI_VPU_MEM_PD_REG4, GENMASK(3, 2) }, | ||
138 | { HHI_VPU_MEM_PD_REG4, GENMASK(5, 4) }, | ||
139 | { HHI_VPU_MEM_PD_REG4, GENMASK(7, 6) }, | ||
140 | VPU_HHI_MEMPD(HHI_MEM_PD_REG0), | ||
141 | }; | ||
142 | |||
143 | static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_nna[] = { | ||
144 | { HHI_NANOQ_MEM_PD_REG0, 0xff }, | ||
145 | { HHI_NANOQ_MEM_PD_REG1, 0xff }, | ||
146 | }; | ||
147 | |||
148 | static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_usb[] = { | ||
149 | { HHI_MEM_PD_REG0, GENMASK(31, 30) }, | ||
150 | }; | ||
151 | |||
152 | static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_pcie[] = { | ||
153 | { HHI_MEM_PD_REG0, GENMASK(29, 26) }, | ||
154 | }; | ||
155 | |||
156 | static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_ge2d[] = { | ||
157 | { HHI_MEM_PD_REG0, GENMASK(25, 18) }, | ||
158 | }; | ||
159 | |||
160 | static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = { | ||
161 | { HHI_MEM_PD_REG0, GENMASK(5, 4) }, | ||
162 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(1, 0) }, | ||
163 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(3, 2) }, | ||
164 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(5, 4) }, | ||
165 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(7, 6) }, | ||
166 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(13, 12) }, | ||
167 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(15, 14) }, | ||
168 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(17, 16) }, | ||
169 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(19, 18) }, | ||
170 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(21, 20) }, | ||
171 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(23, 22) }, | ||
172 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(25, 24) }, | ||
173 | { HHI_AUDIO_MEM_PD_REG0, GENMASK(27, 26) }, | ||
174 | }; | ||
175 | |||
176 | #define VPU_PD(__name, __top_pd, __mem, __get_power, __resets, __clks) \ | ||
177 | { \ | ||
178 | .name = __name, \ | ||
179 | .reset_names_count = __resets, \ | ||
180 | .clk_names_count = __clks, \ | ||
181 | .top_pd = __top_pd, \ | ||
182 | .mem_pd_count = ARRAY_SIZE(__mem), \ | ||
183 | .mem_pd = __mem, \ | ||
184 | .get_power = __get_power, \ | ||
185 | } | ||
186 | |||
187 | #define TOP_PD(__name, __top_pd, __mem, __get_power) \ | ||
188 | { \ | ||
189 | .name = __name, \ | ||
190 | .top_pd = __top_pd, \ | ||
191 | .mem_pd_count = ARRAY_SIZE(__mem), \ | ||
192 | .mem_pd = __mem, \ | ||
193 | .get_power = __get_power, \ | ||
194 | } | ||
195 | |||
196 | #define MEM_PD(__name, __mem) \ | ||
197 | TOP_PD(__name, NULL, __mem, NULL) | ||
198 | |||
199 | static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain); | ||
200 | |||
201 | static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = { | ||
202 | [PWRC_G12A_VPU_ID] = VPU_PD("VPU", &g12a_pwrc_vpu, g12a_pwrc_mem_vpu, | ||
203 | pwrc_ee_get_power, 11, 2), | ||
204 | [PWRC_G12A_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth), | ||
205 | }; | ||
206 | |||
207 | static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = { | ||
208 | [PWRC_SM1_VPU_ID] = VPU_PD("VPU", &sm1_pwrc_vpu, sm1_pwrc_mem_vpu, | ||
209 | pwrc_ee_get_power, 11, 2), | ||
210 | [PWRC_SM1_NNA_ID] = TOP_PD("NNA", &sm1_pwrc_nna, sm1_pwrc_mem_nna, | ||
211 | pwrc_ee_get_power), | ||
212 | [PWRC_SM1_USB_ID] = TOP_PD("USB", &sm1_pwrc_usb, sm1_pwrc_mem_usb, | ||
213 | pwrc_ee_get_power), | ||
214 | [PWRC_SM1_PCIE_ID] = TOP_PD("PCI", &sm1_pwrc_pci, sm1_pwrc_mem_pcie, | ||
215 | pwrc_ee_get_power), | ||
216 | [PWRC_SM1_GE2D_ID] = TOP_PD("GE2D", &sm1_pwrc_ge2d, sm1_pwrc_mem_ge2d, | ||
217 | pwrc_ee_get_power), | ||
218 | [PWRC_SM1_AUDIO_ID] = MEM_PD("AUDIO", sm1_pwrc_mem_audio), | ||
219 | [PWRC_SM1_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth), | ||
220 | }; | ||
221 | |||
222 | struct meson_ee_pwrc_domain { | ||
223 | struct generic_pm_domain base; | ||
224 | bool enabled; | ||
225 | struct meson_ee_pwrc *pwrc; | ||
226 | struct meson_ee_pwrc_domain_desc desc; | ||
227 | struct clk_bulk_data *clks; | ||
228 | int num_clks; | ||
229 | struct reset_control *rstc; | ||
230 | int num_rstc; | ||
231 | }; | ||
232 | |||
233 | struct meson_ee_pwrc { | ||
234 | struct regmap *regmap_ao; | ||
235 | struct regmap *regmap_hhi; | ||
236 | struct meson_ee_pwrc_domain *domains; | ||
237 | struct genpd_onecell_data xlate; | ||
238 | }; | ||
239 | |||
240 | static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain) | ||
241 | { | ||
242 | u32 reg; | ||
243 | |||
244 | regmap_read(pwrc_domain->pwrc->regmap_ao, | ||
245 | pwrc_domain->desc.top_pd->sleep_reg, ®); | ||
246 | |||
247 | return (reg & pwrc_domain->desc.top_pd->sleep_mask); | ||
248 | } | ||
249 | |||
250 | static int meson_ee_pwrc_off(struct generic_pm_domain *domain) | ||
251 | { | ||
252 | struct meson_ee_pwrc_domain *pwrc_domain = | ||
253 | container_of(domain, struct meson_ee_pwrc_domain, base); | ||
254 | int i; | ||
255 | |||
256 | if (pwrc_domain->desc.top_pd) | ||
257 | regmap_update_bits(pwrc_domain->pwrc->regmap_ao, | ||
258 | pwrc_domain->desc.top_pd->sleep_reg, | ||
259 | pwrc_domain->desc.top_pd->sleep_mask, | ||
260 | pwrc_domain->desc.top_pd->sleep_mask); | ||
261 | udelay(20); | ||
262 | |||
263 | for (i = 0 ; i < pwrc_domain->desc.mem_pd_count ; ++i) | ||
264 | regmap_update_bits(pwrc_domain->pwrc->regmap_hhi, | ||
265 | pwrc_domain->desc.mem_pd[i].reg, | ||
266 | pwrc_domain->desc.mem_pd[i].mask, | ||
267 | pwrc_domain->desc.mem_pd[i].mask); | ||
268 | |||
269 | udelay(20); | ||
270 | |||
271 | if (pwrc_domain->desc.top_pd) | ||
272 | regmap_update_bits(pwrc_domain->pwrc->regmap_ao, | ||
273 | pwrc_domain->desc.top_pd->iso_reg, | ||
274 | pwrc_domain->desc.top_pd->iso_mask, | ||
275 | pwrc_domain->desc.top_pd->iso_mask); | ||
276 | |||
277 | if (pwrc_domain->num_clks) { | ||
278 | msleep(20); | ||
279 | clk_bulk_disable_unprepare(pwrc_domain->num_clks, | ||
280 | pwrc_domain->clks); | ||
281 | } | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static int meson_ee_pwrc_on(struct generic_pm_domain *domain) | ||
287 | { | ||
288 | struct meson_ee_pwrc_domain *pwrc_domain = | ||
289 | container_of(domain, struct meson_ee_pwrc_domain, base); | ||
290 | int i, ret; | ||
291 | |||
292 | if (pwrc_domain->desc.top_pd) | ||
293 | regmap_update_bits(pwrc_domain->pwrc->regmap_ao, | ||
294 | pwrc_domain->desc.top_pd->sleep_reg, | ||
295 | pwrc_domain->desc.top_pd->sleep_mask, 0); | ||
296 | udelay(20); | ||
297 | |||
298 | for (i = 0 ; i < pwrc_domain->desc.mem_pd_count ; ++i) | ||
299 | regmap_update_bits(pwrc_domain->pwrc->regmap_hhi, | ||
300 | pwrc_domain->desc.mem_pd[i].reg, | ||
301 | pwrc_domain->desc.mem_pd[i].mask, 0); | ||
302 | |||
303 | udelay(20); | ||
304 | |||
305 | ret = reset_control_assert(pwrc_domain->rstc); | ||
306 | if (ret) | ||
307 | return ret; | ||
308 | |||
309 | if (pwrc_domain->desc.top_pd) | ||
310 | regmap_update_bits(pwrc_domain->pwrc->regmap_ao, | ||
311 | pwrc_domain->desc.top_pd->iso_reg, | ||
312 | pwrc_domain->desc.top_pd->iso_mask, 0); | ||
313 | |||
314 | ret = reset_control_deassert(pwrc_domain->rstc); | ||
315 | if (ret) | ||
316 | return ret; | ||
317 | |||
318 | return clk_bulk_prepare_enable(pwrc_domain->num_clks, | ||
319 | pwrc_domain->clks); | ||
320 | } | ||
321 | |||
322 | static int meson_ee_pwrc_init_domain(struct platform_device *pdev, | ||
323 | struct meson_ee_pwrc *pwrc, | ||
324 | struct meson_ee_pwrc_domain *dom) | ||
325 | { | ||
326 | dom->pwrc = pwrc; | ||
327 | dom->num_rstc = dom->desc.reset_names_count; | ||
328 | dom->num_clks = dom->desc.clk_names_count; | ||
329 | |||
330 | if (dom->num_rstc) { | ||
331 | int count = reset_control_get_count(&pdev->dev); | ||
332 | |||
333 | if (count != dom->num_rstc) | ||
334 | dev_warn(&pdev->dev, "Invalid resets count %d for domain %s\n", | ||
335 | count, dom->desc.name); | ||
336 | |||
337 | dom->rstc = devm_reset_control_array_get(&pdev->dev, false, | ||
338 | false); | ||
339 | if (IS_ERR(dom->rstc)) | ||
340 | return PTR_ERR(dom->rstc); | ||
341 | } | ||
342 | |||
343 | if (dom->num_clks) { | ||
344 | int ret = devm_clk_bulk_get_all(&pdev->dev, &dom->clks); | ||
345 | if (ret < 0) | ||
346 | return ret; | ||
347 | |||
348 | if (dom->num_clks != ret) { | ||
349 | dev_warn(&pdev->dev, "Invalid clocks count %d for domain %s\n", | ||
350 | ret, dom->desc.name); | ||
351 | dom->num_clks = ret; | ||
352 | } | ||
353 | } | ||
354 | |||
355 | dom->base.name = dom->desc.name; | ||
356 | dom->base.power_on = meson_ee_pwrc_on; | ||
357 | dom->base.power_off = meson_ee_pwrc_off; | ||
358 | |||
359 | /* | ||
360 | * TOFIX: This is a special case for the VPU power domain, which may | ||
361 | * already have been enabled by the bootloader. In that case the VPU | ||
362 | * pipeline may be functional even though no driver ever attaches | ||
363 | * to this power domain, and disabling the domain could cause | ||
364 | * system errors. This is why the pm_domain_always_on_gov | ||
365 | * is used here. | ||
366 | * For the same reason, the clocks should be enabled in case | ||
367 | * we need to power the domain off, otherwise the internal clock | ||
368 | * prepare/enable counters won't stay in sync. | ||
369 | */ | ||
370 | if (dom->num_clks && dom->desc.get_power && !dom->desc.get_power(dom)) { | ||
371 | int ret = clk_bulk_prepare_enable(dom->num_clks, dom->clks); | ||
372 | if (ret) | ||
373 | return ret; | ||
374 | |||
375 | pm_genpd_init(&dom->base, &pm_domain_always_on_gov, false); | ||
376 | } else | ||
377 | pm_genpd_init(&dom->base, NULL, | ||
378 | (dom->desc.get_power ? | ||
379 | dom->desc.get_power(dom) : true)); | ||
380 | |||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | static int meson_ee_pwrc_probe(struct platform_device *pdev) | ||
385 | { | ||
386 | const struct meson_ee_pwrc_domain_data *match; | ||
387 | struct regmap *regmap_ao, *regmap_hhi; | ||
388 | struct meson_ee_pwrc *pwrc; | ||
389 | int i, ret; | ||
390 | |||
391 | match = of_device_get_match_data(&pdev->dev); | ||
392 | if (!match) { | ||
393 | dev_err(&pdev->dev, "failed to get match data\n"); | ||
394 | return -ENODEV; | ||
395 | } | ||
396 | |||
397 | pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL); | ||
398 | if (!pwrc) | ||
399 | return -ENOMEM; | ||
400 | |||
401 | pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count, | ||
402 | sizeof(*pwrc->xlate.domains), | ||
403 | GFP_KERNEL); | ||
404 | if (!pwrc->xlate.domains) | ||
405 | return -ENOMEM; | ||
406 | |||
407 | pwrc->domains = devm_kcalloc(&pdev->dev, match->count, | ||
408 | sizeof(*pwrc->domains), GFP_KERNEL); | ||
409 | if (!pwrc->domains) | ||
410 | return -ENOMEM; | ||
411 | |||
412 | pwrc->xlate.num_domains = match->count; | ||
413 | |||
414 | regmap_hhi = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node)); | ||
415 | if (IS_ERR(regmap_hhi)) { | ||
416 | dev_err(&pdev->dev, "failed to get HHI regmap\n"); | ||
417 | return PTR_ERR(regmap_hhi); | ||
418 | } | ||
419 | |||
420 | regmap_ao = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, | ||
421 | "amlogic,ao-sysctrl"); | ||
422 | if (IS_ERR(regmap_ao)) { | ||
423 | dev_err(&pdev->dev, "failed to get AO regmap\n"); | ||
424 | return PTR_ERR(regmap_ao); | ||
425 | } | ||
426 | |||
427 | pwrc->regmap_ao = regmap_ao; | ||
428 | pwrc->regmap_hhi = regmap_hhi; | ||
429 | |||
430 | platform_set_drvdata(pdev, pwrc); | ||
431 | |||
432 | for (i = 0 ; i < match->count ; ++i) { | ||
433 | struct meson_ee_pwrc_domain *dom = &pwrc->domains[i]; | ||
434 | |||
435 | memcpy(&dom->desc, &match->domains[i], sizeof(dom->desc)); | ||
436 | |||
437 | ret = meson_ee_pwrc_init_domain(pdev, pwrc, dom); | ||
438 | if (ret) | ||
439 | return ret; | ||
440 | |||
441 | pwrc->xlate.domains[i] = &dom->base; | ||
442 | } | ||
443 | |||
444 | of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate); | ||
445 | |||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | static void meson_ee_pwrc_shutdown(struct platform_device *pdev) | ||
450 | { | ||
451 | struct meson_ee_pwrc *pwrc = platform_get_drvdata(pdev); | ||
452 | int i; | ||
453 | |||
454 | for (i = 0 ; i < pwrc->xlate.num_domains ; ++i) { | ||
455 | struct meson_ee_pwrc_domain *dom = &pwrc->domains[i]; | ||
456 | |||
457 | if (dom->desc.get_power && !dom->desc.get_power(dom)) | ||
458 | meson_ee_pwrc_off(&dom->base); | ||
459 | } | ||
460 | } | ||
461 | |||
462 | static struct meson_ee_pwrc_domain_data meson_ee_g12a_pwrc_data = { | ||
463 | .count = ARRAY_SIZE(g12a_pwrc_domains), | ||
464 | .domains = g12a_pwrc_domains, | ||
465 | }; | ||
466 | |||
467 | static struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = { | ||
468 | .count = ARRAY_SIZE(sm1_pwrc_domains), | ||
469 | .domains = sm1_pwrc_domains, | ||
470 | }; | ||
471 | |||
472 | static const struct of_device_id meson_ee_pwrc_match_table[] = { | ||
473 | { | ||
474 | .compatible = "amlogic,meson-g12a-pwrc", | ||
475 | .data = &meson_ee_g12a_pwrc_data, | ||
476 | }, | ||
477 | { | ||
478 | .compatible = "amlogic,meson-sm1-pwrc", | ||
479 | .data = &meson_ee_sm1_pwrc_data, | ||
480 | }, | ||
481 | { /* sentinel */ } | ||
482 | }; | ||
483 | |||
484 | static struct platform_driver meson_ee_pwrc_driver = { | ||
485 | .probe = meson_ee_pwrc_probe, | ||
486 | .shutdown = meson_ee_pwrc_shutdown, | ||
487 | .driver = { | ||
488 | .name = "meson_ee_pwrc", | ||
489 | .of_match_table = meson_ee_pwrc_match_table, | ||
490 | }, | ||
491 | }; | ||
492 | builtin_platform_driver(meson_ee_pwrc_driver); | ||
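
The probe above hands the DT framework a onecell translation table: pwrc->xlate.domains[i] is looked up directly by the cell a consumer puts in its power-domains property. A minimal standalone model of that index-to-domain mapping (plain userspace C, not the genpd API; the domain names are made up for illustration):

#include <stdio.h>
#include <stddef.h>

struct pm_domain {
	const char *name;
};

/* Model of genpd_onecell_data: consumers pick a domain by cell index. */
struct onecell {
	struct pm_domain **domains;
	size_t num_domains;
};

static struct pm_domain *onecell_xlate(const struct onecell *data, size_t cell)
{
	if (cell >= data->num_domains)
		return NULL;	/* out-of-range power-domain specifier */
	return data->domains[cell];
}

int main(void)
{
	struct pm_domain vpu = { "VPU" }, eth = { "ETH" };	/* hypothetical */
	struct pm_domain *doms[] = { &vpu, &eth };
	struct onecell data = { doms, 2 };
	struct pm_domain *d = onecell_xlate(&data, 1);

	printf("cell 1 -> %s\n", d ? d->name : "none");
	return 0;
}
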
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c index bca34954518e..6d0d04f163cb 100644 --- a/drivers/soc/amlogic/meson-gx-socinfo.c +++ b/drivers/soc/amlogic/meson-gx-socinfo.c | |||
@@ -39,6 +39,7 @@ static const struct meson_gx_soc_id { | |||
39 | { "TXHD", 0x27 }, | 39 | { "TXHD", 0x27 }, |
40 | { "G12A", 0x28 }, | 40 | { "G12A", 0x28 }, |
41 | { "G12B", 0x29 }, | 41 | { "G12B", 0x29 }, |
42 | { "SM1", 0x2b }, | ||
42 | }; | 43 | }; |
43 | 44 | ||
44 | static const struct meson_gx_package_id { | 45 | static const struct meson_gx_package_id { |
@@ -65,6 +66,8 @@ static const struct meson_gx_package_id { | |||
65 | { "S905D2", 0x28, 0x10, 0xf0 }, | 66 | { "S905D2", 0x28, 0x10, 0xf0 }, |
66 | { "S905X2", 0x28, 0x40, 0xf0 }, | 67 | { "S905X2", 0x28, 0x40, 0xf0 }, |
67 | { "S922X", 0x29, 0x40, 0xf0 }, | 68 | { "S922X", 0x29, 0x40, 0xf0 }, |
69 | { "A311D", 0x29, 0x10, 0xf0 }, | ||
70 | { "S905X3", 0x2b, 0x5, 0xf }, | ||
68 | }; | 71 | }; |
69 | 72 | ||
70 | static inline unsigned int socinfo_to_major(u32 socinfo) | 73 | static inline unsigned int socinfo_to_major(u32 socinfo) |
@@ -138,8 +141,10 @@ static int __init meson_gx_socinfo_init(void) | |||
138 | } | 141 | } |
139 | 142 | ||
140 | /* check if chip-id is available */ | 143 | /* check if chip-id is available */ |
141 | if (!of_property_read_bool(np, "amlogic,has-chip-id")) | 144 | if (!of_property_read_bool(np, "amlogic,has-chip-id")) { |
145 | of_node_put(np); | ||
142 | return -ENODEV; | 146 | return -ENODEV; |
147 | } | ||
143 | 148 | ||
144 | /* node should be a syscon */ | 149 | /* node should be a syscon */ |
145 | regmap = syscon_node_to_regmap(np); | 150 | regmap = syscon_node_to_regmap(np); |
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c index 9168d8ddc932..27243f706f37 100644 --- a/drivers/soc/fsl/dpaa2-console.c +++ b/drivers/soc/fsl/dpaa2-console.c | |||
@@ -73,7 +73,7 @@ static u64 get_mc_fw_base_address(void) | |||
73 | 73 | ||
74 | mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr)); | 74 | mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr)); |
75 | if (!mcfbaregs) { | 75 | if (!mcfbaregs) { |
76 | pr_err("could not map MC Firmaware Base registers\n"); | 76 | pr_err("could not map MC Firmware Base registers\n"); |
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
79 | 79 | ||
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c index b9539ef2c3cd..518a8e081b49 100644 --- a/drivers/soc/fsl/dpio/dpio-service.c +++ b/drivers/soc/fsl/dpio/dpio-service.c | |||
@@ -305,8 +305,6 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service, | |||
305 | list_del(&ctx->node); | 305 | list_del(&ctx->node); |
306 | spin_unlock_irqrestore(&d->lock_notifications, irqflags); | 306 | spin_unlock_irqrestore(&d->lock_notifications, irqflags); |
307 | 307 | ||
308 | if (dev) | ||
309 | device_link_remove(dev, d->dev); | ||
310 | } | 308 | } |
311 | EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister); | 309 | EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister); |
312 | 310 | ||
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 1ef8068c8dd3..34810f9bb2ee 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c | |||
@@ -102,6 +102,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = { | |||
102 | .svr = 0x87360000, | 102 | .svr = 0x87360000, |
103 | .mask = 0xff3f0000, | 103 | .mask = 0xff3f0000, |
104 | }, | 104 | }, |
105 | /* Die: LS1028A, SoC: LS1028A */ | ||
106 | { .die = "LS1028A", | ||
107 | .svr = 0x870b0000, | ||
108 | .mask = 0xff3f0000, | ||
109 | }, | ||
105 | { }, | 110 | { }, |
106 | }; | 111 | }; |
107 | 112 | ||
@@ -224,6 +229,7 @@ static const struct of_device_id fsl_guts_of_match[] = { | |||
224 | { .compatible = "fsl,ls1012a-dcfg", }, | 229 | { .compatible = "fsl,ls1012a-dcfg", }, |
225 | { .compatible = "fsl,ls1046a-dcfg", }, | 230 | { .compatible = "fsl,ls1046a-dcfg", }, |
226 | { .compatible = "fsl,lx2160a-dcfg", }, | 231 | { .compatible = "fsl,lx2160a-dcfg", }, |
232 | { .compatible = "fsl,ls1028a-dcfg", }, | ||
227 | {} | 233 | {} |
228 | }; | 234 | }; |
229 | MODULE_DEVICE_TABLE(of, fsl_guts_of_match); | 235 | MODULE_DEVICE_TABLE(of, fsl_guts_of_match); |
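
The new LS1028A row is matched the same way as the existing die entries: an entry applies when (svr & entry->mask) equals the entry's svr value. A standalone sketch of that check, not the guts driver itself; the 0x870b0010 test value is a made-up LS1028A-style SVR:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

struct die_attr {
	const char *die;
	uint32_t svr;
	uint32_t mask;
};

static const struct die_attr dies[] = {
	{ "LS1028A", 0x870b0000, 0xff3f0000 },
	{ NULL, 0, 0 },
};

static const struct die_attr *die_match(uint32_t svr)
{
	const struct die_attr *d;

	for (d = dies; d->die; d++)
		if ((svr & d->mask) == d->svr)
			return d;
	return NULL;
}

int main(void)
{
	uint32_t svr = 0x870b0010;	/* hypothetical SVR value */
	const struct die_attr *d = die_match(svr);

	printf("SVR 0x%08" PRIx32 " -> %s\n", svr, d ? d->die : "unknown");
	return 0;
}
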
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c index f84ab596bde8..f4fb527d8301 100644 --- a/drivers/soc/fsl/qbman/bman.c +++ b/drivers/soc/fsl/qbman/bman.c | |||
@@ -635,30 +635,31 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits) | |||
635 | return 0; | 635 | return 0; |
636 | } | 636 | } |
637 | 637 | ||
638 | static int bm_shutdown_pool(u32 bpid) | 638 | int bm_shutdown_pool(u32 bpid) |
639 | { | 639 | { |
640 | int err = 0; | ||
640 | struct bm_mc_command *bm_cmd; | 641 | struct bm_mc_command *bm_cmd; |
641 | union bm_mc_result *bm_res; | 642 | union bm_mc_result *bm_res; |
642 | 643 | ||
644 | |||
645 | struct bman_portal *p = get_affine_portal(); | ||
643 | while (1) { | 646 | while (1) { |
644 | struct bman_portal *p = get_affine_portal(); | ||
645 | /* Acquire buffers until empty */ | 647 | /* Acquire buffers until empty */ |
646 | bm_cmd = bm_mc_start(&p->p); | 648 | bm_cmd = bm_mc_start(&p->p); |
647 | bm_cmd->bpid = bpid; | 649 | bm_cmd->bpid = bpid; |
648 | bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1); | 650 | bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1); |
649 | if (!bm_mc_result_timeout(&p->p, &bm_res)) { | 651 | if (!bm_mc_result_timeout(&p->p, &bm_res)) { |
650 | put_affine_portal(); | ||
651 | pr_crit("BMan Acquire Command timedout\n"); | 652 | pr_crit("BMan Acquire Command timedout\n"); |
652 | return -ETIMEDOUT; | 653 | err = -ETIMEDOUT; |
654 | goto done; | ||
653 | } | 655 | } |
654 | if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) { | 656 | if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) { |
655 | put_affine_portal(); | ||
656 | /* Pool is empty */ | 657 | /* Pool is empty */ |
657 | return 0; | 658 | goto done; |
658 | } | 659 | } |
659 | put_affine_portal(); | ||
660 | } | 660 | } |
661 | 661 | done: | |
662 | put_affine_portal(); | ||
662 | return 0; | 663 | return 0; |
663 | } | 664 | } |
664 | 665 | ||
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c index 7c3cc968053c..cb24a08be084 100644 --- a/drivers/soc/fsl/qbman/bman_ccsr.c +++ b/drivers/soc/fsl/qbman/bman_ccsr.c | |||
@@ -97,17 +97,40 @@ static void bm_get_version(u16 *id, u8 *major, u8 *minor) | |||
97 | /* signal transactions for FBPRs with higher priority */ | 97 | /* signal transactions for FBPRs with higher priority */ |
98 | #define FBPR_AR_RPRIO_HI BIT(30) | 98 | #define FBPR_AR_RPRIO_HI BIT(30) |
99 | 99 | ||
100 | static void bm_set_memory(u64 ba, u32 size) | 100 | /* Track if probe has occurred and if cleanup is required */ |
101 | static int __bman_probed; | ||
102 | static int __bman_requires_cleanup; | ||
103 | |||
104 | |||
105 | static int bm_set_memory(u64 ba, u32 size) | ||
101 | { | 106 | { |
107 | u32 bar, bare; | ||
102 | u32 exp = ilog2(size); | 108 | u32 exp = ilog2(size); |
103 | /* choke if size isn't within range */ | 109 | /* choke if size isn't within range */ |
104 | DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 && | 110 | DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 && |
105 | is_power_of_2(size)); | 111 | is_power_of_2(size)); |
106 | /* choke if '[e]ba' has lower-alignment than 'size' */ | 112 | /* choke if '[e]ba' has lower-alignment than 'size' */ |
107 | DPAA_ASSERT(!(ba & (size - 1))); | 113 | DPAA_ASSERT(!(ba & (size - 1))); |
114 | |||
115 | /* Check to see if BMan has already been initialized */ | ||
116 | bar = bm_ccsr_in(REG_FBPR_BAR); | ||
117 | if (bar) { | ||
118 | /* Make sure ba == what was programmed */ | ||
119 | bare = bm_ccsr_in(REG_FBPR_BARE); | ||
120 | if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) { | ||
121 | pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n", | ||
122 | ba, bare, bar); | ||
123 | return -ENOMEM; | ||
124 | } | ||
125 | pr_info("BMan BAR already configured\n"); | ||
126 | __bman_requires_cleanup = 1; | ||
127 | return 1; | ||
128 | } | ||
129 | |||
108 | bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba)); | 130 | bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba)); |
109 | bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba)); | 131 | bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba)); |
110 | bm_ccsr_out(REG_FBPR_AR, exp - 1); | 132 | bm_ccsr_out(REG_FBPR_AR, exp - 1); |
133 | return 0; | ||
111 | } | 134 | } |
112 | 135 | ||
113 | /* | 136 | /* |
@@ -120,7 +143,6 @@ static void bm_set_memory(u64 ba, u32 size) | |||
120 | */ | 143 | */ |
121 | static dma_addr_t fbpr_a; | 144 | static dma_addr_t fbpr_a; |
122 | static size_t fbpr_sz; | 145 | static size_t fbpr_sz; |
123 | static int __bman_probed; | ||
124 | 146 | ||
125 | static int bman_fbpr(struct reserved_mem *rmem) | 147 | static int bman_fbpr(struct reserved_mem *rmem) |
126 | { | 148 | { |
@@ -173,6 +195,16 @@ int bman_is_probed(void) | |||
173 | } | 195 | } |
174 | EXPORT_SYMBOL_GPL(bman_is_probed); | 196 | EXPORT_SYMBOL_GPL(bman_is_probed); |
175 | 197 | ||
198 | int bman_requires_cleanup(void) | ||
199 | { | ||
200 | return __bman_requires_cleanup; | ||
201 | } | ||
202 | |||
203 | void bman_done_cleanup(void) | ||
204 | { | ||
205 | __bman_requires_cleanup = 0; | ||
206 | } | ||
207 | |||
176 | static int fsl_bman_probe(struct platform_device *pdev) | 208 | static int fsl_bman_probe(struct platform_device *pdev) |
177 | { | 209 | { |
178 | int ret, err_irq; | 210 | int ret, err_irq; |
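
bm_set_memory() above (and the analogous qm_set_memory() further down) now returns a three-way result: a negative errno when a previously programmed BAR disagrees with the requested base, 1 when the block was left programmed by an earlier kernel (the kexec case), and 0 when it was programmed fresh. A standalone sketch of how a caller is expected to interpret that return value, with made-up function names:

#include <stdio.h>

/*
 * Toy stand-in for bm_set_memory()/qm_set_memory(): returns <0 on a BAR
 * mismatch, 1 if the hardware was already programmed (e.g. after kexec),
 * 0 if the base/size were just written.
 */
static int set_memory(int already_programmed, int mismatch)
{
	if (already_programmed)
		return mismatch ? -1 : 1;
	return 0;
}

int main(void)
{
	int err = set_memory(1, 0);

	if (err < 0) {
		fprintf(stderr, "cannot reinitialize with a different BAR\n");
		return 1;
	}
	if (err == 1)
		printf("already programmed: skip re-init, schedule cleanup\n");
	else
		printf("freshly programmed: run the normal init path\n");
	return 0;
}
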
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c index cf4f10d6f590..923c44063a9a 100644 --- a/drivers/soc/fsl/qbman/bman_portal.c +++ b/drivers/soc/fsl/qbman/bman_portal.c | |||
@@ -100,7 +100,7 @@ static int bman_portal_probe(struct platform_device *pdev) | |||
100 | struct device_node *node = dev->of_node; | 100 | struct device_node *node = dev->of_node; |
101 | struct bm_portal_config *pcfg; | 101 | struct bm_portal_config *pcfg; |
102 | struct resource *addr_phys[2]; | 102 | struct resource *addr_phys[2]; |
103 | int irq, cpu, err; | 103 | int irq, cpu, err, i; |
104 | 104 | ||
105 | err = bman_is_probed(); | 105 | err = bman_is_probed(); |
106 | if (!err) | 106 | if (!err) |
@@ -135,10 +135,8 @@ static int bman_portal_probe(struct platform_device *pdev) | |||
135 | pcfg->cpu = -1; | 135 | pcfg->cpu = -1; |
136 | 136 | ||
137 | irq = platform_get_irq(pdev, 0); | 137 | irq = platform_get_irq(pdev, 0); |
138 | if (irq <= 0) { | 138 | if (irq <= 0) |
139 | dev_err(dev, "Can't get %pOF IRQ'\n", node); | ||
140 | goto err_ioremap1; | 139 | goto err_ioremap1; |
141 | } | ||
142 | pcfg->irq = irq; | 140 | pcfg->irq = irq; |
143 | 141 | ||
144 | pcfg->addr_virt_ce = memremap(addr_phys[0]->start, | 142 | pcfg->addr_virt_ce = memremap(addr_phys[0]->start, |
@@ -178,6 +176,22 @@ static int bman_portal_probe(struct platform_device *pdev) | |||
178 | if (!cpu_online(cpu)) | 176 | if (!cpu_online(cpu)) |
179 | bman_offline_cpu(cpu); | 177 | bman_offline_cpu(cpu); |
180 | 178 | ||
179 | if (__bman_portals_probed == 1 && bman_requires_cleanup()) { | ||
180 | /* | ||
181 | * BMan wasn't reset prior to boot (Kexec for example) | ||
182 | * Empty all the buffer pools so they are in reset state | ||
183 | */ | ||
184 | for (i = 0; i < BM_POOL_MAX; i++) { | ||
185 | err = bm_shutdown_pool(i); | ||
186 | if (err) { | ||
187 | dev_err(dev, "Failed to shutdown bpool %d\n", | ||
188 | i); | ||
189 | goto err_portal_init; | ||
190 | } | ||
191 | } | ||
192 | bman_done_cleanup(); | ||
193 | } | ||
194 | |||
181 | return 0; | 195 | return 0; |
182 | 196 | ||
183 | err_portal_init: | 197 | err_portal_init: |
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h index 751ce90383b7..aa3981e04965 100644 --- a/drivers/soc/fsl/qbman/bman_priv.h +++ b/drivers/soc/fsl/qbman/bman_priv.h | |||
@@ -76,3 +76,8 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits); | |||
76 | 76 | ||
77 | const struct bm_portal_config * | 77 | const struct bm_portal_config * |
78 | bman_get_bm_portal_config(const struct bman_portal *portal); | 78 | bman_get_bm_portal_config(const struct bman_portal *portal); |
79 | |||
80 | int bman_requires_cleanup(void); | ||
81 | void bman_done_cleanup(void); | ||
82 | |||
83 | int bm_shutdown_pool(u32 bpid); | ||
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c index e6d48dccb8d5..9dd8bb571dbc 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.c +++ b/drivers/soc/fsl/qbman/dpaa_sys.c | |||
@@ -37,42 +37,53 @@ | |||
37 | int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr, | 37 | int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr, |
38 | size_t *size) | 38 | size_t *size) |
39 | { | 39 | { |
40 | int ret; | ||
41 | struct device_node *mem_node; | 40 | struct device_node *mem_node; |
42 | u64 size64; | 41 | struct reserved_mem *rmem; |
42 | struct property *prop; | ||
43 | int len, err; | ||
44 | __be32 *res_array; | ||
43 | 45 | ||
44 | ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx); | 46 | mem_node = of_parse_phandle(dev->of_node, "memory-region", idx); |
45 | if (ret) { | 47 | if (!mem_node) { |
46 | dev_err(dev, | ||
47 | "of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n", | ||
48 | idx, ret); | ||
49 | return -ENODEV; | ||
50 | } | ||
51 | mem_node = of_parse_phandle(dev->of_node, "memory-region", 0); | ||
52 | if (mem_node) { | ||
53 | ret = of_property_read_u64(mem_node, "size", &size64); | ||
54 | if (ret) { | ||
55 | dev_err(dev, "of_address_to_resource fails 0x%x\n", | ||
56 | ret); | ||
57 | return -ENODEV; | ||
58 | } | ||
59 | *size = size64; | ||
60 | } else { | ||
61 | dev_err(dev, "No memory-region found for index %d\n", idx); | 48 | dev_err(dev, "No memory-region found for index %d\n", idx); |
62 | return -ENODEV; | 49 | return -ENODEV; |
63 | } | 50 | } |
64 | 51 | ||
65 | if (!dma_alloc_coherent(dev, *size, addr, 0)) { | 52 | rmem = of_reserved_mem_lookup(mem_node); |
66 | dev_err(dev, "DMA Alloc memory failed\n"); | 53 | if (!rmem) { |
54 | dev_err(dev, "of_reserved_mem_lookup() returned NULL\n"); | ||
67 | return -ENODEV; | 55 | return -ENODEV; |
68 | } | 56 | } |
57 | *addr = rmem->base; | ||
58 | *size = rmem->size; | ||
69 | 59 | ||
70 | /* | 60 | /* |
71 | * Disassociate the reserved memory area from the device | 61 | * Check if the reg property exists - if not, insert it into the node |
72 | * because a device can only have one DMA memory area. This | 62 | * so upon kexec() the same memory region address will be preserved. |
73 | * should be fine since the memory is allocated and initialized | 63 | * This is needed because QBMan HW does not allow the base address/ |
74 | * and only ever accessed by the QBMan device from now on | 64 | * size to be modified once set. |
75 | */ | 65 | */ |
76 | of_reserved_mem_device_release(dev); | 66 | prop = of_find_property(mem_node, "reg", &len); |
67 | if (!prop) { | ||
68 | prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL); | ||
69 | if (!prop) | ||
70 | return -ENOMEM; | ||
71 | prop->value = res_array = devm_kzalloc(dev, sizeof(__be32) * 4, | ||
72 | GFP_KERNEL); | ||
73 | if (!prop->value) | ||
74 | return -ENOMEM; | ||
75 | res_array[0] = cpu_to_be32(upper_32_bits(*addr)); | ||
76 | res_array[1] = cpu_to_be32(lower_32_bits(*addr)); | ||
77 | res_array[2] = cpu_to_be32(upper_32_bits(*size)); | ||
78 | res_array[3] = cpu_to_be32(lower_32_bits(*size)); | ||
79 | prop->length = sizeof(__be32) * 4; | ||
80 | prop->name = devm_kstrdup(dev, "reg", GFP_KERNEL); | ||
81 | if (!prop->name) | ||
82 | return -ENOMEM; | ||
83 | err = of_add_property(mem_node, prop); | ||
84 | if (err) | ||
85 | return err; | ||
86 | } | ||
87 | |||
77 | return 0; | 88 | return 0; |
78 | } | 89 | } |
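
The res_array[] built above encodes the reserved region as four big-endian 32-bit cells: upper and lower half of the base address, then upper and lower half of the size. That matches a 'reg' property for a node using two address cells and two size cells (the usual layout for 64-bit reserved-memory regions; the cell counts are an assumption here, not stated in the diff). A standalone sketch of the encoding:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl()/ntohl() stand in for cpu_to_be32() */

/* Build the four big-endian 32-bit cells stored in res_array[]:
 * upper/lower half of the base address, then upper/lower half of the size. */
static void encode_reg(uint64_t base, uint64_t size, uint32_t cells[4])
{
	cells[0] = htonl((uint32_t)(base >> 32));
	cells[1] = htonl((uint32_t)base);
	cells[2] = htonl((uint32_t)(size >> 32));
	cells[3] = htonl((uint32_t)size);
}

int main(void)
{
	uint32_t cells[4];

	/* hypothetical reserved region: 16 MiB at 0x1f0000000 */
	encode_reg(0x1f0000000ULL, 16ULL << 20, cells);
	printf("base hi/lo = 0x%08x 0x%08x, size hi/lo = 0x%08x 0x%08x\n",
	       ntohl(cells[0]), ntohl(cells[1]),
	       ntohl(cells[2]), ntohl(cells[3]));
	return 0;
}
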
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 636f83f781f5..bf68d86d80ee 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c | |||
@@ -1018,6 +1018,20 @@ static inline void put_affine_portal(void) | |||
1018 | put_cpu_var(qman_affine_portal); | 1018 | put_cpu_var(qman_affine_portal); |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | |||
1022 | static inline struct qman_portal *get_portal_for_channel(u16 channel) | ||
1023 | { | ||
1024 | int i; | ||
1025 | |||
1026 | for (i = 0; i < num_possible_cpus(); i++) { | ||
1027 | if (affine_portals[i] && | ||
1028 | affine_portals[i]->config->channel == channel) | ||
1029 | return affine_portals[i]; | ||
1030 | } | ||
1031 | |||
1032 | return NULL; | ||
1033 | } | ||
1034 | |||
1021 | static struct workqueue_struct *qm_portal_wq; | 1035 | static struct workqueue_struct *qm_portal_wq; |
1022 | 1036 | ||
1023 | int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh) | 1037 | int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh) |
@@ -1070,6 +1084,20 @@ int qman_wq_alloc(void) | |||
1070 | return 0; | 1084 | return 0; |
1071 | } | 1085 | } |
1072 | 1086 | ||
1087 | |||
1088 | void qman_enable_irqs(void) | ||
1089 | { | ||
1090 | int i; | ||
1091 | |||
1092 | for (i = 0; i < num_possible_cpus(); i++) { | ||
1093 | if (affine_portals[i]) { | ||
1094 | qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff); | ||
1095 | qm_out(&affine_portals[i]->p, QM_REG_IIR, 0); | ||
1096 | } | ||
1097 | |||
1098 | } | ||
1099 | } | ||
1100 | |||
1073 | /* | 1101 | /* |
1074 | * This is what everything can wait on, even if it migrates to a different cpu | 1102 | * This is what everything can wait on, even if it migrates to a different cpu |
1075 | * to the one whose affine portal it is waiting on. | 1103 | * to the one whose affine portal it is waiting on. |
@@ -1164,6 +1192,7 @@ static int drain_mr_fqrni(struct qm_portal *p) | |||
1164 | { | 1192 | { |
1165 | const union qm_mr_entry *msg; | 1193 | const union qm_mr_entry *msg; |
1166 | loop: | 1194 | loop: |
1195 | qm_mr_pvb_update(p); | ||
1167 | msg = qm_mr_current(p); | 1196 | msg = qm_mr_current(p); |
1168 | if (!msg) { | 1197 | if (!msg) { |
1169 | /* | 1198 | /* |
@@ -1180,7 +1209,8 @@ loop: | |||
1180 | * entries well before the ring has been fully consumed, so | 1209 | * entries well before the ring has been fully consumed, so |
1181 | * we're being *really* paranoid here. | 1210 | * we're being *really* paranoid here. |
1182 | */ | 1211 | */ |
1183 | msleep(1); | 1212 | mdelay(1); |
1213 | qm_mr_pvb_update(p); | ||
1184 | msg = qm_mr_current(p); | 1214 | msg = qm_mr_current(p); |
1185 | if (!msg) | 1215 | if (!msg) |
1186 | return 0; | 1216 | return 0; |
@@ -1267,8 +1297,8 @@ static int qman_create_portal(struct qman_portal *portal, | |||
1267 | qm_out(p, QM_REG_ISDR, isdr); | 1297 | qm_out(p, QM_REG_ISDR, isdr); |
1268 | portal->irq_sources = 0; | 1298 | portal->irq_sources = 0; |
1269 | qm_out(p, QM_REG_IER, 0); | 1299 | qm_out(p, QM_REG_IER, 0); |
1270 | qm_out(p, QM_REG_ISR, 0xffffffff); | ||
1271 | snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); | 1300 | snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); |
1301 | qm_out(p, QM_REG_IIR, 1); | ||
1272 | if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { | 1302 | if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { |
1273 | dev_err(c->dev, "request_irq() failed\n"); | 1303 | dev_err(c->dev, "request_irq() failed\n"); |
1274 | goto fail_irq; | 1304 | goto fail_irq; |
@@ -1288,7 +1318,7 @@ static int qman_create_portal(struct qman_portal *portal, | |||
1288 | isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI); | 1318 | isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI); |
1289 | qm_out(p, QM_REG_ISDR, isdr); | 1319 | qm_out(p, QM_REG_ISDR, isdr); |
1290 | if (qm_dqrr_current(p)) { | 1320 | if (qm_dqrr_current(p)) { |
1291 | dev_err(c->dev, "DQRR unclean\n"); | 1321 | dev_dbg(c->dev, "DQRR unclean\n"); |
1292 | qm_dqrr_cdc_consume_n(p, 0xffff); | 1322 | qm_dqrr_cdc_consume_n(p, 0xffff); |
1293 | } | 1323 | } |
1294 | if (qm_mr_current(p) && drain_mr_fqrni(p)) { | 1324 | if (qm_mr_current(p) && drain_mr_fqrni(p)) { |
@@ -1301,8 +1331,10 @@ static int qman_create_portal(struct qman_portal *portal, | |||
1301 | } | 1331 | } |
1302 | /* Success */ | 1332 | /* Success */ |
1303 | portal->config = c; | 1333 | portal->config = c; |
1334 | qm_out(p, QM_REG_ISR, 0xffffffff); | ||
1304 | qm_out(p, QM_REG_ISDR, 0); | 1335 | qm_out(p, QM_REG_ISDR, 0); |
1305 | qm_out(p, QM_REG_IIR, 0); | 1336 | if (!qman_requires_cleanup()) |
1337 | qm_out(p, QM_REG_IIR, 0); | ||
1306 | /* Write a sane SDQCR */ | 1338 | /* Write a sane SDQCR */ |
1307 | qm_dqrr_sdqcr_set(p, portal->sdqcr); | 1339 | qm_dqrr_sdqcr_set(p, portal->sdqcr); |
1308 | return 0; | 1340 | return 0; |
@@ -2581,9 +2613,9 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, | |||
2581 | #define qm_dqrr_drain_nomatch(p) \ | 2613 | #define qm_dqrr_drain_nomatch(p) \ |
2582 | _qm_dqrr_consume_and_match(p, 0, 0, false) | 2614 | _qm_dqrr_consume_and_match(p, 0, 0, false) |
2583 | 2615 | ||
2584 | static int qman_shutdown_fq(u32 fqid) | 2616 | int qman_shutdown_fq(u32 fqid) |
2585 | { | 2617 | { |
2586 | struct qman_portal *p; | 2618 | struct qman_portal *p, *channel_portal; |
2587 | struct device *dev; | 2619 | struct device *dev; |
2588 | union qm_mc_command *mcc; | 2620 | union qm_mc_command *mcc; |
2589 | union qm_mc_result *mcr; | 2621 | union qm_mc_result *mcr; |
@@ -2623,17 +2655,28 @@ static int qman_shutdown_fq(u32 fqid) | |||
2623 | channel = qm_fqd_get_chan(&mcr->queryfq.fqd); | 2655 | channel = qm_fqd_get_chan(&mcr->queryfq.fqd); |
2624 | wq = qm_fqd_get_wq(&mcr->queryfq.fqd); | 2656 | wq = qm_fqd_get_wq(&mcr->queryfq.fqd); |
2625 | 2657 | ||
2658 | if (channel < qm_channel_pool1) { | ||
2659 | channel_portal = get_portal_for_channel(channel); | ||
2660 | if (channel_portal == NULL) { | ||
2661 | dev_err(dev, "Can't find portal for dedicated channel 0x%x\n", | ||
2662 | channel); | ||
2663 | ret = -EIO; | ||
2664 | goto out; | ||
2665 | } | ||
2666 | } else | ||
2667 | channel_portal = p; | ||
2668 | |||
2626 | switch (state) { | 2669 | switch (state) { |
2627 | case QM_MCR_NP_STATE_TEN_SCHED: | 2670 | case QM_MCR_NP_STATE_TEN_SCHED: |
2628 | case QM_MCR_NP_STATE_TRU_SCHED: | 2671 | case QM_MCR_NP_STATE_TRU_SCHED: |
2629 | case QM_MCR_NP_STATE_ACTIVE: | 2672 | case QM_MCR_NP_STATE_ACTIVE: |
2630 | case QM_MCR_NP_STATE_PARKED: | 2673 | case QM_MCR_NP_STATE_PARKED: |
2631 | orl_empty = 0; | 2674 | orl_empty = 0; |
2632 | mcc = qm_mc_start(&p->p); | 2675 | mcc = qm_mc_start(&channel_portal->p); |
2633 | qm_fqid_set(&mcc->fq, fqid); | 2676 | qm_fqid_set(&mcc->fq, fqid); |
2634 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); | 2677 | qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE); |
2635 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 2678 | if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) { |
2636 | dev_err(dev, "QUERYFQ_NP timeout\n"); | 2679 | dev_err(dev, "ALTER_RETIRE timeout\n"); |
2637 | ret = -ETIMEDOUT; | 2680 | ret = -ETIMEDOUT; |
2638 | goto out; | 2681 | goto out; |
2639 | } | 2682 | } |
@@ -2641,6 +2684,9 @@ static int qman_shutdown_fq(u32 fqid) | |||
2641 | QM_MCR_VERB_ALTER_RETIRE); | 2684 | QM_MCR_VERB_ALTER_RETIRE); |
2642 | res = mcr->result; /* Make a copy as we reuse MCR below */ | 2685 | res = mcr->result; /* Make a copy as we reuse MCR below */ |
2643 | 2686 | ||
2687 | if (res == QM_MCR_RESULT_OK) | ||
2688 | drain_mr_fqrni(&channel_portal->p); | ||
2689 | |||
2644 | if (res == QM_MCR_RESULT_PENDING) { | 2690 | if (res == QM_MCR_RESULT_PENDING) { |
2645 | /* | 2691 | /* |
2646 | * Need to wait for the FQRN in the message ring, which | 2692 | * Need to wait for the FQRN in the message ring, which |
@@ -2670,21 +2716,25 @@ static int qman_shutdown_fq(u32 fqid) | |||
2670 | } | 2716 | } |
2671 | /* Set the sdqcr to drain this channel */ | 2717 | /* Set the sdqcr to drain this channel */ |
2672 | if (channel < qm_channel_pool1) | 2718 | if (channel < qm_channel_pool1) |
2673 | qm_dqrr_sdqcr_set(&p->p, | 2719 | qm_dqrr_sdqcr_set(&channel_portal->p, |
2674 | QM_SDQCR_TYPE_ACTIVE | | 2720 | QM_SDQCR_TYPE_ACTIVE | |
2675 | QM_SDQCR_CHANNELS_DEDICATED); | 2721 | QM_SDQCR_CHANNELS_DEDICATED); |
2676 | else | 2722 | else |
2677 | qm_dqrr_sdqcr_set(&p->p, | 2723 | qm_dqrr_sdqcr_set(&channel_portal->p, |
2678 | QM_SDQCR_TYPE_ACTIVE | | 2724 | QM_SDQCR_TYPE_ACTIVE | |
2679 | QM_SDQCR_CHANNELS_POOL_CONV | 2725 | QM_SDQCR_CHANNELS_POOL_CONV |
2680 | (channel)); | 2726 | (channel)); |
2681 | do { | 2727 | do { |
2682 | /* Keep draining DQRR while checking the MR*/ | 2728 | /* Keep draining DQRR while checking the MR*/ |
2683 | qm_dqrr_drain_nomatch(&p->p); | 2729 | qm_dqrr_drain_nomatch(&channel_portal->p); |
2684 | /* Process message ring too */ | 2730 | /* Process message ring too */ |
2685 | found_fqrn = qm_mr_drain(&p->p, FQRN); | 2731 | found_fqrn = qm_mr_drain(&channel_portal->p, |
2732 | FQRN); | ||
2686 | cpu_relax(); | 2733 | cpu_relax(); |
2687 | } while (!found_fqrn); | 2734 | } while (!found_fqrn); |
2735 | /* Restore SDQCR */ | ||
2736 | qm_dqrr_sdqcr_set(&channel_portal->p, | ||
2737 | channel_portal->sdqcr); | ||
2688 | 2738 | ||
2689 | } | 2739 | } |
2690 | if (res != QM_MCR_RESULT_OK && | 2740 | if (res != QM_MCR_RESULT_OK && |
@@ -2715,9 +2765,8 @@ static int qman_shutdown_fq(u32 fqid) | |||
2715 | * Wait for a dequeue and process the dequeues, | 2765 | * Wait for a dequeue and process the dequeues, |
2716 | * making sure to empty the ring completely | 2766 | * making sure to empty the ring completely |
2717 | */ | 2767 | */ |
2718 | } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY)); | 2768 | } while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY)); |
2719 | } | 2769 | } |
2720 | qm_dqrr_sdqcr_set(&p->p, 0); | ||
2721 | 2770 | ||
2722 | while (!orl_empty) { | 2771 | while (!orl_empty) { |
2723 | /* Wait for the ORL to have been completely drained */ | 2772 | /* Wait for the ORL to have been completely drained */ |
@@ -2754,7 +2803,7 @@ static int qman_shutdown_fq(u32 fqid) | |||
2754 | 2803 | ||
2755 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == | 2804 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == |
2756 | QM_MCR_VERB_ALTER_OOS); | 2805 | QM_MCR_VERB_ALTER_OOS); |
2757 | if (mcr->result) { | 2806 | if (mcr->result != QM_MCR_RESULT_OK) { |
2758 | dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", | 2807 | dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", |
2759 | fqid, mcr->result); | 2808 | fqid, mcr->result); |
2760 | ret = -EIO; | 2809 | ret = -EIO; |
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c index a6bb43007d03..157659fd033a 100644 --- a/drivers/soc/fsl/qbman/qman_ccsr.c +++ b/drivers/soc/fsl/qbman/qman_ccsr.c | |||
@@ -274,6 +274,7 @@ static u32 __iomem *qm_ccsr_start; | |||
274 | /* A SDQCR mask comprising all the available/visible pool channels */ | 274 | /* A SDQCR mask comprising all the available/visible pool channels */ |
275 | static u32 qm_pools_sdqcr; | 275 | static u32 qm_pools_sdqcr; |
276 | static int __qman_probed; | 276 | static int __qman_probed; |
277 | static int __qman_requires_cleanup; | ||
277 | 278 | ||
278 | static inline u32 qm_ccsr_in(u32 offset) | 279 | static inline u32 qm_ccsr_in(u32 offset) |
279 | { | 280 | { |
@@ -340,19 +341,55 @@ static void qm_get_version(u16 *id, u8 *major, u8 *minor) | |||
340 | } | 341 | } |
341 | 342 | ||
342 | #define PFDR_AR_EN BIT(31) | 343 | #define PFDR_AR_EN BIT(31) |
343 | static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size) | 344 | static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size) |
344 | { | 345 | { |
346 | void *ptr; | ||
345 | u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; | 347 | u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; |
346 | u32 exp = ilog2(size); | 348 | u32 exp = ilog2(size); |
349 | u32 bar, bare; | ||
347 | 350 | ||
348 | /* choke if size isn't within range */ | 351 | /* choke if size isn't within range */ |
349 | DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) && | 352 | DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) && |
350 | is_power_of_2(size)); | 353 | is_power_of_2(size)); |
351 | /* choke if 'ba' has lower-alignment than 'size' */ | 354 | /* choke if 'ba' has lower-alignment than 'size' */ |
352 | DPAA_ASSERT(!(ba & (size - 1))); | 355 | DPAA_ASSERT(!(ba & (size - 1))); |
356 | |||
357 | /* Check to see if QMan has already been initialized */ | ||
358 | bar = qm_ccsr_in(offset + REG_offset_BAR); | ||
359 | if (bar) { | ||
360 | /* Make sure ba == what was programmed */ | ||
361 | bare = qm_ccsr_in(offset); | ||
362 | if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) { | ||
363 | pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n", | ||
364 | ba, bare, bar); | ||
365 | return -ENOMEM; | ||
366 | } | ||
367 | __qman_requires_cleanup = 1; | ||
368 | /* Return 1 to indicate memory was previously programmed */ | ||
369 | return 1; | ||
370 | } | ||
371 | /* Need to temporarily map the area to make sure it is zeroed */ | ||
372 | ptr = memremap(ba, size, MEMREMAP_WB); | ||
373 | if (!ptr) { | ||
374 | pr_crit("memremap() of QMan private memory failed\n"); | ||
375 | return -ENOMEM; | ||
376 | } | ||
377 | memset(ptr, 0, size); | ||
378 | |||
379 | #ifdef CONFIG_PPC | ||
380 | /* | ||
381 | * PPC doesn't appear to flush the cache on memunmap() but the | ||
382 | * cache must be flushed since QMan does non coherent accesses | ||
383 | * to this memory | ||
384 | */ | ||
385 | flush_dcache_range((unsigned long) ptr, (unsigned long) ptr+size); | ||
386 | #endif | ||
387 | memunmap(ptr); | ||
388 | |||
353 | qm_ccsr_out(offset, upper_32_bits(ba)); | 389 | qm_ccsr_out(offset, upper_32_bits(ba)); |
354 | qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba)); | 390 | qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba)); |
355 | qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1)); | 391 | qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1)); |
392 | return 0; | ||
356 | } | 393 | } |
357 | 394 | ||
358 | static void qm_set_pfdr_threshold(u32 th, u8 k) | 395 | static void qm_set_pfdr_threshold(u32 th, u8 k) |
@@ -455,7 +492,7 @@ RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr); | |||
455 | 492 | ||
456 | #endif | 493 | #endif |
457 | 494 | ||
458 | static unsigned int qm_get_fqid_maxcnt(void) | 495 | unsigned int qm_get_fqid_maxcnt(void) |
459 | { | 496 | { |
460 | return fqd_sz / 64; | 497 | return fqd_sz / 64; |
461 | } | 498 | } |
@@ -571,12 +608,19 @@ static int qman_init_ccsr(struct device *dev) | |||
571 | int i, err; | 608 | int i, err; |
572 | 609 | ||
573 | /* FQD memory */ | 610 | /* FQD memory */ |
574 | qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz); | 611 | err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz); |
612 | if (err < 0) | ||
613 | return err; | ||
575 | /* PFDR memory */ | 614 | /* PFDR memory */ |
576 | qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz); | 615 | err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz); |
577 | err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8); | 616 | if (err < 0) |
578 | if (err) | ||
579 | return err; | 617 | return err; |
618 | /* Only initialize PFDRs if the QMan was not initialized before */ | ||
619 | if (err == 0) { | ||
620 | err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8); | ||
621 | if (err) | ||
622 | return err; | ||
623 | } | ||
580 | /* thresholds */ | 624 | /* thresholds */ |
581 | qm_set_pfdr_threshold(512, 64); | 625 | qm_set_pfdr_threshold(512, 64); |
582 | qm_set_sfdr_threshold(128); | 626 | qm_set_sfdr_threshold(128); |
@@ -693,6 +737,18 @@ int qman_is_probed(void) | |||
693 | } | 737 | } |
694 | EXPORT_SYMBOL_GPL(qman_is_probed); | 738 | EXPORT_SYMBOL_GPL(qman_is_probed); |
695 | 739 | ||
740 | int qman_requires_cleanup(void) | ||
741 | { | ||
742 | return __qman_requires_cleanup; | ||
743 | } | ||
744 | |||
745 | void qman_done_cleanup(void) | ||
746 | { | ||
747 | qman_enable_irqs(); | ||
748 | __qman_requires_cleanup = 0; | ||
749 | } | ||
750 | |||
751 | |||
696 | static int fsl_qman_probe(struct platform_device *pdev) | 752 | static int fsl_qman_probe(struct platform_device *pdev) |
697 | { | 753 | { |
698 | struct device *dev = &pdev->dev; | 754 | struct device *dev = &pdev->dev; |
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index e2186b681d87..5685b6706893 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c | |||
@@ -233,7 +233,7 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
233 | struct device_node *node = dev->of_node; | 233 | struct device_node *node = dev->of_node; |
234 | struct qm_portal_config *pcfg; | 234 | struct qm_portal_config *pcfg; |
235 | struct resource *addr_phys[2]; | 235 | struct resource *addr_phys[2]; |
236 | int irq, cpu, err; | 236 | int irq, cpu, err, i; |
237 | u32 val; | 237 | u32 val; |
238 | 238 | ||
239 | err = qman_is_probed(); | 239 | err = qman_is_probed(); |
@@ -275,10 +275,8 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
275 | pcfg->channel = val; | 275 | pcfg->channel = val; |
276 | pcfg->cpu = -1; | 276 | pcfg->cpu = -1; |
277 | irq = platform_get_irq(pdev, 0); | 277 | irq = platform_get_irq(pdev, 0); |
278 | if (irq <= 0) { | 278 | if (irq <= 0) |
279 | dev_err(dev, "Can't get %pOF IRQ\n", node); | ||
280 | goto err_ioremap1; | 279 | goto err_ioremap1; |
281 | } | ||
282 | pcfg->irq = irq; | 280 | pcfg->irq = irq; |
283 | 281 | ||
284 | pcfg->addr_virt_ce = memremap(addr_phys[0]->start, | 282 | pcfg->addr_virt_ce = memremap(addr_phys[0]->start, |
@@ -325,6 +323,22 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
325 | if (!cpu_online(cpu)) | 323 | if (!cpu_online(cpu)) |
326 | qman_offline_cpu(cpu); | 324 | qman_offline_cpu(cpu); |
327 | 325 | ||
326 | if (__qman_portals_probed == 1 && qman_requires_cleanup()) { | ||
327 | /* | ||
328 | * QMan wasn't reset prior to boot (Kexec for example) | ||
329 | * Empty all the frame queues so they are in reset state | ||
330 | */ | ||
331 | for (i = 0; i < qm_get_fqid_maxcnt(); i++) { | ||
332 | err = qman_shutdown_fq(i); | ||
333 | if (err) { | ||
334 | dev_err(dev, "Failed to shutdown frame queue %d\n", | ||
335 | i); | ||
336 | goto err_portal_init; | ||
337 | } | ||
338 | } | ||
339 | qman_done_cleanup(); | ||
340 | } | ||
341 | |||
328 | return 0; | 342 | return 0; |
329 | 343 | ||
330 | err_portal_init: | 344 | err_portal_init: |
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 04515718cfd9..fd1cf543fb81 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h | |||
@@ -272,3 +272,11 @@ extern struct qman_portal *affine_portals[NR_CPUS]; | |||
272 | extern struct qman_portal *qman_dma_portal; | 272 | extern struct qman_portal *qman_dma_portal; |
273 | const struct qm_portal_config *qman_get_qm_portal_config( | 273 | const struct qm_portal_config *qman_get_qm_portal_config( |
274 | struct qman_portal *portal); | 274 | struct qman_portal *portal); |
275 | |||
276 | unsigned int qm_get_fqid_maxcnt(void); | ||
277 | |||
278 | int qman_shutdown_fq(u32 fqid); | ||
279 | |||
280 | int qman_requires_cleanup(void); | ||
281 | void qman_done_cleanup(void); | ||
282 | void qman_enable_irqs(void); | ||
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index c9519e62308c..417df7e19281 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * General Purpose functions for the global management of the | 10 | * General Purpose functions for the global management of the |
11 | * QUICC Engine (QE). | 11 | * QUICC Engine (QE). |
12 | */ | 12 | */ |
13 | #include <linux/bitmap.h> | ||
13 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
14 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
@@ -39,29 +40,32 @@ static DEFINE_SPINLOCK(qe_lock); | |||
39 | DEFINE_SPINLOCK(cmxgcr_lock); | 40 | DEFINE_SPINLOCK(cmxgcr_lock); |
40 | EXPORT_SYMBOL(cmxgcr_lock); | 41 | EXPORT_SYMBOL(cmxgcr_lock); |
41 | 42 | ||
42 | /* QE snum state */ | ||
43 | enum qe_snum_state { | ||
44 | QE_SNUM_STATE_USED, | ||
45 | QE_SNUM_STATE_FREE | ||
46 | }; | ||
47 | |||
48 | /* QE snum */ | ||
49 | struct qe_snum { | ||
50 | u8 num; | ||
51 | enum qe_snum_state state; | ||
52 | }; | ||
53 | |||
54 | /* We allocate this here because it is used almost exclusively for | 43 | /* We allocate this here because it is used almost exclusively for |
55 | * the communication processor devices. | 44 | * the communication processor devices. |
56 | */ | 45 | */ |
57 | struct qe_immap __iomem *qe_immr; | 46 | struct qe_immap __iomem *qe_immr; |
58 | EXPORT_SYMBOL(qe_immr); | 47 | EXPORT_SYMBOL(qe_immr); |
59 | 48 | ||
60 | static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */ | 49 | static u8 snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */ |
50 | static DECLARE_BITMAP(snum_state, QE_NUM_OF_SNUM); | ||
61 | static unsigned int qe_num_of_snum; | 51 | static unsigned int qe_num_of_snum; |
62 | 52 | ||
63 | static phys_addr_t qebase = -1; | 53 | static phys_addr_t qebase = -1; |
64 | 54 | ||
55 | static struct device_node *qe_get_device_node(void) | ||
56 | { | ||
57 | struct device_node *qe; | ||
58 | |||
59 | /* | ||
60 | * Newer device trees have an "fsl,qe" compatible property for the QE | ||
61 | * node, but we still need to support older device trees. | ||
62 | */ | ||
63 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | ||
64 | if (qe) | ||
65 | return qe; | ||
66 | return of_find_node_by_type(NULL, "qe"); | ||
67 | } | ||
68 | |||
65 | static phys_addr_t get_qe_base(void) | 69 | static phys_addr_t get_qe_base(void) |
66 | { | 70 | { |
67 | struct device_node *qe; | 71 | struct device_node *qe; |
@@ -71,12 +75,9 @@ static phys_addr_t get_qe_base(void) | |||
71 | if (qebase != -1) | 75 | if (qebase != -1) |
72 | return qebase; | 76 | return qebase; |
73 | 77 | ||
74 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | 78 | qe = qe_get_device_node(); |
75 | if (!qe) { | 79 | if (!qe) |
76 | qe = of_find_node_by_type(NULL, "qe"); | 80 | return qebase; |
77 | if (!qe) | ||
78 | return qebase; | ||
79 | } | ||
80 | 81 | ||
81 | ret = of_address_to_resource(qe, 0, &res); | 82 | ret = of_address_to_resource(qe, 0, &res); |
82 | if (!ret) | 83 | if (!ret) |
@@ -170,12 +171,9 @@ unsigned int qe_get_brg_clk(void) | |||
170 | if (brg_clk) | 171 | if (brg_clk) |
171 | return brg_clk; | 172 | return brg_clk; |
172 | 173 | ||
173 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | 174 | qe = qe_get_device_node(); |
174 | if (!qe) { | 175 | if (!qe) |
175 | qe = of_find_node_by_type(NULL, "qe"); | 176 | return brg_clk; |
176 | if (!qe) | ||
177 | return brg_clk; | ||
178 | } | ||
179 | 177 | ||
180 | prop = of_get_property(qe, "brg-frequency", &size); | 178 | prop = of_get_property(qe, "brg-frequency", &size); |
181 | if (prop && size == sizeof(*prop)) | 179 | if (prop && size == sizeof(*prop)) |
@@ -281,7 +279,6 @@ EXPORT_SYMBOL(qe_clock_source); | |||
281 | */ | 279 | */ |
282 | static void qe_snums_init(void) | 280 | static void qe_snums_init(void) |
283 | { | 281 | { |
284 | int i; | ||
285 | static const u8 snum_init_76[] = { | 282 | static const u8 snum_init_76[] = { |
286 | 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D, | 283 | 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D, |
287 | 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89, | 284 | 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89, |
@@ -302,19 +299,39 @@ static void qe_snums_init(void) | |||
302 | 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59, | 299 | 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59, |
303 | 0x68, 0x69, 0x78, 0x79, 0x80, 0x81, | 300 | 0x68, 0x69, 0x78, 0x79, 0x80, 0x81, |
304 | }; | 301 | }; |
305 | static const u8 *snum_init; | 302 | struct device_node *qe; |
303 | const u8 *snum_init; | ||
304 | int i; | ||
306 | 305 | ||
307 | qe_num_of_snum = qe_get_num_of_snums(); | 306 | bitmap_zero(snum_state, QE_NUM_OF_SNUM); |
307 | qe_num_of_snum = 28; /* The default number of snum for threads is 28 */ | ||
308 | qe = qe_get_device_node(); | ||
309 | if (qe) { | ||
310 | i = of_property_read_variable_u8_array(qe, "fsl,qe-snums", | ||
311 | snums, 1, QE_NUM_OF_SNUM); | ||
312 | if (i > 0) { | ||
313 | of_node_put(qe); | ||
314 | qe_num_of_snum = i; | ||
315 | return; | ||
316 | } | ||
317 | /* | ||
318 | * Fall back to legacy binding of using the value of | ||
319 | * fsl,qe-num-snums to choose one of the static arrays | ||
320 | * above. | ||
321 | */ | ||
322 | of_property_read_u32(qe, "fsl,qe-num-snums", &qe_num_of_snum); | ||
323 | of_node_put(qe); | ||
324 | } | ||
308 | 325 | ||
309 | if (qe_num_of_snum == 76) | 326 | if (qe_num_of_snum == 76) { |
310 | snum_init = snum_init_76; | 327 | snum_init = snum_init_76; |
311 | else | 328 | } else if (qe_num_of_snum == 28 || qe_num_of_snum == 46) { |
312 | snum_init = snum_init_46; | 329 | snum_init = snum_init_46; |
313 | 330 | } else { | |
314 | for (i = 0; i < qe_num_of_snum; i++) { | 331 | pr_err("QE: unsupported value of fsl,qe-num-snums: %u\n", qe_num_of_snum); |
315 | snums[i].num = snum_init[i]; | 332 | return; |
316 | snums[i].state = QE_SNUM_STATE_FREE; | ||
317 | } | 333 | } |
334 | memcpy(snums, snum_init, qe_num_of_snum); | ||
318 | } | 335 | } |
319 | 336 | ||
320 | int qe_get_snum(void) | 337 | int qe_get_snum(void) |
@@ -324,12 +341,10 @@ int qe_get_snum(void) | |||
324 | int i; | 341 | int i; |
325 | 342 | ||
326 | spin_lock_irqsave(&qe_lock, flags); | 343 | spin_lock_irqsave(&qe_lock, flags); |
327 | for (i = 0; i < qe_num_of_snum; i++) { | 344 | i = find_first_zero_bit(snum_state, qe_num_of_snum); |
328 | if (snums[i].state == QE_SNUM_STATE_FREE) { | 345 | if (i < qe_num_of_snum) { |
329 | snums[i].state = QE_SNUM_STATE_USED; | 346 | set_bit(i, snum_state); |
330 | snum = snums[i].num; | 347 | snum = snums[i]; |
331 | break; | ||
332 | } | ||
333 | } | 348 | } |
334 | spin_unlock_irqrestore(&qe_lock, flags); | 349 | spin_unlock_irqrestore(&qe_lock, flags); |
335 | 350 | ||
@@ -339,14 +354,10 @@ EXPORT_SYMBOL(qe_get_snum); | |||
339 | 354 | ||
340 | void qe_put_snum(u8 snum) | 355 | void qe_put_snum(u8 snum) |
341 | { | 356 | { |
342 | int i; | 357 | const u8 *p = memchr(snums, snum, qe_num_of_snum); |
343 | 358 | ||
344 | for (i = 0; i < qe_num_of_snum; i++) { | 359 | if (p) |
345 | if (snums[i].num == snum) { | 360 | clear_bit(p - snums, snum_state); |
346 | snums[i].state = QE_SNUM_STATE_FREE; | ||
347 | break; | ||
348 | } | ||
349 | } | ||
350 | } | 361 | } |
351 | EXPORT_SYMBOL(qe_put_snum); | 362 | EXPORT_SYMBOL(qe_put_snum); |
352 | 363 | ||
@@ -572,16 +583,9 @@ struct qe_firmware_info *qe_get_firmware_info(void) | |||
572 | 583 | ||
573 | initialized = 1; | 584 | initialized = 1; |
574 | 585 | ||
575 | /* | 586 | qe = qe_get_device_node(); |
576 | * Newer device trees have an "fsl,qe" compatible property for the QE | 587 | if (!qe) |
577 | * node, but we still need to support older device trees. | 588 | return NULL; |
578 | */ | ||
579 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | ||
580 | if (!qe) { | ||
581 | qe = of_find_node_by_type(NULL, "qe"); | ||
582 | if (!qe) | ||
583 | return NULL; | ||
584 | } | ||
585 | 589 | ||
586 | /* Find the 'firmware' child node */ | 590 | /* Find the 'firmware' child node */ |
587 | fw = of_get_child_by_name(qe, "firmware"); | 591 | fw = of_get_child_by_name(qe, "firmware"); |
@@ -627,16 +631,9 @@ unsigned int qe_get_num_of_risc(void) | |||
627 | unsigned int num_of_risc = 0; | 631 | unsigned int num_of_risc = 0; |
628 | const u32 *prop; | 632 | const u32 *prop; |
629 | 633 | ||
630 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | 634 | qe = qe_get_device_node(); |
631 | if (!qe) { | 635 | if (!qe) |
632 | /* Older devices trees did not have an "fsl,qe" | 636 | return num_of_risc; |
633 | * compatible property, so we need to look for | ||
634 | * the QE node by name. | ||
635 | */ | ||
636 | qe = of_find_node_by_type(NULL, "qe"); | ||
637 | if (!qe) | ||
638 | return num_of_risc; | ||
639 | } | ||
640 | 637 | ||
641 | prop = of_get_property(qe, "fsl,qe-num-riscs", &size); | 638 | prop = of_get_property(qe, "fsl,qe-num-riscs", &size); |
642 | if (prop && size == sizeof(*prop)) | 639 | if (prop && size == sizeof(*prop)) |
@@ -650,37 +647,7 @@ EXPORT_SYMBOL(qe_get_num_of_risc); | |||
650 | 647 | ||
651 | unsigned int qe_get_num_of_snums(void) | 648 | unsigned int qe_get_num_of_snums(void) |
652 | { | 649 | { |
653 | struct device_node *qe; | 650 | return qe_num_of_snum; |
654 | int size; | ||
655 | unsigned int num_of_snums; | ||
656 | const u32 *prop; | ||
657 | |||
658 | num_of_snums = 28; /* The default number of snum for threads is 28 */ | ||
659 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | ||
660 | if (!qe) { | ||
661 | /* Older devices trees did not have an "fsl,qe" | ||
662 | * compatible property, so we need to look for | ||
663 | * the QE node by name. | ||
664 | */ | ||
665 | qe = of_find_node_by_type(NULL, "qe"); | ||
666 | if (!qe) | ||
667 | return num_of_snums; | ||
668 | } | ||
669 | |||
670 | prop = of_get_property(qe, "fsl,qe-num-snums", &size); | ||
671 | if (prop && size == sizeof(*prop)) { | ||
672 | num_of_snums = *prop; | ||
673 | if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { | ||
674 | /* No QE ever has fewer than 28 SNUMs */ | ||
675 | pr_err("QE: number of snum is invalid\n"); | ||
676 | of_node_put(qe); | ||
677 | return -EINVAL; | ||
678 | } | ||
679 | } | ||
680 | |||
681 | of_node_put(qe); | ||
682 | |||
683 | return num_of_snums; | ||
684 | } | 651 | } |
685 | EXPORT_SYMBOL(qe_get_num_of_snums); | 652 | EXPORT_SYMBOL(qe_get_num_of_snums); |
686 | 653 | ||
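
The SNUM bookkeeping above replaces the per-entry state struct with a plain u8 array plus a bitmap: allocation is find_first_zero_bit() followed by set_bit(), and freeing locates the value with memchr() and clears the corresponding bit. A standalone model of the same allocator (userspace C, fixed-size pool, no locking):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

#define NUM_SNUM 8

static const uint8_t snums[NUM_SNUM] = {
	0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D
};
static bool snum_used[NUM_SNUM];	/* stands in for the kernel bitmap */

static int get_snum(void)
{
	int i;

	for (i = 0; i < NUM_SNUM; i++)	/* find_first_zero_bit() */
		if (!snum_used[i]) {
			snum_used[i] = true;	/* set_bit() */
			return snums[i];
		}
	return -1;			/* pool exhausted */
}

static void put_snum(uint8_t snum)
{
	const uint8_t *p = memchr(snums, snum, NUM_SNUM);

	if (p)
		snum_used[p - snums] = false;	/* clear_bit() */
}

int main(void)
{
	int a = get_snum(), b = get_snum();

	printf("allocated 0x%02x and 0x%02x\n", (unsigned)a, (unsigned)b);
	put_snum((uint8_t)a);
	printf("after freeing 0x%02x, next alloc is 0x%02x\n",
	       (unsigned)a, (unsigned)get_snum());
	return 0;
}
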
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 31b8d002d855..b0dffb06c05d 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c | |||
@@ -198,7 +198,7 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd, | |||
198 | err = regulator_disable(domain->regulator); | 198 | err = regulator_disable(domain->regulator); |
199 | if (err) | 199 | if (err) |
200 | dev_err(domain->dev, | 200 | dev_err(domain->dev, |
201 | "failed to disable regulator: %d\n", ret); | 201 | "failed to disable regulator: %d\n", err); |
202 | /* Preserve earlier error code */ | 202 | /* Preserve earlier error code */ |
203 | ret = ret ?: err; | 203 | ret = ret ?: err; |
204 | } | 204 | } |
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c index 676f612f6488..50831ebf126a 100644 --- a/drivers/soc/imx/soc-imx-scu.c +++ b/drivers/soc/imx/soc-imx-scu.c | |||
@@ -27,6 +27,40 @@ struct imx_sc_msg_misc_get_soc_id { | |||
27 | } data; | 27 | } data; |
28 | } __packed; | 28 | } __packed; |
29 | 29 | ||
30 | struct imx_sc_msg_misc_get_soc_uid { | ||
31 | struct imx_sc_rpc_msg hdr; | ||
32 | u32 uid_low; | ||
33 | u32 uid_high; | ||
34 | } __packed; | ||
35 | |||
36 | static ssize_t soc_uid_show(struct device *dev, | ||
37 | struct device_attribute *attr, char *buf) | ||
38 | { | ||
39 | struct imx_sc_msg_misc_get_soc_uid msg; | ||
40 | struct imx_sc_rpc_msg *hdr = &msg.hdr; | ||
41 | u64 soc_uid; | ||
42 | int ret; | ||
43 | |||
44 | hdr->ver = IMX_SC_RPC_VERSION; | ||
45 | hdr->svc = IMX_SC_RPC_SVC_MISC; | ||
46 | hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID; | ||
47 | hdr->size = 1; | ||
48 | |||
49 | ret = imx_scu_call_rpc(soc_ipc_handle, &msg, false); | ||
50 | if (ret) { | ||
51 | pr_err("%s: get soc uid failed, ret %d\n", __func__, ret); | ||
52 | return ret; | ||
53 | } | ||
54 | |||
55 | soc_uid = msg.uid_high; | ||
56 | soc_uid <<= 32; | ||
57 | soc_uid |= msg.uid_low; | ||
58 | |||
59 | return sprintf(buf, "%016llX\n", soc_uid); | ||
60 | } | ||
61 | |||
62 | static DEVICE_ATTR_RO(soc_uid); | ||
63 | |||
30 | static int imx_scu_soc_id(void) | 64 | static int imx_scu_soc_id(void) |
31 | { | 65 | { |
32 | struct imx_sc_msg_misc_get_soc_id msg; | 66 | struct imx_sc_msg_misc_get_soc_id msg; |
@@ -102,6 +136,11 @@ static int imx_scu_soc_probe(struct platform_device *pdev) | |||
102 | goto free_revision; | 136 | goto free_revision; |
103 | } | 137 | } |
104 | 138 | ||
139 | ret = device_create_file(soc_device_to_device(soc_dev), | ||
140 | &dev_attr_soc_uid); | ||
141 | if (ret) | ||
142 | goto free_revision; | ||
143 | |||
105 | return 0; | 144 | return 0; |
106 | 145 | ||
107 | free_revision: | 146 | free_revision: |
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c index f924ae8c6514..b9831576dd25 100644 --- a/drivers/soc/imx/soc-imx8.c +++ b/drivers/soc/imx/soc-imx8.c | |||
@@ -16,6 +16,9 @@ | |||
16 | #define IMX8MQ_SW_INFO_B1 0x40 | 16 | #define IMX8MQ_SW_INFO_B1 0x40 |
17 | #define IMX8MQ_SW_MAGIC_B1 0xff0055aa | 17 | #define IMX8MQ_SW_MAGIC_B1 0xff0055aa |
18 | 18 | ||
19 | #define OCOTP_UID_LOW 0x410 | ||
20 | #define OCOTP_UID_HIGH 0x420 | ||
21 | |||
19 | /* Same as ANADIG_DIGPROG_IMX7D */ | 22 | /* Same as ANADIG_DIGPROG_IMX7D */ |
20 | #define ANADIG_DIGPROG_IMX8MM 0x800 | 23 | #define ANADIG_DIGPROG_IMX8MM 0x800 |
21 | 24 | ||
@@ -24,6 +27,16 @@ struct imx8_soc_data { | |||
24 | u32 (*soc_revision)(void); | 27 | u32 (*soc_revision)(void); |
25 | }; | 28 | }; |
26 | 29 | ||
30 | static u64 soc_uid; | ||
31 | |||
32 | static ssize_t soc_uid_show(struct device *dev, | ||
33 | struct device_attribute *attr, char *buf) | ||
34 | { | ||
35 | return sprintf(buf, "%016llX\n", soc_uid); | ||
36 | } | ||
37 | |||
38 | static DEVICE_ATTR_RO(soc_uid); | ||
39 | |||
27 | static u32 __init imx8mq_soc_revision(void) | 40 | static u32 __init imx8mq_soc_revision(void) |
28 | { | 41 | { |
29 | struct device_node *np; | 42 | struct device_node *np; |
@@ -42,6 +55,10 @@ static u32 __init imx8mq_soc_revision(void) | |||
42 | if (magic == IMX8MQ_SW_MAGIC_B1) | 55 | if (magic == IMX8MQ_SW_MAGIC_B1) |
43 | rev = REV_B1; | 56 | rev = REV_B1; |
44 | 57 | ||
58 | soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); | ||
59 | soc_uid <<= 32; | ||
60 | soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); | ||
61 | |||
45 | iounmap(ocotp_base); | 62 | iounmap(ocotp_base); |
46 | 63 | ||
47 | out: | 64 | out: |
@@ -49,6 +66,26 @@ out: | |||
49 | return rev; | 66 | return rev; |
50 | } | 67 | } |
51 | 68 | ||
69 | static void __init imx8mm_soc_uid(void) | ||
70 | { | ||
71 | void __iomem *ocotp_base; | ||
72 | struct device_node *np; | ||
73 | |||
74 | np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp"); | ||
75 | if (!np) | ||
76 | return; | ||
77 | |||
78 | ocotp_base = of_iomap(np, 0); | ||
79 | WARN_ON(!ocotp_base); | ||
80 | |||
81 | soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); | ||
82 | soc_uid <<= 32; | ||
83 | soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); | ||
84 | |||
85 | iounmap(ocotp_base); | ||
86 | of_node_put(np); | ||
87 | } | ||
88 | |||
52 | static u32 __init imx8mm_soc_revision(void) | 89 | static u32 __init imx8mm_soc_revision(void) |
53 | { | 90 | { |
54 | struct device_node *np; | 91 | struct device_node *np; |
@@ -66,6 +103,9 @@ static u32 __init imx8mm_soc_revision(void) | |||
66 | 103 | ||
67 | iounmap(anatop_base); | 104 | iounmap(anatop_base); |
68 | of_node_put(np); | 105 | of_node_put(np); |
106 | |||
107 | imx8mm_soc_uid(); | ||
108 | |||
69 | return rev; | 109 | return rev; |
70 | } | 110 | } |
71 | 111 | ||
@@ -140,6 +180,11 @@ static int __init imx8_soc_init(void) | |||
140 | goto free_rev; | 180 | goto free_rev; |
141 | } | 181 | } |
142 | 182 | ||
183 | ret = device_create_file(soc_device_to_device(soc_dev), | ||
184 | &dev_attr_soc_uid); | ||
185 | if (ret) | ||
186 | goto free_rev; | ||
187 | |||
143 | if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) | 188 | if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) |
144 | platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); | 189 | platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); |
145 | 190 | ||
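
Both the SCU RPC path and the OCOTP path above build the 64-bit unique ID the same way: read the high and low 32-bit words, combine them as (hi << 32) | lo, and print the result as sixteen upper-case hex digits. The combination in isolation, with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Combine two 32-bit UID words into the value printed by soc_uid_show().
 * The input values below are illustrative only. */
static uint64_t make_uid(uint32_t hi, uint32_t lo)
{
	uint64_t uid = hi;

	uid <<= 32;
	uid |= lo;
	return uid;
}

int main(void)
{
	uint64_t uid = make_uid(0x12345678u, 0x9ABCDEF0u);

	printf("%016llX\n", (unsigned long long)uid);
	return 0;
}
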
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index ff9fef5a032b..7aa0517ff2f3 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c | |||
@@ -136,7 +136,7 @@ static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code, | |||
136 | return 0; | 136 | return 0; |
137 | } | 137 | } |
138 | 138 | ||
139 | int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset) | 139 | int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value) |
140 | { | 140 | { |
141 | u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) | | 141 | u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) | |
142 | (subsys << CMDQ_SUBSYS_SHIFT); | 142 | (subsys << CMDQ_SUBSYS_SHIFT); |
@@ -145,8 +145,8 @@ int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset) | |||
145 | } | 145 | } |
146 | EXPORT_SYMBOL(cmdq_pkt_write); | 146 | EXPORT_SYMBOL(cmdq_pkt_write); |
147 | 147 | ||
148 | int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, | 148 | int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, |
149 | u32 subsys, u32 offset, u32 mask) | 149 | u16 offset, u32 value, u32 mask) |
150 | { | 150 | { |
151 | u32 offset_mask = offset; | 151 | u32 offset_mask = offset; |
152 | int err = 0; | 152 | int err = 0; |
@@ -161,7 +161,7 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, | |||
161 | } | 161 | } |
162 | EXPORT_SYMBOL(cmdq_pkt_write_mask); | 162 | EXPORT_SYMBOL(cmdq_pkt_write_mask); |
163 | 163 | ||
164 | int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event) | 164 | int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event) |
165 | { | 165 | { |
166 | u32 arg_b; | 166 | u32 arg_b; |
167 | 167 | ||
@@ -181,7 +181,7 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event) | |||
181 | } | 181 | } |
182 | EXPORT_SYMBOL(cmdq_pkt_wfe); | 182 | EXPORT_SYMBOL(cmdq_pkt_wfe); |
183 | 183 | ||
184 | int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event) | 184 | int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) |
185 | { | 185 | { |
186 | if (event >= CMDQ_MAX_EVENT) | 186 | if (event >= CMDQ_MAX_EVENT) |
187 | return -EINVAL; | 187 | return -EINVAL; |
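
cmdq_pkt_write() and friends now take narrower types and a (subsys, offset, value) argument order, but the register address is still packed into arg_a the same way: the offset masked into the low bits and the subsys ID shifted into the high bits. A standalone sketch of that packing; the mask and shift values are assumptions chosen only to make the example run, not the driver's actual CMDQ_ARG_A_WRITE_MASK / CMDQ_SUBSYS_SHIFT constants:

#include <stdint.h>
#include <stdio.h>

/* Assumed constants for illustration; the real ones live in the
 * mtk-cmdq headers. */
#define ARG_A_WRITE_MASK 0xffffu
#define SUBSYS_SHIFT     16

static uint32_t pack_arg_a(uint8_t subsys, uint16_t offset)
{
	return (offset & ARG_A_WRITE_MASK) | ((uint32_t)subsys << SUBSYS_SHIFT);
}

int main(void)
{
	/* hypothetical subsys 0x14, register offset 0x0040 */
	printf("arg_a = 0x%08x\n", pack_arg_a(0x14, 0x0040));
	return 0;
}
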
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index a6d1bfb17279..661e47acc354 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig | |||
@@ -175,6 +175,14 @@ config QCOM_SMSM | |||
175 | Say yes here to support the Qualcomm Shared Memory State Machine. | 175 | Say yes here to support the Qualcomm Shared Memory State Machine. |
176 | The state machine is represented by bits in shared memory. | 176 | The state machine is represented by bits in shared memory. |
177 | 177 | ||
178 | config QCOM_SOCINFO | ||
179 | tristate "Qualcomm socinfo driver" | ||
180 | depends on QCOM_SMEM | ||
181 | select SOC_BUS | ||
182 | help | ||
183 | Say yes here to support the Qualcomm socinfo driver, providing | ||
184 | information about the SoC to user space. | ||
185 | |||
178 | config QCOM_WCNSS_CTRL | 186 | config QCOM_WCNSS_CTRL |
179 | tristate "Qualcomm WCNSS control driver" | 187 | tristate "Qualcomm WCNSS control driver" |
180 | depends on ARCH_QCOM || COMPILE_TEST | 188 | depends on ARCH_QCOM || COMPILE_TEST |
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index eeb088beb15f..162788701a77 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile | |||
@@ -18,6 +18,7 @@ obj-$(CONFIG_QCOM_SMEM) += smem.o | |||
18 | obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o | 18 | obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o |
19 | obj-$(CONFIG_QCOM_SMP2P) += smp2p.o | 19 | obj-$(CONFIG_QCOM_SMP2P) += smp2p.o |
20 | obj-$(CONFIG_QCOM_SMSM) += smsm.o | 20 | obj-$(CONFIG_QCOM_SMSM) += smsm.o |
21 | obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o | ||
21 | obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o | 22 | obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o |
22 | obj-$(CONFIG_QCOM_APR) += apr.o | 23 | obj-$(CONFIG_QCOM_APR) += apr.o |
23 | obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o | 24 | obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o |
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index 5f885196f4d0..33a27e6c6d67 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c | |||
@@ -10,6 +10,8 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
12 | #include <linux/pm_domain.h> | 12 | #include <linux/pm_domain.h> |
13 | #include <linux/thermal.h> | ||
14 | #include <linux/slab.h> | ||
13 | 15 | ||
14 | #define QMP_DESC_MAGIC 0x0 | 16 | #define QMP_DESC_MAGIC 0x0 |
15 | #define QMP_DESC_VERSION 0x4 | 17 | #define QMP_DESC_VERSION 0x4 |
@@ -40,6 +42,17 @@ | |||
40 | /* 64 bytes is enough to store the requests and provides padding to 4 bytes */ | 42 | /* 64 bytes is enough to store the requests and provides padding to 4 bytes */ |
41 | #define QMP_MSG_LEN 64 | 43 | #define QMP_MSG_LEN 64 |
42 | 44 | ||
45 | #define QMP_NUM_COOLING_RESOURCES 2 | ||
46 | |||
47 | static bool qmp_cdev_init_state = 1; | ||
48 | |||
49 | struct qmp_cooling_device { | ||
50 | struct thermal_cooling_device *cdev; | ||
51 | struct qmp *qmp; | ||
52 | char *name; | ||
53 | bool state; | ||
54 | }; | ||
55 | |||
43 | /** | 56 | /** |
44 | * struct qmp - driver state for QMP implementation | 57 | * struct qmp - driver state for QMP implementation |
45 | * @msgram: iomem referencing the message RAM used for communication | 58 | * @msgram: iomem referencing the message RAM used for communication |
@@ -69,6 +82,7 @@ struct qmp { | |||
69 | 82 | ||
70 | struct clk_hw qdss_clk; | 83 | struct clk_hw qdss_clk; |
71 | struct genpd_onecell_data pd_data; | 84 | struct genpd_onecell_data pd_data; |
85 | struct qmp_cooling_device *cooling_devs; | ||
72 | }; | 86 | }; |
73 | 87 | ||
74 | struct qmp_pd { | 88 | struct qmp_pd { |
@@ -385,6 +399,118 @@ static void qmp_pd_remove(struct qmp *qmp) | |||
385 | pm_genpd_remove(data->domains[i]); | 399 | pm_genpd_remove(data->domains[i]); |
386 | } | 400 | } |
387 | 401 | ||
402 | static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev, | ||
403 | unsigned long *state) | ||
404 | { | ||
405 | *state = qmp_cdev_init_state; | ||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev, | ||
410 | unsigned long *state) | ||
411 | { | ||
412 | struct qmp_cooling_device *qmp_cdev = cdev->devdata; | ||
413 | |||
414 | *state = qmp_cdev->state; | ||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev, | ||
419 | unsigned long state) | ||
420 | { | ||
421 | struct qmp_cooling_device *qmp_cdev = cdev->devdata; | ||
422 | char buf[QMP_MSG_LEN] = {}; | ||
423 | bool cdev_state; | ||
424 | int ret; | ||
425 | |||
426 | /* Normalize state */ | ||
427 | cdev_state = !!state; | ||
428 | |||
429 | if (qmp_cdev->state == state) | ||
430 | return 0; | ||
431 | |||
432 | snprintf(buf, sizeof(buf), | ||
433 | "{class: volt_flr, event:zero_temp, res:%s, value:%s}", | ||
434 | qmp_cdev->name, | ||
435 | cdev_state ? "off" : "on"); | ||
436 | |||
437 | ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf)); | ||
438 | |||
439 | if (!ret) | ||
440 | qmp_cdev->state = cdev_state; | ||
441 | |||
442 | return ret; | ||
443 | } | ||
444 | |||
445 | static struct thermal_cooling_device_ops qmp_cooling_device_ops = { | ||
446 | .get_max_state = qmp_cdev_get_max_state, | ||
447 | .get_cur_state = qmp_cdev_get_cur_state, | ||
448 | .set_cur_state = qmp_cdev_set_cur_state, | ||
449 | }; | ||
450 | |||
451 | static int qmp_cooling_device_add(struct qmp *qmp, | ||
452 | struct qmp_cooling_device *qmp_cdev, | ||
453 | struct device_node *node) | ||
454 | { | ||
455 | char *cdev_name = (char *)node->name; | ||
456 | |||
457 | qmp_cdev->qmp = qmp; | ||
458 | qmp_cdev->state = qmp_cdev_init_state; | ||
459 | qmp_cdev->name = cdev_name; | ||
460 | qmp_cdev->cdev = devm_thermal_of_cooling_device_register | ||
461 | (qmp->dev, node, | ||
462 | cdev_name, | ||
463 | qmp_cdev, &qmp_cooling_device_ops); | ||
464 | |||
465 | if (IS_ERR(qmp_cdev->cdev)) | ||
466 | dev_err(qmp->dev, "unable to register %s cooling device\n", | ||
467 | cdev_name); | ||
468 | |||
469 | return PTR_ERR_OR_ZERO(qmp_cdev->cdev); | ||
470 | } | ||
471 | |||
472 | static int qmp_cooling_devices_register(struct qmp *qmp) | ||
473 | { | ||
474 | struct device_node *np, *child; | ||
475 | int count = 0; | ||
476 | int ret; | ||
477 | |||
478 | np = qmp->dev->of_node; | ||
479 | |||
480 | qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES, | ||
481 | sizeof(*qmp->cooling_devs), | ||
482 | GFP_KERNEL); | ||
483 | |||
484 | if (!qmp->cooling_devs) | ||
485 | return -ENOMEM; | ||
486 | |||
487 | for_each_available_child_of_node(np, child) { | ||
488 | if (!of_find_property(child, "#cooling-cells", NULL)) | ||
489 | continue; | ||
490 | ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], | ||
491 | child); | ||
492 | if (ret) | ||
493 | goto unroll; | ||
494 | } | ||
495 | |||
496 | return 0; | ||
497 | |||
498 | unroll: | ||
499 | while (--count >= 0) | ||
500 | thermal_cooling_device_unregister | ||
501 | (qmp->cooling_devs[count].cdev); | ||
502 | |||
503 | return ret; | ||
504 | } | ||
505 | |||
506 | static void qmp_cooling_devices_remove(struct qmp *qmp) | ||
507 | { | ||
508 | int i; | ||
509 | |||
510 | for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++) | ||
511 | thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev); | ||
512 | } | ||
513 | |||
388 | static int qmp_probe(struct platform_device *pdev) | 514 | static int qmp_probe(struct platform_device *pdev) |
389 | { | 515 | { |
390 | struct resource *res; | 516 | struct resource *res; |
@@ -433,6 +559,10 @@ static int qmp_probe(struct platform_device *pdev) | |||
433 | if (ret) | 559 | if (ret) |
434 | goto err_remove_qdss_clk; | 560 | goto err_remove_qdss_clk; |
435 | 561 | ||
562 | ret = qmp_cooling_devices_register(qmp); | ||
563 | if (ret) | ||
564 | dev_err(&pdev->dev, "failed to register aoss cooling devices\n"); | ||
565 | |||
436 | platform_set_drvdata(pdev, qmp); | 566 | platform_set_drvdata(pdev, qmp); |
437 | 567 | ||
438 | return 0; | 568 | return 0; |
@@ -453,6 +583,7 @@ static int qmp_remove(struct platform_device *pdev) | |||
453 | 583 | ||
454 | qmp_qdss_clk_remove(qmp); | 584 | qmp_qdss_clk_remove(qmp); |
455 | qmp_pd_remove(qmp); | 585 | qmp_pd_remove(qmp); |
586 | qmp_cooling_devices_remove(qmp); | ||
456 | 587 | ||
457 | qmp_close(qmp); | 588 | qmp_close(qmp); |
458 | mbox_free_channel(qmp->mbox_chan); | 589 | mbox_free_channel(qmp->mbox_chan); |
@@ -461,7 +592,9 @@ static int qmp_remove(struct platform_device *pdev) | |||
461 | } | 592 | } |
462 | 593 | ||
463 | static const struct of_device_id qmp_dt_match[] = { | 594 | static const struct of_device_id qmp_dt_match[] = { |
595 | { .compatible = "qcom,sc7180-aoss-qmp", }, | ||
464 | { .compatible = "qcom,sdm845-aoss-qmp", }, | 596 | { .compatible = "qcom,sdm845-aoss-qmp", }, |
597 | { .compatible = "qcom,sm8150-aoss-qmp", }, | ||
465 | {} | 598 | {} |
466 | }; | 599 | }; |
467 | MODULE_DEVICE_TABLE(of, qmp_dt_match); | 600 | MODULE_DEVICE_TABLE(of, qmp_dt_match); |
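qmp_cdev_set_cur_state() above encodes each cooling request as a fixed-size text message sent over QMP, with state 1 mapped to "off" and state 0 to "on" for the named resource. A minimal sketch of the string it produces, assuming a hypothetical cooling child node named "cx"; QMP_MSG_LEN is the 64-byte buffer size defined earlier in this file.

	char buf[QMP_MSG_LEN] = {};
	bool cdev_state = true;	/* requested cooling state, normalized with !! */

	/* "cx" stands in for the DT child node name; it is not taken from this patch */
	snprintf(buf, sizeof(buf),
		 "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
		 "cx", cdev_state ? "off" : "on");
	/* buf now holds "{class: volt_flr, event:zero_temp, res:cx, value:off}" */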
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index f27c00d82ae4..28c19bcb2f20 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c | |||
@@ -84,7 +84,7 @@ | |||
84 | #define SMEM_GLOBAL_HOST 0xfffe | 84 | #define SMEM_GLOBAL_HOST 0xfffe |
85 | 85 | ||
86 | /* Max number of processors/hosts in a system */ | 86 | /* Max number of processors/hosts in a system */ |
87 | #define SMEM_HOST_COUNT 10 | 87 | #define SMEM_HOST_COUNT 11 |
88 | 88 | ||
89 | /** | 89 | /** |
90 | * struct smem_proc_comm - proc_comm communication struct (legacy) | 90 | * struct smem_proc_comm - proc_comm communication struct (legacy) |
@@ -268,6 +268,7 @@ struct qcom_smem { | |||
268 | struct smem_partition_header *partitions[SMEM_HOST_COUNT]; | 268 | struct smem_partition_header *partitions[SMEM_HOST_COUNT]; |
269 | size_t cacheline[SMEM_HOST_COUNT]; | 269 | size_t cacheline[SMEM_HOST_COUNT]; |
270 | u32 item_count; | 270 | u32 item_count; |
271 | struct platform_device *socinfo; | ||
271 | 272 | ||
272 | unsigned num_regions; | 273 | unsigned num_regions; |
273 | struct smem_region regions[]; | 274 | struct smem_region regions[]; |
@@ -963,11 +964,19 @@ static int qcom_smem_probe(struct platform_device *pdev) | |||
963 | 964 | ||
964 | __smem = smem; | 965 | __smem = smem; |
965 | 966 | ||
967 | smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo", | ||
968 | PLATFORM_DEVID_NONE, NULL, | ||
969 | 0); | ||
970 | if (IS_ERR(smem->socinfo)) | ||
971 | dev_dbg(&pdev->dev, "failed to register socinfo device\n"); | ||
972 | |||
966 | return 0; | 973 | return 0; |
967 | } | 974 | } |
968 | 975 | ||
969 | static int qcom_smem_remove(struct platform_device *pdev) | 976 | static int qcom_smem_remove(struct platform_device *pdev) |
970 | { | 977 | { |
978 | platform_device_unregister(__smem->socinfo); | ||
979 | |||
971 | hwspin_lock_free(__smem->hwlock); | 980 | hwspin_lock_free(__smem->hwlock); |
972 | __smem = NULL; | 981 | __smem = NULL; |
973 | 982 | ||
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c new file mode 100644 index 000000000000..a39ea5061dc5 --- /dev/null +++ b/drivers/soc/qcom/socinfo.c | |||
@@ -0,0 +1,476 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved. | ||
4 | * Copyright (c) 2017-2019, Linaro Ltd. | ||
5 | */ | ||
6 | |||
7 | #include <linux/debugfs.h> | ||
8 | #include <linux/err.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/platform_device.h> | ||
11 | #include <linux/random.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/soc/qcom/smem.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/sys_soc.h> | ||
16 | #include <linux/types.h> | ||
17 | |||
18 | /* | ||
19 | * SoC version type with major number in the upper 16 bits and minor | ||
20 | * number in the lower 16 bits. | ||
21 | */ | ||
22 | #define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff) | ||
23 | #define SOCINFO_MINOR(ver) ((ver) & 0xffff) | ||
24 | #define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff)) | ||
25 | |||
26 | #define SMEM_SOCINFO_BUILD_ID_LENGTH 32 | ||
27 | |||
28 | /* | ||
29 | * SMEM item id, used to acquire handles to respective | ||
30 | * SMEM region. | ||
31 | */ | ||
32 | #define SMEM_HW_SW_BUILD_ID 137 | ||
33 | |||
34 | #ifdef CONFIG_DEBUG_FS | ||
35 | #define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32 | ||
36 | #define SMEM_IMAGE_VERSION_SIZE 4096 | ||
37 | #define SMEM_IMAGE_VERSION_NAME_SIZE 75 | ||
38 | #define SMEM_IMAGE_VERSION_VARIANT_SIZE 20 | ||
39 | #define SMEM_IMAGE_VERSION_OEM_SIZE 32 | ||
40 | |||
41 | /* | ||
42 | * SMEM Image table indices | ||
43 | */ | ||
44 | #define SMEM_IMAGE_TABLE_BOOT_INDEX 0 | ||
45 | #define SMEM_IMAGE_TABLE_TZ_INDEX 1 | ||
46 | #define SMEM_IMAGE_TABLE_RPM_INDEX 3 | ||
47 | #define SMEM_IMAGE_TABLE_APPS_INDEX 10 | ||
48 | #define SMEM_IMAGE_TABLE_MPSS_INDEX 11 | ||
49 | #define SMEM_IMAGE_TABLE_ADSP_INDEX 12 | ||
50 | #define SMEM_IMAGE_TABLE_CNSS_INDEX 13 | ||
51 | #define SMEM_IMAGE_TABLE_VIDEO_INDEX 14 | ||
52 | #define SMEM_IMAGE_VERSION_TABLE 469 | ||
53 | |||
54 | /* | ||
55 | * SMEM Image table names | ||
56 | */ | ||
57 | static const char *const socinfo_image_names[] = { | ||
58 | [SMEM_IMAGE_TABLE_ADSP_INDEX] = "adsp", | ||
59 | [SMEM_IMAGE_TABLE_APPS_INDEX] = "apps", | ||
60 | [SMEM_IMAGE_TABLE_BOOT_INDEX] = "boot", | ||
61 | [SMEM_IMAGE_TABLE_CNSS_INDEX] = "cnss", | ||
62 | [SMEM_IMAGE_TABLE_MPSS_INDEX] = "mpss", | ||
63 | [SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm", | ||
64 | [SMEM_IMAGE_TABLE_TZ_INDEX] = "tz", | ||
65 | [SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video", | ||
66 | }; | ||
67 | |||
68 | static const char *const pmic_models[] = { | ||
69 | [0] = "Unknown PMIC model", | ||
70 | [9] = "PM8994", | ||
71 | [11] = "PM8916", | ||
72 | [13] = "PM8058", | ||
73 | [14] = "PM8028", | ||
74 | [15] = "PM8901", | ||
75 | [16] = "PM8027", | ||
76 | [17] = "ISL9519", | ||
77 | [18] = "PM8921", | ||
78 | [19] = "PM8018", | ||
79 | [20] = "PM8015", | ||
80 | [21] = "PM8014", | ||
81 | [22] = "PM8821", | ||
82 | [23] = "PM8038", | ||
83 | [24] = "PM8922", | ||
84 | [25] = "PM8917", | ||
85 | }; | ||
86 | #endif /* CONFIG_DEBUG_FS */ | ||
87 | |||
88 | /* Socinfo SMEM item structure */ | ||
89 | struct socinfo { | ||
90 | __le32 fmt; | ||
91 | __le32 id; | ||
92 | __le32 ver; | ||
93 | char build_id[SMEM_SOCINFO_BUILD_ID_LENGTH]; | ||
94 | /* Version 2 */ | ||
95 | __le32 raw_id; | ||
96 | __le32 raw_ver; | ||
97 | /* Version 3 */ | ||
98 | __le32 hw_plat; | ||
99 | /* Version 4 */ | ||
100 | __le32 plat_ver; | ||
101 | /* Version 5 */ | ||
102 | __le32 accessory_chip; | ||
103 | /* Version 6 */ | ||
104 | __le32 hw_plat_subtype; | ||
105 | /* Version 7 */ | ||
106 | __le32 pmic_model; | ||
107 | __le32 pmic_die_rev; | ||
108 | /* Version 8 */ | ||
109 | __le32 pmic_model_1; | ||
110 | __le32 pmic_die_rev_1; | ||
111 | __le32 pmic_model_2; | ||
112 | __le32 pmic_die_rev_2; | ||
113 | /* Version 9 */ | ||
114 | __le32 foundry_id; | ||
115 | /* Version 10 */ | ||
116 | __le32 serial_num; | ||
117 | /* Version 11 */ | ||
118 | __le32 num_pmics; | ||
119 | __le32 pmic_array_offset; | ||
120 | /* Version 12 */ | ||
121 | __le32 chip_family; | ||
122 | __le32 raw_device_family; | ||
123 | __le32 raw_device_num; | ||
124 | }; | ||
125 | |||
126 | #ifdef CONFIG_DEBUG_FS | ||
127 | struct socinfo_params { | ||
128 | u32 raw_device_family; | ||
129 | u32 hw_plat_subtype; | ||
130 | u32 accessory_chip; | ||
131 | u32 raw_device_num; | ||
132 | u32 chip_family; | ||
133 | u32 foundry_id; | ||
134 | u32 plat_ver; | ||
135 | u32 raw_ver; | ||
136 | u32 hw_plat; | ||
137 | u32 fmt; | ||
138 | }; | ||
139 | |||
140 | struct smem_image_version { | ||
141 | char name[SMEM_IMAGE_VERSION_NAME_SIZE]; | ||
142 | char variant[SMEM_IMAGE_VERSION_VARIANT_SIZE]; | ||
143 | char pad; | ||
144 | char oem[SMEM_IMAGE_VERSION_OEM_SIZE]; | ||
145 | }; | ||
146 | #endif /* CONFIG_DEBUG_FS */ | ||
147 | |||
148 | struct qcom_socinfo { | ||
149 | struct soc_device *soc_dev; | ||
150 | struct soc_device_attribute attr; | ||
151 | #ifdef CONFIG_DEBUG_FS | ||
152 | struct dentry *dbg_root; | ||
153 | struct socinfo_params info; | ||
154 | #endif /* CONFIG_DEBUG_FS */ | ||
155 | }; | ||
156 | |||
157 | struct soc_id { | ||
158 | unsigned int id; | ||
159 | const char *name; | ||
160 | }; | ||
161 | |||
162 | static const struct soc_id soc_id[] = { | ||
163 | { 87, "MSM8960" }, | ||
164 | { 109, "APQ8064" }, | ||
165 | { 122, "MSM8660A" }, | ||
166 | { 123, "MSM8260A" }, | ||
167 | { 124, "APQ8060A" }, | ||
168 | { 126, "MSM8974" }, | ||
169 | { 130, "MPQ8064" }, | ||
170 | { 138, "MSM8960AB" }, | ||
171 | { 139, "APQ8060AB" }, | ||
172 | { 140, "MSM8260AB" }, | ||
173 | { 141, "MSM8660AB" }, | ||
174 | { 178, "APQ8084" }, | ||
175 | { 184, "APQ8074" }, | ||
176 | { 185, "MSM8274" }, | ||
177 | { 186, "MSM8674" }, | ||
178 | { 194, "MSM8974PRO" }, | ||
179 | { 206, "MSM8916" }, | ||
180 | { 208, "APQ8074-AA" }, | ||
181 | { 209, "APQ8074-AB" }, | ||
182 | { 210, "APQ8074PRO" }, | ||
183 | { 211, "MSM8274-AA" }, | ||
184 | { 212, "MSM8274-AB" }, | ||
185 | { 213, "MSM8274PRO" }, | ||
186 | { 214, "MSM8674-AA" }, | ||
187 | { 215, "MSM8674-AB" }, | ||
188 | { 216, "MSM8674PRO" }, | ||
189 | { 217, "MSM8974-AA" }, | ||
190 | { 218, "MSM8974-AB" }, | ||
191 | { 246, "MSM8996" }, | ||
192 | { 247, "APQ8016" }, | ||
193 | { 248, "MSM8216" }, | ||
194 | { 249, "MSM8116" }, | ||
195 | { 250, "MSM8616" }, | ||
196 | { 291, "APQ8096" }, | ||
197 | { 305, "MSM8996SG" }, | ||
198 | { 310, "MSM8996AU" }, | ||
199 | { 311, "APQ8096AU" }, | ||
200 | { 312, "APQ8096SG" }, | ||
201 | }; | ||
202 | |||
203 | static const char *socinfo_machine(struct device *dev, unsigned int id) | ||
204 | { | ||
205 | int idx; | ||
206 | |||
207 | for (idx = 0; idx < ARRAY_SIZE(soc_id); idx++) { | ||
208 | if (soc_id[idx].id == id) | ||
209 | return soc_id[idx].name; | ||
210 | } | ||
211 | |||
212 | return NULL; | ||
213 | } | ||
214 | |||
215 | #ifdef CONFIG_DEBUG_FS | ||
216 | |||
217 | #define QCOM_OPEN(name, _func) \ | ||
218 | static int qcom_open_##name(struct inode *inode, struct file *file) \ | ||
219 | { \ | ||
220 | return single_open(file, _func, inode->i_private); \ | ||
221 | } \ | ||
222 | \ | ||
223 | static const struct file_operations qcom_ ##name## _ops = { \ | ||
224 | .open = qcom_open_##name, \ | ||
225 | .read = seq_read, \ | ||
226 | .llseek = seq_lseek, \ | ||
227 | .release = single_release, \ | ||
228 | } | ||
229 | |||
230 | #define DEBUGFS_ADD(info, name) \ | ||
231 | debugfs_create_file(__stringify(name), 0400, \ | ||
232 | qcom_socinfo->dbg_root, \ | ||
233 | info, &qcom_ ##name## _ops) | ||
234 | |||
235 | |||
236 | static int qcom_show_build_id(struct seq_file *seq, void *p) | ||
237 | { | ||
238 | struct socinfo *socinfo = seq->private; | ||
239 | |||
240 | seq_printf(seq, "%s\n", socinfo->build_id); | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static int qcom_show_pmic_model(struct seq_file *seq, void *p) | ||
246 | { | ||
247 | struct socinfo *socinfo = seq->private; | ||
248 | int model = SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_model)); | ||
249 | |||
250 | if (model < 0 || model >= ARRAY_SIZE(pmic_models) || !pmic_models[model]) | ||
251 | return -EINVAL; | ||
252 | |||
253 | seq_printf(seq, "%s\n", pmic_models[model]); | ||
254 | |||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | static int qcom_show_pmic_die_revision(struct seq_file *seq, void *p) | ||
259 | { | ||
260 | struct socinfo *socinfo = seq->private; | ||
261 | |||
262 | seq_printf(seq, "%u.%u\n", | ||
263 | SOCINFO_MAJOR(le32_to_cpu(socinfo->pmic_die_rev)), | ||
264 | SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_die_rev))); | ||
265 | |||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | QCOM_OPEN(build_id, qcom_show_build_id); | ||
270 | QCOM_OPEN(pmic_model, qcom_show_pmic_model); | ||
271 | QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision); | ||
272 | |||
273 | #define DEFINE_IMAGE_OPS(type) \ | ||
274 | static int show_image_##type(struct seq_file *seq, void *p) \ | ||
275 | { \ | ||
276 | struct smem_image_version *image_version = seq->private; \ | ||
277 | seq_puts(seq, image_version->type); \ | ||
278 | seq_puts(seq, "\n"); \ | ||
279 | return 0; \ | ||
280 | } \ | ||
281 | static int open_image_##type(struct inode *inode, struct file *file) \ | ||
282 | { \ | ||
283 | return single_open(file, show_image_##type, inode->i_private); \ | ||
284 | } \ | ||
285 | \ | ||
286 | static const struct file_operations qcom_image_##type##_ops = { \ | ||
287 | .open = open_image_##type, \ | ||
288 | .read = seq_read, \ | ||
289 | .llseek = seq_lseek, \ | ||
290 | .release = single_release, \ | ||
291 | } | ||
292 | |||
293 | DEFINE_IMAGE_OPS(name); | ||
294 | DEFINE_IMAGE_OPS(variant); | ||
295 | DEFINE_IMAGE_OPS(oem); | ||
296 | |||
297 | static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, | ||
298 | struct socinfo *info) | ||
299 | { | ||
300 | struct smem_image_version *versions; | ||
301 | struct dentry *dentry; | ||
302 | size_t size; | ||
303 | int i; | ||
304 | |||
305 | qcom_socinfo->dbg_root = debugfs_create_dir("qcom_socinfo", NULL); | ||
306 | |||
307 | qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt); | ||
308 | |||
309 | switch (qcom_socinfo->info.fmt) { | ||
310 | case SOCINFO_VERSION(0, 12): | ||
311 | qcom_socinfo->info.chip_family = | ||
312 | __le32_to_cpu(info->chip_family); | ||
313 | qcom_socinfo->info.raw_device_family = | ||
314 | __le32_to_cpu(info->raw_device_family); | ||
315 | qcom_socinfo->info.raw_device_num = | ||
316 | __le32_to_cpu(info->raw_device_num); | ||
317 | |||
318 | debugfs_create_x32("chip_family", 0400, qcom_socinfo->dbg_root, | ||
319 | &qcom_socinfo->info.chip_family); | ||
320 | debugfs_create_x32("raw_device_family", 0400, | ||
321 | qcom_socinfo->dbg_root, | ||
322 | &qcom_socinfo->info.raw_device_family); | ||
323 | debugfs_create_x32("raw_device_number", 0400, | ||
324 | qcom_socinfo->dbg_root, | ||
325 | &qcom_socinfo->info.raw_device_num); | ||
326 | /* Fall through */ | ||
327 | case SOCINFO_VERSION(0, 11): | ||
328 | case SOCINFO_VERSION(0, 10): | ||
329 | case SOCINFO_VERSION(0, 9): | ||
330 | qcom_socinfo->info.foundry_id = __le32_to_cpu(info->foundry_id); | ||
331 | |||
332 | debugfs_create_u32("foundry_id", 0400, qcom_socinfo->dbg_root, | ||
333 | &qcom_socinfo->info.foundry_id); | ||
334 | /* Fall through */ | ||
335 | case SOCINFO_VERSION(0, 8): | ||
336 | case SOCINFO_VERSION(0, 7): | ||
337 | DEBUGFS_ADD(info, pmic_model); | ||
338 | DEBUGFS_ADD(info, pmic_die_rev); | ||
339 | /* Fall through */ | ||
340 | case SOCINFO_VERSION(0, 6): | ||
341 | qcom_socinfo->info.hw_plat_subtype = | ||
342 | __le32_to_cpu(info->hw_plat_subtype); | ||
343 | |||
344 | debugfs_create_u32("hardware_platform_subtype", 0400, | ||
345 | qcom_socinfo->dbg_root, | ||
346 | &qcom_socinfo->info.hw_plat_subtype); | ||
347 | /* Fall through */ | ||
348 | case SOCINFO_VERSION(0, 5): | ||
349 | qcom_socinfo->info.accessory_chip = | ||
350 | __le32_to_cpu(info->accessory_chip); | ||
351 | |||
352 | debugfs_create_u32("accessory_chip", 0400, | ||
353 | qcom_socinfo->dbg_root, | ||
354 | &qcom_socinfo->info.accessory_chip); | ||
355 | /* Fall through */ | ||
356 | case SOCINFO_VERSION(0, 4): | ||
357 | qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver); | ||
358 | |||
359 | debugfs_create_u32("platform_version", 0400, | ||
360 | qcom_socinfo->dbg_root, | ||
361 | &qcom_socinfo->info.plat_ver); | ||
362 | /* Fall through */ | ||
363 | case SOCINFO_VERSION(0, 3): | ||
364 | qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat); | ||
365 | |||
366 | debugfs_create_u32("hardware_platform", 0400, | ||
367 | qcom_socinfo->dbg_root, | ||
368 | &qcom_socinfo->info.hw_plat); | ||
369 | /* Fall through */ | ||
370 | case SOCINFO_VERSION(0, 2): | ||
371 | qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver); | ||
372 | |||
373 | debugfs_create_u32("raw_version", 0400, qcom_socinfo->dbg_root, | ||
374 | &qcom_socinfo->info.raw_ver); | ||
375 | /* Fall through */ | ||
376 | case SOCINFO_VERSION(0, 1): | ||
377 | DEBUGFS_ADD(info, build_id); | ||
378 | break; | ||
379 | } | ||
380 | |||
381 | versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_IMAGE_VERSION_TABLE, | ||
382 | &size); | ||
383 | |||
384 | for (i = 0; i < ARRAY_SIZE(socinfo_image_names); i++) { | ||
385 | if (!socinfo_image_names[i]) | ||
386 | continue; | ||
387 | |||
388 | dentry = debugfs_create_dir(socinfo_image_names[i], | ||
389 | qcom_socinfo->dbg_root); | ||
390 | debugfs_create_file("name", 0400, dentry, &versions[i], | ||
391 | &qcom_image_name_ops); | ||
392 | debugfs_create_file("variant", 0400, dentry, &versions[i], | ||
393 | &qcom_image_variant_ops); | ||
394 | debugfs_create_file("oem", 0400, dentry, &versions[i], | ||
395 | &qcom_image_oem_ops); | ||
396 | } | ||
397 | } | ||
398 | |||
399 | static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo) | ||
400 | { | ||
401 | debugfs_remove_recursive(qcom_socinfo->dbg_root); | ||
402 | } | ||
403 | #else | ||
404 | static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, | ||
405 | struct socinfo *info) | ||
406 | { | ||
407 | } | ||
408 | static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo) { } | ||
409 | #endif /* CONFIG_DEBUG_FS */ | ||
410 | |||
411 | static int qcom_socinfo_probe(struct platform_device *pdev) | ||
412 | { | ||
413 | struct qcom_socinfo *qs; | ||
414 | struct socinfo *info; | ||
415 | size_t item_size; | ||
416 | |||
417 | info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, | ||
418 | &item_size); | ||
419 | if (IS_ERR(info)) { | ||
420 | dev_err(&pdev->dev, "Couldn't find socinfo\n"); | ||
421 | return PTR_ERR(info); | ||
422 | } | ||
423 | |||
424 | qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL); | ||
425 | if (!qs) | ||
426 | return -ENOMEM; | ||
427 | |||
428 | qs->attr.family = "Snapdragon"; | ||
429 | qs->attr.machine = socinfo_machine(&pdev->dev, | ||
430 | le32_to_cpu(info->id)); | ||
431 | qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", | ||
432 | SOCINFO_MAJOR(le32_to_cpu(info->ver)), | ||
433 | SOCINFO_MINOR(le32_to_cpu(info->ver))); | ||
434 | if (offsetof(struct socinfo, serial_num) <= item_size) | ||
435 | qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL, | ||
436 | "%u", | ||
437 | le32_to_cpu(info->serial_num)); | ||
438 | |||
439 | qs->soc_dev = soc_device_register(&qs->attr); | ||
440 | if (IS_ERR(qs->soc_dev)) | ||
441 | return PTR_ERR(qs->soc_dev); | ||
442 | |||
443 | socinfo_debugfs_init(qs, info); | ||
444 | |||
445 | /* Feed the soc specific unique data into entropy pool */ | ||
446 | add_device_randomness(info, item_size); | ||
447 | |||
448 | platform_set_drvdata(pdev, qs->soc_dev); | ||
449 | |||
450 | return 0; | ||
451 | } | ||
452 | |||
453 | static int qcom_socinfo_remove(struct platform_device *pdev) | ||
454 | { | ||
455 | struct qcom_socinfo *qs = platform_get_drvdata(pdev); | ||
456 | |||
457 | soc_device_unregister(qs->soc_dev); | ||
458 | |||
459 | socinfo_debugfs_exit(qs); | ||
460 | |||
461 | return 0; | ||
462 | } | ||
463 | |||
464 | static struct platform_driver qcom_socinfo_driver = { | ||
465 | .probe = qcom_socinfo_probe, | ||
466 | .remove = qcom_socinfo_remove, | ||
467 | .driver = { | ||
468 | .name = "qcom-socinfo", | ||
469 | }, | ||
470 | }; | ||
471 | |||
472 | module_platform_driver(qcom_socinfo_driver); | ||
473 | |||
474 | MODULE_DESCRIPTION("Qualcomm SoCinfo driver"); | ||
475 | MODULE_LICENSE("GPL v2"); | ||
476 | MODULE_ALIAS("platform:qcom-socinfo"); | ||
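The version macros near the top of this file pack a major.minor pair into a single 32-bit word, and the same encoding is used for the fmt field that socinfo_debugfs_init() switches on. A small worked example under those definitions:

	static void example_socinfo_version(void)
	{
		/* SOCINFO_VERSION(2, 1) packs major 2 and minor 1 into 0x00020001 */
		u32 ver = SOCINFO_VERSION(2, 1);

		/* prints "SoC version 2.1"; likewise SOCINFO_VERSION(0, 12) == 0x0000000c,
		 * which is the format value matched by the first debugfs case above.
		 */
		pr_info("SoC version %u.%u\n",
			SOCINFO_MAJOR(ver), SOCINFO_MINOR(ver));
	}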
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 2bbf49e5d441..3c5e017bacba 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig | |||
@@ -55,6 +55,7 @@ config ARCH_EMEV2 | |||
55 | 55 | ||
56 | config ARCH_R7S72100 | 56 | config ARCH_R7S72100 |
57 | bool "RZ/A1H (R7S72100)" | 57 | bool "RZ/A1H (R7S72100)" |
58 | select ARM_ERRATA_754322 | ||
58 | select PM | 59 | select PM |
59 | select PM_GENERIC_DOMAINS | 60 | select PM_GENERIC_DOMAINS |
60 | select RENESAS_OSTM | 61 | select RENESAS_OSTM |
@@ -72,12 +73,14 @@ config ARCH_R8A73A4 | |||
72 | bool "R-Mobile APE6 (R8A73A40)" | 73 | bool "R-Mobile APE6 (R8A73A40)" |
73 | select ARCH_RMOBILE | 74 | select ARCH_RMOBILE |
74 | select ARM_ERRATA_798181 if SMP | 75 | select ARM_ERRATA_798181 if SMP |
76 | select ARM_ERRATA_814220 | ||
75 | select HAVE_ARM_ARCH_TIMER | 77 | select HAVE_ARM_ARCH_TIMER |
76 | select RENESAS_IRQC | 78 | select RENESAS_IRQC |
77 | 79 | ||
78 | config ARCH_R8A7740 | 80 | config ARCH_R8A7740 |
79 | bool "R-Mobile A1 (R8A77400)" | 81 | bool "R-Mobile A1 (R8A77400)" |
80 | select ARCH_RMOBILE | 82 | select ARCH_RMOBILE |
83 | select ARM_ERRATA_754322 | ||
81 | select RENESAS_INTC_IRQPIN | 84 | select RENESAS_INTC_IRQPIN |
82 | 85 | ||
83 | config ARCH_R8A7743 | 86 | config ARCH_R8A7743 |
@@ -95,20 +98,24 @@ config ARCH_R8A7744 | |||
95 | config ARCH_R8A7745 | 98 | config ARCH_R8A7745 |
96 | bool "RZ/G1E (R8A77450)" | 99 | bool "RZ/G1E (R8A77450)" |
97 | select ARCH_RCAR_GEN2 | 100 | select ARCH_RCAR_GEN2 |
101 | select ARM_ERRATA_814220 | ||
98 | select SYSC_R8A7745 | 102 | select SYSC_R8A7745 |
99 | 103 | ||
100 | config ARCH_R8A77470 | 104 | config ARCH_R8A77470 |
101 | bool "RZ/G1C (R8A77470)" | 105 | bool "RZ/G1C (R8A77470)" |
102 | select ARCH_RCAR_GEN2 | 106 | select ARCH_RCAR_GEN2 |
107 | select ARM_ERRATA_814220 | ||
103 | select SYSC_R8A77470 | 108 | select SYSC_R8A77470 |
104 | 109 | ||
105 | config ARCH_R8A7778 | 110 | config ARCH_R8A7778 |
106 | bool "R-Car M1A (R8A77781)" | 111 | bool "R-Car M1A (R8A77781)" |
107 | select ARCH_RCAR_GEN1 | 112 | select ARCH_RCAR_GEN1 |
113 | select ARM_ERRATA_754322 | ||
108 | 114 | ||
109 | config ARCH_R8A7779 | 115 | config ARCH_R8A7779 |
110 | bool "R-Car H1 (R8A77790)" | 116 | bool "R-Car H1 (R8A77790)" |
111 | select ARCH_RCAR_GEN1 | 117 | select ARCH_RCAR_GEN1 |
118 | select ARM_ERRATA_754322 | ||
112 | select HAVE_ARM_SCU if SMP | 119 | select HAVE_ARM_SCU if SMP |
113 | select HAVE_ARM_TWD if SMP | 120 | select HAVE_ARM_TWD if SMP |
114 | select SYSC_R8A7779 | 121 | select SYSC_R8A7779 |
@@ -117,6 +124,7 @@ config ARCH_R8A7790 | |||
117 | bool "R-Car H2 (R8A77900)" | 124 | bool "R-Car H2 (R8A77900)" |
118 | select ARCH_RCAR_GEN2 | 125 | select ARCH_RCAR_GEN2 |
119 | select ARM_ERRATA_798181 if SMP | 126 | select ARM_ERRATA_798181 if SMP |
127 | select ARM_ERRATA_814220 | ||
120 | select I2C | 128 | select I2C |
121 | select SYSC_R8A7790 | 129 | select SYSC_R8A7790 |
122 | 130 | ||
@@ -143,15 +151,18 @@ config ARCH_R8A7793 | |||
143 | config ARCH_R8A7794 | 151 | config ARCH_R8A7794 |
144 | bool "R-Car E2 (R8A77940)" | 152 | bool "R-Car E2 (R8A77940)" |
145 | select ARCH_RCAR_GEN2 | 153 | select ARCH_RCAR_GEN2 |
154 | select ARM_ERRATA_814220 | ||
146 | select SYSC_R8A7794 | 155 | select SYSC_R8A7794 |
147 | 156 | ||
148 | config ARCH_R9A06G032 | 157 | config ARCH_R9A06G032 |
149 | bool "RZ/N1D (R9A06G032)" | 158 | bool "RZ/N1D (R9A06G032)" |
150 | select ARCH_RZN1 | 159 | select ARCH_RZN1 |
160 | select ARM_ERRATA_814220 | ||
151 | 161 | ||
152 | config ARCH_SH73A0 | 162 | config ARCH_SH73A0 |
153 | bool "SH-Mobile AG5 (R8A73A00)" | 163 | bool "SH-Mobile AG5 (R8A73A00)" |
154 | select ARCH_RMOBILE | 164 | select ARCH_RMOBILE |
165 | select ARM_ERRATA_754322 | ||
155 | select HAVE_ARM_SCU if SMP | 166 | select HAVE_ARM_SCU if SMP |
156 | select HAVE_ARM_TWD if SMP | 167 | select HAVE_ARM_TWD if SMP |
157 | select RENESAS_INTC_IRQPIN | 168 | select RENESAS_INTC_IRQPIN |
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index 0c80fab4f8de..59b5e6b10272 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c | |||
@@ -170,7 +170,7 @@ struct rcar_sysc_pd { | |||
170 | struct generic_pm_domain genpd; | 170 | struct generic_pm_domain genpd; |
171 | struct rcar_sysc_ch ch; | 171 | struct rcar_sysc_ch ch; |
172 | unsigned int flags; | 172 | unsigned int flags; |
173 | char name[0]; | 173 | char name[]; |
174 | }; | 174 | }; |
175 | 175 | ||
176 | static inline struct rcar_sysc_pd *to_rcar_pd(struct generic_pm_domain *d) | 176 | static inline struct rcar_sysc_pd *to_rcar_pd(struct generic_pm_domain *d) |
@@ -200,7 +200,6 @@ static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) | |||
200 | { | 200 | { |
201 | struct generic_pm_domain *genpd = &pd->genpd; | 201 | struct generic_pm_domain *genpd = &pd->genpd; |
202 | const char *name = pd->genpd.name; | 202 | const char *name = pd->genpd.name; |
203 | struct dev_power_governor *gov = &simple_qos_governor; | ||
204 | int error; | 203 | int error; |
205 | 204 | ||
206 | if (pd->flags & PD_CPU) { | 205 | if (pd->flags & PD_CPU) { |
@@ -254,7 +253,7 @@ static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) | |||
254 | rcar_sysc_power(&pd->ch, true); | 253 | rcar_sysc_power(&pd->ch, true); |
255 | 254 | ||
256 | finalize: | 255 | finalize: |
257 | error = pm_genpd_init(genpd, gov, false); | 256 | error = pm_genpd_init(genpd, &simple_qos_governor, false); |
258 | if (error) | 257 | if (error) |
259 | pr_err("Failed to init PM domain %s: %d\n", name, error); | 258 | pr_err("Failed to init PM domain %s: %d\n", name, error); |
260 | 259 | ||
@@ -346,7 +345,7 @@ static int __init rcar_sysc_pd_init(void) | |||
346 | if (info->init) { | 345 | if (info->init) { |
347 | error = info->init(); | 346 | error = info->init(); |
348 | if (error) | 347 | if (error) |
349 | return error; | 348 | goto out_put; |
350 | } | 349 | } |
351 | 350 | ||
352 | has_cpg_mstp = of_find_compatible_node(NULL, NULL, | 351 | has_cpg_mstp = of_find_compatible_node(NULL, NULL, |
diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c index 421ae1c887d8..54b616ad4a62 100644 --- a/drivers/soc/renesas/rmobile-sysc.c +++ b/drivers/soc/renesas/rmobile-sysc.c | |||
@@ -48,12 +48,8 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) | |||
48 | static int rmobile_pd_power_down(struct generic_pm_domain *genpd) | 48 | static int rmobile_pd_power_down(struct generic_pm_domain *genpd) |
49 | { | 49 | { |
50 | struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd); | 50 | struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd); |
51 | unsigned int mask; | 51 | unsigned int mask = BIT(rmobile_pd->bit_shift); |
52 | 52 | ||
53 | if (rmobile_pd->bit_shift == ~0) | ||
54 | return -EBUSY; | ||
55 | |||
56 | mask = BIT(rmobile_pd->bit_shift); | ||
57 | if (rmobile_pd->suspend) { | 53 | if (rmobile_pd->suspend) { |
58 | int ret = rmobile_pd->suspend(); | 54 | int ret = rmobile_pd->suspend(); |
59 | 55 | ||
@@ -80,14 +76,10 @@ static int rmobile_pd_power_down(struct generic_pm_domain *genpd) | |||
80 | 76 | ||
81 | static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd) | 77 | static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd) |
82 | { | 78 | { |
83 | unsigned int mask; | 79 | unsigned int mask = BIT(rmobile_pd->bit_shift); |
84 | unsigned int retry_count; | 80 | unsigned int retry_count; |
85 | int ret = 0; | 81 | int ret = 0; |
86 | 82 | ||
87 | if (rmobile_pd->bit_shift == ~0) | ||
88 | return 0; | ||
89 | |||
90 | mask = BIT(rmobile_pd->bit_shift); | ||
91 | if (__raw_readl(rmobile_pd->base + PSTR) & mask) | 83 | if (__raw_readl(rmobile_pd->base + PSTR) & mask) |
92 | return ret; | 84 | return ret; |
93 | 85 | ||
@@ -122,11 +114,15 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) | |||
122 | struct dev_power_governor *gov = rmobile_pd->gov; | 114 | struct dev_power_governor *gov = rmobile_pd->gov; |
123 | 115 | ||
124 | genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; | 116 | genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; |
125 | genpd->power_off = rmobile_pd_power_down; | 117 | genpd->attach_dev = cpg_mstp_attach_dev; |
126 | genpd->power_on = rmobile_pd_power_up; | 118 | genpd->detach_dev = cpg_mstp_detach_dev; |
127 | genpd->attach_dev = cpg_mstp_attach_dev; | 119 | |
128 | genpd->detach_dev = cpg_mstp_detach_dev; | 120 | if (!(genpd->flags & GENPD_FLAG_ALWAYS_ON)) { |
129 | __rmobile_pd_power_up(rmobile_pd); | 121 | genpd->power_off = rmobile_pd_power_down; |
122 | genpd->power_on = rmobile_pd_power_up; | ||
123 | __rmobile_pd_power_up(rmobile_pd); | ||
124 | } | ||
125 | |||
130 | pm_genpd_init(genpd, gov ? : &simple_qos_governor, false); | 126 | pm_genpd_init(genpd, gov ? : &simple_qos_governor, false); |
131 | } | 127 | } |
132 | 128 | ||
@@ -270,6 +266,11 @@ static void __init rmobile_setup_pm_domain(struct device_node *np, | |||
270 | break; | 266 | break; |
271 | 267 | ||
272 | case PD_NORMAL: | 268 | case PD_NORMAL: |
269 | if (pd->bit_shift == ~0) { | ||
270 | /* Top-level always-on domain */ | ||
271 | pr_debug("PM domain %s is always-on domain\n", name); | ||
272 | pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON; | ||
273 | } | ||
273 | break; | 274 | break; |
274 | } | 275 | } |
275 | 276 | ||
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index 2186285fda92..33ad0de2de3c 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig | |||
@@ -7,6 +7,12 @@ menuconfig SOC_SAMSUNG | |||
7 | 7 | ||
8 | if SOC_SAMSUNG | 8 | if SOC_SAMSUNG |
9 | 9 | ||
10 | config EXYNOS_CHIPID | ||
11 | bool "Exynos Chipid controller driver" if COMPILE_TEST | ||
12 | depends on ARCH_EXYNOS || COMPILE_TEST | ||
13 | select MFD_SYSCON | ||
14 | select SOC_BUS | ||
15 | |||
10 | config EXYNOS_PMU | 16 | config EXYNOS_PMU |
11 | bool "Exynos PMU controller driver" if COMPILE_TEST | 17 | bool "Exynos PMU controller driver" if COMPILE_TEST |
12 | depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST) | 18 | depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST) |
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile index 29f294baac6e..3b6a8797416c 100644 --- a/drivers/soc/samsung/Makefile +++ b/drivers/soc/samsung/Makefile | |||
@@ -1,4 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | |||
3 | obj-$(CONFIG_EXYNOS_CHIPID) += exynos-chipid.o | ||
2 | obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o | 4 | obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o |
3 | 5 | ||
4 | obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \ | 6 | obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \ |
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c new file mode 100644 index 000000000000..c55a47cfe617 --- /dev/null +++ b/drivers/soc/samsung/exynos-chipid.c | |||
@@ -0,0 +1,105 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2019 Samsung Electronics Co., Ltd. | ||
4 | * http://www.samsung.com/ | ||
5 | * | ||
6 | * EXYNOS - CHIP ID support | ||
7 | * Author: Pankaj Dubey <pankaj.dubey@samsung.com> | ||
8 | * Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/io.h> | ||
12 | #include <linux/mfd/syscon.h> | ||
13 | #include <linux/of.h> | ||
14 | #include <linux/regmap.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/soc/samsung/exynos-chipid.h> | ||
17 | #include <linux/sys_soc.h> | ||
18 | |||
19 | static const struct exynos_soc_id { | ||
20 | const char *name; | ||
21 | unsigned int id; | ||
22 | } soc_ids[] = { | ||
23 | { "EXYNOS3250", 0xE3472000 }, | ||
24 | { "EXYNOS4210", 0x43200000 }, /* EVT0 revision */ | ||
25 | { "EXYNOS4210", 0x43210000 }, | ||
26 | { "EXYNOS4212", 0x43220000 }, | ||
27 | { "EXYNOS4412", 0xE4412000 }, | ||
28 | { "EXYNOS5250", 0x43520000 }, | ||
29 | { "EXYNOS5260", 0xE5260000 }, | ||
30 | { "EXYNOS5410", 0xE5410000 }, | ||
31 | { "EXYNOS5420", 0xE5420000 }, | ||
32 | { "EXYNOS5440", 0xE5440000 }, | ||
33 | { "EXYNOS5800", 0xE5422000 }, | ||
34 | { "EXYNOS7420", 0xE7420000 }, | ||
35 | { "EXYNOS5433", 0xE5433000 }, | ||
36 | }; | ||
37 | |||
38 | static const char * __init product_id_to_soc_id(unsigned int product_id) | ||
39 | { | ||
40 | int i; | ||
41 | |||
42 | for (i = 0; i < ARRAY_SIZE(soc_ids); i++) | ||
43 | if ((product_id & EXYNOS_MASK) == soc_ids[i].id) | ||
44 | return soc_ids[i].name; | ||
45 | return NULL; | ||
46 | } | ||
47 | |||
48 | int __init exynos_chipid_early_init(void) | ||
49 | { | ||
50 | struct soc_device_attribute *soc_dev_attr; | ||
51 | struct soc_device *soc_dev; | ||
52 | struct device_node *root; | ||
53 | struct regmap *regmap; | ||
54 | u32 product_id; | ||
55 | u32 revision; | ||
56 | int ret; | ||
57 | |||
58 | regmap = syscon_regmap_lookup_by_compatible("samsung,exynos4210-chipid"); | ||
59 | if (IS_ERR(regmap)) | ||
60 | return PTR_ERR(regmap); | ||
61 | |||
62 | ret = regmap_read(regmap, EXYNOS_CHIPID_REG_PRO_ID, &product_id); | ||
63 | if (ret < 0) | ||
64 | return ret; | ||
65 | |||
66 | revision = product_id & EXYNOS_REV_MASK; | ||
67 | |||
68 | soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); | ||
69 | if (!soc_dev_attr) | ||
70 | return -ENOMEM; | ||
71 | |||
72 | soc_dev_attr->family = "Samsung Exynos"; | ||
73 | |||
74 | root = of_find_node_by_path("/"); | ||
75 | of_property_read_string(root, "model", &soc_dev_attr->machine); | ||
76 | of_node_put(root); | ||
77 | |||
78 | soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%x", revision); | ||
79 | soc_dev_attr->soc_id = product_id_to_soc_id(product_id); | ||
80 | if (!soc_dev_attr->soc_id) { | ||
81 | pr_err("Unknown SoC\n"); | ||
82 | ret = -ENODEV; | ||
83 | goto err; | ||
84 | } | ||
85 | |||
86 | /* please note that the actual registration will be deferred */ | ||
87 | soc_dev = soc_device_register(soc_dev_attr); | ||
88 | if (IS_ERR(soc_dev)) { | ||
89 | ret = PTR_ERR(soc_dev); | ||
90 | goto err; | ||
91 | } | ||
92 | |||
93 | /* it is too early to use dev_info() here (soc_dev is NULL) */ | ||
94 | pr_info("soc soc0: Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n", | ||
95 | soc_dev_attr->soc_id, product_id, revision); | ||
96 | |||
97 | return 0; | ||
98 | |||
99 | err: | ||
100 | kfree(soc_dev_attr->revision); | ||
101 | kfree(soc_dev_attr); | ||
102 | return ret; | ||
103 | } | ||
104 | |||
105 | early_initcall(exynos_chipid_early_init); | ||
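Once exynos_chipid_early_init() has registered the soc_device, other kernel code can key SoC-specific behaviour off the attributes it filled in, via soc_device_match() from <linux/sys_soc.h>. A minimal sketch follows; the EXYNOS5433-only quirk it guards is an illustrative assumption, not something this series adds.

	#include <linux/sys_soc.h>

	/* hypothetical consumer: apply a workaround only on Exynos5433 parts */
	static const struct soc_device_attribute exynos5433_quirk[] = {
		{ .family = "Samsung Exynos", .soc_id = "EXYNOS5433" },
		{ /* sentinel */ }
	};

	static bool needs_exynos5433_quirk(void)
	{
		return soc_device_match(exynos5433_quirk) != NULL;
	}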
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c index 97817dd7ba24..8c2a2f23982c 100644 --- a/drivers/soc/ti/ti_sci_pm_domains.c +++ b/drivers/soc/ti/ti_sci_pm_domains.c | |||
@@ -15,15 +15,19 @@ | |||
15 | #include <linux/pm_domain.h> | 15 | #include <linux/pm_domain.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/soc/ti/ti_sci_protocol.h> | 17 | #include <linux/soc/ti/ti_sci_protocol.h> |
18 | #include <dt-bindings/soc/ti,sci_pm_domain.h> | ||
18 | 19 | ||
19 | /** | 20 | /** |
20 | * struct ti_sci_genpd_dev_data: holds data needed for every device attached | 21 | * struct ti_sci_genpd_dev_data: holds data needed for every device attached |
21 | * to this genpd | 22 | * to this genpd |
22 | * @idx: index of the device that identifies it with the system | 23 | * @idx: index of the device that identifies it with the system |
23 | * control processor. | 24 | * control processor. |
25 | * @exclusive: Permissions for exclusive request or shared request of the | ||
26 | * device. | ||
24 | */ | 27 | */ |
25 | struct ti_sci_genpd_dev_data { | 28 | struct ti_sci_genpd_dev_data { |
26 | int idx; | 29 | int idx; |
30 | u8 exclusive; | ||
27 | }; | 31 | }; |
28 | 32 | ||
29 | /** | 33 | /** |
@@ -55,6 +59,14 @@ static int ti_sci_dev_id(struct device *dev) | |||
55 | return sci_dev_data->idx; | 59 | return sci_dev_data->idx; |
56 | } | 60 | } |
57 | 61 | ||
62 | static u8 is_ti_sci_dev_exclusive(struct device *dev) | ||
63 | { | ||
64 | struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev); | ||
65 | struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data; | ||
66 | |||
67 | return sci_dev_data->exclusive; | ||
68 | } | ||
69 | |||
58 | /** | 70 | /** |
59 | * ti_sci_dev_to_sci_handle(): get pointer to ti_sci_handle | 71 | * ti_sci_dev_to_sci_handle(): get pointer to ti_sci_handle |
60 | * @dev: pointer to device associated with this genpd | 72 | * @dev: pointer to device associated with this genpd |
@@ -79,7 +91,10 @@ static int ti_sci_dev_start(struct device *dev) | |||
79 | const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev); | 91 | const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev); |
80 | int idx = ti_sci_dev_id(dev); | 92 | int idx = ti_sci_dev_id(dev); |
81 | 93 | ||
82 | return ti_sci->ops.dev_ops.get_device(ti_sci, idx); | 94 | if (is_ti_sci_dev_exclusive(dev)) |
95 | return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci, idx); | ||
96 | else | ||
97 | return ti_sci->ops.dev_ops.get_device(ti_sci, idx); | ||
83 | } | 98 | } |
84 | 99 | ||
85 | /** | 100 | /** |
@@ -110,7 +125,7 @@ static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain, | |||
110 | if (ret < 0) | 125 | if (ret < 0) |
111 | return ret; | 126 | return ret; |
112 | 127 | ||
113 | if (pd_args.args_count != 1) | 128 | if (pd_args.args_count != 1 && pd_args.args_count != 2) |
114 | return -EINVAL; | 129 | return -EINVAL; |
115 | 130 | ||
116 | idx = pd_args.args[0]; | 131 | idx = pd_args.args[0]; |
@@ -128,6 +143,10 @@ static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain, | |||
128 | return -ENOMEM; | 143 | return -ENOMEM; |
129 | 144 | ||
130 | sci_dev_data->idx = idx; | 145 | sci_dev_data->idx = idx; |
146 | /* Enable the exclusive permissions by default */ | ||
147 | sci_dev_data->exclusive = TI_SCI_PD_EXCLUSIVE; | ||
148 | if (pd_args.args_count == 2) | ||
149 | sci_dev_data->exclusive = pd_args.args[1] & 0x1; | ||
131 | 150 | ||
132 | genpd_data = dev_gpd_data(dev); | 151 | genpd_data = dev_gpd_data(dev); |
133 | genpd_data->data = sci_dev_data; | 152 | genpd_data->data = sci_dev_data; |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 6ee514fd0920..0f0fdb57198f 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -450,13 +450,6 @@ config SPI_NPCM_PSPI | |||
450 | This driver provides support for Nuvoton NPCM BMC | 450 | This driver provides support for Nuvoton NPCM BMC |
451 | Peripheral SPI controller in master mode. | 451 | Peripheral SPI controller in master mode. |
452 | 452 | ||
453 | config SPI_NUC900 | ||
454 | tristate "Nuvoton NUC900 series SPI" | ||
455 | depends on ARCH_W90X900 | ||
456 | select SPI_BITBANG | ||
457 | help | ||
458 | SPI driver for Nuvoton NUC900 series ARM SoCs | ||
459 | |||
460 | config SPI_LANTIQ_SSC | 453 | config SPI_LANTIQ_SSC |
461 | tristate "Lantiq SSC SPI controller" | 454 | tristate "Lantiq SSC SPI controller" |
462 | depends on LANTIQ || COMPILE_TEST | 455 | depends on LANTIQ || COMPILE_TEST |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index adbebee93a75..bb49c9e6d0a0 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
@@ -65,7 +65,6 @@ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o | |||
65 | obj-$(CONFIG_SPI_MXS) += spi-mxs.o | 65 | obj-$(CONFIG_SPI_MXS) += spi-mxs.o |
66 | obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o | 66 | obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o |
67 | obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o | 67 | obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o |
68 | obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o | ||
69 | obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o | 68 | obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o |
70 | obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o | 69 | obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o |
71 | spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o | 70 | spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o |
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c deleted file mode 100644 index 61400358f4be..000000000000 --- a/drivers/spi/spi-nuc900.c +++ /dev/null | |||
@@ -1,426 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * Copyright (c) 2009 Nuvoton technology. | ||
4 | * Wan ZongShun <mcuos.com@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/module.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/err.h> | ||
13 | #include <linux/clk.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/gpio.h> | ||
17 | #include <linux/io.h> | ||
18 | #include <linux/slab.h> | ||
19 | |||
20 | #include <linux/spi/spi.h> | ||
21 | #include <linux/spi/spi_bitbang.h> | ||
22 | |||
23 | #include <linux/platform_data/spi-nuc900.h> | ||
24 | |||
25 | /* usi registers offset */ | ||
26 | #define USI_CNT 0x00 | ||
27 | #define USI_DIV 0x04 | ||
28 | #define USI_SSR 0x08 | ||
29 | #define USI_RX0 0x10 | ||
30 | #define USI_TX0 0x10 | ||
31 | |||
32 | /* usi register bit */ | ||
33 | #define ENINT (0x01 << 17) | ||
34 | #define ENFLG (0x01 << 16) | ||
35 | #define SLEEP (0x0f << 12) | ||
36 | #define TXNUM (0x03 << 8) | ||
37 | #define TXBITLEN (0x1f << 3) | ||
38 | #define TXNEG (0x01 << 2) | ||
39 | #define RXNEG (0x01 << 1) | ||
40 | #define LSB (0x01 << 10) | ||
41 | #define SELECTLEV (0x01 << 2) | ||
42 | #define SELECTPOL (0x01 << 31) | ||
43 | #define SELECTSLAVE 0x01 | ||
44 | #define GOBUSY 0x01 | ||
45 | |||
46 | struct nuc900_spi { | ||
47 | struct spi_bitbang bitbang; | ||
48 | struct completion done; | ||
49 | void __iomem *regs; | ||
50 | int irq; | ||
51 | int len; | ||
52 | int count; | ||
53 | const unsigned char *tx; | ||
54 | unsigned char *rx; | ||
55 | struct clk *clk; | ||
56 | struct spi_master *master; | ||
57 | struct nuc900_spi_info *pdata; | ||
58 | spinlock_t lock; | ||
59 | }; | ||
60 | |||
61 | static inline struct nuc900_spi *to_hw(struct spi_device *sdev) | ||
62 | { | ||
63 | return spi_master_get_devdata(sdev->master); | ||
64 | } | ||
65 | |||
66 | static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr) | ||
67 | { | ||
68 | struct nuc900_spi *hw = to_hw(spi); | ||
69 | unsigned int val; | ||
70 | unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0; | ||
71 | unsigned int cpol = spi->mode & SPI_CPOL ? 1 : 0; | ||
72 | unsigned long flags; | ||
73 | |||
74 | spin_lock_irqsave(&hw->lock, flags); | ||
75 | |||
76 | val = __raw_readl(hw->regs + USI_SSR); | ||
77 | |||
78 | if (!cs) | ||
79 | val &= ~SELECTLEV; | ||
80 | else | ||
81 | val |= SELECTLEV; | ||
82 | |||
83 | if (!ssr) | ||
84 | val &= ~SELECTSLAVE; | ||
85 | else | ||
86 | val |= SELECTSLAVE; | ||
87 | |||
88 | __raw_writel(val, hw->regs + USI_SSR); | ||
89 | |||
90 | val = __raw_readl(hw->regs + USI_CNT); | ||
91 | |||
92 | if (!cpol) | ||
93 | val &= ~SELECTPOL; | ||
94 | else | ||
95 | val |= SELECTPOL; | ||
96 | |||
97 | __raw_writel(val, hw->regs + USI_CNT); | ||
98 | |||
99 | spin_unlock_irqrestore(&hw->lock, flags); | ||
100 | } | ||
101 | |||
102 | static void nuc900_spi_chipsel(struct spi_device *spi, int value) | ||
103 | { | ||
104 | switch (value) { | ||
105 | case BITBANG_CS_INACTIVE: | ||
106 | nuc900_slave_select(spi, 0); | ||
107 | break; | ||
108 | |||
109 | case BITBANG_CS_ACTIVE: | ||
110 | nuc900_slave_select(spi, 1); | ||
111 | break; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, unsigned int txnum) | ||
116 | { | ||
117 | unsigned int val; | ||
118 | unsigned long flags; | ||
119 | |||
120 | spin_lock_irqsave(&hw->lock, flags); | ||
121 | |||
122 | val = __raw_readl(hw->regs + USI_CNT) & ~TXNUM; | ||
123 | |||
124 | if (txnum) | ||
125 | val |= txnum << 0x08; | ||
126 | |||
127 | __raw_writel(val, hw->regs + USI_CNT); | ||
128 | |||
129 | spin_unlock_irqrestore(&hw->lock, flags); | ||
130 | |||
131 | } | ||
132 | |||
133 | static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw, | ||
134 | unsigned int txbitlen) | ||
135 | { | ||
136 | unsigned int val; | ||
137 | unsigned long flags; | ||
138 | |||
139 | spin_lock_irqsave(&hw->lock, flags); | ||
140 | |||
141 | val = __raw_readl(hw->regs + USI_CNT) & ~TXBITLEN; | ||
142 | |||
143 | val |= (txbitlen << 0x03); | ||
144 | |||
145 | __raw_writel(val, hw->regs + USI_CNT); | ||
146 | |||
147 | spin_unlock_irqrestore(&hw->lock, flags); | ||
148 | } | ||
149 | |||
150 | static void nuc900_spi_gobusy(struct nuc900_spi *hw) | ||
151 | { | ||
152 | unsigned int val; | ||
153 | unsigned long flags; | ||
154 | |||
155 | spin_lock_irqsave(&hw->lock, flags); | ||
156 | |||
157 | val = __raw_readl(hw->regs + USI_CNT); | ||
158 | |||
159 | val |= GOBUSY; | ||
160 | |||
161 | __raw_writel(val, hw->regs + USI_CNT); | ||
162 | |||
163 | spin_unlock_irqrestore(&hw->lock, flags); | ||
164 | } | ||
165 | |||
166 | static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count) | ||
167 | { | ||
168 | return hw->tx ? hw->tx[count] : 0; | ||
169 | } | ||
170 | |||
171 | static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | ||
172 | { | ||
173 | struct nuc900_spi *hw = to_hw(spi); | ||
174 | |||
175 | hw->tx = t->tx_buf; | ||
176 | hw->rx = t->rx_buf; | ||
177 | hw->len = t->len; | ||
178 | hw->count = 0; | ||
179 | |||
180 | __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0); | ||
181 | |||
182 | nuc900_spi_gobusy(hw); | ||
183 | |||
184 | wait_for_completion(&hw->done); | ||
185 | |||
186 | return hw->count; | ||
187 | } | ||
188 | |||
189 | static irqreturn_t nuc900_spi_irq(int irq, void *dev) | ||
190 | { | ||
191 | struct nuc900_spi *hw = dev; | ||
192 | unsigned int status; | ||
193 | unsigned int count = hw->count; | ||
194 | |||
195 | status = __raw_readl(hw->regs + USI_CNT); | ||
196 | __raw_writel(status, hw->regs + USI_CNT); | ||
197 | |||
198 | if (status & ENFLG) { | ||
199 | hw->count++; | ||
200 | |||
201 | if (hw->rx) | ||
202 | hw->rx[count] = __raw_readl(hw->regs + USI_RX0); | ||
203 | count++; | ||
204 | |||
205 | if (count < hw->len) { | ||
206 | __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0); | ||
207 | nuc900_spi_gobusy(hw); | ||
208 | } else { | ||
209 | complete(&hw->done); | ||
210 | } | ||
211 | |||
212 | return IRQ_HANDLED; | ||
213 | } | ||
214 | |||
215 | complete(&hw->done); | ||
216 | return IRQ_HANDLED; | ||
217 | } | ||
218 | |||
219 | static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge) | ||
220 | { | ||
221 | unsigned int val; | ||
222 | unsigned long flags; | ||
223 | |||
224 | spin_lock_irqsave(&hw->lock, flags); | ||
225 | |||
226 | val = __raw_readl(hw->regs + USI_CNT); | ||
227 | |||
228 | if (edge) | ||
229 | val |= TXNEG; | ||
230 | else | ||
231 | val &= ~TXNEG; | ||
232 | __raw_writel(val, hw->regs + USI_CNT); | ||
233 | |||
234 | spin_unlock_irqrestore(&hw->lock, flags); | ||
235 | } | ||
236 | |||
237 | static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge) | ||
238 | { | ||
239 | unsigned int val; | ||
240 | unsigned long flags; | ||
241 | |||
242 | spin_lock_irqsave(&hw->lock, flags); | ||
243 | |||
244 | val = __raw_readl(hw->regs + USI_CNT); | ||
245 | |||
246 | if (edge) | ||
247 | val |= RXNEG; | ||
248 | else | ||
249 | val &= ~RXNEG; | ||
250 | __raw_writel(val, hw->regs + USI_CNT); | ||
251 | |||
252 | spin_unlock_irqrestore(&hw->lock, flags); | ||
253 | } | ||
254 | |||
255 | static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb) | ||
256 | { | ||
257 | unsigned int val; | ||
258 | unsigned long flags; | ||
259 | |||
260 | spin_lock_irqsave(&hw->lock, flags); | ||
261 | |||
262 | val = __raw_readl(hw->regs + USI_CNT); | ||
263 | |||
264 | if (lsb) | ||
265 | val |= LSB; | ||
266 | else | ||
267 | val &= ~LSB; | ||
268 | __raw_writel(val, hw->regs + USI_CNT); | ||
269 | |||
270 | spin_unlock_irqrestore(&hw->lock, flags); | ||
271 | } | ||
272 | |||
273 | static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep) | ||
274 | { | ||
275 | unsigned int val; | ||
276 | unsigned long flags; | ||
277 | |||
278 | spin_lock_irqsave(&hw->lock, flags); | ||
279 | |||
280 | val = __raw_readl(hw->regs + USI_CNT) & ~SLEEP; | ||
281 | |||
282 | if (sleep) | ||
283 | val |= (sleep << 12); | ||
284 | |||
285 | __raw_writel(val, hw->regs + USI_CNT); | ||
286 | |||
287 | spin_unlock_irqrestore(&hw->lock, flags); | ||
288 | } | ||
289 | |||
290 | static void nuc900_enable_int(struct nuc900_spi *hw) | ||
291 | { | ||
292 | unsigned int val; | ||
293 | unsigned long flags; | ||
294 | |||
295 | spin_lock_irqsave(&hw->lock, flags); | ||
296 | |||
297 | val = __raw_readl(hw->regs + USI_CNT); | ||
298 | |||
299 | val |= ENINT; | ||
300 | |||
301 | __raw_writel(val, hw->regs + USI_CNT); | ||
302 | |||
303 | spin_unlock_irqrestore(&hw->lock, flags); | ||
304 | } | ||
305 | |||
306 | static void nuc900_set_divider(struct nuc900_spi *hw) | ||
307 | { | ||
308 | __raw_writel(hw->pdata->divider, hw->regs + USI_DIV); | ||
309 | } | ||
310 | |||
311 | static void nuc900_init_spi(struct nuc900_spi *hw) | ||
312 | { | ||
313 | clk_enable(hw->clk); | ||
314 | spin_lock_init(&hw->lock); | ||
315 | |||
316 | nuc900_tx_edge(hw, hw->pdata->txneg); | ||
317 | nuc900_rx_edge(hw, hw->pdata->rxneg); | ||
318 | nuc900_send_first(hw, hw->pdata->lsb); | ||
319 | nuc900_set_sleep(hw, hw->pdata->sleep); | ||
320 | nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen); | ||
321 | nuc900_spi_setup_txnum(hw, hw->pdata->txnum); | ||
322 | nuc900_set_divider(hw); | ||
323 | nuc900_enable_int(hw); | ||
324 | } | ||
325 | |||
326 | static int nuc900_spi_probe(struct platform_device *pdev) | ||
327 | { | ||
328 | struct nuc900_spi *hw; | ||
329 | struct spi_master *master; | ||
330 | int err = 0; | ||
331 | |||
332 | master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi)); | ||
333 | if (master == NULL) { | ||
334 | dev_err(&pdev->dev, "No memory for spi_master\n"); | ||
335 | return -ENOMEM; | ||
336 | } | ||
337 | |||
338 | hw = spi_master_get_devdata(master); | ||
339 | hw->master = master; | ||
340 | hw->pdata = dev_get_platdata(&pdev->dev); | ||
341 | |||
342 | if (hw->pdata == NULL) { | ||
343 | dev_err(&pdev->dev, "No platform data supplied\n"); | ||
344 | err = -ENOENT; | ||
345 | goto err_pdata; | ||
346 | } | ||
347 | |||
348 | platform_set_drvdata(pdev, hw); | ||
349 | init_completion(&hw->done); | ||
350 | |||
351 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | ||
352 | if (hw->pdata->lsb) | ||
353 | master->mode_bits |= SPI_LSB_FIRST; | ||
354 | master->num_chipselect = hw->pdata->num_cs; | ||
355 | master->bus_num = hw->pdata->bus_num; | ||
356 | hw->bitbang.master = hw->master; | ||
357 | hw->bitbang.chipselect = nuc900_spi_chipsel; | ||
358 | hw->bitbang.txrx_bufs = nuc900_spi_txrx; | ||
359 | |||
360 | hw->regs = devm_platform_ioremap_resource(pdev, 0); | ||
361 | if (IS_ERR(hw->regs)) { | ||
362 | err = PTR_ERR(hw->regs); | ||
363 | goto err_pdata; | ||
364 | } | ||
365 | |||
366 | hw->irq = platform_get_irq(pdev, 0); | ||
367 | if (hw->irq < 0) { | ||
368 | err = -ENOENT; | ||
369 | goto err_pdata; | ||
370 | } | ||
371 | |||
372 | err = devm_request_irq(&pdev->dev, hw->irq, nuc900_spi_irq, 0, | ||
373 | pdev->name, hw); | ||
374 | if (err) { | ||
375 | dev_err(&pdev->dev, "Cannot claim IRQ\n"); | ||
376 | goto err_pdata; | ||
377 | } | ||
378 | |||
379 | hw->clk = devm_clk_get(&pdev->dev, "spi"); | ||
380 | if (IS_ERR(hw->clk)) { | ||
381 | dev_err(&pdev->dev, "No clock for device\n"); | ||
382 | err = PTR_ERR(hw->clk); | ||
383 | goto err_pdata; | ||
384 | } | ||
385 | |||
386 | mfp_set_groupg(&pdev->dev, NULL); | ||
387 | nuc900_init_spi(hw); | ||
388 | |||
389 | err = spi_bitbang_start(&hw->bitbang); | ||
390 | if (err) { | ||
391 | dev_err(&pdev->dev, "Failed to register SPI master\n"); | ||
392 | goto err_register; | ||
393 | } | ||
394 | |||
395 | return 0; | ||
396 | |||
397 | err_register: | ||
398 | clk_disable(hw->clk); | ||
399 | err_pdata: | ||
400 | spi_master_put(hw->master); | ||
401 | return err; | ||
402 | } | ||
403 | |||
404 | static int nuc900_spi_remove(struct platform_device *dev) | ||
405 | { | ||
406 | struct nuc900_spi *hw = platform_get_drvdata(dev); | ||
407 | |||
408 | spi_bitbang_stop(&hw->bitbang); | ||
409 | clk_disable(hw->clk); | ||
410 | spi_master_put(hw->master); | ||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | static struct platform_driver nuc900_spi_driver = { | ||
415 | .probe = nuc900_spi_probe, | ||
416 | .remove = nuc900_spi_remove, | ||
417 | .driver = { | ||
418 | .name = "nuc900-spi", | ||
419 | }, | ||
420 | }; | ||
421 | module_platform_driver(nuc900_spi_driver); | ||
422 | |||
423 | MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); | ||
424 | MODULE_DESCRIPTION("nuc900 spi driver!"); | ||
425 | MODULE_LICENSE("GPL"); | ||
426 | MODULE_ALIAS("platform:nuc900-spi"); | ||
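The control-register helpers in the removed spi-nuc900.c above (nuc900_send_first, nuc900_set_sleep, nuc900_enable_int and friends) all follow the same locked read-modify-write pattern on USI_CNT. A minimal sketch of that pattern, with an illustrative helper name that is not part of the original driver:

static void nuc900_set_bits(struct nuc900_spi *hw, unsigned int bits)
{
        unsigned long flags;
        unsigned int val;

        spin_lock_irqsave(&hw->lock, flags);            /* serialize access to USI_CNT */
        val = __raw_readl(hw->regs + USI_CNT);          /* read the current control word */
        __raw_writel(val | bits, hw->regs + USI_CNT);   /* write it back with the bits set */
        spin_unlock_irqrestore(&hw->lock, flags);
}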
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index aa942703ae65..13b0269a0abc 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c | |||
@@ -148,6 +148,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) | |||
148 | */ | 148 | */ |
149 | optee_cq_wait_for_completion(&optee->call_queue, &w); | 149 | optee_cq_wait_for_completion(&optee->call_queue, &w); |
150 | } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { | 150 | } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { |
151 | might_sleep(); | ||
151 | param.a0 = res.a0; | 152 | param.a0 = res.a0; |
152 | param.a1 = res.a1; | 153 | param.a1 = res.a1; |
153 | param.a2 = res.a2; | 154 | param.a2 = res.a2; |
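The might_sleep() added above is an annotation rather than a functional change: it documents that the RPC path may block and, when CONFIG_DEBUG_ATOMIC_SLEEP is enabled, warns if the path is ever entered from atomic context. A minimal sketch of the idiom, with an assumed function name:

#include <linux/kernel.h>
#include <linux/completion.h>

static int example_wait_for_rpc(struct completion *done)
{
        might_sleep();                  /* this path may block; warn if called atomically */
        wait_for_completion(done);      /* sleep until the request completes */
        return 0;
}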
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 6b2de93bd302..5f83cd715387 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig | |||
@@ -1924,20 +1924,6 @@ config FB_S3C2410_DEBUG | |||
1924 | Turn on debugging messages. Note that you can set/unset at run time | 1924 | Turn on debugging messages. Note that you can set/unset at run time |
1925 | through sysfs | 1925 | through sysfs |
1926 | 1926 | ||
1927 | config FB_NUC900 | ||
1928 | tristate "NUC900 LCD framebuffer support" | ||
1929 | depends on FB && ARCH_W90X900 | ||
1930 | select FB_CFB_FILLRECT | ||
1931 | select FB_CFB_COPYAREA | ||
1932 | select FB_CFB_IMAGEBLIT | ||
1933 | ---help--- | ||
1934 | Frame buffer driver for the built-in LCD controller in the Nuvoton | ||
1935 | NUC900 processor | ||
1936 | |||
1937 | config GPM1040A0_320X240 | ||
1938 | bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD" | ||
1939 | depends on FB_NUC900 | ||
1940 | |||
1941 | config FB_SM501 | 1927 | config FB_SM501 |
1942 | tristate "Silicon Motion SM501 framebuffer support" | 1928 | tristate "Silicon Motion SM501 framebuffer support" |
1943 | depends on FB && MFD_SM501 | 1929 | depends on FB && MFD_SM501 |
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 7dc4861a93e6..aab7155884ea 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile | |||
@@ -116,7 +116,6 @@ obj-y += omap2/ | |||
116 | obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o | 116 | obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o |
117 | obj-$(CONFIG_FB_CARMINE) += carminefb.o | 117 | obj-$(CONFIG_FB_CARMINE) += carminefb.o |
118 | obj-$(CONFIG_FB_MB862XX) += mb862xx/ | 118 | obj-$(CONFIG_FB_MB862XX) += mb862xx/ |
119 | obj-$(CONFIG_FB_NUC900) += nuc900fb.o | ||
120 | obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o | 119 | obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o |
121 | obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o | 120 | obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o |
122 | obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o | 121 | obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o |
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index b1cf248f3291..2d3dcc52fcf3 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/cpufreq.h> | 20 | #include <linux/cpufreq.h> |
21 | #include <linux/console.h> | 21 | #include <linux/console.h> |
22 | #include <linux/regulator/consumer.h> | ||
22 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
@@ -164,7 +165,7 @@ struct da8xx_fb_par { | |||
164 | struct notifier_block freq_transition; | 165 | struct notifier_block freq_transition; |
165 | #endif | 166 | #endif |
166 | unsigned int lcdc_clk_rate; | 167 | unsigned int lcdc_clk_rate; |
167 | void (*panel_power_ctrl)(int); | 168 | struct regulator *lcd_supply; |
168 | u32 pseudo_palette[16]; | 169 | u32 pseudo_palette[16]; |
169 | struct fb_videomode mode; | 170 | struct fb_videomode mode; |
170 | struct lcd_ctrl_config cfg; | 171 | struct lcd_ctrl_config cfg; |
@@ -1066,33 +1067,30 @@ static void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par) | |||
1066 | static int fb_remove(struct platform_device *dev) | 1067 | static int fb_remove(struct platform_device *dev) |
1067 | { | 1068 | { |
1068 | struct fb_info *info = dev_get_drvdata(&dev->dev); | 1069 | struct fb_info *info = dev_get_drvdata(&dev->dev); |
1069 | 1070 | struct da8xx_fb_par *par = info->par; | |
1070 | if (info) { | 1071 | int ret; |
1071 | struct da8xx_fb_par *par = info->par; | ||
1072 | 1072 | ||
1073 | #ifdef CONFIG_CPU_FREQ | 1073 | #ifdef CONFIG_CPU_FREQ |
1074 | lcd_da8xx_cpufreq_deregister(par); | 1074 | lcd_da8xx_cpufreq_deregister(par); |
1075 | #endif | 1075 | #endif |
1076 | if (par->panel_power_ctrl) | 1076 | if (par->lcd_supply) { |
1077 | par->panel_power_ctrl(0); | 1077 | ret = regulator_disable(par->lcd_supply); |
1078 | if (ret) | ||
1079 | return ret; | ||
1080 | } | ||
1078 | 1081 | ||
1079 | lcd_disable_raster(DA8XX_FRAME_WAIT); | 1082 | lcd_disable_raster(DA8XX_FRAME_WAIT); |
1080 | lcdc_write(0, LCD_RASTER_CTRL_REG); | 1083 | lcdc_write(0, LCD_RASTER_CTRL_REG); |
1081 | 1084 | ||
1082 | /* disable DMA */ | 1085 | /* disable DMA */ |
1083 | lcdc_write(0, LCD_DMA_CTRL_REG); | 1086 | lcdc_write(0, LCD_DMA_CTRL_REG); |
1084 | 1087 | ||
1085 | unregister_framebuffer(info); | 1088 | unregister_framebuffer(info); |
1086 | fb_dealloc_cmap(&info->cmap); | 1089 | fb_dealloc_cmap(&info->cmap); |
1087 | dma_free_coherent(par->dev, PALETTE_SIZE, par->v_palette_base, | 1090 | pm_runtime_put_sync(&dev->dev); |
1088 | par->p_palette_base); | 1091 | pm_runtime_disable(&dev->dev); |
1089 | dma_free_coherent(par->dev, par->vram_size, par->vram_virt, | 1092 | framebuffer_release(info); |
1090 | par->vram_phys); | ||
1091 | pm_runtime_put_sync(&dev->dev); | ||
1092 | pm_runtime_disable(&dev->dev); | ||
1093 | framebuffer_release(info); | ||
1094 | 1093 | ||
1095 | } | ||
1096 | return 0; | 1094 | return 0; |
1097 | } | 1095 | } |
1098 | 1096 | ||
@@ -1179,15 +1177,21 @@ static int cfb_blank(int blank, struct fb_info *info) | |||
1179 | case FB_BLANK_UNBLANK: | 1177 | case FB_BLANK_UNBLANK: |
1180 | lcd_enable_raster(); | 1178 | lcd_enable_raster(); |
1181 | 1179 | ||
1182 | if (par->panel_power_ctrl) | 1180 | if (par->lcd_supply) { |
1183 | par->panel_power_ctrl(1); | 1181 | ret = regulator_enable(par->lcd_supply); |
1182 | if (ret) | ||
1183 | return ret; | ||
1184 | } | ||
1184 | break; | 1185 | break; |
1185 | case FB_BLANK_NORMAL: | 1186 | case FB_BLANK_NORMAL: |
1186 | case FB_BLANK_VSYNC_SUSPEND: | 1187 | case FB_BLANK_VSYNC_SUSPEND: |
1187 | case FB_BLANK_HSYNC_SUSPEND: | 1188 | case FB_BLANK_HSYNC_SUSPEND: |
1188 | case FB_BLANK_POWERDOWN: | 1189 | case FB_BLANK_POWERDOWN: |
1189 | if (par->panel_power_ctrl) | 1190 | if (par->lcd_supply) { |
1190 | par->panel_power_ctrl(0); | 1191 | ret = regulator_disable(par->lcd_supply); |
1192 | if (ret) | ||
1193 | return ret; | ||
1194 | } | ||
1191 | 1195 | ||
1192 | lcd_disable_raster(DA8XX_FRAME_WAIT); | 1196 | lcd_disable_raster(DA8XX_FRAME_WAIT); |
1193 | break; | 1197 | break; |
@@ -1328,7 +1332,6 @@ static int fb_probe(struct platform_device *device) | |||
1328 | { | 1332 | { |
1329 | struct da8xx_lcdc_platform_data *fb_pdata = | 1333 | struct da8xx_lcdc_platform_data *fb_pdata = |
1330 | dev_get_platdata(&device->dev); | 1334 | dev_get_platdata(&device->dev); |
1331 | struct resource *lcdc_regs; | ||
1332 | struct lcd_ctrl_config *lcd_cfg; | 1335 | struct lcd_ctrl_config *lcd_cfg; |
1333 | struct fb_videomode *lcdc_info; | 1336 | struct fb_videomode *lcdc_info; |
1334 | struct fb_info *da8xx_fb_info; | 1337 | struct fb_info *da8xx_fb_info; |
@@ -1346,8 +1349,7 @@ static int fb_probe(struct platform_device *device) | |||
1346 | if (lcdc_info == NULL) | 1349 | if (lcdc_info == NULL) |
1347 | return -ENODEV; | 1350 | return -ENODEV; |
1348 | 1351 | ||
1349 | lcdc_regs = platform_get_resource(device, IORESOURCE_MEM, 0); | 1352 | da8xx_fb_reg_base = devm_platform_ioremap_resource(device, 0); |
1350 | da8xx_fb_reg_base = devm_ioremap_resource(&device->dev, lcdc_regs); | ||
1351 | if (IS_ERR(da8xx_fb_reg_base)) | 1353 | if (IS_ERR(da8xx_fb_reg_base)) |
1352 | return PTR_ERR(da8xx_fb_reg_base); | 1354 | return PTR_ERR(da8xx_fb_reg_base); |
1353 | 1355 | ||
@@ -1395,9 +1397,19 @@ static int fb_probe(struct platform_device *device) | |||
1395 | par->dev = &device->dev; | 1397 | par->dev = &device->dev; |
1396 | par->lcdc_clk = tmp_lcdc_clk; | 1398 | par->lcdc_clk = tmp_lcdc_clk; |
1397 | par->lcdc_clk_rate = clk_get_rate(par->lcdc_clk); | 1399 | par->lcdc_clk_rate = clk_get_rate(par->lcdc_clk); |
1398 | if (fb_pdata->panel_power_ctrl) { | 1400 | |
1399 | par->panel_power_ctrl = fb_pdata->panel_power_ctrl; | 1401 | par->lcd_supply = devm_regulator_get_optional(&device->dev, "lcd"); |
1400 | par->panel_power_ctrl(1); | 1402 | if (IS_ERR(par->lcd_supply)) { |
1403 | if (PTR_ERR(par->lcd_supply) == -EPROBE_DEFER) { | ||
1404 | ret = -EPROBE_DEFER; | ||
1405 | goto err_pm_runtime_disable; | ||
1406 | } | ||
1407 | |||
1408 | par->lcd_supply = NULL; | ||
1409 | } else { | ||
1410 | ret = regulator_enable(par->lcd_supply); | ||
1411 | if (ret) | ||
1412 | goto err_pm_runtime_disable; | ||
1401 | } | 1413 | } |
1402 | 1414 | ||
1403 | fb_videomode_to_var(&da8xx_fb_var, lcdc_info); | 1415 | fb_videomode_to_var(&da8xx_fb_var, lcdc_info); |
@@ -1411,10 +1423,10 @@ static int fb_probe(struct platform_device *device) | |||
1411 | par->vram_size = roundup(par->vram_size/8, ulcm); | 1423 | par->vram_size = roundup(par->vram_size/8, ulcm); |
1412 | par->vram_size = par->vram_size * LCD_NUM_BUFFERS; | 1424 | par->vram_size = par->vram_size * LCD_NUM_BUFFERS; |
1413 | 1425 | ||
1414 | par->vram_virt = dma_alloc_coherent(par->dev, | 1426 | par->vram_virt = dmam_alloc_coherent(par->dev, |
1415 | par->vram_size, | 1427 | par->vram_size, |
1416 | &par->vram_phys, | 1428 | &par->vram_phys, |
1417 | GFP_KERNEL | GFP_DMA); | 1429 | GFP_KERNEL | GFP_DMA); |
1418 | if (!par->vram_virt) { | 1430 | if (!par->vram_virt) { |
1419 | dev_err(&device->dev, | 1431 | dev_err(&device->dev, |
1420 | "GLCD: kmalloc for frame buffer failed\n"); | 1432 | "GLCD: kmalloc for frame buffer failed\n"); |
@@ -1432,20 +1444,20 @@ static int fb_probe(struct platform_device *device) | |||
1432 | da8xx_fb_fix.line_length - 1; | 1444 | da8xx_fb_fix.line_length - 1; |
1433 | 1445 | ||
1434 | /* allocate palette buffer */ | 1446 | /* allocate palette buffer */ |
1435 | par->v_palette_base = dma_alloc_coherent(par->dev, PALETTE_SIZE, | 1447 | par->v_palette_base = dmam_alloc_coherent(par->dev, PALETTE_SIZE, |
1436 | &par->p_palette_base, | 1448 | &par->p_palette_base, |
1437 | GFP_KERNEL | GFP_DMA); | 1449 | GFP_KERNEL | GFP_DMA); |
1438 | if (!par->v_palette_base) { | 1450 | if (!par->v_palette_base) { |
1439 | dev_err(&device->dev, | 1451 | dev_err(&device->dev, |
1440 | "GLCD: kmalloc for palette buffer failed\n"); | 1452 | "GLCD: kmalloc for palette buffer failed\n"); |
1441 | ret = -EINVAL; | 1453 | ret = -EINVAL; |
1442 | goto err_release_fb_mem; | 1454 | goto err_release_fb; |
1443 | } | 1455 | } |
1444 | 1456 | ||
1445 | par->irq = platform_get_irq(device, 0); | 1457 | par->irq = platform_get_irq(device, 0); |
1446 | if (par->irq < 0) { | 1458 | if (par->irq < 0) { |
1447 | ret = -ENOENT; | 1459 | ret = -ENOENT; |
1448 | goto err_release_pl_mem; | 1460 | goto err_release_fb; |
1449 | } | 1461 | } |
1450 | 1462 | ||
1451 | da8xx_fb_var.grayscale = | 1463 | da8xx_fb_var.grayscale = |
@@ -1463,7 +1475,7 @@ static int fb_probe(struct platform_device *device) | |||
1463 | 1475 | ||
1464 | ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); | 1476 | ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); |
1465 | if (ret) | 1477 | if (ret) |
1466 | goto err_release_pl_mem; | 1478 | goto err_release_fb; |
1467 | da8xx_fb_info->cmap.len = par->palette_sz; | 1479 | da8xx_fb_info->cmap.len = par->palette_sz; |
1468 | 1480 | ||
1469 | /* initialize var_screeninfo */ | 1481 | /* initialize var_screeninfo */ |
@@ -1517,14 +1529,6 @@ err_cpu_freq: | |||
1517 | err_dealloc_cmap: | 1529 | err_dealloc_cmap: |
1518 | fb_dealloc_cmap(&da8xx_fb_info->cmap); | 1530 | fb_dealloc_cmap(&da8xx_fb_info->cmap); |
1519 | 1531 | ||
1520 | err_release_pl_mem: | ||
1521 | dma_free_coherent(par->dev, PALETTE_SIZE, par->v_palette_base, | ||
1522 | par->p_palette_base); | ||
1523 | |||
1524 | err_release_fb_mem: | ||
1525 | dma_free_coherent(par->dev, par->vram_size, par->vram_virt, | ||
1526 | par->vram_phys); | ||
1527 | |||
1528 | err_release_fb: | 1532 | err_release_fb: |
1529 | framebuffer_release(da8xx_fb_info); | 1533 | framebuffer_release(da8xx_fb_info); |
1530 | 1534 | ||
@@ -1603,10 +1607,14 @@ static int fb_suspend(struct device *dev) | |||
1603 | { | 1607 | { |
1604 | struct fb_info *info = dev_get_drvdata(dev); | 1608 | struct fb_info *info = dev_get_drvdata(dev); |
1605 | struct da8xx_fb_par *par = info->par; | 1609 | struct da8xx_fb_par *par = info->par; |
1610 | int ret; | ||
1606 | 1611 | ||
1607 | console_lock(); | 1612 | console_lock(); |
1608 | if (par->panel_power_ctrl) | 1613 | if (par->lcd_supply) { |
1609 | par->panel_power_ctrl(0); | 1614 | ret = regulator_disable(par->lcd_supply); |
1615 | if (ret) | ||
1616 | return ret; | ||
1617 | } | ||
1610 | 1618 | ||
1611 | fb_set_suspend(info, 1); | 1619 | fb_set_suspend(info, 1); |
1612 | lcd_disable_raster(DA8XX_FRAME_WAIT); | 1620 | lcd_disable_raster(DA8XX_FRAME_WAIT); |
@@ -1620,6 +1628,7 @@ static int fb_resume(struct device *dev) | |||
1620 | { | 1628 | { |
1621 | struct fb_info *info = dev_get_drvdata(dev); | 1629 | struct fb_info *info = dev_get_drvdata(dev); |
1622 | struct da8xx_fb_par *par = info->par; | 1630 | struct da8xx_fb_par *par = info->par; |
1631 | int ret; | ||
1623 | 1632 | ||
1624 | console_lock(); | 1633 | console_lock(); |
1625 | pm_runtime_get_sync(dev); | 1634 | pm_runtime_get_sync(dev); |
@@ -1627,8 +1636,11 @@ static int fb_resume(struct device *dev) | |||
1627 | if (par->blank == FB_BLANK_UNBLANK) { | 1636 | if (par->blank == FB_BLANK_UNBLANK) { |
1628 | lcd_enable_raster(); | 1637 | lcd_enable_raster(); |
1629 | 1638 | ||
1630 | if (par->panel_power_ctrl) | 1639 | if (par->lcd_supply) { |
1631 | par->panel_power_ctrl(1); | 1640 | ret = regulator_enable(par->lcd_supply); |
1641 | if (ret) | ||
1642 | return ret; | ||
1643 | } | ||
1632 | } | 1644 | } |
1633 | 1645 | ||
1634 | fb_set_suspend(info, 0); | 1646 | fb_set_suspend(info, 0); |
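The da8xx-fb.c hunks above replace the panel_power_ctrl() platform callback with an optional "lcd" regulator and move the VRAM and palette buffers to managed (dmam_alloc_coherent) allocations, which is why the explicit dma_free_coherent() calls disappear from fb_remove() and the error path. A condensed sketch of the optional-regulator probe pattern as used in the patch, with error handling reduced to plain returns for brevity:

par->lcd_supply = devm_regulator_get_optional(&device->dev, "lcd");
if (IS_ERR(par->lcd_supply)) {
        if (PTR_ERR(par->lcd_supply) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* supply is described but not ready yet */
        par->lcd_supply = NULL;         /* no supply described: run without power control */
} else {
        ret = regulator_enable(par->lcd_supply);        /* panel power on */
        if (ret)
                return ret;
}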
diff --git a/drivers/video/fbdev/nuc900fb.c b/drivers/video/fbdev/nuc900fb.c deleted file mode 100644 index 4fd851598584..000000000000 --- a/drivers/video/fbdev/nuc900fb.c +++ /dev/null | |||
@@ -1,760 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * | ||
4 | * Copyright (c) 2009 Nuvoton technology corporation | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * Description: | ||
8 | * Nuvoton LCD Controller Driver | ||
9 | * Author: | ||
10 | * Wang Qiang (rurality.linux@gmail.com) 2009/12/11 | ||
11 | */ | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/tty.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/fb.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/wait.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/clk.h> | ||
29 | #include <linux/cpufreq.h> | ||
30 | #include <linux/io.h> | ||
31 | #include <linux/pm.h> | ||
32 | #include <linux/device.h> | ||
33 | |||
34 | #include <mach/map.h> | ||
35 | #include <mach/regs-clock.h> | ||
36 | #include <mach/regs-ldm.h> | ||
37 | #include <linux/platform_data/video-nuc900fb.h> | ||
38 | |||
39 | #include "nuc900fb.h" | ||
40 | |||
41 | |||
42 | /* | ||
43 | * Initialize the nuc900 video (dual) buffer address | ||
44 | */ | ||
45 | static void nuc900fb_set_lcdaddr(struct fb_info *info) | ||
46 | { | ||
47 | struct nuc900fb_info *fbi = info->par; | ||
48 | void __iomem *regs = fbi->io; | ||
49 | unsigned long vbaddr1, vbaddr2; | ||
50 | |||
51 | vbaddr1 = info->fix.smem_start; | ||
52 | vbaddr2 = info->fix.smem_start; | ||
53 | vbaddr2 += info->fix.line_length * info->var.yres; | ||
54 | |||
55 | /* set framebuffer start phy addr */ | ||
56 | writel(vbaddr1, regs + REG_LCM_VA_BADDR0); | ||
57 | writel(vbaddr2, regs + REG_LCM_VA_BADDR1); | ||
58 | |||
59 | writel(fbi->regs.lcd_va_fbctrl, regs + REG_LCM_VA_FBCTRL); | ||
60 | writel(fbi->regs.lcd_va_scale, regs + REG_LCM_VA_SCALE); | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * calculate divider for lcd div | ||
65 | */ | ||
66 | static unsigned int nuc900fb_calc_pixclk(struct nuc900fb_info *fbi, | ||
67 | unsigned long pixclk) | ||
68 | { | ||
69 | unsigned long clk = fbi->clk_rate; | ||
70 | unsigned long long div; | ||
71 | |||
72 | /* pixclk is in picoseconds. our clock is in Hz */ | ||
73 | /* div = (clk * pixclk)/10^12 */ | ||
74 | div = (unsigned long long)clk * pixclk; | ||
75 | div >>= 12; | ||
76 | do_div(div, 625 * 625UL * 625); | ||
77 | |||
78 | dev_dbg(fbi->dev, "pixclk %ld, divisor is %lld\n", pixclk, div); | ||
79 | |||
80 | return div; | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Check the video params of 'var'. | ||
85 | */ | ||
86 | static int nuc900fb_check_var(struct fb_var_screeninfo *var, | ||
87 | struct fb_info *info) | ||
88 | { | ||
89 | struct nuc900fb_info *fbi = info->par; | ||
90 | struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev); | ||
91 | struct nuc900fb_display *display = NULL; | ||
92 | struct nuc900fb_display *default_display = mach_info->displays + | ||
93 | mach_info->default_display; | ||
94 | int i; | ||
95 | |||
96 | dev_dbg(fbi->dev, "check_var(var=%p, info=%p)\n", var, info); | ||
97 | |||
98 | /* validate x/y resolution */ | ||
99 | /* choose default mode if possible */ | ||
100 | if (var->xres == default_display->xres && | ||
101 | var->yres == default_display->yres && | ||
102 | var->bits_per_pixel == default_display->bpp) | ||
103 | display = default_display; | ||
104 | else | ||
105 | for (i = 0; i < mach_info->num_displays; i++) | ||
106 | if (var->xres == mach_info->displays[i].xres && | ||
107 | var->yres == mach_info->displays[i].yres && | ||
108 | var->bits_per_pixel == mach_info->displays[i].bpp) { | ||
109 | display = mach_info->displays + i; | ||
110 | break; | ||
111 | } | ||
112 | |||
113 | if (display == NULL) { | ||
114 | printk(KERN_ERR "wrong resolution or depth %dx%d at %d bit per pixel\n", | ||
115 | var->xres, var->yres, var->bits_per_pixel); | ||
116 | return -EINVAL; | ||
117 | } | ||
118 | |||
119 | /* it should be the same size as the display */ | ||
120 | var->xres_virtual = display->xres; | ||
121 | var->yres_virtual = display->yres; | ||
122 | var->height = display->height; | ||
123 | var->width = display->width; | ||
124 | |||
125 | /* copy lcd settings */ | ||
126 | var->pixclock = display->pixclock; | ||
127 | var->left_margin = display->left_margin; | ||
128 | var->right_margin = display->right_margin; | ||
129 | var->upper_margin = display->upper_margin; | ||
130 | var->lower_margin = display->lower_margin; | ||
131 | var->vsync_len = display->vsync_len; | ||
132 | var->hsync_len = display->hsync_len; | ||
133 | |||
134 | var->transp.offset = 0; | ||
135 | var->transp.length = 0; | ||
136 | |||
137 | fbi->regs.lcd_dccs = display->dccs; | ||
138 | fbi->regs.lcd_device_ctrl = display->devctl; | ||
139 | fbi->regs.lcd_va_fbctrl = display->fbctrl; | ||
140 | fbi->regs.lcd_va_scale = display->scale; | ||
141 | |||
142 | /* set R/G/B positions */ | ||
143 | switch (var->bits_per_pixel) { | ||
144 | case 1: | ||
145 | case 2: | ||
146 | case 4: | ||
147 | case 8: | ||
148 | default: | ||
149 | var->red.offset = 0; | ||
150 | var->red.length = var->bits_per_pixel; | ||
151 | var->green = var->red; | ||
152 | var->blue = var->red; | ||
153 | break; | ||
154 | case 12: | ||
155 | var->red.length = 4; | ||
156 | var->green.length = 4; | ||
157 | var->blue.length = 4; | ||
158 | var->red.offset = 8; | ||
159 | var->green.offset = 4; | ||
160 | var->blue.offset = 0; | ||
161 | break; | ||
162 | case 16: | ||
163 | var->red.length = 5; | ||
164 | var->green.length = 6; | ||
165 | var->blue.length = 5; | ||
166 | var->red.offset = 11; | ||
167 | var->green.offset = 5; | ||
168 | var->blue.offset = 0; | ||
169 | break; | ||
170 | case 18: | ||
171 | var->red.length = 6; | ||
172 | var->green.length = 6; | ||
173 | var->blue.length = 6; | ||
174 | var->red.offset = 12; | ||
175 | var->green.offset = 6; | ||
176 | var->blue.offset = 0; | ||
177 | break; | ||
178 | case 32: | ||
179 | var->red.length = 8; | ||
180 | var->green.length = 8; | ||
181 | var->blue.length = 8; | ||
182 | var->red.offset = 16; | ||
183 | var->green.offset = 8; | ||
184 | var->blue.offset = 0; | ||
185 | break; | ||
186 | } | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Calculate lcd register values from var setting & save into hw | ||
193 | */ | ||
194 | static void nuc900fb_calculate_lcd_regs(const struct fb_info *info, | ||
195 | struct nuc900fb_hw *regs) | ||
196 | { | ||
197 | const struct fb_var_screeninfo *var = &info->var; | ||
198 | int vtt = var->height + var->upper_margin + var->lower_margin; | ||
199 | int htt = var->width + var->left_margin + var->right_margin; | ||
200 | int hsync = var->width + var->right_margin; | ||
201 | int vsync = var->height + var->lower_margin; | ||
202 | |||
203 | regs->lcd_crtc_size = LCM_CRTC_SIZE_VTTVAL(vtt) | | ||
204 | LCM_CRTC_SIZE_HTTVAL(htt); | ||
205 | regs->lcd_crtc_dend = LCM_CRTC_DEND_VDENDVAL(var->height) | | ||
206 | LCM_CRTC_DEND_HDENDVAL(var->width); | ||
207 | regs->lcd_crtc_hr = LCM_CRTC_HR_EVAL(var->width + 5) | | ||
208 | LCM_CRTC_HR_SVAL(var->width + 1); | ||
209 | regs->lcd_crtc_hsync = LCM_CRTC_HSYNC_EVAL(hsync + var->hsync_len) | | ||
210 | LCM_CRTC_HSYNC_SVAL(hsync); | ||
211 | regs->lcd_crtc_vr = LCM_CRTC_VR_EVAL(vsync + var->vsync_len) | | ||
212 | LCM_CRTC_VR_SVAL(vsync); | ||
213 | |||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Activate (set) the controller from the given framebuffer | ||
218 | * information | ||
219 | */ | ||
220 | static void nuc900fb_activate_var(struct fb_info *info) | ||
221 | { | ||
222 | struct nuc900fb_info *fbi = info->par; | ||
223 | void __iomem *regs = fbi->io; | ||
224 | struct fb_var_screeninfo *var = &info->var; | ||
225 | int clkdiv; | ||
226 | |||
227 | clkdiv = nuc900fb_calc_pixclk(fbi, var->pixclock) - 1; | ||
228 | if (clkdiv < 0) | ||
229 | clkdiv = 0; | ||
230 | |||
231 | nuc900fb_calculate_lcd_regs(info, &fbi->regs); | ||
232 | |||
233 | /* set the new lcd registers*/ | ||
234 | |||
235 | dev_dbg(fbi->dev, "new lcd register set:\n"); | ||
236 | dev_dbg(fbi->dev, "dccs = 0x%08x\n", fbi->regs.lcd_dccs); | ||
237 | dev_dbg(fbi->dev, "dev_ctl = 0x%08x\n", fbi->regs.lcd_device_ctrl); | ||
238 | dev_dbg(fbi->dev, "crtc_size = 0x%08x\n", fbi->regs.lcd_crtc_size); | ||
239 | dev_dbg(fbi->dev, "crtc_dend = 0x%08x\n", fbi->regs.lcd_crtc_dend); | ||
240 | dev_dbg(fbi->dev, "crtc_hr = 0x%08x\n", fbi->regs.lcd_crtc_hr); | ||
241 | dev_dbg(fbi->dev, "crtc_hsync = 0x%08x\n", fbi->regs.lcd_crtc_hsync); | ||
242 | dev_dbg(fbi->dev, "crtc_vr = 0x%08x\n", fbi->regs.lcd_crtc_vr); | ||
243 | |||
244 | writel(fbi->regs.lcd_device_ctrl, regs + REG_LCM_DEV_CTRL); | ||
245 | writel(fbi->regs.lcd_crtc_size, regs + REG_LCM_CRTC_SIZE); | ||
246 | writel(fbi->regs.lcd_crtc_dend, regs + REG_LCM_CRTC_DEND); | ||
247 | writel(fbi->regs.lcd_crtc_hr, regs + REG_LCM_CRTC_HR); | ||
248 | writel(fbi->regs.lcd_crtc_hsync, regs + REG_LCM_CRTC_HSYNC); | ||
249 | writel(fbi->regs.lcd_crtc_vr, regs + REG_LCM_CRTC_VR); | ||
250 | |||
251 | /* set lcd address pointers */ | ||
252 | nuc900fb_set_lcdaddr(info); | ||
253 | |||
254 | writel(fbi->regs.lcd_dccs, regs + REG_LCM_DCCS); | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * Alters the hardware state. | ||
259 | * | ||
260 | */ | ||
261 | static int nuc900fb_set_par(struct fb_info *info) | ||
262 | { | ||
263 | struct fb_var_screeninfo *var = &info->var; | ||
264 | |||
265 | switch (var->bits_per_pixel) { | ||
266 | case 32: | ||
267 | case 24: | ||
268 | case 18: | ||
269 | case 16: | ||
270 | case 12: | ||
271 | info->fix.visual = FB_VISUAL_TRUECOLOR; | ||
272 | break; | ||
273 | case 1: | ||
274 | info->fix.visual = FB_VISUAL_MONO01; | ||
275 | break; | ||
276 | default: | ||
277 | info->fix.visual = FB_VISUAL_PSEUDOCOLOR; | ||
278 | break; | ||
279 | } | ||
280 | |||
281 | info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8; | ||
282 | |||
283 | /* activate this new configuration */ | ||
284 | nuc900fb_activate_var(info); | ||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | static inline unsigned int chan_to_field(unsigned int chan, | ||
289 | struct fb_bitfield *bf) | ||
290 | { | ||
291 | chan &= 0xffff; | ||
292 | chan >>= 16 - bf->length; | ||
293 | return chan << bf->offset; | ||
294 | } | ||
295 | |||
296 | static int nuc900fb_setcolreg(unsigned regno, | ||
297 | unsigned red, unsigned green, unsigned blue, | ||
298 | unsigned transp, struct fb_info *info) | ||
299 | { | ||
300 | unsigned int val; | ||
301 | |||
302 | switch (info->fix.visual) { | ||
303 | case FB_VISUAL_TRUECOLOR: | ||
304 | /* true-colour, use pseudo-palette */ | ||
305 | if (regno < 16) { | ||
306 | u32 *pal = info->pseudo_palette; | ||
307 | |||
308 | val = chan_to_field(red, &info->var.red); | ||
309 | val |= chan_to_field(green, &info->var.green); | ||
310 | val |= chan_to_field(blue, &info->var.blue); | ||
311 | pal[regno] = val; | ||
312 | } | ||
313 | break; | ||
314 | |||
315 | default: | ||
316 | return 1; /* unknown type */ | ||
317 | } | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * nuc900fb_blank | ||
323 | * | ||
324 | */ | ||
325 | static int nuc900fb_blank(int blank_mode, struct fb_info *info) | ||
326 | { | ||
327 | |||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | static struct fb_ops nuc900fb_ops = { | ||
332 | .owner = THIS_MODULE, | ||
333 | .fb_check_var = nuc900fb_check_var, | ||
334 | .fb_set_par = nuc900fb_set_par, | ||
335 | .fb_blank = nuc900fb_blank, | ||
336 | .fb_setcolreg = nuc900fb_setcolreg, | ||
337 | .fb_fillrect = cfb_fillrect, | ||
338 | .fb_copyarea = cfb_copyarea, | ||
339 | .fb_imageblit = cfb_imageblit, | ||
340 | }; | ||
341 | |||
342 | |||
343 | static inline void modify_gpio(void __iomem *reg, | ||
344 | unsigned long set, unsigned long mask) | ||
345 | { | ||
346 | unsigned long tmp; | ||
347 | tmp = readl(reg) & ~mask; | ||
348 | writel(tmp | set, reg); | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Initialise LCD-related registers | ||
353 | */ | ||
354 | static int nuc900fb_init_registers(struct fb_info *info) | ||
355 | { | ||
356 | struct nuc900fb_info *fbi = info->par; | ||
357 | struct nuc900fb_mach_info *mach_info = dev_get_platdata(fbi->dev); | ||
358 | void __iomem *regs = fbi->io; | ||
359 | |||
360 | /*reset the display engine*/ | ||
361 | writel(0, regs + REG_LCM_DCCS); | ||
362 | writel(readl(regs + REG_LCM_DCCS) | LCM_DCCS_ENG_RST, | ||
363 | regs + REG_LCM_DCCS); | ||
364 | ndelay(100); | ||
365 | writel(readl(regs + REG_LCM_DCCS) & (~LCM_DCCS_ENG_RST), | ||
366 | regs + REG_LCM_DCCS); | ||
367 | ndelay(100); | ||
368 | |||
369 | writel(0, regs + REG_LCM_DEV_CTRL); | ||
370 | |||
371 | /* config gpio output */ | ||
372 | modify_gpio(W90X900_VA_GPIO + 0x54, mach_info->gpio_dir, | ||
373 | mach_info->gpio_dir_mask); | ||
374 | modify_gpio(W90X900_VA_GPIO + 0x58, mach_info->gpio_data, | ||
375 | mach_info->gpio_data_mask); | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | |||
381 | /* | ||
382 | * Alloc the SDRAM region of NUC900 for the frame buffer. | ||
383 | * The buffer should be a non-cached, non-buffered, memory region | ||
384 | * to allow palette and pixel writes without flushing the cache. | ||
385 | */ | ||
386 | static int nuc900fb_map_video_memory(struct fb_info *info) | ||
387 | { | ||
388 | struct nuc900fb_info *fbi = info->par; | ||
389 | dma_addr_t map_dma; | ||
390 | unsigned long map_size = PAGE_ALIGN(info->fix.smem_len); | ||
391 | |||
392 | dev_dbg(fbi->dev, "nuc900fb_map_video_memory(fbi=%p) map_size %lu\n", | ||
393 | fbi, map_size); | ||
394 | |||
395 | info->screen_base = dma_alloc_wc(fbi->dev, map_size, &map_dma, | ||
396 | GFP_KERNEL); | ||
397 | |||
398 | if (!info->screen_base) | ||
399 | return -ENOMEM; | ||
400 | |||
401 | memset(info->screen_base, 0x00, map_size); | ||
402 | info->fix.smem_start = map_dma; | ||
403 | |||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | static inline void nuc900fb_unmap_video_memory(struct fb_info *info) | ||
408 | { | ||
409 | struct nuc900fb_info *fbi = info->par; | ||
410 | dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len), | ||
411 | info->screen_base, info->fix.smem_start); | ||
412 | } | ||
413 | |||
414 | static irqreturn_t nuc900fb_irqhandler(int irq, void *dev_id) | ||
415 | { | ||
416 | struct nuc900fb_info *fbi = dev_id; | ||
417 | void __iomem *regs = fbi->io; | ||
418 | void __iomem *irq_base = fbi->irq_base; | ||
419 | unsigned long lcdirq = readl(regs + REG_LCM_INT_CS); | ||
420 | |||
421 | if (lcdirq & LCM_INT_CS_DISP_F_STATUS) { | ||
422 | writel(readl(irq_base) | 1<<30, irq_base); | ||
423 | |||
424 | /* wait VA_EN low */ | ||
425 | if ((readl(regs + REG_LCM_DCCS) & | ||
426 | LCM_DCCS_SINGLE) == LCM_DCCS_SINGLE) | ||
427 | while ((readl(regs + REG_LCM_DCCS) & | ||
428 | LCM_DCCS_VA_EN) == LCM_DCCS_VA_EN) | ||
429 | ; | ||
430 | /* display_out-enable */ | ||
431 | writel(readl(regs + REG_LCM_DCCS) | LCM_DCCS_DISP_OUT_EN, | ||
432 | regs + REG_LCM_DCCS); | ||
433 | /* va-enable*/ | ||
434 | writel(readl(regs + REG_LCM_DCCS) | LCM_DCCS_VA_EN, | ||
435 | regs + REG_LCM_DCCS); | ||
436 | } else if (lcdirq & LCM_INT_CS_UNDERRUN_INT) { | ||
437 | writel(readl(irq_base) | LCM_INT_CS_UNDERRUN_INT, irq_base); | ||
438 | } else if (lcdirq & LCM_INT_CS_BUS_ERROR_INT) { | ||
439 | writel(readl(irq_base) | LCM_INT_CS_BUS_ERROR_INT, irq_base); | ||
440 | } | ||
441 | |||
442 | return IRQ_HANDLED; | ||
443 | } | ||
444 | |||
445 | #ifdef CONFIG_CPU_FREQ | ||
446 | |||
447 | static int nuc900fb_cpufreq_transition(struct notifier_block *nb, | ||
448 | unsigned long val, void *data) | ||
449 | { | ||
450 | struct nuc900fb_info *info; | ||
451 | struct fb_info *fbinfo; | ||
452 | long delta_f; | ||
453 | info = container_of(nb, struct nuc900fb_info, freq_transition); | ||
454 | fbinfo = dev_get_drvdata(info->dev); | ||
455 | |||
456 | delta_f = info->clk_rate - clk_get_rate(info->clk); | ||
457 | |||
458 | if ((val == CPUFREQ_POSTCHANGE && delta_f > 0) || | ||
459 | (val == CPUFREQ_PRECHANGE && delta_f < 0)) { | ||
460 | info->clk_rate = clk_get_rate(info->clk); | ||
461 | nuc900fb_activate_var(fbinfo); | ||
462 | } | ||
463 | |||
464 | return 0; | ||
465 | } | ||
466 | |||
467 | static inline int nuc900fb_cpufreq_register(struct nuc900fb_info *fbi) | ||
468 | { | ||
469 | fbi->freq_transition.notifier_call = nuc900fb_cpufreq_transition; | ||
470 | return cpufreq_register_notifier(&fbi->freq_transition, | ||
471 | CPUFREQ_TRANSITION_NOTIFIER); | ||
472 | } | ||
473 | |||
474 | static inline void nuc900fb_cpufreq_deregister(struct nuc900fb_info *fbi) | ||
475 | { | ||
476 | cpufreq_unregister_notifier(&fbi->freq_transition, | ||
477 | CPUFREQ_TRANSITION_NOTIFIER); | ||
478 | } | ||
479 | #else | ||
480 | static inline int nuc900fb_cpufreq_transition(struct notifier_block *nb, | ||
481 | unsigned long val, void *data) | ||
482 | { | ||
483 | return 0; | ||
484 | } | ||
485 | |||
486 | static inline int nuc900fb_cpufreq_register(struct nuc900fb_info *fbi) | ||
487 | { | ||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | static inline void nuc900fb_cpufreq_deregister(struct nuc900fb_info *info) | ||
492 | { | ||
493 | } | ||
494 | #endif | ||
495 | |||
496 | static char driver_name[] = "nuc900fb"; | ||
497 | |||
498 | static int nuc900fb_probe(struct platform_device *pdev) | ||
499 | { | ||
500 | struct nuc900fb_info *fbi; | ||
501 | struct nuc900fb_display *display; | ||
502 | struct fb_info *fbinfo; | ||
503 | struct nuc900fb_mach_info *mach_info; | ||
504 | struct resource *res; | ||
505 | int ret; | ||
506 | int irq; | ||
507 | int i; | ||
508 | int size; | ||
509 | |||
510 | dev_dbg(&pdev->dev, "devinit\n"); | ||
511 | mach_info = dev_get_platdata(&pdev->dev); | ||
512 | if (mach_info == NULL) { | ||
513 | dev_err(&pdev->dev, | ||
514 | "no platform data for lcd, cannot attach\n"); | ||
515 | return -EINVAL; | ||
516 | } | ||
517 | |||
518 | if (mach_info->default_display > mach_info->num_displays) { | ||
519 | dev_err(&pdev->dev, | ||
520 | "default display No. is %d but only %d displays \n", | ||
521 | mach_info->default_display, mach_info->num_displays); | ||
522 | return -EINVAL; | ||
523 | } | ||
524 | |||
525 | |||
526 | display = mach_info->displays + mach_info->default_display; | ||
527 | |||
528 | irq = platform_get_irq(pdev, 0); | ||
529 | if (irq < 0) { | ||
530 | dev_err(&pdev->dev, "no irq for device\n"); | ||
531 | return -ENOENT; | ||
532 | } | ||
533 | |||
534 | fbinfo = framebuffer_alloc(sizeof(struct nuc900fb_info), &pdev->dev); | ||
535 | if (!fbinfo) | ||
536 | return -ENOMEM; | ||
537 | |||
538 | platform_set_drvdata(pdev, fbinfo); | ||
539 | |||
540 | fbi = fbinfo->par; | ||
541 | fbi->dev = &pdev->dev; | ||
542 | |||
543 | #ifdef CONFIG_CPU_NUC950 | ||
544 | fbi->drv_type = LCDDRV_NUC950; | ||
545 | #endif | ||
546 | |||
547 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
548 | |||
549 | size = resource_size(res); | ||
550 | fbi->mem = request_mem_region(res->start, size, pdev->name); | ||
551 | if (fbi->mem == NULL) { | ||
552 | dev_err(&pdev->dev, "failed to alloc memory region\n"); | ||
553 | ret = -ENOENT; | ||
554 | goto free_fb; | ||
555 | } | ||
556 | |||
557 | fbi->io = ioremap(res->start, size); | ||
558 | if (fbi->io == NULL) { | ||
559 | dev_err(&pdev->dev, "ioremap() of lcd registers failed\n"); | ||
560 | ret = -ENXIO; | ||
561 | goto release_mem_region; | ||
562 | } | ||
563 | |||
564 | fbi->irq_base = fbi->io + REG_LCM_INT_CS; | ||
565 | |||
566 | |||
567 | /* Stop the LCD */ | ||
568 | writel(0, fbi->io + REG_LCM_DCCS); | ||
569 | |||
570 | /* fill the fbinfo*/ | ||
571 | strcpy(fbinfo->fix.id, driver_name); | ||
572 | fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; | ||
573 | fbinfo->fix.type_aux = 0; | ||
574 | fbinfo->fix.xpanstep = 0; | ||
575 | fbinfo->fix.ypanstep = 0; | ||
576 | fbinfo->fix.ywrapstep = 0; | ||
577 | fbinfo->fix.accel = FB_ACCEL_NONE; | ||
578 | fbinfo->var.nonstd = 0; | ||
579 | fbinfo->var.activate = FB_ACTIVATE_NOW; | ||
580 | fbinfo->var.accel_flags = 0; | ||
581 | fbinfo->var.vmode = FB_VMODE_NONINTERLACED; | ||
582 | fbinfo->fbops = &nuc900fb_ops; | ||
583 | fbinfo->flags = FBINFO_FLAG_DEFAULT; | ||
584 | fbinfo->pseudo_palette = &fbi->pseudo_pal; | ||
585 | |||
586 | ret = request_irq(irq, nuc900fb_irqhandler, 0, pdev->name, fbi); | ||
587 | if (ret) { | ||
588 | dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n", | ||
589 | irq, ret); | ||
590 | ret = -EBUSY; | ||
591 | goto release_regs; | ||
592 | } | ||
593 | |||
594 | fbi->clk = clk_get(&pdev->dev, NULL); | ||
595 | if (IS_ERR(fbi->clk)) { | ||
596 | printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n"); | ||
597 | ret = PTR_ERR(fbi->clk); | ||
598 | goto release_irq; | ||
599 | } | ||
600 | |||
601 | clk_enable(fbi->clk); | ||
602 | dev_dbg(&pdev->dev, "got and enabled clock\n"); | ||
603 | |||
604 | fbi->clk_rate = clk_get_rate(fbi->clk); | ||
605 | |||
606 | /* calculate the video buffer size */ | ||
607 | for (i = 0; i < mach_info->num_displays; i++) { | ||
608 | unsigned long smem_len = mach_info->displays[i].xres; | ||
609 | smem_len *= mach_info->displays[i].yres; | ||
610 | smem_len *= mach_info->displays[i].bpp; | ||
611 | smem_len >>= 3; | ||
612 | if (fbinfo->fix.smem_len < smem_len) | ||
613 | fbinfo->fix.smem_len = smem_len; | ||
614 | } | ||
615 | |||
616 | /* Initialize Video Memory */ | ||
617 | ret = nuc900fb_map_video_memory(fbinfo); | ||
618 | if (ret) { | ||
619 | printk(KERN_ERR "Failed to allocate video RAM: %x\n", ret); | ||
620 | goto release_clock; | ||
621 | } | ||
622 | |||
623 | dev_dbg(&pdev->dev, "got video memory\n"); | ||
624 | |||
625 | fbinfo->var.xres = display->xres; | ||
626 | fbinfo->var.yres = display->yres; | ||
627 | fbinfo->var.bits_per_pixel = display->bpp; | ||
628 | |||
629 | nuc900fb_init_registers(fbinfo); | ||
630 | |||
631 | nuc900fb_check_var(&fbinfo->var, fbinfo); | ||
632 | |||
633 | ret = nuc900fb_cpufreq_register(fbi); | ||
634 | if (ret < 0) { | ||
635 | dev_err(&pdev->dev, "Failed to register cpufreq\n"); | ||
636 | goto free_video_memory; | ||
637 | } | ||
638 | |||
639 | ret = register_framebuffer(fbinfo); | ||
640 | if (ret) { | ||
641 | printk(KERN_ERR "failed to register framebuffer device: %d\n", | ||
642 | ret); | ||
643 | goto free_cpufreq; | ||
644 | } | ||
645 | |||
646 | fb_info(fbinfo, "%s frame buffer device\n", fbinfo->fix.id); | ||
647 | |||
648 | return 0; | ||
649 | |||
650 | free_cpufreq: | ||
651 | nuc900fb_cpufreq_deregister(fbi); | ||
652 | free_video_memory: | ||
653 | nuc900fb_unmap_video_memory(fbinfo); | ||
654 | release_clock: | ||
655 | clk_disable(fbi->clk); | ||
656 | clk_put(fbi->clk); | ||
657 | release_irq: | ||
658 | free_irq(irq, fbi); | ||
659 | release_regs: | ||
660 | iounmap(fbi->io); | ||
661 | release_mem_region: | ||
662 | release_mem_region(res->start, size); | ||
663 | free_fb: | ||
664 | framebuffer_release(fbinfo); | ||
665 | return ret; | ||
666 | } | ||
667 | |||
668 | /* | ||
669 | * shutdown the lcd controller | ||
670 | */ | ||
671 | static void nuc900fb_stop_lcd(struct fb_info *info) | ||
672 | { | ||
673 | struct nuc900fb_info *fbi = info->par; | ||
674 | void __iomem *regs = fbi->io; | ||
675 | |||
676 | writel((~LCM_DCCS_DISP_INT_EN) | (~LCM_DCCS_VA_EN) | (~LCM_DCCS_OSD_EN), | ||
677 | regs + REG_LCM_DCCS); | ||
678 | } | ||
679 | |||
680 | /* | ||
681 | * Cleanup | ||
682 | */ | ||
683 | static int nuc900fb_remove(struct platform_device *pdev) | ||
684 | { | ||
685 | struct fb_info *fbinfo = platform_get_drvdata(pdev); | ||
686 | struct nuc900fb_info *fbi = fbinfo->par; | ||
687 | int irq; | ||
688 | |||
689 | nuc900fb_stop_lcd(fbinfo); | ||
690 | msleep(1); | ||
691 | |||
692 | unregister_framebuffer(fbinfo); | ||
693 | nuc900fb_cpufreq_deregister(fbi); | ||
694 | nuc900fb_unmap_video_memory(fbinfo); | ||
695 | |||
696 | iounmap(fbi->io); | ||
697 | |||
698 | irq = platform_get_irq(pdev, 0); | ||
699 | free_irq(irq, fbi); | ||
700 | |||
701 | release_resource(fbi->mem); | ||
702 | kfree(fbi->mem); | ||
703 | |||
704 | framebuffer_release(fbinfo); | ||
705 | |||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | #ifdef CONFIG_PM | ||
710 | |||
711 | /* | ||
712 | * suspend and resume support for the lcd controller | ||
713 | */ | ||
714 | |||
715 | static int nuc900fb_suspend(struct platform_device *dev, pm_message_t state) | ||
716 | { | ||
717 | struct fb_info *fbinfo = platform_get_drvdata(dev); | ||
718 | struct nuc900fb_info *info = fbinfo->par; | ||
719 | |||
720 | nuc900fb_stop_lcd(fbinfo); | ||
721 | msleep(1); | ||
722 | clk_disable(info->clk); | ||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | static int nuc900fb_resume(struct platform_device *dev) | ||
727 | { | ||
728 | struct fb_info *fbinfo = platform_get_drvdata(dev); | ||
729 | struct nuc900fb_info *fbi = fbinfo->par; | ||
730 | |||
731 | printk(KERN_INFO "nuc900fb resume\n"); | ||
732 | |||
733 | clk_enable(fbi->clk); | ||
734 | msleep(1); | ||
735 | |||
736 | nuc900fb_init_registers(fbinfo); | ||
737 | nuc900fb_activate_var(fbinfo); | ||
738 | |||
739 | return 0; | ||
740 | } | ||
741 | |||
742 | #else | ||
743 | #define nuc900fb_suspend NULL | ||
744 | #define nuc900fb_resume NULL | ||
745 | #endif | ||
746 | |||
747 | static struct platform_driver nuc900fb_driver = { | ||
748 | .probe = nuc900fb_probe, | ||
749 | .remove = nuc900fb_remove, | ||
750 | .suspend = nuc900fb_suspend, | ||
751 | .resume = nuc900fb_resume, | ||
752 | .driver = { | ||
753 | .name = "nuc900-lcd", | ||
754 | }, | ||
755 | }; | ||
756 | |||
757 | module_platform_driver(nuc900fb_driver); | ||
758 | |||
759 | MODULE_DESCRIPTION("Framebuffer driver for the NUC900"); | ||
760 | MODULE_LICENSE("GPL"); | ||
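For reference, the divider computed by nuc900fb_calc_pixclk() in the deleted driver above is div = clk * pixclk / 10^12, with pixclk in picoseconds and clk in Hz; the code performs the 10^12 division as a shift by 12 (2^12 = 4096) followed by do_div() with 625^3 = 244,140,625, since 4096 * 244,140,625 = 10^12. As a worked example with assumed values: clk = 96,000,000 Hz and pixclock = 111,000 ps give div = 10,656,000,000,000 / 10^12 = 10 after truncation, so nuc900fb_activate_var() would have programmed a clock divider of 10 - 1 = 9.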
diff --git a/drivers/video/fbdev/nuc900fb.h b/drivers/video/fbdev/nuc900fb.h deleted file mode 100644 index 055ae9297931..000000000000 --- a/drivers/video/fbdev/nuc900fb.h +++ /dev/null | |||
@@ -1,51 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
2 | /* | ||
3 | * | ||
4 | * Copyright (c) 2009 Nuvoton technology corporation | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * Author: | ||
8 | * Wang Qiang(rurality.linux@gmail.com) 2009/12/16 | ||
9 | */ | ||
10 | |||
11 | #ifndef __NUC900FB_H | ||
12 | #define __NUC900FB_H | ||
13 | |||
14 | #include <mach/map.h> | ||
15 | #include <linux/platform_data/video-nuc900fb.h> | ||
16 | |||
17 | enum nuc900_lcddrv_type { | ||
18 | LCDDRV_NUC910, | ||
19 | LCDDRV_NUC930, | ||
20 | LCDDRV_NUC932, | ||
21 | LCDDRV_NUC950, | ||
22 | LCDDRV_NUC960, | ||
23 | }; | ||
24 | |||
25 | |||
26 | #define PALETTE_BUFFER_SIZE 256 | ||
27 | #define PALETTE_BUFF_CLEAR (0x80000000) /* entry is clear/invalid */ | ||
28 | |||
29 | struct nuc900fb_info { | ||
30 | struct device *dev; | ||
31 | struct clk *clk; | ||
32 | |||
33 | struct resource *mem; | ||
34 | void __iomem *io; | ||
35 | void __iomem *irq_base; | ||
36 | int drv_type; | ||
37 | struct nuc900fb_hw regs; | ||
38 | unsigned long clk_rate; | ||
39 | |||
40 | #ifdef CONFIG_CPU_FREQ | ||
41 | struct notifier_block freq_transition; | ||
42 | #endif | ||
43 | |||
44 | /* keep these registers in case we need to re-write palette */ | ||
45 | u32 palette_buffer[PALETTE_BUFFER_SIZE]; | ||
46 | u32 pseudo_pal[16]; | ||
47 | }; | ||
48 | |||
49 | int nuc900fb_init(void); | ||
50 | |||
51 | #endif /* __NUC900FB_H */ | ||
diff --git a/include/Kbuild b/include/Kbuild index c38f0d46b267..4d5a03a81fb5 100644 --- a/include/Kbuild +++ b/include/Kbuild | |||
@@ -550,7 +550,6 @@ header-test- += linux/platform_data/sky81452-backlight.h | |||
550 | header-test- += linux/platform_data/spi-davinci.h | 550 | header-test- += linux/platform_data/spi-davinci.h |
551 | header-test- += linux/platform_data/spi-ep93xx.h | 551 | header-test- += linux/platform_data/spi-ep93xx.h |
552 | header-test- += linux/platform_data/spi-mt65xx.h | 552 | header-test- += linux/platform_data/spi-mt65xx.h |
553 | header-test- += linux/platform_data/spi-nuc900.h | ||
554 | header-test- += linux/platform_data/st_sensors_pdata.h | 553 | header-test- += linux/platform_data/st_sensors_pdata.h |
555 | header-test- += linux/platform_data/ti-sysc.h | 554 | header-test- += linux/platform_data/ti-sysc.h |
556 | header-test- += linux/platform_data/timer-ixp4xx.h | 555 | header-test- += linux/platform_data/timer-ixp4xx.h |
@@ -569,7 +568,6 @@ header-test- += linux/platform_data/usb3503.h | |||
569 | header-test- += linux/platform_data/ux500_wdt.h | 568 | header-test- += linux/platform_data/ux500_wdt.h |
570 | header-test- += linux/platform_data/video-clcd-versatile.h | 569 | header-test- += linux/platform_data/video-clcd-versatile.h |
571 | header-test- += linux/platform_data/video-imxfb.h | 570 | header-test- += linux/platform_data/video-imxfb.h |
572 | header-test- += linux/platform_data/video-nuc900fb.h | ||
573 | header-test- += linux/platform_data/video-pxafb.h | 571 | header-test- += linux/platform_data/video-pxafb.h |
574 | header-test- += linux/platform_data/video_s3c.h | 572 | header-test- += linux/platform_data/video_s3c.h |
575 | header-test- += linux/platform_data/voltage-omap.h | 573 | header-test- += linux/platform_data/voltage-omap.h |
diff --git a/include/dt-bindings/bus/moxtet.h b/include/dt-bindings/bus/moxtet.h new file mode 100644 index 000000000000..dc9345440ebe --- /dev/null +++ b/include/dt-bindings/bus/moxtet.h | |||
@@ -0,0 +1,16 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Constant for device tree bindings for Turris Mox module configuration bus | ||
4 | * | ||
5 | * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> | ||
6 | */ | ||
7 | |||
8 | #ifndef _DT_BINDINGS_BUS_MOXTET_H | ||
9 | #define _DT_BINDINGS_BUS_MOXTET_H | ||
10 | |||
11 | #define MOXTET_IRQ_PCI 0 | ||
12 | #define MOXTET_IRQ_USB3 4 | ||
13 | #define MOXTET_IRQ_PERIDOT(n) (8 + (n)) | ||
14 | #define MOXTET_IRQ_TOPAZ 12 | ||
15 | |||
16 | #endif /* _DT_BINDINGS_BUS_MOXTET_H */ | ||
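For illustration, MOXTET_IRQ_PERIDOT(n) simply offsets into the shift-register interrupt space: MOXTET_IRQ_PERIDOT(2) expands to 8 + 2 = 10, i.e. the interrupt number for the Peridot module in slot 2 (slot numbering assumed to start at 0).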
diff --git a/include/dt-bindings/power/meson-g12a-power.h b/include/dt-bindings/power/meson-g12a-power.h new file mode 100644 index 000000000000..bb5e67a842de --- /dev/null +++ b/include/dt-bindings/power/meson-g12a-power.h | |||
@@ -0,0 +1,13 @@ | |||
1 | /* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ | ||
2 | /* | ||
3 | * Copyright (c) 2019 BayLibre, SAS | ||
4 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef _DT_BINDINGS_MESON_G12A_POWER_H | ||
8 | #define _DT_BINDINGS_MESON_G12A_POWER_H | ||
9 | |||
10 | #define PWRC_G12A_VPU_ID 0 | ||
11 | #define PWRC_G12A_ETH_ID 1 | ||
12 | |||
13 | #endif | ||
diff --git a/include/dt-bindings/power/meson-sm1-power.h b/include/dt-bindings/power/meson-sm1-power.h new file mode 100644 index 000000000000..a020ab00c134 --- /dev/null +++ b/include/dt-bindings/power/meson-sm1-power.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ | ||
2 | /* | ||
3 | * Copyright (c) 2019 BayLibre, SAS | ||
4 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef _DT_BINDINGS_MESON_SM1_POWER_H | ||
8 | #define _DT_BINDINGS_MESON_SM1_POWER_H | ||
9 | |||
10 | #define PWRC_SM1_VPU_ID 0 | ||
11 | #define PWRC_SM1_NNA_ID 1 | ||
12 | #define PWRC_SM1_USB_ID 2 | ||
13 | #define PWRC_SM1_PCIE_ID 3 | ||
14 | #define PWRC_SM1_GE2D_ID 4 | ||
15 | #define PWRC_SM1_AUDIO_ID 5 | ||
16 | #define PWRC_SM1_ETH_ID 6 | ||
17 | |||
18 | #endif | ||
diff --git a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h index 524d6077ac1b..ea5058618863 100644 --- a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h | |||
@@ -1,56 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ | ||
1 | /* | 2 | /* |
2 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | * redistributing this file, you may do so under either license. | ||
4 | * | ||
5 | * GPL LICENSE SUMMARY | ||
6 | * | ||
7 | * Copyright (c) 2016 BayLibre, SAS. | 3 | * Copyright (c) 2016 BayLibre, SAS. |
8 | * Author: Neil Armstrong <narmstrong@baylibre.com> | 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> |
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
21 | * The full GNU General Public License is included in this distribution | ||
22 | * in the file called COPYING. | ||
23 | * | ||
24 | * BSD LICENSE | ||
25 | * | ||
26 | * Copyright (c) 2016 BayLibre, SAS. | ||
27 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
28 | * | ||
29 | * Redistribution and use in source and binary forms, with or without | ||
30 | * modification, are permitted provided that the following conditions | ||
31 | * are met: | ||
32 | * | ||
33 | * * Redistributions of source code must retain the above copyright | ||
34 | * notice, this list of conditions and the following disclaimer. | ||
35 | * * Redistributions in binary form must reproduce the above copyright | ||
36 | * notice, this list of conditions and the following disclaimer in | ||
37 | * the documentation and/or other materials provided with the | ||
38 | * distribution. | ||
39 | * * Neither the name of Intel Corporation nor the names of its | ||
40 | * contributors may be used to endorse or promote products derived | ||
41 | * from this software without specific prior written permission. | ||
42 | * | ||
43 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
44 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
45 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
46 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
47 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
48 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
49 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
50 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
51 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
54 | */ | 5 | */ |
55 | #ifndef _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H | 6 | #ifndef _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H |
56 | #define _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H | 7 | #define _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H |
diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h index 614aff2c7aff..c614438bcbdb 100644 --- a/include/dt-bindings/reset/amlogic,meson8b-reset.h +++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h | |||
@@ -1,56 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ | ||
1 | /* | 2 | /* |
2 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | * redistributing this file, you may do so under either license. | ||
4 | * | ||
5 | * GPL LICENSE SUMMARY | ||
6 | * | ||
7 | * Copyright (c) 2016 BayLibre, SAS. | 3 | * Copyright (c) 2016 BayLibre, SAS. |
8 | * Author: Neil Armstrong <narmstrong@baylibre.com> | 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> |
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
21 | * The full GNU General Public License is included in this distribution | ||
22 | * in the file called COPYING. | ||
23 | * | ||
24 | * BSD LICENSE | ||
25 | * | ||
26 | * Copyright (c) 2016 BayLibre, SAS. | ||
27 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
28 | * | ||
29 | * Redistribution and use in source and binary forms, with or without | ||
30 | * modification, are permitted provided that the following conditions | ||
31 | * are met: | ||
32 | * | ||
33 | * * Redistributions of source code must retain the above copyright | ||
34 | * notice, this list of conditions and the following disclaimer. | ||
35 | * * Redistributions in binary form must reproduce the above copyright | ||
36 | * notice, this list of conditions and the following disclaimer in | ||
37 | * the documentation and/or other materials provided with the | ||
38 | * distribution. | ||
39 | * * Neither the name of Intel Corporation nor the names of its | ||
40 | * contributors may be used to endorse or promote products derived | ||
41 | * from this software without specific prior written permission. | ||
42 | * | ||
43 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
44 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
45 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
46 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
47 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
48 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
49 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
50 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
51 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
54 | */ | 5 | */ |
55 | #ifndef _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H | 6 | #ifndef _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H |
56 | #define _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H | 7 | #define _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H |
diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h index 57c592498aa0..9a301082d361 100644 --- a/include/dt-bindings/reset/imx8mq-reset.h +++ b/include/dt-bindings/reset/imx8mq-reset.h | |||
@@ -31,33 +31,33 @@ | |||
31 | #define IMX8MQ_RESET_OTG2_PHY_RESET 20 | 31 | #define IMX8MQ_RESET_OTG2_PHY_RESET 20 |
32 | #define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21 | 32 | #define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21 |
33 | #define IMX8MQ_RESET_MIPI_DSI_RESET_N 22 | 33 | #define IMX8MQ_RESET_MIPI_DSI_RESET_N 22 |
34 | #define IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N 23 | 34 | #define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23 |
35 | #define IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N 24 | 35 | #define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24 |
36 | #define IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N 25 | 36 | #define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25 |
37 | #define IMX8MQ_RESET_PCIEPHY 26 | 37 | #define IMX8MQ_RESET_PCIEPHY 26 |
38 | #define IMX8MQ_RESET_PCIEPHY_PERST 27 | 38 | #define IMX8MQ_RESET_PCIEPHY_PERST 27 |
39 | #define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28 | 39 | #define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28 |
40 | #define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29 | 40 | #define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29 |
41 | #define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 | 41 | #define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM does NOT support */ |
42 | #define IMX8MQ_RESET_DISP_RESET 31 | 42 | #define IMX8MQ_RESET_DISP_RESET 31 |
43 | #define IMX8MQ_RESET_GPU_RESET 32 | 43 | #define IMX8MQ_RESET_GPU_RESET 32 |
44 | #define IMX8MQ_RESET_VPU_RESET 33 | 44 | #define IMX8MQ_RESET_VPU_RESET 33 |
45 | #define IMX8MQ_RESET_PCIEPHY2 34 | 45 | #define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM does NOT support */ |
46 | #define IMX8MQ_RESET_PCIEPHY2_PERST 35 | 46 | #define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM does NOT support */ |
47 | #define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 | 47 | #define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM does NOT support */ |
48 | #define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 | 48 | #define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM does NOT support */ |
49 | #define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 | 49 | #define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM does NOT support */ |
50 | #define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 | 50 | #define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM does NOT support */ |
51 | #define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 | 51 | #define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM does NOT support */ |
52 | #define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 | 52 | #define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM does NOT support */ |
53 | #define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 | 53 | #define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM does NOT support */ |
54 | #define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 | 54 | #define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM does NOT support */ |
55 | #define IMX8MQ_RESET_DDRC1_PRST 44 | 55 | #define IMX8MQ_RESET_DDRC1_PRST 44 |
56 | #define IMX8MQ_RESET_DDRC1_CORE_RESET 45 | 56 | #define IMX8MQ_RESET_DDRC1_CORE_RESET 45 |
57 | #define IMX8MQ_RESET_DDRC1_PHY_RESET 46 | 57 | #define IMX8MQ_RESET_DDRC1_PHY_RESET 46 |
58 | #define IMX8MQ_RESET_DDRC2_PRST 47 | 58 | #define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM does NOT support */ |
59 | #define IMX8MQ_RESET_DDRC2_CORE_RESET 48 | 59 | #define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM does NOT support */ |
60 | #define IMX8MQ_RESET_DDRC2_PHY_RESET 49 | 60 | #define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM does NOT support */ |
61 | 61 | ||
62 | #define IMX8MQ_RESET_NUM 50 | 62 | #define IMX8MQ_RESET_NUM 50 |
63 | 63 | ||
diff --git a/include/dt-bindings/soc/ti,sci_pm_domain.h b/include/dt-bindings/soc/ti,sci_pm_domain.h new file mode 100644 index 000000000000..8f2a7360b65e --- /dev/null +++ b/include/dt-bindings/soc/ti,sci_pm_domain.h | |||
@@ -0,0 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #ifndef __DT_BINDINGS_TI_SCI_PM_DOMAIN_H | ||
4 | #define __DT_BINDINGS_TI_SCI_PM_DOMAIN_H | ||
5 | |||
6 | #define TI_SCI_PD_EXCLUSIVE 1 | ||
7 | #define TI_SCI_PD_SHARED 0 | ||
8 | |||
9 | #endif /* __DT_BINDINGS_TI_SCI_PM_DOMAIN_H */ | ||
diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h new file mode 100644 index 000000000000..7562099c9e46 --- /dev/null +++ b/include/linux/firmware/imx/dsp.h | |||
@@ -0,0 +1,67 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
2 | /* | ||
3 | * Copyright 2019 NXP | ||
4 | * | ||
5 | * Header file for the DSP IPC implementation | ||
6 | */ | ||
7 | |||
8 | #ifndef _IMX_DSP_IPC_H | ||
9 | #define _IMX_DSP_IPC_H | ||
10 | |||
11 | #include <linux/device.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mailbox_client.h> | ||
14 | |||
15 | #define DSP_MU_CHAN_NUM 4 | ||
16 | |||
17 | struct imx_dsp_chan { | ||
18 | struct imx_dsp_ipc *ipc; | ||
19 | struct mbox_client cl; | ||
20 | struct mbox_chan *ch; | ||
21 | char *name; | ||
22 | int idx; | ||
23 | }; | ||
24 | |||
25 | struct imx_dsp_ops { | ||
26 | void (*handle_reply)(struct imx_dsp_ipc *ipc); | ||
27 | void (*handle_request)(struct imx_dsp_ipc *ipc); | ||
28 | }; | ||
29 | |||
30 | struct imx_dsp_ipc { | ||
31 | /* Host <-> DSP communication uses 2 txdb and 2 rxdb channels */ | ||
32 | struct imx_dsp_chan chans[DSP_MU_CHAN_NUM]; | ||
33 | struct device *dev; | ||
34 | struct imx_dsp_ops *ops; | ||
35 | void *private_data; | ||
36 | }; | ||
37 | |||
38 | static inline void imx_dsp_set_data(struct imx_dsp_ipc *ipc, void *data) | ||
39 | { | ||
40 | if (!ipc) | ||
41 | return; | ||
42 | |||
43 | ipc->private_data = data; | ||
44 | } | ||
45 | |||
46 | static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc) | ||
47 | { | ||
48 | if (!ipc) | ||
49 | return NULL; | ||
50 | |||
51 | return ipc->private_data; | ||
52 | } | ||
53 | |||
54 | #if IS_ENABLED(CONFIG_IMX_DSP) | ||
55 | |||
56 | int imx_dsp_ring_doorbell(struct imx_dsp_ipc *dsp, unsigned int chan_idx); | ||
57 | |||
58 | #else | ||
59 | |||
60 | static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, | ||
61 | unsigned int chan_idx) | ||
62 | { | ||
63 | return -ENOTSUPP; | ||
64 | } | ||
65 | |||
66 | #endif | ||
67 | #endif /* _IMX_DSP_IPC_H */ | ||
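
The dsp.h header above defines a small client-facing API: a consumer fills in struct imx_dsp_ops, attaches its private context with imx_dsp_set_data(), and signals the firmware with imx_dsp_ring_doorbell(). A minimal sketch of such a client follows; the example_* names, the completion-based reply handling, and the choice of channel 0 are illustrative assumptions, not taken from the patch.

    #include <linux/completion.h>
    #include <linux/firmware/imx/dsp.h>

    /* Illustrative private state for an IPC client; not part of the patch. */
    struct example_dsp_client {
            struct imx_dsp_ipc *ipc;
            struct completion reply_done;
    };

    static void example_handle_reply(struct imx_dsp_ipc *ipc)
    {
            struct example_dsp_client *client = imx_dsp_get_data(ipc);

            /* The DSP acknowledged the last message we posted. */
            complete(&client->reply_done);
    }

    static void example_handle_request(struct imx_dsp_ipc *ipc)
    {
            /* DSP-initiated message; a real client would parse shared memory here. */
    }

    static struct imx_dsp_ops example_dsp_ops = {
            .handle_reply   = example_handle_reply,
            .handle_request = example_handle_request,
    };

    /* Called from probe once 'ipc' has been obtained from the imx-dsp provider. */
    static int example_dsp_attach(struct example_dsp_client *client,
                                  struct imx_dsp_ipc *ipc)
    {
            int ret;

            client->ipc = ipc;
            init_completion(&client->reply_done);
            ipc->ops = &example_dsp_ops;
            imx_dsp_set_data(ipc, client);

            /* Ring a doorbell channel (index 0 is assumed) to notify the DSP. */
            ret = imx_dsp_ring_doorbell(ipc, 0);
            if (ret < 0)
                    dev_err(ipc->dev, "failed to ring doorbell: %d\n", ret);

            return ret;
    }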
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h new file mode 100644 index 000000000000..490db6886dcc --- /dev/null +++ b/include/linux/moxtet.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Turris Mox module configuration bus driver | ||
4 | * | ||
5 | * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> | ||
6 | */ | ||
7 | |||
8 | #ifndef __LINUX_MOXTET_H | ||
9 | #define __LINUX_MOXTET_H | ||
10 | |||
11 | #include <linux/device.h> | ||
12 | #include <linux/irq.h> | ||
13 | #include <linux/irqdomain.h> | ||
14 | #include <linux/mutex.h> | ||
15 | |||
16 | #define TURRIS_MOX_MAX_MODULES 10 | ||
17 | |||
18 | enum turris_mox_cpu_module_id { | ||
19 | TURRIS_MOX_CPU_ID_EMMC = 0x00, | ||
20 | TURRIS_MOX_CPU_ID_SD = 0x10, | ||
21 | }; | ||
22 | |||
23 | enum turris_mox_module_id { | ||
24 | TURRIS_MOX_MODULE_FIRST = 0x01, | ||
25 | |||
26 | TURRIS_MOX_MODULE_SFP = 0x01, | ||
27 | TURRIS_MOX_MODULE_PCI = 0x02, | ||
28 | TURRIS_MOX_MODULE_TOPAZ = 0x03, | ||
29 | TURRIS_MOX_MODULE_PERIDOT = 0x04, | ||
30 | TURRIS_MOX_MODULE_USB3 = 0x05, | ||
31 | TURRIS_MOX_MODULE_PCI_BRIDGE = 0x06, | ||
32 | |||
33 | TURRIS_MOX_MODULE_LAST = 0x06, | ||
34 | }; | ||
35 | |||
36 | #define MOXTET_NIRQS 16 | ||
37 | |||
38 | extern struct bus_type moxtet_type; | ||
39 | |||
40 | struct moxtet { | ||
41 | struct device *dev; | ||
42 | struct mutex lock; | ||
43 | u8 modules[TURRIS_MOX_MAX_MODULES]; | ||
44 | int count; | ||
45 | u8 tx[TURRIS_MOX_MAX_MODULES]; | ||
46 | int dev_irq; | ||
47 | struct { | ||
48 | struct irq_domain *domain; | ||
49 | struct irq_chip chip; | ||
50 | unsigned long masked, exists; | ||
51 | struct moxtet_irqpos { | ||
52 | u8 idx; | ||
53 | u8 bit; | ||
54 | } position[MOXTET_NIRQS]; | ||
55 | } irq; | ||
56 | #ifdef CONFIG_DEBUG_FS | ||
57 | struct dentry *debugfs_root; | ||
58 | #endif | ||
59 | }; | ||
60 | |||
61 | struct moxtet_driver { | ||
62 | const enum turris_mox_module_id *id_table; | ||
63 | struct device_driver driver; | ||
64 | }; | ||
65 | |||
66 | static inline struct moxtet_driver * | ||
67 | to_moxtet_driver(struct device_driver *drv) | ||
68 | { | ||
69 | if (!drv) | ||
70 | return NULL; | ||
71 | return container_of(drv, struct moxtet_driver, driver); | ||
72 | } | ||
73 | |||
74 | extern int __moxtet_register_driver(struct module *owner, | ||
75 | struct moxtet_driver *mdrv); | ||
76 | |||
77 | static inline void moxtet_unregister_driver(struct moxtet_driver *mdrv) | ||
78 | { | ||
79 | if (mdrv) | ||
80 | driver_unregister(&mdrv->driver); | ||
81 | } | ||
82 | |||
83 | #define moxtet_register_driver(driver) \ | ||
84 | __moxtet_register_driver(THIS_MODULE, driver) | ||
85 | |||
86 | #define module_moxtet_driver(__moxtet_driver) \ | ||
87 | module_driver(__moxtet_driver, moxtet_register_driver, \ | ||
88 | moxtet_unregister_driver) | ||
89 | |||
90 | struct moxtet_device { | ||
91 | struct device dev; | ||
92 | struct moxtet *moxtet; | ||
93 | enum turris_mox_module_id id; | ||
94 | unsigned int idx; | ||
95 | }; | ||
96 | |||
97 | extern int moxtet_device_read(struct device *dev); | ||
98 | extern int moxtet_device_write(struct device *dev, u8 val); | ||
99 | extern int moxtet_device_written(struct device *dev); | ||
100 | |||
101 | static inline struct moxtet_device * | ||
102 | to_moxtet_device(struct device *dev) | ||
103 | { | ||
104 | if (!dev) | ||
105 | return NULL; | ||
106 | return container_of(dev, struct moxtet_device, dev); | ||
107 | } | ||
108 | |||
109 | #endif /* __LINUX_MOXTET_H */ | ||
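
Everything a moxtet module driver needs is visible in the header above: an id_table of turris_mox_module_id values, an embedded device_driver, and the moxtet_device_read()/moxtet_device_write() accessors. A rough driver skeleton is sketched below; the example_* names, the zero-terminated id_table convention, and the 0xff output value are assumptions for illustration only.

    #include <linux/module.h>
    #include <linux/moxtet.h>

    /* Module IDs handled by this driver (zero termination is assumed here). */
    static const enum turris_mox_module_id example_moxtet_ids[] = {
            TURRIS_MOX_MODULE_PERIDOT,
            0,
    };

    static int example_moxtet_probe(struct device *dev)
    {
            struct moxtet_device *mdev = to_moxtet_device(dev);
            int input;

            /* Read the shift-register byte belonging to this module. */
            input = moxtet_device_read(dev);
            if (input < 0)
                    return input;

            dev_info(dev, "module %#x at position %u reads %#x\n",
                     mdev->id, mdev->idx, input);

            /* Drive the module's output byte (value is illustrative). */
            return moxtet_device_write(dev, 0xff);
    }

    static struct moxtet_driver example_moxtet_driver = {
            .id_table = example_moxtet_ids,
            .driver = {
                    .name  = "example-moxtet",
                    .probe = example_moxtet_probe,
            },
    };
    module_moxtet_driver(example_moxtet_driver);

    MODULE_LICENSE("GPL");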
diff --git a/include/linux/platform_data/spi-nuc900.h b/include/linux/platform_data/spi-nuc900.h deleted file mode 100644 index ca3510877000..000000000000 --- a/include/linux/platform_data/spi-nuc900.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-only */ | ||
2 | /* | ||
3 | * Copyright (c) 2009 Nuvoton technology corporation. | ||
4 | * | ||
5 | * Wan ZongShun <mcuos.com@gmail.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef __SPI_NUC900_H | ||
9 | #define __SPI_NUC900_H | ||
10 | |||
11 | extern void mfp_set_groupg(struct device *dev, const char *subname); | ||
12 | |||
13 | struct nuc900_spi_info { | ||
14 | unsigned int num_cs; | ||
15 | unsigned int lsb; | ||
16 | unsigned int txneg; | ||
17 | unsigned int rxneg; | ||
18 | unsigned int divider; | ||
19 | unsigned int sleep; | ||
20 | unsigned int txnum; | ||
21 | unsigned int txbitlen; | ||
22 | int bus_num; | ||
23 | }; | ||
24 | |||
25 | struct nuc900_spi_chip { | ||
26 | unsigned char bits_per_word; | ||
27 | }; | ||
28 | |||
29 | #endif /* __SPI_NUC900_H */ | ||
diff --git a/include/linux/platform_data/video-nuc900fb.h b/include/linux/platform_data/video-nuc900fb.h deleted file mode 100644 index 3da504460c91..000000000000 --- a/include/linux/platform_data/video-nuc900fb.h +++ /dev/null | |||
@@ -1,79 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
2 | /* linux/include/asm/arch-nuc900/fb.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Nuvoton technology corporation | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * Changelog: | ||
8 | * | ||
9 | * 2008/08/26 vincen.zswan modify this file for LCD. | ||
10 | */ | ||
11 | |||
12 | #ifndef __ASM_ARM_FB_H | ||
13 | #define __ASM_ARM_FB_H | ||
14 | |||
15 | |||
16 | |||
17 | /* LCD Controller Hardware Desc */ | ||
18 | struct nuc900fb_hw { | ||
19 | unsigned int lcd_dccs; | ||
20 | unsigned int lcd_device_ctrl; | ||
21 | unsigned int lcd_mpulcd_cmd; | ||
22 | unsigned int lcd_int_cs; | ||
23 | unsigned int lcd_crtc_size; | ||
24 | unsigned int lcd_crtc_dend; | ||
25 | unsigned int lcd_crtc_hr; | ||
26 | unsigned int lcd_crtc_hsync; | ||
27 | unsigned int lcd_crtc_vr; | ||
28 | unsigned int lcd_va_baddr0; | ||
29 | unsigned int lcd_va_baddr1; | ||
30 | unsigned int lcd_va_fbctrl; | ||
31 | unsigned int lcd_va_scale; | ||
32 | unsigned int lcd_va_test; | ||
33 | unsigned int lcd_va_win; | ||
34 | unsigned int lcd_va_stuff; | ||
35 | }; | ||
36 | |||
37 | /* LCD Display Description */ | ||
38 | struct nuc900fb_display { | ||
39 | /* LCD Image type */ | ||
40 | unsigned type; | ||
41 | |||
42 | /* LCD Screen Size */ | ||
43 | unsigned short width; | ||
44 | unsigned short height; | ||
45 | |||
46 | /* LCD Screen Info */ | ||
47 | unsigned short xres; | ||
48 | unsigned short yres; | ||
49 | unsigned short bpp; | ||
50 | |||
51 | unsigned long pixclock; | ||
52 | unsigned short left_margin; | ||
53 | unsigned short right_margin; | ||
54 | unsigned short hsync_len; | ||
55 | unsigned short upper_margin; | ||
56 | unsigned short lower_margin; | ||
57 | unsigned short vsync_len; | ||
58 | |||
59 | /* hardware special register value */ | ||
60 | unsigned int dccs; | ||
61 | unsigned int devctl; | ||
62 | unsigned int fbctrl; | ||
63 | unsigned int scale; | ||
64 | }; | ||
65 | |||
66 | struct nuc900fb_mach_info { | ||
67 | struct nuc900fb_display *displays; | ||
68 | unsigned num_displays; | ||
69 | unsigned default_display; | ||
70 | /* GPIO Setting Info */ | ||
71 | unsigned gpio_dir; | ||
72 | unsigned gpio_dir_mask; | ||
73 | unsigned gpio_data; | ||
74 | unsigned gpio_data_mask; | ||
75 | }; | ||
76 | |||
77 | extern void __init nuc900_fb_set_platdata(struct nuc900fb_mach_info *); | ||
78 | |||
79 | #endif /* __ASM_ARM_FB_H */ | ||
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 3f12cc77fb58..2d5eff506e13 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h | |||
@@ -49,8 +49,9 @@ extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, | |||
49 | extern int qcom_scm_pas_auth_and_reset(u32 peripheral); | 49 | extern int qcom_scm_pas_auth_and_reset(u32 peripheral); |
50 | extern int qcom_scm_pas_shutdown(u32 peripheral); | 50 | extern int qcom_scm_pas_shutdown(u32 peripheral); |
51 | extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, | 51 | extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, |
52 | unsigned int *src, struct qcom_scm_vmperm *newvm, | 52 | unsigned int *src, |
53 | int dest_cnt); | 53 | const struct qcom_scm_vmperm *newvm, |
54 | unsigned int dest_cnt); | ||
54 | extern void qcom_scm_cpu_power_down(u32 flags); | 55 | extern void qcom_scm_cpu_power_down(u32 flags); |
55 | extern u32 qcom_scm_get_version(void); | 56 | extern u32 qcom_scm_get_version(void); |
56 | extern int qcom_scm_set_remote_state(u32 state, u32 id); | 57 | extern int qcom_scm_set_remote_state(u32 state, u32 id); |
@@ -87,8 +88,8 @@ qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; } | |||
87 | static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; } | 88 | static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; } |
88 | static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, | 89 | static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, |
89 | unsigned int *src, | 90 | unsigned int *src, |
90 | struct qcom_scm_vmperm *newvm, | 91 | const struct qcom_scm_vmperm *newvm, |
91 | int dest_cnt) { return -ENODEV; } | 92 | unsigned int dest_cnt) { return -ENODEV; } |
92 | static inline void qcom_scm_cpu_power_down(u32 flags) {} | 93 | static inline void qcom_scm_cpu_power_down(u32 flags) {} |
93 | static inline u32 qcom_scm_get_version(void) { return 0; } | 94 | static inline u32 qcom_scm_get_version(void) { return 0; } |
94 | static inline u32 | 95 | static inline u32 |
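
With the qcom_scm_assign_mem() change above, the destination permission list can be passed as const data and the count is unsigned. A hedged sketch of a caller under the new signature; the VMID and permission macros are assumed to come from qcom_scm.h and the memory region is left to the caller.

    #include <linux/bits.h>
    #include <linux/kernel.h>
    #include <linux/qcom_scm.h>

    /* Illustrative hand-off of a memory region to the modem VM. */
    static int example_assign_to_modem(phys_addr_t addr, size_t size)
    {
            static const struct qcom_scm_vmperm newvm[] = {
                    { QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
            };
            unsigned int src = BIT(QCOM_SCM_VMID_HLOS);

            /* newvm may now be const and the destination count is unsigned. */
            return qcom_scm_assign_mem(addr, size, &src, newvm, ARRAY_SIZE(newvm));
    }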
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 9ff2e9357e9a..881fea47c83d 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* | 2 | /* |
3 | * SCMI Message Protocol driver header | 3 | * SCMI Message Protocol driver header |
4 | * | 4 | * |
@@ -71,7 +71,7 @@ struct scmi_clk_ops { | |||
71 | int (*rate_get)(const struct scmi_handle *handle, u32 clk_id, | 71 | int (*rate_get)(const struct scmi_handle *handle, u32 clk_id, |
72 | u64 *rate); | 72 | u64 *rate); |
73 | int (*rate_set)(const struct scmi_handle *handle, u32 clk_id, | 73 | int (*rate_set)(const struct scmi_handle *handle, u32 clk_id, |
74 | u32 config, u64 rate); | 74 | u64 rate); |
75 | int (*enable)(const struct scmi_handle *handle, u32 clk_id); | 75 | int (*enable)(const struct scmi_handle *handle, u32 clk_id); |
76 | int (*disable)(const struct scmi_handle *handle, u32 clk_id); | 76 | int (*disable)(const struct scmi_handle *handle, u32 clk_id); |
77 | }; | 77 | }; |
@@ -145,6 +145,8 @@ struct scmi_sensor_info { | |||
145 | u32 id; | 145 | u32 id; |
146 | u8 type; | 146 | u8 type; |
147 | s8 scale; | 147 | s8 scale; |
148 | u8 num_trip_points; | ||
149 | bool async; | ||
148 | char name[SCMI_MAX_STR_SIZE]; | 150 | char name[SCMI_MAX_STR_SIZE]; |
149 | }; | 151 | }; |
150 | 152 | ||
@@ -167,9 +169,9 @@ enum scmi_sensor_class { | |||
167 | * | 169 | * |
168 | * @count_get: get the count of sensors provided by SCMI | 170 | * @count_get: get the count of sensors provided by SCMI |
169 | * @info_get: get the information of the specified sensor | 171 | * @info_get: get the information of the specified sensor |
170 | * @configuration_set: control notifications on cross-over events for | 172 | * @trip_point_notify: control notifications on cross-over events for |
171 | * the trip-points | 173 | * the trip-points |
172 | * @trip_point_set: selects and configures a trip-point of interest | 174 | * @trip_point_config: selects and configures a trip-point of interest |
173 | * @reading_get: gets the current value of the sensor | 175 | * @reading_get: gets the current value of the sensor |
174 | */ | 176 | */ |
175 | struct scmi_sensor_ops { | 177 | struct scmi_sensor_ops { |
@@ -177,12 +179,32 @@ struct scmi_sensor_ops { | |||
177 | 179 | ||
178 | const struct scmi_sensor_info *(*info_get) | 180 | const struct scmi_sensor_info *(*info_get) |
179 | (const struct scmi_handle *handle, u32 sensor_id); | 181 | (const struct scmi_handle *handle, u32 sensor_id); |
180 | int (*configuration_set)(const struct scmi_handle *handle, | 182 | int (*trip_point_notify)(const struct scmi_handle *handle, |
181 | u32 sensor_id); | 183 | u32 sensor_id, bool enable); |
182 | int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id, | 184 | int (*trip_point_config)(const struct scmi_handle *handle, |
183 | u8 trip_id, u64 trip_value); | 185 | u32 sensor_id, u8 trip_id, u64 trip_value); |
184 | int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, | 186 | int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, |
185 | bool async, u64 *value); | 187 | u64 *value); |
188 | }; | ||
189 | |||
190 | /** | ||
191 | * struct scmi_reset_ops - represents the various operations provided | ||
192 | * by SCMI Reset Protocol | ||
193 | * | ||
194 | * @num_domains_get: get the count of reset domains provided by SCMI | ||
195 | * @name_get: gets the name of a reset domain | ||
196 | * @latency_get: gets the reset latency for the specified reset domain | ||
197 | * @reset: resets the specified reset domain | ||
198 | * @assert: explicitly assert reset signal of the specified reset domain | ||
199 | * @deassert: explicitly deassert reset signal of the specified reset domain | ||
200 | */ | ||
201 | struct scmi_reset_ops { | ||
202 | int (*num_domains_get)(const struct scmi_handle *handle); | ||
203 | char *(*name_get)(const struct scmi_handle *handle, u32 domain); | ||
204 | int (*latency_get)(const struct scmi_handle *handle, u32 domain); | ||
205 | int (*reset)(const struct scmi_handle *handle, u32 domain); | ||
206 | int (*assert)(const struct scmi_handle *handle, u32 domain); | ||
207 | int (*deassert)(const struct scmi_handle *handle, u32 domain); | ||
186 | }; | 208 | }; |
187 | 209 | ||
188 | /** | 210 | /** |
@@ -194,6 +216,7 @@ struct scmi_sensor_ops { | |||
194 | * @perf_ops: pointer to set of performance protocol operations | 216 | * @perf_ops: pointer to set of performance protocol operations |
195 | * @clk_ops: pointer to set of clock protocol operations | 217 | * @clk_ops: pointer to set of clock protocol operations |
196 | * @sensor_ops: pointer to set of sensor protocol operations | 218 | * @sensor_ops: pointer to set of sensor protocol operations |
219 | * @reset_ops: pointer to set of reset protocol operations | ||
197 | * @perf_priv: pointer to private data structure specific to performance | 220 | * @perf_priv: pointer to private data structure specific to performance |
198 | * protocol(for internal use only) | 221 | * protocol(for internal use only) |
199 | * @clk_priv: pointer to private data structure specific to clock | 222 | * @clk_priv: pointer to private data structure specific to clock |
@@ -202,6 +225,8 @@ struct scmi_sensor_ops { | |||
202 | * protocol(for internal use only) | 225 | * protocol(for internal use only) |
203 | * @sensor_priv: pointer to private data structure specific to sensors | 226 | * @sensor_priv: pointer to private data structure specific to sensors |
204 | * protocol(for internal use only) | 227 | * protocol(for internal use only) |
228 | * @reset_priv: pointer to private data structure specific to reset | ||
229 | * protocol(for internal use only) | ||
205 | */ | 230 | */ |
206 | struct scmi_handle { | 231 | struct scmi_handle { |
207 | struct device *dev; | 232 | struct device *dev; |
@@ -210,11 +235,13 @@ struct scmi_handle { | |||
210 | struct scmi_clk_ops *clk_ops; | 235 | struct scmi_clk_ops *clk_ops; |
211 | struct scmi_power_ops *power_ops; | 236 | struct scmi_power_ops *power_ops; |
212 | struct scmi_sensor_ops *sensor_ops; | 237 | struct scmi_sensor_ops *sensor_ops; |
238 | struct scmi_reset_ops *reset_ops; | ||
213 | /* for protocol internal use */ | 239 | /* for protocol internal use */ |
214 | void *perf_priv; | 240 | void *perf_priv; |
215 | void *clk_priv; | 241 | void *clk_priv; |
216 | void *power_priv; | 242 | void *power_priv; |
217 | void *sensor_priv; | 243 | void *sensor_priv; |
244 | void *reset_priv; | ||
218 | }; | 245 | }; |
219 | 246 | ||
220 | enum scmi_std_protocol { | 247 | enum scmi_std_protocol { |
@@ -224,6 +251,7 @@ enum scmi_std_protocol { | |||
224 | SCMI_PROTOCOL_PERF = 0x13, | 251 | SCMI_PROTOCOL_PERF = 0x13, |
225 | SCMI_PROTOCOL_CLOCK = 0x14, | 252 | SCMI_PROTOCOL_CLOCK = 0x14, |
226 | SCMI_PROTOCOL_SENSOR = 0x15, | 253 | SCMI_PROTOCOL_SENSOR = 0x15, |
254 | SCMI_PROTOCOL_RESET = 0x16, | ||
227 | }; | 255 | }; |
228 | 256 | ||
229 | struct scmi_device { | 257 | struct scmi_device { |
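
The new struct scmi_reset_ops follows the same pattern as the other per-protocol operation tables hanging off scmi_handle. A consumer that has been handed an SCMI handle can drive a reset domain roughly as sketched below; the function name and the logging are illustrative, not part of the patch.

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/scmi_protocol.h>

    /* Illustrative consumer: pulse one reset domain through an SCMI handle. */
    static int example_scmi_pulse_reset(const struct scmi_handle *handle, u32 domain)
    {
            int ret;

            if (!handle || !handle->reset_ops)
                    return -ENODEV;

            pr_info("resetting SCMI domain %u (%s)\n", domain,
                    handle->reset_ops->name_get(handle, domain));

            ret = handle->reset_ops->assert(handle, domain);
            if (ret)
                    return ret;

            return handle->reset_ops->deassert(handle, domain);
    }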
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 54ade13a9b15..f3ae45d02e80 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h | |||
@@ -63,26 +63,26 @@ void cmdq_pkt_destroy(struct cmdq_pkt *pkt); | |||
63 | /** | 63 | /** |
64 | * cmdq_pkt_write() - append write command to the CMDQ packet | 64 | * cmdq_pkt_write() - append write command to the CMDQ packet |
65 | * @pkt: the CMDQ packet | 65 | * @pkt: the CMDQ packet |
66 | * @value: the specified target register value | ||
67 | * @subsys: the CMDQ sub system code | 66 | * @subsys: the CMDQ sub system code |
68 | * @offset: register offset from CMDQ sub system | 67 | * @offset: register offset from CMDQ sub system |
68 | * @value: the specified target register value | ||
69 | * | 69 | * |
70 | * Return: 0 for success; else the error code is returned | 70 | * Return: 0 for success; else the error code is returned |
71 | */ | 71 | */ |
72 | int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset); | 72 | int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value); |
73 | 73 | ||
74 | /** | 74 | /** |
75 | * cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet | 75 | * cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet |
76 | * @pkt: the CMDQ packet | 76 | * @pkt: the CMDQ packet |
77 | * @value: the specified target register value | ||
78 | * @subsys: the CMDQ sub system code | 77 | * @subsys: the CMDQ sub system code |
79 | * @offset: register offset from CMDQ sub system | 78 | * @offset: register offset from CMDQ sub system |
79 | * @value: the specified target register value | ||
80 | * @mask: the specified target register mask | 80 | * @mask: the specified target register mask |
81 | * | 81 | * |
82 | * Return: 0 for success; else the error code is returned | 82 | * Return: 0 for success; else the error code is returned |
83 | */ | 83 | */ |
84 | int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, | 84 | int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, |
85 | u32 subsys, u32 offset, u32 mask); | 85 | u16 offset, u32 value, u32 mask); |
86 | 86 | ||
87 | /** | 87 | /** |
88 | * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet | 88 | * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet |
@@ -91,7 +91,7 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, | |||
91 | * | 91 | * |
92 | * Return: 0 for success; else the error code is returned | 92 | * Return: 0 for success; else the error code is returned |
93 | */ | 93 | */ |
94 | int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event); | 94 | int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); |
95 | 95 | ||
96 | /** | 96 | /** |
97 | * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet | 97 | * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet |
@@ -100,7 +100,7 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event); | |||
100 | * | 100 | * |
101 | * Return: 0 for success; else the error code is returned | 101 | * Return: 0 for success; else the error code is returned |
102 | */ | 102 | */ |
103 | int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event); | 103 | int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); |
104 | 104 | ||
105 | /** | 105 | /** |
106 | * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ | 106 | * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ |
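
Note that cmdq_pkt_write() and cmdq_pkt_write_mask() change both their argument order (subsystem and offset first, value last) and their parameter widths (u8/u16 for subsys/offset, u16 for events). A short sketch of a caller using the new order; the register numbers are placeholders, not real MediaTek addresses.

    #include <linux/soc/mediatek/mtk-cmdq.h>

    static int example_cmdq_queue_writes(struct cmdq_pkt *pkt)
    {
            u8 subsys = 0x14;
            u16 offset = 0x1000;
            u32 value = 0x12345678;
            int ret;

            /* New order: packet, subsys, offset, value. */
            ret = cmdq_pkt_write(pkt, subsys, offset, value);
            if (ret)
                    return ret;

            /* The masked variant keeps the same order, with the mask appended. */
            return cmdq_pkt_write_mask(pkt, subsys, offset, value, 0x0000ffff);
    }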
diff --git a/include/linux/soc/samsung/exynos-chipid.h b/include/linux/soc/samsung/exynos-chipid.h new file mode 100644 index 000000000000..8bca6763f99c --- /dev/null +++ b/include/linux/soc/samsung/exynos-chipid.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Copyright (c) 2018 Samsung Electronics Co., Ltd. | ||
4 | * http://www.samsung.com/ | ||
5 | * | ||
6 | * Exynos - CHIPID support | ||
7 | */ | ||
8 | #ifndef __LINUX_SOC_EXYNOS_CHIPID_H | ||
9 | #define __LINUX_SOC_EXYNOS_CHIPID_H | ||
10 | |||
11 | #define EXYNOS_CHIPID_REG_PRO_ID 0x00 | ||
12 | #define EXYNOS_SUBREV_MASK (0xf << 4) | ||
13 | #define EXYNOS_MAINREV_MASK (0xf << 0) | ||
14 | #define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \ | ||
15 | EXYNOS_MAINREV_MASK) | ||
16 | #define EXYNOS_MASK 0xfffff000 | ||
17 | |||
18 | #define EXYNOS_CHIPID_REG_PKG_ID 0x04 | ||
19 | /* Bit field definitions for EXYNOS_CHIPID_REG_PKG_ID register */ | ||
20 | #define EXYNOS5422_IDS_OFFSET 24 | ||
21 | #define EXYNOS5422_IDS_MASK 0xff | ||
22 | #define EXYNOS5422_USESG_OFFSET 3 | ||
23 | #define EXYNOS5422_USESG_MASK 0x01 | ||
24 | #define EXYNOS5422_SG_OFFSET 0 | ||
25 | #define EXYNOS5422_SG_MASK 0x07 | ||
26 | #define EXYNOS5422_TABLE_OFFSET 8 | ||
27 | #define EXYNOS5422_TABLE_MASK 0x03 | ||
28 | #define EXYNOS5422_SG_A_OFFSET 17 | ||
29 | #define EXYNOS5422_SG_A_MASK 0x0f | ||
30 | #define EXYNOS5422_SG_B_OFFSET 21 | ||
31 | #define EXYNOS5422_SG_B_MASK 0x03 | ||
32 | #define EXYNOS5422_SG_BSIGN_OFFSET 23 | ||
33 | #define EXYNOS5422_SG_BSIGN_MASK 0x01 | ||
34 | #define EXYNOS5422_BIN2_OFFSET 12 | ||
35 | #define EXYNOS5422_BIN2_MASK 0x01 | ||
36 | |||
37 | #define EXYNOS_CHIPID_REG_LOT_ID 0x14 | ||
38 | |||
39 | #define EXYNOS_CHIPID_REG_AUX_INFO 0x1c | ||
40 | /* Bit field definitions for EXYNOS_CHIPID_REG_AUX_INFO register */ | ||
41 | #define EXYNOS5422_TMCB_OFFSET 0 | ||
42 | #define EXYNOS5422_TMCB_MASK 0x7f | ||
43 | #define EXYNOS5422_ARM_UP_OFFSET 8 | ||
44 | #define EXYNOS5422_ARM_UP_MASK 0x03 | ||
45 | #define EXYNOS5422_ARM_DN_OFFSET 10 | ||
46 | #define EXYNOS5422_ARM_DN_MASK 0x03 | ||
47 | #define EXYNOS5422_KFC_UP_OFFSET 12 | ||
48 | #define EXYNOS5422_KFC_UP_MASK 0x03 | ||
49 | #define EXYNOS5422_KFC_DN_OFFSET 14 | ||
50 | #define EXYNOS5422_KFC_DN_MASK 0x03 | ||
51 | |||
52 | #endif /*__LINUX_SOC_EXYNOS_CHIPID_H */ | ||
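
The chipid header is purely register-layout material: offsets plus mask/shift pairs for the PRO_ID, PKG_ID and AUX_INFO registers. A hedged sketch of decoding a raw PRO_ID value with these masks; how the value is read (regmap, ioremap, ...) and the log format are assumptions.

    #include <linux/printk.h>
    #include <linux/soc/samsung/exynos-chipid.h>

    /* Illustrative decode of a raw EXYNOS_CHIPID_REG_PRO_ID register value. */
    static void example_decode_chipid(u32 pro_id)
    {
            u32 product = pro_id & EXYNOS_MASK;
            u32 main_rev = pro_id & EXYNOS_MAINREV_MASK;
            u32 sub_rev = (pro_id & EXYNOS_SUBREV_MASK) >> 4;

            pr_info("Exynos SoC %#x, revision %u.%u\n", product, main_rev, sub_rev);
    }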
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 6c610e188a44..9531ec823298 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h | |||
@@ -97,7 +97,10 @@ struct ti_sci_core_ops { | |||
97 | */ | 97 | */ |
98 | struct ti_sci_dev_ops { | 98 | struct ti_sci_dev_ops { |
99 | int (*get_device)(const struct ti_sci_handle *handle, u32 id); | 99 | int (*get_device)(const struct ti_sci_handle *handle, u32 id); |
100 | int (*get_device_exclusive)(const struct ti_sci_handle *handle, u32 id); | ||
100 | int (*idle_device)(const struct ti_sci_handle *handle, u32 id); | 101 | int (*idle_device)(const struct ti_sci_handle *handle, u32 id); |
102 | int (*idle_device_exclusive)(const struct ti_sci_handle *handle, | ||
103 | u32 id); | ||
101 | int (*put_device)(const struct ti_sci_handle *handle, u32 id); | 104 | int (*put_device)(const struct ti_sci_handle *handle, u32 id); |
102 | int (*is_valid)(const struct ti_sci_handle *handle, u32 id); | 105 | int (*is_valid)(const struct ti_sci_handle *handle, u32 id); |
103 | int (*get_context_loss_count)(const struct ti_sci_handle *handle, | 106 | int (*get_context_loss_count)(const struct ti_sci_handle *handle, |
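
The two new TI-SCI callbacks give a host exclusive (rather than shared) ownership of a device. A rough consumer sketch is below; obtaining the ti_sci_handle and choosing the device ID are left to the caller, and the function name is illustrative.

    #include <linux/soc/ti/ti_sci_protocol.h>

    /* Illustrative: claim a device for exclusive use, then release it. */
    static int example_ti_sci_claim(const struct ti_sci_handle *handle, u32 dev_id)
    {
            const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
            int ret;

            /* Exclusive: no other host may request this device while we hold it. */
            ret = dops->get_device_exclusive(handle, dev_id);
            if (ret)
                    return ret;

            /* ... program the device ... */

            return dops->put_device(handle, dev_id);
    }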
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h index b7c70c3e953f..48ceea867dd6 100644 --- a/include/linux/sys_soc.h +++ b/include/linux/sys_soc.h | |||
@@ -12,6 +12,7 @@ struct soc_device_attribute { | |||
12 | const char *machine; | 12 | const char *machine; |
13 | const char *family; | 13 | const char *family; |
14 | const char *revision; | 14 | const char *revision; |
15 | const char *serial_number; | ||
15 | const char *soc_id; | 16 | const char *soc_id; |
16 | const void *data; | 17 | const void *data; |
17 | }; | 18 | }; |
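
The new serial_number field lets SoC identification drivers expose a per-chip serial through the soc bus sysfs attributes. A minimal registration sketch filling it in; the family string, the serial formatting, and the helper name are placeholders, not from the patch.

    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/sys_soc.h>

    /* Illustrative registration populating the new serial_number attribute. */
    static struct soc_device *example_register_soc(u64 serial)
    {
            struct soc_device_attribute *attr;

            attr = kzalloc(sizeof(*attr), GFP_KERNEL);
            if (!attr)
                    return ERR_PTR(-ENOMEM);

            attr->family = "Example SoC";
            attr->serial_number = kasprintf(GFP_KERNEL, "%016llx", serial);

            return soc_device_register(attr);
    }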
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h index efed3c3383d6..1d19ae62b844 100644 --- a/include/video/da8xx-fb.h +++ b/include/video/da8xx-fb.h | |||
@@ -32,7 +32,6 @@ struct da8xx_lcdc_platform_data { | |||
32 | const char manu_name[10]; | 32 | const char manu_name[10]; |
33 | void *controller_data; | 33 | void *controller_data; |
34 | const char type[25]; | 34 | const char type[25]; |
35 | void (*panel_power_ctrl)(int); | ||
36 | }; | 35 | }; |
37 | 36 | ||
38 | struct lcd_ctrl_config { | 37 | struct lcd_ctrl_config { |