author		Linus Torvalds <torvalds@linux-foundation.org>	2019-09-18 14:14:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-18 14:14:31 -0400
commit		6cfae0c26b21dce323fe8799b66cf4bc996e3565 (patch)
tree		647f80442929de7ed17cc436c546c21c8c2b2aa9
parent		e6874fc29410fabfdbc8c12b467f41a16cbcfd2b (diff)
parent		16a0f687cac70301f49d6f99c4115824e6aad42b (diff)
Merge tag 'char-misc-5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big char/misc driver pull request for 5.4-rc1.

  As has been happening in previous releases, more and more individual
  driver subsystem trees are ending up in here. Now if that is good or
  bad I can't tell, but hopefully it makes your life easier as it's more
  of an aggregation of trees together to one merge point for you.

  Anyway, lots of stuff in here:

   - habanalabs driver updates
   - thunderbolt driver updates
   - misc driver updates
   - coresight and intel_th hwtracing driver updates
   - fpga driver updates
   - extcon driver updates
   - some dma driver updates
   - char driver updates
   - android binder driver updates
   - nvmem driver updates
   - phy driver updates
   - parport driver fixes
   - pcmcia driver fix
   - uio driver updates
   - w1 driver updates
   - configfs fixes
   - other assorted driver updates

  All of these have been in linux-next for a long time with no reported
  issues"

* tag 'char-misc-5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (200 commits)
  misc: mic: Use PTR_ERR_OR_ZERO rather than its implementation
  habanalabs: correctly cast variable to __le32
  habanalabs: show correct id in error print
  habanalabs: stop using the acronym KMD
  habanalabs: display card name as sensors header
  habanalabs: add uapi to retrieve aggregate H/W events
  habanalabs: add uapi to retrieve device utilization
  habanalabs: Make the Coresight timestamp perpetual
  habanalabs: explicitly set the queue-id enumerated numbers
  habanalabs: print to kernel log when reset is finished
  habanalabs: replace __le32_to_cpu with le32_to_cpu
  habanalabs: replace __cpu_to_le32/64 with cpu_to_le32/64
  habanalabs: Handle HW_IP_INFO if device disabled or in reset
  habanalabs: Expose devices after initialization is done
  habanalabs: improve security in Debug IOCTL
  habanalabs: use default structure for user input in Debug IOCTL
  habanalabs: Add descriptive name to PSOC app status register
  habanalabs: Add descriptive names to PSOC scratch-pad registers
  habanalabs: create two char devices per ASIC
  habanalabs: change device_setup_cdev() to be more generic
  ...
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc | 3
-rw-r--r--  Documentation/ABI/testing/sysfs-class-mic (renamed from Documentation/ABI/testing/sysfs-class-mic.txt) | 0
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu | 128
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-habanalabs | 14
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-dfl-fme | 85
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-dfl-port | 85
-rw-r--r--  Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/extcon/extcon-arizona.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/extcon/extcon-fsa9480.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/fpga/fpga-bridge.txt | 13
-rw-r--r--  Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/interconnect/qcom,qcs404.txt | 45
-rw-r--r--  Documentation/devicetree/bindings/nvmem/imx-ocotp.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/pci/pci-armada8k.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml | 95
-rw-r--r--  Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt | 10
-rw-r--r--  Documentation/driver-api/uio-howto.rst | 7
-rw-r--r--  Documentation/fpga/dfl.rst | 105
-rw-r--r--  Documentation/misc-devices/index.rst | 1
-rw-r--r--  MAINTAINERS | 23
-rw-r--r--  drivers/acpi/property.c | 6
-rw-r--r--  drivers/android/binder.c | 100
-rw-r--r--  drivers/android/binder_internal.h | 86
-rw-r--r--  drivers/android/binderfs.c | 290
-rw-r--r--  drivers/char/mem.c | 21
-rw-r--r--  drivers/char/ppdev.c | 2
-rw-r--r--  drivers/char/toshiba.c | 8
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 16
-rw-r--r--  drivers/extcon/extcon-adc-jack.c | 4
-rw-r--r--  drivers/extcon/extcon-arizona.c | 2
-rw-r--r--  drivers/extcon/extcon-axp288.c | 16
-rw-r--r--  drivers/extcon/extcon-fsa9480.c | 1
-rw-r--r--  drivers/extcon/extcon-gpio.c | 29
-rw-r--r--  drivers/extcon/extcon-max77843.c | 6
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 2
-rw-r--r--  drivers/firmware/Kconfig | 18
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/google/vpd.c | 4
-rw-r--r--  drivers/firmware/google/vpd_decode.c | 55
-rw-r--r--  drivers/firmware/google/vpd_decode.h | 6
-rw-r--r--  drivers/firmware/stratix10-rsu.c | 451
-rw-r--r--  drivers/firmware/stratix10-svc.c | 76
-rw-r--r--  drivers/fpga/Kconfig | 6
-rw-r--r--  drivers/fpga/Makefile | 3
-rw-r--r--  drivers/fpga/altera-cvp.c | 342
-rw-r--r--  drivers/fpga/altera-pr-ip-core-plat.c | 4
-rw-r--r--  drivers/fpga/altera-pr-ip-core.c | 4
-rw-r--r--  drivers/fpga/dfl-afu-error.c | 230
-rw-r--r--  drivers/fpga/dfl-afu-main.c | 381
-rw-r--r--  drivers/fpga/dfl-afu.h | 9
-rw-r--r--  drivers/fpga/dfl-fme-error.c | 359
-rw-r--r--  drivers/fpga/dfl-fme-main.c | 128
-rw-r--r--  drivers/fpga/dfl-fme-pr.c | 7
-rw-r--r--  drivers/fpga/dfl-fme.h | 6
-rw-r--r--  drivers/fpga/dfl-pci.c | 36
-rw-r--r--  drivers/fpga/dfl.c | 226
-rw-r--r--  drivers/fpga/dfl.h | 52
-rw-r--r--  drivers/hwtracing/coresight/coresight-cpu-debug.c | 33
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-sysfs.c | 13
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 38
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 11
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 10
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c | 3
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c | 23
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 93
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 40
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h | 11
-rw-r--r--  drivers/hwtracing/intel_th/Makefile | 3
-rw-r--r--  drivers/hwtracing/intel_th/msu-sink.c | 116
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 537
-rw-r--r--  drivers/hwtracing/intel_th/msu.h | 20
-rw-r--r--  drivers/interconnect/core.c | 27
-rw-r--r--  drivers/interconnect/qcom/Kconfig | 12
-rw-r--r--  drivers/interconnect/qcom/Makefile | 4
-rw-r--r--  drivers/interconnect/qcom/qcs404.c | 539
-rw-r--r--  drivers/interconnect/qcom/sdm845.c | 160
-rw-r--r--  drivers/interconnect/qcom/smd-rpm.c | 77
-rw-r--r--  drivers/interconnect/qcom/smd-rpm.h | 15
-rw-r--r--  drivers/misc/Kconfig | 9
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/cardreader/alcor_pci.c | 6
-rw-r--r--  drivers/misc/eeprom/Kconfig | 5
-rw-r--r--  drivers/misc/eeprom/ee1004.c | 6
-rw-r--r--  drivers/misc/eeprom/max6875.c | 6
-rw-r--r--  drivers/misc/fastrpc.c | 79
-rw-r--r--  drivers/misc/habanalabs/asid.c | 2
-rw-r--r--  drivers/misc/habanalabs/command_buffer.c | 3
-rw-r--r--  drivers/misc/habanalabs/command_submission.c | 27
-rw-r--r--  drivers/misc/habanalabs/context.c | 40
-rw-r--r--  drivers/misc/habanalabs/debugfs.c | 16
-rw-r--r--  drivers/misc/habanalabs/device.c | 488
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 95
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h | 19
-rw-r--r--  drivers/misc/habanalabs/goya/goya_coresight.c | 89
-rw-r--r--  drivers/misc/habanalabs/goya/goya_hwmgr.c | 109
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h | 129
-rw-r--r--  drivers/misc/habanalabs/habanalabs_drv.c | 171
-rw-r--r--  drivers/misc/habanalabs/habanalabs_ioctl.c | 180
-rw-r--r--  drivers/misc/habanalabs/hw_queue.c | 18
-rw-r--r--  drivers/misc/habanalabs/hwmon.c | 24
-rw-r--r--  drivers/misc/habanalabs/include/armcp_if.h | 85
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya.h | 2
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_reg_map.h | 34
-rw-r--r--  drivers/misc/habanalabs/irq.c | 4
-rw-r--r--  drivers/misc/habanalabs/sysfs.c | 126
-rw-r--r--  drivers/misc/lkdtm/Makefile | 1
-rw-r--r--  drivers/misc/lkdtm/bugs.c | 7
-rw-r--r--  drivers/misc/lkdtm/cfi.c | 42
-rw-r--r--  drivers/misc/lkdtm/core.c | 2
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 4
-rw-r--r--  drivers/misc/mei/pci-me.c | 19
-rw-r--r--  drivers/misc/mei/pci-txe.c | 19
-rw-r--r--  drivers/misc/mic/card/mic_x100.c | 28
-rw-r--r--  drivers/misc/mic/scif/scif_epd.h | 5
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 4
-rw-r--r--  drivers/misc/spear13xx_pcie_gadget.c | 797
-rw-r--r--  drivers/misc/xilinx_sdfec.c | 1214
-rw-r--r--  drivers/nvmem/imx-ocotp-scu.c | 7
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 7
-rw-r--r--  drivers/nvmem/meson-mx-efuse.c | 3
-rw-r--r--  drivers/nvmem/mxs-ocotp.c | 2
-rw-r--r--  drivers/nvmem/sunxi_sid.c | 1
-rw-r--r--  drivers/parport/Makefile | 2
-rw-r--r--  drivers/parport/parport_serial.c | 6
-rw-r--r--  drivers/pcmcia/i82092.c | 6
-rw-r--r--  drivers/phy/Makefile | 2
-rw-r--r--  drivers/phy/lantiq/Kconfig | 11
-rw-r--r--  drivers/phy/lantiq/Makefile | 1
-rw-r--r--  drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c | 494
-rw-r--r--  drivers/phy/marvell/Kconfig | 1
-rw-r--r--  drivers/phy/marvell/phy-armada38x-comphy.c | 4
-rw-r--r--  drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 17
-rw-r--r--  drivers/phy/marvell/phy-mvebu-cp110-comphy.c | 525
-rw-r--r--  drivers/phy/phy-core.c | 10
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp.c | 44
-rw-r--r--  drivers/phy/renesas/phy-rcar-gen3-usb2.c | 2
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-inno-hdmi.c | 2
-rw-r--r--  drivers/phy/samsung/phy-exynos-dp-video.c | 1
-rw-r--r--  drivers/phy/samsung/phy-exynos-mipi-video.c | 1
-rw-r--r--  drivers/phy/samsung/phy-exynos-pcie.c | 1
-rw-r--r--  drivers/phy/samsung/phy-exynos5-usbdrd.c | 1
-rw-r--r--  drivers/phy/samsung/phy-exynos5250-sata.c | 1
-rw-r--r--  drivers/phy/samsung/phy-samsung-usb2.c | 1
-rw-r--r--  drivers/phy/ti/phy-am654-serdes.c | 33
-rw-r--r--  drivers/slimbus/qcom-ngd-ctrl.c | 5
-rw-r--r--  drivers/slimbus/slimbus.h | 2
-rw-r--r--  drivers/thunderbolt/Makefile | 2
-rw-r--r--  drivers/thunderbolt/ctl.c | 23
-rw-r--r--  drivers/thunderbolt/eeprom.c | 6
-rw-r--r--  drivers/thunderbolt/icm.c | 194
-rw-r--r--  drivers/thunderbolt/nhi.c | 134
-rw-r--r--  drivers/thunderbolt/nhi.h | 22
-rw-r--r--  drivers/thunderbolt/nhi_ops.c | 179
-rw-r--r--  drivers/thunderbolt/nhi_regs.h | 37
-rw-r--r--  drivers/thunderbolt/switch.c | 52
-rw-r--r--  drivers/thunderbolt/tb_msgs.h | 16
-rw-r--r--  drivers/thunderbolt/tunnel.c | 4
-rw-r--r--  drivers/thunderbolt/xdomain.c | 2
-rw-r--r--  drivers/uio/uio_dmem_genirq.c | 4
-rw-r--r--  drivers/uio/uio_pdrv_genirq.c | 14
-rw-r--r--  drivers/w1/masters/Kconfig | 9
-rw-r--r--  drivers/w1/masters/Makefile | 1
-rw-r--r--  drivers/w1/masters/mxc_w1.c | 4
-rw-r--r--  drivers/w1/masters/omap_hdq.c | 4
-rw-r--r--  drivers/w1/masters/sgi_w1.c | 130
-rw-r--r--  drivers/w1/slaves/Kconfig | 6
-rw-r--r--  drivers/w1/slaves/Makefile | 1
-rw-r--r--  drivers/w1/slaves/w1_ds250x.c | 290
-rw-r--r--  include/dt-bindings/interconnect/qcom,qcs404.h | 88
-rw-r--r--  include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h | 11
-rw-r--r--  include/linux/firmware/intel/stratix10-smc.h | 51
-rw-r--r--  include/linux/firmware/intel/stratix10-svc-client.h | 11
-rw-r--r--  include/linux/fpga/altera-pr-ip-core.h | 2
-rw-r--r--  include/linux/intel_th.h | 79
-rw-r--r--  include/linux/interconnect-provider.h | 7
-rw-r--r--  include/linux/interconnect.h | 5
-rw-r--r--  include/linux/platform_data/sgi-w1.h | 13
-rw-r--r--  include/linux/thunderbolt.h | 2
-rw-r--r--  include/linux/w1.h | 5
-rw-r--r--  include/soc/qcom/tcs.h | 20
-rw-r--r--  include/uapi/linux/fpga-dfl.h | 18
-rw-r--r--  include/uapi/linux/ppdev.h | 5
-rw-r--r--  include/uapi/misc/habanalabs.h | 102
-rw-r--r--  include/uapi/misc/xilinx_sdfec.h | 448
-rw-r--r--  lib/test_firmware.c | 50
-rwxr-xr-x  tools/testing/selftests/firmware/fw_filesystem.sh | 57
-rwxr-xr-x  tools/testing/selftests/firmware/fw_lib.sh | 11
190 files changed, 10364 insertions, 2299 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
index f54ae244f3f1..456cb62b384c 100644
--- a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
@@ -12,7 +12,8 @@ Description: (RW) Configure MSC operating mode:
12 - "single", for contiguous buffer mode (high-order alloc); 12 - "single", for contiguous buffer mode (high-order alloc);
13 - "multi", for multiblock mode; 13 - "multi", for multiblock mode;
14 - "ExI", for DCI handler mode; 14 - "ExI", for DCI handler mode;
15 - "debug", for debug mode. 15 - "debug", for debug mode;
16 - any of the currently loaded buffer sinks.
16 If operating mode changes, existing buffer is deallocated, 17 If operating mode changes, existing buffer is deallocated,
17 provided there are no active users and tracing is not enabled, 18 provided there are no active users and tracing is not enabled,
18 otherwise the write will fail. 19 otherwise the write will fail.
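Switching the MSC between these modes is a plain sysfs write. A minimal C sketch, assuming an instance named 0-msc0 (the device name and the availability of "multi" mode on the target system are assumptions):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* assumed instance name; see /sys/bus/intel_th/devices */
        const char *path = "/sys/bus/intel_th/devices/0-msc0/mode";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* switch to multiblock mode; per the ABI text above, this
         * fails while there are active users or tracing is enabled */
        if (write(fd, "multi", strlen("multi")) < 0)
            perror("write");
        close(fd);
        return 0;
    }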
diff --git a/Documentation/ABI/testing/sysfs-class-mic.txt b/Documentation/ABI/testing/sysfs-class-mic
index 6ef682603179..6ef682603179 100644
--- a/Documentation/ABI/testing/sysfs-class-mic.txt
+++ b/Documentation/ABI/testing/sysfs-class-mic
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu b/Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu
new file mode 100644
index 000000000000..ae9af984471a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu
@@ -0,0 +1,128 @@
+Intel Stratix10 Remote System Update (RSU) device attributes
+
+What:		/sys/devices/platform/stratix10-rsu.0/current_image
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Richard Gong <richard.gong@linux.intel.com>
+Description:
+		(RO) the address in flash of currently running image.
+
+What:		/sys/devices/platform/stratix10-rsu.0/fail_image
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Richard Gong <richard.gong@linux.intel.com>
+Description:
+		(RO) the address in flash of failed image.
+
+What:		/sys/devices/platform/stratix10-rsu.0/state
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Richard Gong <richard.gong@linux.intel.com>
+Description:
+		(RO) the state of RSU system.
+		The state field has two parts: major error code in
+		upper 16 bits and minor error code in lower 16 bits.
+
+		b[15:0]
+		Currently used only when major error is 0xF006
+		(CPU watchdog timeout), in which case the minor
+		error code is the value reported by CPU to
+		firmware through the RSU notify command before
+		the watchdog timeout occurs.
+
+		b[31:16]
+		0xF001	bitstream error
+		0xF002	hardware access failure
+		0xF003	bitstream corruption
+		0xF004	internal error
+		0xF005	device error
+		0xF006	CPU watchdog timeout
+		0xF007	internal unknown error
+
+What:		/sys/devices/platform/stratix10-rsu.0/version
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Richard Gong <richard.gong@linux.intel.com>
+Description:
+		(RO) the version number of RSU firmware. 19.3 or later
+		versions include information about the firmware which
+		reported the error.
+
+		pre 19.3:
+		b[31:0]
+		0x0	version number
+
+		19.3 or later:
+		b[15:0]
+		0x1	version number
+		b[31:16]
+		0x0	no error
+		0x0DCF	Decision CMF error
+		0x0ACF	Application CMF error
+
+What:		/sys/devices/platform/stratix10-rsu.0/error_location
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Richard Gong <richard.gong@linux.intel.com>
+Description:
+		(RO) the error offset inside the image that failed.
+
+What:		/sys/devices/platform/stratix10-rsu.0/error_details
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Richard Gong <richard.gong@linux.intel.com>
+Description:
+		(RO) error code.
+
+What:		/sys/devices/platform/stratix10-rsu.0/retry_counter
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Richard Gong <richard.gong@linux.intel.com>
+Description:
+		(RO) the current image's retry counter, which is used by
+		user to know how many times the image is still allowed
+		to reload itself before giving up and starting RSU
+		fail-over flow.
+
+What:		/sys/devices/platform/stratix10-rsu.0/reboot_image
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Richard Gong <richard.gong@linux.intel.com>
+Description:
+		(WO) the address in flash of image to be loaded on next
+		reboot command.
+
+What:		/sys/devices/platform/stratix10-rsu.0/notify
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Richard Gong <richard.gong@linux.intel.com>
+Description:
+		(WO) client to notify firmware with different actions.
+
+		b[15:0]
+		inform firmware of the current software execution
+		stage.
+		0	the first stage bootloader didn't run or
+			didn't reach the point of launching second
+			stage bootloader.
+		1	failed in second bootloader or didn't get
+			to the point of launching the operating
+			system.
+		2	both first and second stage bootloader ran
+			and the operating system launch was
+			attempted.
+
+		b[16]
+		1	firmware to reset current image retry
+			counter.
+		0	no action.
+
+		b[17]
+		1	firmware to clear RSU log.
+		0	no action.
+
+		b[18]
+		this is negative logic
+		1	no action.
+		0	firmware records the notify code defined
+			in b[15:0].
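The notify attribute packs several fields into one 32-bit value, so a client ORs the stage code and the action bits together before writing. A hedged sketch (the sysfs path comes from the entry above; that the attribute accepts a hex string is an assumption about its parser):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* b[15:0] = 2: OS launch was attempted; b[16] = 1: ask the
         * firmware to reset the current image's retry counter */
        unsigned int code = 2u | (1u << 16);
        char buf[16];
        int fd, len;

        fd = open("/sys/devices/platform/stratix10-rsu.0/notify", O_WRONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        len = snprintf(buf, sizeof(buf), "0x%x", code);
        if (write(fd, buf, len) < 0)
            perror("write");
        close(fd);
        return 0;
    }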
diff --git a/Documentation/ABI/testing/sysfs-driver-habanalabs b/Documentation/ABI/testing/sysfs-driver-habanalabs
index f433fc6db3c6..782df74042ed 100644
--- a/Documentation/ABI/testing/sysfs-driver-habanalabs
+++ b/Documentation/ABI/testing/sysfs-driver-habanalabs
@@ -57,6 +57,7 @@ KernelVersion: 5.1
 Contact:        oded.gabbay@gmail.com
 Description:    Allows the user to set the maximum clock frequency for MME, TPC
                 and IC when the power management profile is set to "automatic".
+                This property is valid only for the Goya ASIC family
 
 What:           /sys/class/habanalabs/hl<n>/ic_clk
 Date:           Jan 2019
@@ -127,8 +128,8 @@ Description: Power management profile. Values are "auto", "manual". In "auto"
                 the max clock frequency to a low value when there are no user
                 processes that are opened on the device's file. In "manual"
                 mode, the user sets the maximum clock frequency by writing to
-                ic_clk, mme_clk and tpc_clk
-
+                ic_clk, mme_clk and tpc_clk. This property is valid only for
+                the Goya ASIC family
 
 What:           /sys/class/habanalabs/hl<n>/preboot_btl_ver
 Date:           Jan 2019
@@ -186,11 +187,4 @@ What: /sys/class/habanalabs/hl<n>/uboot_ver
 Date:           Jan 2019
 KernelVersion:  5.1
 Contact:        oded.gabbay@gmail.com
-Description:    Version of the u-boot running on the device's CPU
-
-What:           /sys/class/habanalabs/hl<n>/write_open_cnt
-Date:           Jan 2019
-KernelVersion:  5.1
-Contact:        oded.gabbay@gmail.com
-Description:    Displays the total number of user processes that are currently
-                opened on the device's file
+Description:    Version of the u-boot running on the device's CPU
\ No newline at end of file
diff --git a/Documentation/ABI/testing/sysfs-platform-dfl-fme b/Documentation/ABI/testing/sysfs-platform-dfl-fme
index 8fa4febfa4b2..72634d3ae4f4 100644
--- a/Documentation/ABI/testing/sysfs-platform-dfl-fme
+++ b/Documentation/ABI/testing/sysfs-platform-dfl-fme
@@ -21,3 +21,88 @@ Contact: Wu Hao <hao.wu@intel.com>
 Description:	Read-only. It returns Bitstream (static FPGA region) meta
 		data, which includes the synthesis date, seed and other
 		information of this static FPGA region.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/cache_size
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. It returns cache size of this FPGA device.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/fabric_version
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. It returns fabric version of this FPGA device.
+		Userspace applications need this information to select
+		best data channels per different fabric design.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/socket_id
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. It returns socket_id to indicate which socket
+		this FPGA belongs to, only valid for integrated solution.
+		User only needs this information, in case standard numa node
+		can't provide correct information.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/errors/pcie0_errors
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-Write. Read this file for errors detected on pcie0 link.
+		Write this file to clear errors logged in pcie0_errors. Write
+		fails with -EINVAL if input parsing fails or input error code
+		doesn't match.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/errors/pcie1_errors
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-Write. Read this file for errors detected on pcie1 link.
+		Write this file to clear errors logged in pcie1_errors. Write
+		fails with -EINVAL if input parsing fails or input error code
+		doesn't match.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/errors/nonfatal_errors
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. It returns non-fatal errors detected.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/errors/catfatal_errors
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. It returns catastrophic and fatal errors detected.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/errors/inject_errors
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-Write. Read this file to check errors injected. Write this
+		file to inject errors for testing purpose. Write fails with
+		-EINVAL if input parsing fails or input inject error code isn't
+		supported.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/errors/fme_errors
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-Write. Read this file to get errors detected on FME.
+		Write this file to clear errors logged in fme_errors. Write
+		fails with -EINVAL if input parsing fails or input error code
+		doesn't match.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/errors/first_error
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. Read this file to get the first error detected by
+		hardware.
+
+What:		/sys/bus/platform/devices/dfl-fme.0/errors/next_error
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. Read this file to get the second error detected by
+		hardware.
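The read-write error files above follow a read-then-write-to-clear handshake: the value written must match the currently logged error code, otherwise the write fails with -EINVAL. A minimal sketch against pcie0_errors (the dfl-fme.0 instance name is an assumption about the target system):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path =
            "/sys/bus/platform/devices/dfl-fme.0/errors/pcie0_errors";
        char buf[64];
        ssize_t n;
        int fd = open(path, O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("pcie0 errors: %s", buf);
            /* write the same code back to clear it; a stale or
             * mismatched code is rejected with -EINVAL */
            lseek(fd, 0, SEEK_SET);
            if (write(fd, buf, n) < 0)
                perror("clear");
        }
        close(fd);
        return 0;
    }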
diff --git a/Documentation/ABI/testing/sysfs-platform-dfl-port b/Documentation/ABI/testing/sysfs-platform-dfl-port
index 6a92dda517b0..65658267fcc0 100644
--- a/Documentation/ABI/testing/sysfs-platform-dfl-port
+++ b/Documentation/ABI/testing/sysfs-platform-dfl-port
@@ -14,3 +14,88 @@ Description: Read-only. User can program different PR bitstreams to FPGA
 		Accelerator Function Unit (AFU) for different functions. It
 		returns uuid which could be used to identify which PR bitstream
 		is programmed in this AFU.
+
+What:		/sys/bus/platform/devices/dfl-port.0/power_state
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. It reports the APx (AFU Power) state, different APx
+		means different throttling level. When reading this file, it
+		returns "0" - Normal / "1" - AP1 / "2" - AP2 / "6" - AP6.
+
+What:		/sys/bus/platform/devices/dfl-port.0/ap1_event
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-write. Read this file for AP1 (AFU Power State 1) event.
+		It's used to indicate transient AP1 state. Write 1 to this
+		file to clear AP1 event.
+
+What:		/sys/bus/platform/devices/dfl-port.0/ap2_event
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-write. Read this file for AP2 (AFU Power State 2) event.
+		It's used to indicate transient AP2 state. Write 1 to this
+		file to clear AP2 event.
+
+What:		/sys/bus/platform/devices/dfl-port.0/ltr
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-write. Read or set AFU latency tolerance reporting value.
+		Set ltr to 1 if the AFU can tolerate latency >= 40us or set it
+		to 0 if it is latency sensitive.
+
+What:		/sys/bus/platform/devices/dfl-port.0/userclk_freqcmd
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Write-only. User writes command to this interface to set
+		userclock to AFU.
+
+What:		/sys/bus/platform/devices/dfl-port.0/userclk_freqsts
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. Read this file to get the status of issued command
+		to userclk_freqcmd.
+
+What:		/sys/bus/platform/devices/dfl-port.0/userclk_freqcntrcmd
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Write-only. User writes command to this interface to set
+		userclock counter.
+
+What:		/sys/bus/platform/devices/dfl-port.0/userclk_freqcntrsts
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. Read this file to get the status of issued command
+		to userclk_freqcntrcmd.
+
+What:		/sys/bus/platform/devices/dfl-port.0/errors/errors
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-Write. Read this file to get errors detected on port and
+		Accelerated Function Unit (AFU). Write error code to this file
+		to clear errors. Write fails with -EINVAL if input parsing
+		fails or input error code doesn't match. Write fails with
+		-EBUSY or -ETIMEDOUT if error can't be cleared as hardware
+		is in low power state (-EBUSY) or not responding (-ETIMEDOUT).
+
+What:		/sys/bus/platform/devices/dfl-port.0/errors/first_error
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. Read this file to get the first error detected by
+		hardware.
+
+What:		/sys/bus/platform/devices/dfl-port.0/errors/first_malformed_req
+Date:		August 2019
+KernelVersion:	5.4
+Contact:	Wu Hao <hao.wu@intel.com>
+Description:	Read-only. Read this file to get the first malformed request
+		captured by hardware.
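The transient AP1/AP2 event files are cleared by writing 1, so a monitor typically polls the file and acknowledges what it saw. A short sketch for ap1_event (the dfl-port.0 instance name is an assumption):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path =
            "/sys/bus/platform/devices/dfl-port.0/ap1_event";
        char c = 0;
        int fd = open(path, O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (read(fd, &c, 1) == 1 && c == '1') {
            printf("transient AP1 event seen\n");
            lseek(fd, 0, SEEK_SET);
            if (write(fd, "1", 1) < 0)   /* acknowledge/clear */
                perror("clear");
        }
        close(fd);
        return 0;
    }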
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
index a575e42f7fec..c149fadc6f47 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
@@ -136,7 +136,9 @@ Required properties:
 OCOTP bindings based on SCU Message Protocol
 ------------------------------------------------------------
 Required properties:
-- compatible:		Should be "fsl,imx8qxp-scu-ocotp"
+- compatible:		Should be one of:
+			"fsl,imx8qm-scu-ocotp",
+			"fsl,imx8qxp-scu-ocotp".
 - #address-cells:	Must be 1. Contains byte index
 - #size-cells:		Must be 1. Contains byte length
 
diff --git a/Documentation/devicetree/bindings/extcon/extcon-arizona.txt b/Documentation/devicetree/bindings/extcon/extcon-arizona.txt
index 7f3d94ae81ff..208daaff0be4 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-arizona.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-arizona.txt
@@ -72,5 +72,5 @@ codec: wm8280@0 {
 		  1 2 1 /* MICDET2 MICBIAS2 GPIO=high */
 	>;
 
-	wlf,gpsw = <0>;
+	wlf,gpsw = <ARIZONA_GPSW_OPEN>;
 };
diff --git a/Documentation/devicetree/bindings/extcon/extcon-fsa9480.txt b/Documentation/devicetree/bindings/extcon/extcon-fsa9480.txt
index d592c21245f2..624bd76f468e 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-fsa9480.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-fsa9480.txt
@@ -5,7 +5,9 @@ controlled using I2C and enables USB data, stereo and mono audio, video,
 microphone, and UART data to use a common connector port.
 
 Required properties:
- - compatible : Must be "fcs,fsa9480"
+ - compatible : Must be one of
+   "fcs,fsa9480"
+   "fcs,fsa880"
 - reg : Specifies i2c slave address. Must be 0x25.
 - interrupts : Should contain one entry specifying interrupt signal of
   interrupt parent to which interrupt pin of the chip is connected.
diff --git a/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt
index 817a8d4bf903..5dd0ff0f7b4e 100644
--- a/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt
+++ b/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt
@@ -3,10 +3,7 @@ Altera FPGA To SDRAM Bridge Driver
 Required properties:
 - compatible : Should contain "altr,socfpga-fpga2sdram-bridge"
 
-Optional properties:
-- bridge-enable : 0 if driver should disable bridge at startup
-		  1 if driver should enable bridge at startup
-		  Default is to leave bridge in current state.
+See Documentation/devicetree/bindings/fpga/fpga-bridge.txt for generic bindings.
 
 Example:
 	fpga_bridge3: fpga-bridge@ffc25080 {
diff --git a/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt
index f8e288c71b2d..8b26fbcff3c6 100644
--- a/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt
+++ b/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt
@@ -10,10 +10,7 @@ Required properties:
 - compatible : Should contain "altr,freeze-bridge-controller"
 - regs : base address and size for freeze bridge module
 
-Optional properties:
-- bridge-enable : 0 if driver should disable bridge at startup
-		  1 if driver should enable bridge at startup
-		  Default is to leave bridge in current state.
+See Documentation/devicetree/bindings/fpga/fpga-bridge.txt for generic bindings.
 
 Example:
 	freeze-controller@100000450 {
diff --git a/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt
index 6406f9337eeb..68cce3945b10 100644
--- a/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt
+++ b/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt
@@ -9,10 +9,7 @@ Required properties:
 - resets : Phandle and reset specifier for this bridge's reset
 - clocks : Clocks used by this module.
 
-Optional properties:
-- bridge-enable : 0 if driver should disable bridge at startup.
-		  1 if driver should enable bridge at startup.
-		  Default is to leave bridge in its current state.
+See Documentation/devicetree/bindings/fpga/fpga-bridge.txt for generic bindings.
 
 Example:
 	fpga_bridge0: fpga-bridge@ff400000 {
diff --git a/Documentation/devicetree/bindings/fpga/fpga-bridge.txt b/Documentation/devicetree/bindings/fpga/fpga-bridge.txt
new file mode 100644
index 000000000000..72e06917288a
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/fpga-bridge.txt
@@ -0,0 +1,13 @@
+FPGA Bridge Device Tree Binding
+
+Optional properties:
+- bridge-enable : 0 if driver should disable bridge at startup
+		  1 if driver should enable bridge at startup
+		  Default is to leave bridge in current state.
+
+Example:
+	fpga_bridge3: fpga-bridge@ffc25080 {
+		compatible = "altr,socfpga-fpga2sdram-bridge";
+		reg = <0xffc25080 0x4>;
+		bridge-enable = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt b/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt
index 8dcfba926bc7..4284d293fa61 100644
--- a/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt
+++ b/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt
@@ -18,12 +18,8 @@ Required properties:
 - clocks : input clock to IP
 - clock-names : should contain "aclk"
 
-Optional properties:
-- bridge-enable : 0 if driver should disable bridge at startup
-		  1 if driver should enable bridge at startup
-		  Default is to leave bridge in current state.
-
-See Documentation/devicetree/bindings/fpga/fpga-region.txt for generic bindings.
+See Documentation/devicetree/bindings/fpga/fpga-region.txt and
+Documentation/devicetree/bindings/fpga/fpga-bridge.txt for generic bindings.
 
 Example:
 	fpga-bridge@100000450 {
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,qcs404.txt b/Documentation/devicetree/bindings/interconnect/qcom,qcs404.txt
new file mode 100644
index 000000000000..c07d89812b73
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,qcs404.txt
@@ -0,0 +1,45 @@
+Qualcomm QCS404 Network-On-Chip interconnect driver binding
+-----------------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+			"qcom,qcs404-bimc"
+			"qcom,qcs404-pcnoc"
+			"qcom,qcs404-snoc"
+- #interconnect-cells : should contain 1
+
+reg : specifies the physical base address and size of registers
+clocks : list of phandles and specifiers to all interconnect bus clocks
+clock-names : clock names should include both "bus" and "bus_a"
+
+Example:
+
+soc {
+	...
+	bimc: interconnect@400000 {
+		reg = <0x00400000 0x80000>;
+		compatible = "qcom,qcs404-bimc";
+		#interconnect-cells = <1>;
+		clock-names = "bus", "bus_a";
+		clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
+			<&rpmcc RPM_SMD_BIMC_A_CLK>;
+	};
+
+	pnoc: interconnect@500000 {
+		reg = <0x00500000 0x15080>;
+		compatible = "qcom,qcs404-pcnoc";
+		#interconnect-cells = <1>;
+		clock-names = "bus", "bus_a";
+		clocks = <&rpmcc RPM_SMD_PNOC_CLK>,
+			<&rpmcc RPM_SMD_PNOC_A_CLK>;
+	};
+
+	snoc: interconnect@580000 {
+		reg = <0x00580000 0x23080>;
+		compatible = "qcom,qcs404-snoc";
+		#interconnect-cells = <1>;
+		clock-names = "bus", "bus_a";
+		clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
+			<&rpmcc RPM_SMD_SNOC_A_CLK>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 96ffd06d2ca8..904dadf3d07b 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -2,7 +2,7 @@ Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
 
 This binding represents the on-chip eFuse OTP controller found on
 i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL,
-i.MX7D/S, i.MX7ULP and i.MX8MQ SoCs.
+i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM and i.MX8MN SoCs.
 
 Required properties:
 - compatible: should be one of
@@ -16,6 +16,7 @@ Required properties:
16 "fsl,imx7ulp-ocotp" (i.MX7ULP), 16 "fsl,imx7ulp-ocotp" (i.MX7ULP),
17 "fsl,imx8mq-ocotp" (i.MX8MQ), 17 "fsl,imx8mq-ocotp" (i.MX8MQ),
18 "fsl,imx8mm-ocotp" (i.MX8MM), 18 "fsl,imx8mm-ocotp" (i.MX8MM),
19 "fsl,imx8mn-ocotp" (i.MX8MN),
19 followed by "syscon". 20 followed by "syscon".
20- #address-cells : Should be 1 21- #address-cells : Should be 1
21- #size-cells : Should be 1 22- #size-cells : Should be 1
diff --git a/Documentation/devicetree/bindings/pci/pci-armada8k.txt b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
index 9e3fc15e1af8..8324a4ee6f06 100644
--- a/Documentation/devicetree/bindings/pci/pci-armada8k.txt
+++ b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
@@ -17,6 +17,14 @@ Required properties:
 	name must be "core" for the first clock and "reg" for the second
 	one
 
+Optional properties:
+- phys: phandle(s) to PHY node(s) following the generic PHY bindings.
+	Either 1, 2 or 4 PHYs might be needed depending on the number of
+	PCIe lanes.
+- phy-names: names of the PHYs corresponding to the number of lanes.
+	Must be "cp0-pcie0-x4-lane0-phy", "cp0-pcie0-x4-lane1-phy" for
+	2 PHYs.
+
 Example:
 
 	pcie@f2600000 {
diff --git a/Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml
new file mode 100644
index 000000000000..8a56a8526cef
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/lantiq,vrx200-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq VRX200 and ARX300 PCIe PHY Device Tree Bindings
+
+maintainers:
+  - Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+properties:
+  "#phy-cells":
+    const: 1
+    description: selects the PHY mode as defined in <dt-bindings/phy/phy-lantiq-vrx200-pcie.h>
+
+  compatible:
+    enum:
+      - lantiq,vrx200-pcie-phy
+      - lantiq,arx300-pcie-phy
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: PHY module clock
+      - description: PDI register clock
+
+  clock-names:
+    items:
+      - const: phy
+      - const: pdi
+
+  resets:
+    items:
+      - description: exclusive PHY reset line
+      - description: shared reset line between the PCIe PHY and PCIe controller
+
+  reset-names:
+    items:
+      - const: phy
+      - const: pcie
+
+  lantiq,rcu:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: phandle to the RCU syscon
+
+  lantiq,rcu-endian-offset:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: the offset of the endian registers for this PHY instance in the RCU syscon
+
+  lantiq,rcu-big-endian-mask:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: the mask to set the PDI (PHY) registers for this PHY instance to big endian
+
+  big-endian:
+    description: Configures the PDI (PHY) registers in big-endian mode
+    type: boolean
+
+  little-endian:
+    description: Configures the PDI (PHY) registers in little-endian mode
+    type: boolean
+
+required:
+  - "#phy-cells"
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - lantiq,rcu
+  - lantiq,rcu-endian-offset
+  - lantiq,rcu-big-endian-mask
+
+additionalProperties: false
+
+examples:
+  - |
+    pcie0_phy: phy@106800 {
+        compatible = "lantiq,vrx200-pcie-phy";
+        reg = <0x106800 0x100>;
+        lantiq,rcu = <&rcu0>;
+        lantiq,rcu-endian-offset = <0x4c>;
+        lantiq,rcu-big-endian-mask = <0x80>; /* bit 7 */
+        big-endian;
+        clocks = <&pmu 32>, <&pmu 36>;
+        clock-names = "phy", "pdi";
+        resets = <&reset0 12 24>, <&reset0 22 22>;
+        reset-names = "phy", "pcie";
+        #phy-cells = <1>;
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
index cf2cd86db267..8c60e6985950 100644
--- a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
+++ b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
@@ -25,6 +25,13 @@ Required properties:
 - #address-cells: should be 1.
 - #size-cells: should be 0.
 
+Optional properties:
+
+- clocks: pointers to the reference clocks for this device (CP110 only),
+  consequently: MG clock, MG Core clock, AXI clock.
+- clock-names: names of used clocks for CP110 only, must be :
+  "mg_clk", "mg_core_clk" and "axi_clk".
+
 A sub-node is required for each comphy lane provided by the comphy.
 
 Required properties (child nodes):
@@ -39,6 +46,9 @@ Examples:
 		compatible = "marvell,comphy-cp110";
 		reg = <0x120000 0x6000>;
 		marvell,system-controller = <&cpm_syscon0>;
+		clocks = <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 6>,
+			 <&CP110_LABEL(clk) 1 18>;
+		clock-names = "mg_clk", "mg_core_clk", "axi_clk";
 		#address-cells = <1>;
 		#size-cells = <0>;
 
diff --git a/Documentation/driver-api/uio-howto.rst b/Documentation/driver-api/uio-howto.rst
index 8fecfa11d4ff..84091cd25dc4 100644
--- a/Documentation/driver-api/uio-howto.rst
+++ b/Documentation/driver-api/uio-howto.rst
@@ -408,6 +408,13 @@ handler code. You also do not need to know anything about the chip's
 internal registers to create the kernel part of the driver. All you need
 to know is the irq number of the pin the chip is connected to.
 
+When used in a device-tree enabled system, the driver needs to be
+probed with the ``"of_id"`` module parameter set to the ``"compatible"``
+string of the node the driver is supposed to handle. By default, the
+node's name (without the unit address) is exposed as name for the
+UIO device in userspace. To set a custom name, a property named
+``"linux,uio-name"`` may be specified in the DT node.
+
 Using uio_dmem_genirq for platform devices
 ------------------------------------------
 
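Since the UIO device name defaults to the DT node name (or the "linux,uio-name" override), userspace usually locates its /dev/uioN node by scanning the names under /sys/class/uio. A hedged sketch; the device name "my-device" and the upper bound of 16 instances are placeholders:

    #include <stdio.h>
    #include <string.h>

    /* returns the uio index whose sysfs name matches, or -1 */
    int find_uio_index(const char *wanted)
    {
        char path[64], name[64];
        int i;

        for (i = 0; i < 16; i++) {
            FILE *f;

            snprintf(path, sizeof(path), "/sys/class/uio/uio%d/name", i);
            f = fopen(path, "r");
            if (!f)
                continue;
            if (fgets(name, sizeof(name), f)) {
                name[strcspn(name, "\n")] = '\0';
                if (!strcmp(name, wanted)) {
                    fclose(f);
                    return i;    /* open /dev/uio<i> next */
                }
            }
            fclose(f);
        }
        return -1;
    }

    int main(void)
    {
        printf("uio index: %d\n", find_uio_index("my-device"));
        return 0;
    }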
diff --git a/Documentation/fpga/dfl.rst b/Documentation/fpga/dfl.rst
index 2f125abd777f..6fa483fc823e 100644
--- a/Documentation/fpga/dfl.rst
+++ b/Documentation/fpga/dfl.rst
@@ -87,6 +87,8 @@ The following functions are exposed through ioctls:
 - Get driver API version (DFL_FPGA_GET_API_VERSION)
 - Check for extensions (DFL_FPGA_CHECK_EXTENSION)
 - Program bitstream (DFL_FPGA_FME_PORT_PR)
+- Assign port to PF (DFL_FPGA_FME_PORT_ASSIGN)
+- Release port from PF (DFL_FPGA_FME_PORT_RELEASE)
 
 More functions are exposed through sysfs
 (/sys/class/fpga_region/regionX/dfl-fme.n/):
@@ -102,6 +104,10 @@ More functions are exposed through sysfs
  one FPGA device may have more than one port, this sysfs interface indicates
  how many ports the FPGA device has.
 
+ Global error reporting management (errors/)
+ error reporting sysfs interfaces allow user to read errors detected by the
+ hardware, and clear the logged errors.
+
 
 FIU - PORT
 ==========
@@ -143,6 +149,10 @@ More functions are exposed through sysfs:
  Read Accelerator GUID (afu_id)
  afu_id indicates which PR bitstream is programmed to this AFU.
 
+ Error reporting (errors/)
+ error reporting sysfs interfaces allow user to read port/afu errors
+ detected by the hardware, and clear the logged errors.
+
 
 DFL Framework Overview
 ======================
@@ -218,6 +228,101 @@ the compat_id exposed by the target FPGA region. This check is usually done by
 userspace before calling the reconfiguration IOCTL.
 
 
+FPGA virtualization - PCIe SRIOV
+================================
+This section describes the virtualization support on DFL based FPGA device to
+enable accessing an accelerator from applications running in a virtual machine
+(VM). This section only describes the PCIe based FPGA device with SRIOV support.
+
+Features supported by the particular FPGA device are exposed through Device
+Feature Lists, as illustrated below:
+
+::
+
+    +-------------------------------+  +-------------+
+    |              PF               |  |     VF      |
+    +-------------------------------+  +-------------+
+        ^            ^         ^              ^
+        |            |         |              |
+  +-----|------------|---------|--------------|-------+
+  |     |            |         |              |       |
+  |  +-----+     +-------+ +-------+      +-------+   |
+  |  | FME |     | Port0 | | Port1 |      | Port2 |   |
+  |  +-----+     +-------+ +-------+      +-------+   |
+  |                  ^         ^              ^       |
+  |                  |         |              |       |
+  |              +-------+ +------+       +-------+   |
+  |              |  AFU  | |  AFU |       |  AFU  |   |
+  |              +-------+ +------+       +-------+   |
+  |                                                   |
+  |            DFL based FPGA PCIe Device             |
+  +---------------------------------------------------+
+
+FME is always accessed through the physical function (PF).
+
+Ports (and related AFUs) are accessed via PF by default, but could be exposed
+through virtual function (VF) devices via PCIe SRIOV. Each VF only contains
+1 Port and 1 AFU for isolation. Users could assign individual VFs (accelerators)
+created via PCIe SRIOV interface, to virtual machines.
+
+The driver organization in virtualization case is illustrated below:
+::
+
+    +-------++------++------+             |
+    | FME   || FME  || FME  |             |
+    | FPGA  || FPGA || FPGA |             |
+    |Manager||Bridge||Region|             |
+    +-------++------++------+             |
+    +-----------------------+  +--------+ |             +--------+
+    |          FME          |  |  AFU   | |             |  AFU   |
+    |         Module        |  | Module | |             | Module |
+    +-----------------------+  +--------+ |             +--------+
+    +-----------------------+             | +-----------------------+
+    | FPGA Container Device |             | | FPGA Container Device |
+    |  (FPGA Base Region)   |             | |  (FPGA Base Region)   |
+    +-----------------------+             | +-----------------------+
+    +------------------+                  | +------------------+
+    | FPGA PCIE Module |         Virtual  | | FPGA PCIE Module |
+    +------------------+   Host  Machine  | +------------------+
+    ---------------------------------------|--------------------------------
+    +---------------+                     | +---------------+
+    | PCI PF Device |                     | | PCI VF Device |
+    +---------------+                     | +---------------+
+
+FPGA PCIe device driver is always loaded first once a FPGA PCIe PF or VF device
+is detected. It:
+
+* Finishes enumeration on both FPGA PCIe PF and VF device using common
+  interfaces from DFL framework.
+* Supports SRIOV.
+
+The FME device driver plays a management role in this driver architecture, it
+provides ioctls to release Port from PF and assign Port to PF. After a port is
+released from the PF, it's safe to expose this port through a VF via the PCIe
+SRIOV sysfs interface.
+
+To enable accessing an accelerator from applications running in a VM, the
+respective AFU's port needs to be assigned to a VF using the following steps:
+
+#. The PF owns all AFU ports by default. Any port that needs to be
+   reassigned to a VF must first be released through the
+   DFL_FPGA_FME_PORT_RELEASE ioctl on the FME device.
+
+#. Once N ports are released from PF, then user can use command below
+   to enable SRIOV and VFs. Each VF owns only one Port with AFU.
+
+   ::
+
+      echo N > $PCI_DEVICE_PATH/sriov_numvfs
+
+#. Pass through the VFs to VMs
+
+#. The AFU under VF is accessible from applications in VM (using the
+   same driver inside the VF).
+
+Note that an FME can't be assigned to a VF, thus PR and other management
+functions are only available via the PF.
+
 Device enumeration
 ==================
 This section introduces how applications enumerate the fpga device from
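Step 1 above is an ioctl on the FME character device. A hedged sketch of the release-then-enable-VFs flow; the /dev/dfl-fme.0 path and the plain-int port id argument follow the uapi header added in this merge, but treat both as assumptions about your kernel:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fpga-dfl.h>

    int main(void)
    {
        int port_id = 0;    /* release port0 so it can back a VF */
        int fd = open("/dev/dfl-fme.0", O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, DFL_FPGA_FME_PORT_RELEASE, &port_id) < 0)
            perror("DFL_FPGA_FME_PORT_RELEASE");
        close(fd);
        /* then, as root: echo 1 > /sys/bus/pci/devices/<BDF>/sriov_numvfs */
        return 0;
    }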
diff --git a/Documentation/misc-devices/index.rst b/Documentation/misc-devices/index.rst
index a57f92dfe49a..f11c5daeada5 100644
--- a/Documentation/misc-devices/index.rst
+++ b/Documentation/misc-devices/index.rst
@@ -20,3 +20,4 @@ fit into other categories.
    isl29003
    lis3lv02d
    max6875
+   xilinx_sdfec
diff --git a/MAINTAINERS b/MAINTAINERS
index 0262f99df61d..6e1b0ac2441b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8360,6 +8360,17 @@ F: drivers/platform/x86/intel_speed_select_if/
 F:	tools/power/x86/intel-speed-select/
 F:	include/uapi/linux/isst_if.h
 
+INTEL STRATIX10 FIRMWARE DRIVERS
+M:	Richard Gong <richard.gong@linux.intel.com>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	drivers/firmware/stratix10-rsu.c
+F:	drivers/firmware/stratix10-svc.c
+F:	include/linux/firmware/intel/stratix10-smc.h
+F:	include/linux/firmware/intel/stratix10-svc-client.h
+F:	Documentation/ABI/testing/sysfs-devices-platform-stratix10-rsu
+F:	Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt
+
 INTEL TELEMETRY DRIVER
 M:	Rajneesh Bhardwaj <rajneesh.bhardwaj@linux.intel.com>
 M:	"David E. Box" <david.e.box@linux.intel.com>
@@ -8411,6 +8422,7 @@ M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
 S:	Supported
 F:	Documentation/trace/intel_th.rst
 F:	drivers/hwtracing/intel_th/
+F:	include/linux/intel_th.h
 
 INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
 M:	Ning Sun <ning.sun@intel.com>
@@ -17760,6 +17772,17 @@ F: Documentation/devicetree/bindings/media/xilinx/
 F:	drivers/media/platform/xilinx/
 F:	include/uapi/linux/xilinx-v4l2-controls.h
 
+XILINX SD-FEC IP CORES
+M:	Derek Kiernan <derek.kiernan@xilinx.com>
+M:	Dragan Cvetic <dragan.cvetic@xilinx.com>
+S:	Maintained
+F:	Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt
+F:	Documentation/misc-devices/xilinx_sdfec.rst
+F:	drivers/misc/xilinx_sdfec.c
+F:	drivers/misc/Kconfig
+F:	drivers/misc/Makefile
+F:	include/uapi/misc/xilinx_sdfec.h
+
 XILLYBUS DRIVER
 M:	Eli Billauer <eli.billauer@gmail.com>
 L:	linux-kernel@vger.kernel.org
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 2cb35d30cb14..3eacf474e1e3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -39,6 +39,12 @@ static const guid_t prp_guids[] = {
 	/* External facing port GUID: efcc06cc-73ac-4bc3-bff0-76143807c389 */
 	GUID_INIT(0xefcc06cc, 0x73ac, 0x4bc3,
 		  0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
+	/* Thunderbolt GUID for IMR_VALID: c44d002f-69f9-4e7d-a904-a7baabdf43f7 */
+	GUID_INIT(0xc44d002f, 0x69f9, 0x4e7d,
+		  0xa9, 0x04, 0xa7, 0xba, 0xab, 0xdf, 0x43, 0xf7),
+	/* Thunderbolt GUID for WAKE_SUPPORTED: 6c501103-c189-4296-ba72-9bf5a26ebe5d */
+	GUID_INIT(0x6c501103, 0xc189, 0x4296,
+		  0xba, 0x72, 0x9b, 0xf5, 0xa2, 0x6e, 0xbe, 0x5d),
 };
 
 /* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index dc1c83eafc22..c0a491277aca 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -122,7 +122,7 @@ static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
 
-static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
 module_param_named(devices, binder_devices_param, charp, 0444);
 
 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
@@ -196,30 +196,8 @@ static inline void binder_stats_created(enum binder_stat_types type)
 	atomic_inc(&binder_stats.obj_created[type]);
 }
 
-struct binder_transaction_log_entry {
-	int debug_id;
-	int debug_id_done;
-	int call_type;
-	int from_proc;
-	int from_thread;
-	int target_handle;
-	int to_proc;
-	int to_thread;
-	int to_node;
-	int data_size;
-	int offsets_size;
-	int return_error_line;
-	uint32_t return_error;
-	uint32_t return_error_param;
-	const char *context_name;
-};
-struct binder_transaction_log {
-	atomic_t cur;
-	bool full;
-	struct binder_transaction_log_entry entry[32];
-};
-static struct binder_transaction_log binder_transaction_log;
-static struct binder_transaction_log binder_transaction_log_failed;
+struct binder_transaction_log binder_transaction_log;
+struct binder_transaction_log binder_transaction_log_failed;
 
 static struct binder_transaction_log_entry *binder_transaction_log_add(
 	struct binder_transaction_log *log)
@@ -480,6 +458,7 @@ enum binder_deferred_state {
  * @inner_lock:           can nest under outer_lock and/or node lock
  * @outer_lock:           no nesting under innor or node lock
  *                        Lock order: 1) outer, 2) node, 3) inner
+ * @binderfs_entry:       process-specific binderfs log file
  *
  * Bookkeeping structure for binder processes
  */
@@ -509,6 +488,7 @@ struct binder_proc {
 	struct binder_context *context;
 	spinlock_t inner_lock;
 	spinlock_t outer_lock;
+	struct dentry *binderfs_entry;
 };
 
 enum {
@@ -5230,6 +5210,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
 {
 	struct binder_proc *proc;
 	struct binder_device *binder_dev;
+	struct binderfs_info *info;
+	struct dentry *binder_binderfs_dir_entry_proc = NULL;
 
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
 		     current->group_leader->pid, current->pid);
@@ -5244,11 +5226,14 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	INIT_LIST_HEAD(&proc->todo);
 	proc->default_priority = task_nice(current);
 	/* binderfs stashes devices in i_private */
-	if (is_binderfs_device(nodp))
+	if (is_binderfs_device(nodp)) {
 		binder_dev = nodp->i_private;
-	else
+		info = nodp->i_sb->s_fs_info;
+		binder_binderfs_dir_entry_proc = info->proc_log_dir;
+	} else {
 		binder_dev = container_of(filp->private_data,
 					  struct binder_device, miscdev);
+	}
 	proc->context = &binder_dev->context;
 	binder_alloc_init(&proc->alloc);
 
@@ -5279,6 +5264,35 @@ static int binder_open(struct inode *nodp, struct file *filp)
5279 &proc_fops); 5264 &proc_fops);
5280 } 5265 }
5281 5266
5267 if (binder_binderfs_dir_entry_proc) {
5268 char strbuf[11];
5269 struct dentry *binderfs_entry;
5270
5271 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5272 /*
5273 * Similar to debugfs, the process specific log file is shared
5274 * between contexts. If the file has already been created for a
5275 * process, the following binderfs_create_file() call will
5276 * fail with error code EEXIST if another context of the same
5277 * process invoked binder_open(). This is ok since, as with
5278 * debugfs, the log file will contain information on all
5279 * contexts of a given PID.
5280 */
5281 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5282 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5283 if (!IS_ERR(binderfs_entry)) {
5284 proc->binderfs_entry = binderfs_entry;
5285 } else {
5286 int error;
5287
5288 error = PTR_ERR(binderfs_entry);
5289 if (error != -EEXIST) {
5290 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5291 strbuf, error);
5292 }
5293 }
5294 }
5295
5282 return 0; 5296 return 0;
5283} 5297}
5284 5298
@@ -5318,6 +5332,12 @@ static int binder_release(struct inode *nodp, struct file *filp)
5318 struct binder_proc *proc = filp->private_data; 5332 struct binder_proc *proc = filp->private_data;
5319 5333
5320 debugfs_remove(proc->debugfs_entry); 5334 debugfs_remove(proc->debugfs_entry);
5335
5336 if (proc->binderfs_entry) {
5337 binderfs_remove_file(proc->binderfs_entry);
5338 proc->binderfs_entry = NULL;
5339 }
5340
5321 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 5341 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5322 5342
5323 return 0; 5343 return 0;
@@ -5907,7 +5927,7 @@ static void print_binder_proc_stats(struct seq_file *m,
5907} 5927}
5908 5928
5909 5929
5910static int state_show(struct seq_file *m, void *unused) 5930int binder_state_show(struct seq_file *m, void *unused)
5911{ 5931{
5912 struct binder_proc *proc; 5932 struct binder_proc *proc;
5913 struct binder_node *node; 5933 struct binder_node *node;
@@ -5946,7 +5966,7 @@ static int state_show(struct seq_file *m, void *unused)
5946 return 0; 5966 return 0;
5947} 5967}
5948 5968
5949static int stats_show(struct seq_file *m, void *unused) 5969int binder_stats_show(struct seq_file *m, void *unused)
5950{ 5970{
5951 struct binder_proc *proc; 5971 struct binder_proc *proc;
5952 5972
@@ -5962,7 +5982,7 @@ static int stats_show(struct seq_file *m, void *unused)
5962 return 0; 5982 return 0;
5963} 5983}
5964 5984
5965static int transactions_show(struct seq_file *m, void *unused) 5985int binder_transactions_show(struct seq_file *m, void *unused)
5966{ 5986{
5967 struct binder_proc *proc; 5987 struct binder_proc *proc;
5968 5988
@@ -6018,7 +6038,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
6018 "\n" : " (incomplete)\n"); 6038 "\n" : " (incomplete)\n");
6019} 6039}
6020 6040
6021static int transaction_log_show(struct seq_file *m, void *unused) 6041int binder_transaction_log_show(struct seq_file *m, void *unused)
6022{ 6042{
6023 struct binder_transaction_log *log = m->private; 6043 struct binder_transaction_log *log = m->private;
6024 unsigned int log_cur = atomic_read(&log->cur); 6044 unsigned int log_cur = atomic_read(&log->cur);
@@ -6050,11 +6070,6 @@ const struct file_operations binder_fops = {
6050 .release = binder_release, 6070 .release = binder_release,
6051}; 6071};
6052 6072
6053DEFINE_SHOW_ATTRIBUTE(state);
6054DEFINE_SHOW_ATTRIBUTE(stats);
6055DEFINE_SHOW_ATTRIBUTE(transactions);
6056DEFINE_SHOW_ATTRIBUTE(transaction_log);
6057
6058static int __init init_binder_device(const char *name) 6073static int __init init_binder_device(const char *name)
6059{ 6074{
6060 int ret; 6075 int ret;
@@ -6108,30 +6123,31 @@ static int __init binder_init(void)
6108 0444, 6123 0444,
6109 binder_debugfs_dir_entry_root, 6124 binder_debugfs_dir_entry_root,
6110 NULL, 6125 NULL,
6111 &state_fops); 6126 &binder_state_fops);
6112 debugfs_create_file("stats", 6127 debugfs_create_file("stats",
6113 0444, 6128 0444,
6114 binder_debugfs_dir_entry_root, 6129 binder_debugfs_dir_entry_root,
6115 NULL, 6130 NULL,
6116 &stats_fops); 6131 &binder_stats_fops);
6117 debugfs_create_file("transactions", 6132 debugfs_create_file("transactions",
6118 0444, 6133 0444,
6119 binder_debugfs_dir_entry_root, 6134 binder_debugfs_dir_entry_root,
6120 NULL, 6135 NULL,
6121 &transactions_fops); 6136 &binder_transactions_fops);
6122 debugfs_create_file("transaction_log", 6137 debugfs_create_file("transaction_log",
6123 0444, 6138 0444,
6124 binder_debugfs_dir_entry_root, 6139 binder_debugfs_dir_entry_root,
6125 &binder_transaction_log, 6140 &binder_transaction_log,
6126 &transaction_log_fops); 6141 &binder_transaction_log_fops);
6127 debugfs_create_file("failed_transaction_log", 6142 debugfs_create_file("failed_transaction_log",
6128 0444, 6143 0444,
6129 binder_debugfs_dir_entry_root, 6144 binder_debugfs_dir_entry_root,
6130 &binder_transaction_log_failed, 6145 &binder_transaction_log_failed,
6131 &transaction_log_fops); 6146 &binder_transaction_log_fops);
6132 } 6147 }
6133 6148
6134 if (strcmp(binder_devices_param, "") != 0) { 6149 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6150 strcmp(binder_devices_param, "") != 0) {
6135 /* 6151 /*
6136 * Copy the module_parameter string, because we don't want to 6152 * Copy the module_parameter string, because we don't want to
6137 * tokenize it in-place. 6153 * tokenize it in-place.
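The per-PID log creation added to binder_open() above reduces to a small pattern; here is a minimal sketch (the wrapper name example_create_proc_log is hypothetical, while binderfs_create_file() and proc->binderfs_entry are introduced by this series), showing why -EEXIST is deliberately tolerated when a second binder context of the same process opens the device:

static void example_create_proc_log(struct dentry *proc_log_dir,
				    struct binder_proc *proc,
				    const struct file_operations *fops)
{
	char strbuf[11];	/* a 32-bit PID in decimal, plus NUL */
	struct dentry *dentry;

	snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
	dentry = binderfs_create_file(proc_log_dir, strbuf, fops,
				      (void *)(unsigned long)proc->pid);
	if (!IS_ERR(dentry))
		proc->binderfs_entry = dentry;
	else if (PTR_ERR(dentry) != -EEXIST)	/* shared log already exists */
		pr_warn("Unable to create file %s in binderfs (error %ld)\n",
			strbuf, PTR_ERR(dentry));
}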
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 045b3e42d98b..bd47f7f72075 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -35,15 +35,63 @@ struct binder_device {
35 struct inode *binderfs_inode; 35 struct inode *binderfs_inode;
36}; 36};
37 37
38/**
39 * binderfs_mount_opts - mount options for binderfs
40 * @max: maximum number of allocatable binderfs binder devices
41 * @stats_mode: enable binder stats in binderfs.
42 */
43struct binderfs_mount_opts {
44 int max;
45 int stats_mode;
46};
47
48/**
49 * binderfs_info - information about a binderfs mount
50 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
51 * @control_dentry: This records the dentry of this binderfs mount
52 * binder-control device.
53 * @root_uid: uid that needs to be used when a new binder device is
54 * created.
55 * @root_gid: gid that needs to be used when a new binder device is
56 * created.
57 * @mount_opts: The mount options in use.
58 * @device_count: The current number of allocated binder devices.
59 * @proc_log_dir: Pointer to the directory dentry containing process-specific
60 * logs.
61 */
62struct binderfs_info {
63 struct ipc_namespace *ipc_ns;
64 struct dentry *control_dentry;
65 kuid_t root_uid;
66 kgid_t root_gid;
67 struct binderfs_mount_opts mount_opts;
68 int device_count;
69 struct dentry *proc_log_dir;
70};
71
38extern const struct file_operations binder_fops; 72extern const struct file_operations binder_fops;
39 73
74extern char *binder_devices_param;
75
40#ifdef CONFIG_ANDROID_BINDERFS 76#ifdef CONFIG_ANDROID_BINDERFS
41extern bool is_binderfs_device(const struct inode *inode); 77extern bool is_binderfs_device(const struct inode *inode);
78extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
79 const struct file_operations *fops,
80 void *data);
81extern void binderfs_remove_file(struct dentry *dentry);
42#else 82#else
43static inline bool is_binderfs_device(const struct inode *inode) 83static inline bool is_binderfs_device(const struct inode *inode)
44{ 84{
45 return false; 85 return false;
46} 86}
87static inline struct dentry *binderfs_create_file(struct dentry *dir,
88 const char *name,
89 const struct file_operations *fops,
90 void *data)
91{
92 return NULL;
93}
94static inline void binderfs_remove_file(struct dentry *dentry) {}
47#endif 95#endif
48 96
49#ifdef CONFIG_ANDROID_BINDERFS 97#ifdef CONFIG_ANDROID_BINDERFS
@@ -55,4 +103,42 @@ static inline int __init init_binderfs(void)
55} 103}
56#endif 104#endif
57 105
106int binder_stats_show(struct seq_file *m, void *unused);
107DEFINE_SHOW_ATTRIBUTE(binder_stats);
108
109int binder_state_show(struct seq_file *m, void *unused);
110DEFINE_SHOW_ATTRIBUTE(binder_state);
111
112int binder_transactions_show(struct seq_file *m, void *unused);
113DEFINE_SHOW_ATTRIBUTE(binder_transactions);
114
115int binder_transaction_log_show(struct seq_file *m, void *unused);
116DEFINE_SHOW_ATTRIBUTE(binder_transaction_log);
117
118struct binder_transaction_log_entry {
119 int debug_id;
120 int debug_id_done;
121 int call_type;
122 int from_proc;
123 int from_thread;
124 int target_handle;
125 int to_proc;
126 int to_thread;
127 int to_node;
128 int data_size;
129 int offsets_size;
130 int return_error_line;
131 uint32_t return_error;
132 uint32_t return_error_param;
133 const char *context_name;
134};
135
136struct binder_transaction_log {
137 atomic_t cur;
138 bool full;
139 struct binder_transaction_log_entry entry[32];
140};
141
142extern struct binder_transaction_log binder_transaction_log;
143extern struct binder_transaction_log binder_transaction_log_failed;
58#endif /* _LINUX_BINDER_INTERNAL_H */ 144#endif /* _LINUX_BINDER_INTERNAL_H */
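The DEFINE_SHOW_ATTRIBUTE() invocations above come from <linux/seq_file.h>; DEFINE_SHOW_ATTRIBUTE(binder_stats) roughly expands to the snippet below, which is why declaring the show functions in this header gives both debugfs and binderfs a shared binder_stats_fops:

static int binder_stats_open(struct inode *inode, struct file *file)
{
	/* single_open() binds the show callback to a one-record seq_file */
	return single_open(file, binder_stats_show, inode->i_private);
}

static const struct file_operations binder_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= binder_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};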
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index e773f45d19d9..e2580e5316a2 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -48,45 +48,23 @@ static dev_t binderfs_dev;
48static DEFINE_MUTEX(binderfs_minors_mutex); 48static DEFINE_MUTEX(binderfs_minors_mutex);
49static DEFINE_IDA(binderfs_minors); 49static DEFINE_IDA(binderfs_minors);
50 50
51/**
52 * binderfs_mount_opts - mount options for binderfs
53 * @max: maximum number of allocatable binderfs binder devices
54 */
55struct binderfs_mount_opts {
56 int max;
57};
58
59enum { 51enum {
60 Opt_max, 52 Opt_max,
53 Opt_stats_mode,
61 Opt_err 54 Opt_err
62}; 55};
63 56
57enum binderfs_stats_mode {
58 STATS_NONE,
59 STATS_GLOBAL,
60};
61
64static const match_table_t tokens = { 62static const match_table_t tokens = {
65 { Opt_max, "max=%d" }, 63 { Opt_max, "max=%d" },
64 { Opt_stats_mode, "stats=%s" },
66 { Opt_err, NULL } 65 { Opt_err, NULL }
67}; 66};
68 67
69/**
70 * binderfs_info - information about a binderfs mount
71 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
72 * @control_dentry: This records the dentry of this binderfs mount
73 * binder-control device.
74 * @root_uid: uid that needs to be used when a new binder device is
75 * created.
76 * @root_gid: gid that needs to be used when a new binder device is
77 * created.
78 * @mount_opts: The mount options in use.
79 * @device_count: The current number of allocated binder devices.
80 */
81struct binderfs_info {
82 struct ipc_namespace *ipc_ns;
83 struct dentry *control_dentry;
84 kuid_t root_uid;
85 kgid_t root_gid;
86 struct binderfs_mount_opts mount_opts;
87 int device_count;
88};
89
90static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) 68static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
91{ 69{
92 return inode->i_sb->s_fs_info; 70 return inode->i_sb->s_fs_info;
@@ -186,8 +164,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
186 req->major = MAJOR(binderfs_dev); 164 req->major = MAJOR(binderfs_dev);
187 req->minor = minor; 165 req->minor = minor;
188 166
189 ret = copy_to_user(userp, req, sizeof(*req)); 167 if (userp && copy_to_user(userp, req, sizeof(*req))) {
190 if (ret) {
191 ret = -EFAULT; 168 ret = -EFAULT;
192 goto err; 169 goto err;
193 } 170 }
@@ -272,7 +249,7 @@ static void binderfs_evict_inode(struct inode *inode)
272 249
273 clear_inode(inode); 250 clear_inode(inode);
274 251
275 if (!device) 252 if (!S_ISCHR(inode->i_mode) || !device)
276 return; 253 return;
277 254
278 mutex_lock(&binderfs_minors_mutex); 255 mutex_lock(&binderfs_minors_mutex);
@@ -291,8 +268,9 @@ static void binderfs_evict_inode(struct inode *inode)
291static int binderfs_parse_mount_opts(char *data, 268static int binderfs_parse_mount_opts(char *data,
292 struct binderfs_mount_opts *opts) 269 struct binderfs_mount_opts *opts)
293{ 270{
294 char *p; 271 char *p, *stats;
295 opts->max = BINDERFS_MAX_MINOR; 272 opts->max = BINDERFS_MAX_MINOR;
273 opts->stats_mode = STATS_NONE;
296 274
297 while ((p = strsep(&data, ",")) != NULL) { 275 while ((p = strsep(&data, ",")) != NULL) {
298 substring_t args[MAX_OPT_ARGS]; 276 substring_t args[MAX_OPT_ARGS];
@@ -312,6 +290,22 @@ static int binderfs_parse_mount_opts(char *data,
312 290
313 opts->max = max_devices; 291 opts->max = max_devices;
314 break; 292 break;
293 case Opt_stats_mode:
294 if (!capable(CAP_SYS_ADMIN))
295 return -EINVAL;
296
297 stats = match_strdup(&args[0]);
298 if (!stats)
299 return -ENOMEM;
300
301 if (strcmp(stats, "global") != 0) {
302 kfree(stats);
303 return -EINVAL;
304 }
305
306 opts->stats_mode = STATS_GLOBAL;
307 kfree(stats);
308 break;
315 default: 309 default:
316 pr_err("Invalid mount options\n"); 310 pr_err("Invalid mount options\n");
317 return -EINVAL; 311 return -EINVAL;
@@ -323,8 +317,21 @@ static int binderfs_parse_mount_opts(char *data,
323 317
324static int binderfs_remount(struct super_block *sb, int *flags, char *data) 318static int binderfs_remount(struct super_block *sb, int *flags, char *data)
325{ 319{
320 int prev_stats_mode, ret;
326 struct binderfs_info *info = sb->s_fs_info; 321 struct binderfs_info *info = sb->s_fs_info;
327 return binderfs_parse_mount_opts(data, &info->mount_opts); 322
323 prev_stats_mode = info->mount_opts.stats_mode;
324 ret = binderfs_parse_mount_opts(data, &info->mount_opts);
325 if (ret)
326 return ret;
327
328 if (prev_stats_mode != info->mount_opts.stats_mode) {
329 pr_err("Binderfs stats mode cannot be changed during a remount\n");
330 info->mount_opts.stats_mode = prev_stats_mode;
331 return -EINVAL;
332 }
333
334 return 0;
328} 335}
329 336
330static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root) 337static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
@@ -334,6 +341,8 @@ static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
334 info = root->d_sb->s_fs_info; 341 info = root->d_sb->s_fs_info;
335 if (info->mount_opts.max <= BINDERFS_MAX_MINOR) 342 if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
336 seq_printf(seq, ",max=%d", info->mount_opts.max); 343 seq_printf(seq, ",max=%d", info->mount_opts.max);
344 if (info->mount_opts.stats_mode == STATS_GLOBAL)
345 seq_printf(seq, ",stats=global");
337 346
338 return 0; 347 return 0;
339} 348}
@@ -462,11 +471,192 @@ static const struct inode_operations binderfs_dir_inode_operations = {
462 .unlink = binderfs_unlink, 471 .unlink = binderfs_unlink,
463}; 472};
464 473
474static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
475{
476 struct inode *ret;
477
478 ret = new_inode(sb);
479 if (ret) {
480 ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
481 ret->i_mode = mode;
482 ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
483 }
484 return ret;
485}
486
487static struct dentry *binderfs_create_dentry(struct dentry *parent,
488 const char *name)
489{
490 struct dentry *dentry;
491
492 dentry = lookup_one_len(name, parent, strlen(name));
493 if (IS_ERR(dentry))
494 return dentry;
495
496 /* Return error if the file/dir already exists. */
497 if (d_really_is_positive(dentry)) {
498 dput(dentry);
499 return ERR_PTR(-EEXIST);
500 }
501
502 return dentry;
503}
504
505void binderfs_remove_file(struct dentry *dentry)
506{
507 struct inode *parent_inode;
508
509 parent_inode = d_inode(dentry->d_parent);
510 inode_lock(parent_inode);
511 if (simple_positive(dentry)) {
512 dget(dentry);
513 simple_unlink(parent_inode, dentry);
514 d_delete(dentry);
515 dput(dentry);
516 }
517 inode_unlock(parent_inode);
518}
519
520struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
521 const struct file_operations *fops,
522 void *data)
523{
524 struct dentry *dentry;
525 struct inode *new_inode, *parent_inode;
526 struct super_block *sb;
527
528 parent_inode = d_inode(parent);
529 inode_lock(parent_inode);
530
531 dentry = binderfs_create_dentry(parent, name);
532 if (IS_ERR(dentry))
533 goto out;
534
535 sb = parent_inode->i_sb;
536 new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
537 if (!new_inode) {
538 dput(dentry);
539 dentry = ERR_PTR(-ENOMEM);
540 goto out;
541 }
542
543 new_inode->i_fop = fops;
544 new_inode->i_private = data;
545 d_instantiate(dentry, new_inode);
546 fsnotify_create(parent_inode, dentry);
547
548out:
549 inode_unlock(parent_inode);
550 return dentry;
551}
552
553static struct dentry *binderfs_create_dir(struct dentry *parent,
554 const char *name)
555{
556 struct dentry *dentry;
557 struct inode *new_inode, *parent_inode;
558 struct super_block *sb;
559
560 parent_inode = d_inode(parent);
561 inode_lock(parent_inode);
562
563 dentry = binderfs_create_dentry(parent, name);
564 if (IS_ERR(dentry))
565 goto out;
566
567 sb = parent_inode->i_sb;
568 new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
569 if (!new_inode) {
570 dput(dentry);
571 dentry = ERR_PTR(-ENOMEM);
572 goto out;
573 }
574
575 new_inode->i_fop = &simple_dir_operations;
576 new_inode->i_op = &simple_dir_inode_operations;
577
578 set_nlink(new_inode, 2);
579 d_instantiate(dentry, new_inode);
580 inc_nlink(parent_inode);
581 fsnotify_mkdir(parent_inode, dentry);
582
583out:
584 inode_unlock(parent_inode);
585 return dentry;
586}
587
588static int init_binder_logs(struct super_block *sb)
589{
590 struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
591 struct binderfs_info *info;
592 int ret = 0;
593
594 binder_logs_root_dir = binderfs_create_dir(sb->s_root,
595 "binder_logs");
596 if (IS_ERR(binder_logs_root_dir)) {
597 ret = PTR_ERR(binder_logs_root_dir);
598 goto out;
599 }
600
601 dentry = binderfs_create_file(binder_logs_root_dir, "stats",
602 &binder_stats_fops, NULL);
603 if (IS_ERR(dentry)) {
604 ret = PTR_ERR(dentry);
605 goto out;
606 }
607
608 dentry = binderfs_create_file(binder_logs_root_dir, "state",
609 &binder_state_fops, NULL);
610 if (IS_ERR(dentry)) {
611 ret = PTR_ERR(dentry);
612 goto out;
613 }
614
615 dentry = binderfs_create_file(binder_logs_root_dir, "transactions",
616 &binder_transactions_fops, NULL);
617 if (IS_ERR(dentry)) {
618 ret = PTR_ERR(dentry);
619 goto out;
620 }
621
622 dentry = binderfs_create_file(binder_logs_root_dir,
623 "transaction_log",
624 &binder_transaction_log_fops,
625 &binder_transaction_log);
626 if (IS_ERR(dentry)) {
627 ret = PTR_ERR(dentry);
628 goto out;
629 }
630
631 dentry = binderfs_create_file(binder_logs_root_dir,
632 "failed_transaction_log",
633 &binder_transaction_log_fops,
634 &binder_transaction_log_failed);
635 if (IS_ERR(dentry)) {
636 ret = PTR_ERR(dentry);
637 goto out;
638 }
639
640 proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
641 if (IS_ERR(proc_log_dir)) {
642 ret = PTR_ERR(proc_log_dir);
643 goto out;
644 }
645 info = sb->s_fs_info;
646 info->proc_log_dir = proc_log_dir;
647
648out:
649 return ret;
650}
651
465static int binderfs_fill_super(struct super_block *sb, void *data, int silent) 652static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
466{ 653{
467 int ret; 654 int ret;
468 struct binderfs_info *info; 655 struct binderfs_info *info;
469 struct inode *inode = NULL; 656 struct inode *inode = NULL;
657 struct binderfs_device device_info = { 0 };
658 const char *name;
659 size_t len;
470 660
471 sb->s_blocksize = PAGE_SIZE; 661 sb->s_blocksize = PAGE_SIZE;
472 sb->s_blocksize_bits = PAGE_SHIFT; 662 sb->s_blocksize_bits = PAGE_SHIFT;
@@ -521,7 +711,25 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
521 if (!sb->s_root) 711 if (!sb->s_root)
522 return -ENOMEM; 712 return -ENOMEM;
523 713
524 return binderfs_binder_ctl_create(sb); 714 ret = binderfs_binder_ctl_create(sb);
715 if (ret)
716 return ret;
717
718 name = binder_devices_param;
719 for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
720 strscpy(device_info.name, name, len + 1);
721 ret = binderfs_binder_device_create(inode, NULL, &device_info);
722 if (ret)
723 return ret;
724 name += len;
725 if (*name == ',')
726 name++;
727 }
728
729 if (info->mount_opts.stats_mode == STATS_GLOBAL)
730 return init_binder_logs(sb);
731
732 return 0;
525} 733}
526 734
527static struct dentry *binderfs_mount(struct file_system_type *fs_type, 735static struct dentry *binderfs_mount(struct file_system_type *fs_type,
@@ -553,6 +761,18 @@ static struct file_system_type binder_fs_type = {
553int __init init_binderfs(void) 761int __init init_binderfs(void)
554{ 762{
555 int ret; 763 int ret;
764 const char *name;
765 size_t len;
766
767 /* Verify that the default binderfs device names are valid. */
768 name = binder_devices_param;
769 for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
770 if (len > BINDERFS_MAX_NAME)
771 return -E2BIG;
772 name += len;
773 if (*name == ',')
774 name++;
775 }
556 776
557 /* Allocate new major number for binderfs. */ 777 /* Allocate new major number for binderfs. */
558 ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR, 778 ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
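Both new loops above (device creation in binderfs_fill_super() and name validation in init_binderfs()) use the same non-destructive tokenizer; a standalone sketch of that strcspn() walk, with an illustrative pr_info() in place of the real work:

static void example_walk_names(const char *name)
{
	size_t len;

	/* visit each comma-separated token without modifying the string */
	for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
		pr_info("binder device: %.*s\n", (int)len, name);
		name += len;
		if (*name == ',')
			name++;
	}
}

strsep() is avoided on purpose here: binder_devices_param is now shared with binderfs and must not be tokenized in place.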
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index b08dc50f9f26..9eb564c002f6 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
97} 97}
98#endif 98#endif
99 99
100static inline bool should_stop_iteration(void)
101{
102 if (need_resched())
103 cond_resched();
104 return fatal_signal_pending(current);
105}
106
100/* 107/*
101 * This function reads the *physical* memory. The f_pos points directly to the 108 * memory location.
102 * memory location. 109 * memory location.
@@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
175 p += sz; 182 p += sz;
176 count -= sz; 183 count -= sz;
177 read += sz; 184 read += sz;
185 if (should_stop_iteration())
186 break;
178 } 187 }
179 kfree(bounce); 188 kfree(bounce);
180 189
@@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
251 p += sz; 260 p += sz;
252 count -= sz; 261 count -= sz;
253 written += sz; 262 written += sz;
263 if (should_stop_iteration())
264 break;
254 } 265 }
255 266
256 *ppos += written; 267 *ppos += written;
@@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
468 read += sz; 479 read += sz;
469 low_count -= sz; 480 low_count -= sz;
470 count -= sz; 481 count -= sz;
482 if (should_stop_iteration()) {
483 count = 0;
484 break;
485 }
471 } 486 }
472 } 487 }
473 488
@@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
492 buf += sz; 507 buf += sz;
493 read += sz; 508 read += sz;
494 p += sz; 509 p += sz;
510 if (should_stop_iteration())
511 break;
495 } 512 }
496 free_page((unsigned long)kbuf); 513 free_page((unsigned long)kbuf);
497 } 514 }
@@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
544 p += sz; 561 p += sz;
545 count -= sz; 562 count -= sz;
546 written += sz; 563 written += sz;
564 if (should_stop_iteration())
565 break;
547 } 566 }
548 567
549 *ppos += written; 568 *ppos += written;
@@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
595 buf += sz; 614 buf += sz;
596 virtr += sz; 615 virtr += sz;
597 p += sz; 616 p += sz;
617 if (should_stop_iteration())
618 break;
598 } 619 }
599 free_page((unsigned long)kbuf); 620 free_page((unsigned long)kbuf);
600 } 621 }
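should_stop_iteration() gives each of the copy loops above the same two properties: the loop yields the CPU when the scheduler asks, and it aborts promptly when the task gets SIGKILL. A self-contained sketch of the call pattern (example_copy and its arguments are illustrative, not from the patch):

static ssize_t example_copy(char __user *buf, const char *src, size_t count)
{
	size_t done = 0;

	while (count > 0) {
		size_t sz = min_t(size_t, count, PAGE_SIZE);

		if (copy_to_user(buf + done, src + done, sz))
			return done ? done : -EFAULT;
		done += sz;
		count -= sz;

		/* reschedule if needed; stop early on a fatal signal */
		if (should_stop_iteration())
			break;
	}
	return done;
}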
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index f0a8adca1eee..c86f18aa8985 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -737,7 +737,7 @@ static int pp_release(struct inode *inode, struct file *file)
737 "negotiated back to compatibility mode because user-space forgot\n"); 737 "negotiated back to compatibility mode because user-space forgot\n");
738 } 738 }
739 739
740 if (pp->flags & PP_CLAIMED) { 740 if ((pp->flags & PP_CLAIMED) && pp->pdev) {
741 struct ieee1284_info *info; 741 struct ieee1284_info *info;
742 742
743 info = &pp->pdev->port->ieee1284; 743 info = &pp->pdev->port->ieee1284;
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 0bdc602f0d48..98f3150e0048 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -373,7 +373,7 @@ static int tosh_get_machine_id(void __iomem *bios)
373 value. This has been verified on a Satellite Pro 430CDT, 373 value. This has been verified on a Satellite Pro 430CDT,
374 Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */ 374 Tecra 750CDT, Tecra 780DVD and Satellite 310CDT. */
375#if TOSH_DEBUG 375#if TOSH_DEBUG
376 printk("toshiba: debugging ID ebx=0x%04x\n", regs.ebx); 376 pr_debug("toshiba: debugging ID ebx=0x%04x\n", regs.ebx);
377#endif 377#endif
378 bx = 0xe6f5; 378 bx = 0xe6f5;
379 379
@@ -417,7 +417,7 @@ static int tosh_probe(void)
417 417
418 for (i=0;i<7;i++) { 418 for (i=0;i<7;i++) {
419 if (readb(bios+0xe010+i)!=signature[i]) { 419 if (readb(bios+0xe010+i)!=signature[i]) {
420 printk("toshiba: not a supported Toshiba laptop\n"); 420 pr_err("toshiba: not a supported Toshiba laptop\n");
421 iounmap(bios); 421 iounmap(bios);
422 return -ENODEV; 422 return -ENODEV;
423 } 423 }
@@ -433,7 +433,7 @@ static int tosh_probe(void)
433 /* if this is not a Toshiba laptop carry flag is set and ah=0x86 */ 433 /* if this is not a Toshiba laptop carry flag is set and ah=0x86 */
434 434
435 if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) { 435 if ((flag==1) || ((regs.eax & 0xff00)==0x8600)) {
436 printk("toshiba: not a supported Toshiba laptop\n"); 436 pr_err("toshiba: not a supported Toshiba laptop\n");
437 iounmap(bios); 437 iounmap(bios);
438 return -ENODEV; 438 return -ENODEV;
439 } 439 }
@@ -486,7 +486,7 @@ static int __init toshiba_init(void)
486 if (tosh_probe()) 486 if (tosh_probe())
487 return -ENODEV; 487 return -ENODEV;
488 488
489 printk(KERN_INFO "Toshiba System Management Mode driver v" TOSH_VERSION "\n"); 489 pr_info("Toshiba System Management Mode driver v" TOSH_VERSION "\n");
490 490
491 /* set the port to use for Fn status if not specified as a parameter */ 491 /* set the port to use for Fn status if not specified as a parameter */
492 if (tosh_fn==0x00) 492 if (tosh_fn==0x00)
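The printk() conversions above simply select the matching severity wrapper from <linux/printk.h>; a short sketch of the mapping (this file keeps its literal "toshiba: " prefix in each string, though a pr_fmt() define is the usual alternative):

static void example_log_levels(unsigned int ebx)
{
	pr_err("toshiba: not a supported Toshiba laptop\n");	/* KERN_ERR */
	pr_info("toshiba: SMM driver loaded\n");		/* KERN_INFO */
	/* pr_debug() is compiled out unless DEBUG or dynamic debug is on */
	pr_debug("toshiba: ID ebx=0x%04x\n", ebx);
}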
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index c3fd632af119..a32bfaeb7e61 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
4 */ 4 */
5 5
6#include <linux/clk-provider.h> 6#include <linux/clk-provider.h>
@@ -12,23 +12,13 @@
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <soc/qcom/cmd-db.h> 13#include <soc/qcom/cmd-db.h>
14#include <soc/qcom/rpmh.h> 14#include <soc/qcom/rpmh.h>
15#include <soc/qcom/tcs.h>
15 16
16#include <dt-bindings/clock/qcom,rpmh.h> 17#include <dt-bindings/clock/qcom,rpmh.h>
17 18
18#define CLK_RPMH_ARC_EN_OFFSET 0 19#define CLK_RPMH_ARC_EN_OFFSET 0
19#define CLK_RPMH_VRM_EN_OFFSET 4 20#define CLK_RPMH_VRM_EN_OFFSET 4
20 21
21#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
22#define BCM_TCS_CMD_VALID_SHIFT 29
23#define BCM_TCS_CMD_VOTE_MASK 0x3fff
24#define BCM_TCS_CMD_VOTE_SHIFT 0
25
26#define BCM_TCS_CMD(valid, vote) \
27 (BCM_TCS_CMD_COMMIT_MASK | \
28 ((valid) << BCM_TCS_CMD_VALID_SHIFT) | \
29 ((vote & BCM_TCS_CMD_VOTE_MASK) \
30 << BCM_TCS_CMD_VOTE_SHIFT))
31
32/** 22/**
33 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM) 23 * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager(BCM)
34 * @unit: divisor used to convert Hz value to an RPMh msg 24 * @unit: divisor used to convert Hz value to an RPMh msg
@@ -269,7 +259,7 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
269 } 259 }
270 260
271 cmd.addr = c->res_addr; 261 cmd.addr = c->res_addr;
272 cmd.data = BCM_TCS_CMD(enable, cmd_state); 262 cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
273 263
274 ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1); 264 ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
275 if (ret) { 265 if (ret) {
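For context, the deleted private macro packed a commit flag, a valid bit and a vote into one TCS command word. The shared helper from <soc/qcom/tcs.h> takes those fields explicitly; assuming its signature is BCM_TCS_CMD(commit, valid, vote_x, vote_y), the old and new call sites encode the same request:

static void example_encode(struct tcs_cmd *cmd, bool enable, u32 cmd_state)
{
	/*
	 * Old, driver-private encoding (macros removed above):
	 *   cmd->data = BCM_TCS_CMD_COMMIT_MASK |
	 *		 (enable << BCM_TCS_CMD_VALID_SHIFT) |
	 *		 ((cmd_state & BCM_TCS_CMD_VOTE_MASK)
	 *			<< BCM_TCS_CMD_VOTE_SHIFT);
	 */

	/* New, shared helper: commit always set, no X-axis vote. */
	cmd->data = BCM_TCS_CMD(1, enable, 0, cmd_state);
}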
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index ee9b5f70bfa4..ad02dc6747a4 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -140,10 +140,8 @@ static int adc_jack_probe(struct platform_device *pdev)
140 return err; 140 return err;
141 141
142 data->irq = platform_get_irq(pdev, 0); 142 data->irq = platform_get_irq(pdev, 0);
143 if (data->irq < 0) { 143 if (data->irq < 0)
144 dev_err(&pdev->dev, "platform_get_irq failed\n");
145 return -ENODEV; 144 return -ENODEV;
146 }
147 145
148 err = request_any_context_irq(data->irq, adc_jack_irq_thread, 146 err = request_any_context_irq(data->irq, adc_jack_irq_thread,
149 pdata->irq_flags, pdata->name, data); 147 pdata->irq_flags, pdata->name, data);
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 7e9f4c9ee87d..e970134c95fa 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1253,7 +1253,7 @@ static int arizona_extcon_get_micd_configs(struct device *dev,
1253 int i, j; 1253 int i, j;
1254 u32 *vals; 1254 u32 *vals;
1255 1255
1256 nconfs = device_property_read_u32_array(arizona->dev, prop, NULL, 0); 1256 nconfs = device_property_count_u32(arizona->dev, prop);
1257 if (nconfs <= 0) 1257 if (nconfs <= 0)
1258 return 0; 1258 return 0;
1259 1259
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 7254852e6ec0..415afaf479e7 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -121,7 +121,6 @@ static const char * const axp288_pwr_up_down_info[] = {
121 "Last shutdown caused by PMIC UVLO threshold", 121 "Last shutdown caused by PMIC UVLO threshold",
122 "Last shutdown caused by SOC initiated cold off", 122 "Last shutdown caused by SOC initiated cold off",
123 "Last shutdown caused by user pressing the power button", 123 "Last shutdown caused by user pressing the power button",
124 NULL,
125}; 124};
126 125
127/* 126/*
@@ -130,18 +129,21 @@ static const char * const axp288_pwr_up_down_info[] = {
130 */ 129 */
131static void axp288_extcon_log_rsi(struct axp288_extcon_info *info) 130static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
132{ 131{
133 const char * const *rsi;
134 unsigned int val, i, clear_mask = 0; 132 unsigned int val, i, clear_mask = 0;
133 unsigned long bits;
135 int ret; 134 int ret;
136 135
137 ret = regmap_read(info->regmap, AXP288_PS_BOOT_REASON_REG, &val); 136 ret = regmap_read(info->regmap, AXP288_PS_BOOT_REASON_REG, &val);
138 for (i = 0, rsi = axp288_pwr_up_down_info; *rsi; rsi++, i++) { 137 if (ret < 0) {
139 if (val & BIT(i)) { 138 dev_err(info->dev, "failed to read reset source indicator\n");
140 dev_dbg(info->dev, "%s\n", *rsi); 139 return;
141 clear_mask |= BIT(i);
142 }
143 } 140 }
144 141
142 bits = val & GENMASK(ARRAY_SIZE(axp288_pwr_up_down_info) - 1, 0);
143 for_each_set_bit(i, &bits, ARRAY_SIZE(axp288_pwr_up_down_info))
144 dev_dbg(info->dev, "%s\n", axp288_pwr_up_down_info[i]);
145 clear_mask = bits;
146
145 /* Clear the register value for next reboot (write 1 to clear bit) */ 147 /* Clear the register value for next reboot (write 1 to clear bit) */
146 regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask); 148 regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
147} 149}
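The rewritten loop is the generic pattern for decoding a reason register: drop the NULL sentinel so ARRAY_SIZE() supplies the bound, mask the raw value down to the known bits, then visit only the set ones. A minimal sketch with illustrative names:

static void example_log_reasons(unsigned int val,
				const char * const *names, unsigned int count)
{
	unsigned long bits = val & GENMASK(count - 1, 0);
	unsigned int i;

	for_each_set_bit(i, &bits, count)
		pr_debug("%s\n", names[i]);
}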
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index 350fb34abfa0..8405512f5199 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -363,6 +363,7 @@ MODULE_DEVICE_TABLE(i2c, fsa9480_id);
363 363
364static const struct of_device_id fsa9480_of_match[] = { 364static const struct of_device_id fsa9480_of_match[] = {
365 { .compatible = "fcs,fsa9480", }, 365 { .compatible = "fcs,fsa9480", },
366 { .compatible = "fcs,fsa880", },
366 { }, 367 { },
367}; 368};
368MODULE_DEVICE_TABLE(of, fsa9480_of_match); 369MODULE_DEVICE_TABLE(of, fsa9480_of_match);
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index faddeac948db..c211222f5d0c 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -22,26 +22,22 @@
22/** 22/**
23 * struct gpio_extcon_data - A simple GPIO-controlled extcon device state container. 23 * struct gpio_extcon_data - A simple GPIO-controlled extcon device state container.
24 * @edev: Extcon device. 24 * @edev: Extcon device.
25 * @irq: Interrupt line for the external connector.
26 * @work: Work fired by the interrupt. 25 * @work: Work fired by the interrupt.
27 * @debounce_jiffies: Number of jiffies to wait for the GPIO to stabilize, from the debounce 26 * @debounce_jiffies: Number of jiffies to wait for the GPIO to stabilize, from the debounce
28 * value. 27 * value.
29 * @gpiod: GPIO descriptor for this external connector. 28 * @gpiod: GPIO descriptor for this external connector.
30 * @extcon_id: The unique id of specific external connector. 29 * @extcon_id: The unique id of specific external connector.
31 * @debounce: Debounce time for GPIO IRQ in ms. 30 * @debounce: Debounce time for GPIO IRQ in ms.
32 * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
33 * @check_on_resume: Boolean describing whether to check the state of gpio 31 * @check_on_resume: Boolean describing whether to check the state of gpio
34 * while resuming from sleep. 32 * while resuming from sleep.
35 */ 33 */
36struct gpio_extcon_data { 34struct gpio_extcon_data {
37 struct extcon_dev *edev; 35 struct extcon_dev *edev;
38 int irq;
39 struct delayed_work work; 36 struct delayed_work work;
40 unsigned long debounce_jiffies; 37 unsigned long debounce_jiffies;
41 struct gpio_desc *gpiod; 38 struct gpio_desc *gpiod;
42 unsigned int extcon_id; 39 unsigned int extcon_id;
43 unsigned long debounce; 40 unsigned long debounce;
44 unsigned long irq_flags;
45 bool check_on_resume; 41 bool check_on_resume;
46}; 42};
47 43
@@ -69,6 +65,8 @@ static int gpio_extcon_probe(struct platform_device *pdev)
69{ 65{
70 struct gpio_extcon_data *data; 66 struct gpio_extcon_data *data;
71 struct device *dev = &pdev->dev; 67 struct device *dev = &pdev->dev;
68 unsigned long irq_flags;
69 int irq;
72 int ret; 70 int ret;
73 71
74 data = devm_kzalloc(dev, sizeof(struct gpio_extcon_data), GFP_KERNEL); 72 data = devm_kzalloc(dev, sizeof(struct gpio_extcon_data), GFP_KERNEL);
@@ -82,15 +80,26 @@ static int gpio_extcon_probe(struct platform_device *pdev)
82 * developed to get the extcon id from device-tree or others. 80 * developed to get the extcon id from device-tree or others.
83 * Later on, this has to be solved. 81 * Later on, this has to be solved.
84 */ 82 */
85 if (!data->irq_flags || data->extcon_id > EXTCON_NONE) 83 if (data->extcon_id > EXTCON_NONE)
86 return -EINVAL; 84 return -EINVAL;
87 85
88 data->gpiod = devm_gpiod_get(dev, "extcon", GPIOD_IN); 86 data->gpiod = devm_gpiod_get(dev, "extcon", GPIOD_IN);
89 if (IS_ERR(data->gpiod)) 87 if (IS_ERR(data->gpiod))
90 return PTR_ERR(data->gpiod); 88 return PTR_ERR(data->gpiod);
91 data->irq = gpiod_to_irq(data->gpiod); 89 irq = gpiod_to_irq(data->gpiod);
92 if (data->irq <= 0) 90 if (irq <= 0)
93 return data->irq; 91 return irq;
92
93 /*
94 * It is unlikely that this is an acknowledged interrupt that goes
95 * away after handling; what we are looking for are falling edges
96 * if the signal is active low, and rising edges if the signal is
97 * active high.
98 */
99 if (gpiod_is_active_low(data->gpiod))
100 irq_flags = IRQF_TRIGGER_FALLING;
101 else
102 irq_flags = IRQF_TRIGGER_RISING;
94 103
95 /* Allocate memory for the extcon device and register it */ 104 /* Allocate memory for the extcon device and register it */
96 data->edev = devm_extcon_dev_allocate(dev, &data->extcon_id); 105 data->edev = devm_extcon_dev_allocate(dev, &data->extcon_id);
@@ -109,8 +118,8 @@ static int gpio_extcon_probe(struct platform_device *pdev)
109 * Request the interrupt of gpio to detect whether external connector 118 * Request the interrupt of gpio to detect whether external connector
110 * is attached or detached. 119 * is attached or detached.
111 */ 120 */
112 ret = devm_request_any_context_irq(dev, data->irq, 121 ret = devm_request_any_context_irq(dev, irq,
113 gpio_irq_handler, data->irq_flags, 122 gpio_irq_handler, irq_flags,
114 pdev->name, data); 123 pdev->name, data);
115 if (ret < 0) 124 if (ret < 0)
116 return ret; 125 return ret;
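The probe rework above replaces a platform-data irq_flags field with flags derived from the GPIO descriptor itself. Since gpiolib already folds ACTIVE_LOW into the logical line value, the edge that signals a state change depends only on the declared polarity; a sketch of that derivation (helper name illustrative):

static unsigned long example_edge_for(struct gpio_desc *gpiod)
{
	/* falling edge for active-low lines, rising for active-high */
	return gpiod_is_active_low(gpiod) ? IRQF_TRIGGER_FALLING
					  : IRQF_TRIGGER_RISING;
}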
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index a343a6ef3506..e6b50ca83008 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -774,12 +774,12 @@ static int max77843_init_muic_regmap(struct max77693_dev *max77843)
774{ 774{
775 int ret; 775 int ret;
776 776
777 max77843->i2c_muic = i2c_new_dummy(max77843->i2c->adapter, 777 max77843->i2c_muic = i2c_new_dummy_device(max77843->i2c->adapter,
778 I2C_ADDR_MUIC); 778 I2C_ADDR_MUIC);
779 if (!max77843->i2c_muic) { 779 if (IS_ERR(max77843->i2c_muic)) {
780 dev_err(&max77843->i2c->dev, 780 dev_err(&max77843->i2c->dev,
781 "Cannot allocate I2C device for MUIC\n"); 781 "Cannot allocate I2C device for MUIC\n");
782 return -ENOMEM; 782 return PTR_ERR(max77843->i2c_muic);
783 } 783 }
784 784
785 i2c_set_clientdata(max77843->i2c_muic, max77843); 785 i2c_set_clientdata(max77843->i2c_muic, max77843);
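i2c_new_dummy_device() is the ERR_PTR-returning successor to i2c_new_dummy(), so callers can propagate the real error instead of guessing -ENOMEM; the conversion above follows the standard pattern, sketched here with the driver's own names:

static int example_get_muic(struct max77693_dev *max77843)
{
	max77843->i2c_muic = i2c_new_dummy_device(max77843->i2c->adapter,
						  I2C_ADDR_MUIC);
	if (IS_ERR(max77843->i2c_muic))
		return PTR_ERR(max77843->i2c_muic);

	return 0;
}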
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 98e4f616b8f1..dc43847ad2b0 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -597,7 +597,7 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
597 597
598 ret = devm_request_threaded_irq(info->dev, virq, NULL, 598 ret = devm_request_threaded_irq(info->dev, virq, NULL,
599 sm5502_muic_irq_handler, 599 sm5502_muic_irq_handler,
600 IRQF_NO_SUSPEND, 600 IRQF_NO_SUSPEND | IRQF_ONESHOT,
601 muic_irq->name, info); 601 muic_irq->name, info);
602 if (ret) { 602 if (ret) {
603 dev_err(info->dev, 603 dev_err(info->dev,
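The IRQF_ONESHOT addition is required, not cosmetic: genirq rejects a threaded request with a NULL primary handler unless ONESHOT keeps the line masked until the thread finishes, since a level-triggered interrupt would otherwise retrigger forever. The canonical form, as a sketch:

static int example_request(struct device *dev, int virq,
			   irq_handler_t thread_fn, void *data)
{
	/* NULL primary handler => IRQF_ONESHOT is mandatory */
	return devm_request_threaded_irq(dev, virq, NULL, thread_fn,
					 IRQF_NO_SUSPEND | IRQF_ONESHOT,
					 "sm5502-muic", data);
}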
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index c9a827bffe1c..e40a77bfe821 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -216,6 +216,24 @@ config INTEL_STRATIX10_SERVICE
216 216
217 Say Y here if you want Stratix10 service layer support. 217 Say Y here if you want Stratix10 service layer support.
218 218
219config INTEL_STRATIX10_RSU
220 tristate "Intel Stratix10 Remote System Update"
221 depends on INTEL_STRATIX10_SERVICE
222 help
223	  The Intel Remote System Update (RSU) driver exposes interfaces,
224	  accessed through the Intel Service Layer, to user space via sysfs
225 device attribute nodes. The RSU interfaces report/control some of
226 the optional RSU features of the Stratix 10 SoC FPGA.
227
228 The RSU provides a way for customers to update the boot
229 configuration of a Stratix 10 SoC device with significantly reduced
230 risk of corrupting the bitstream storage and bricking the system.
231
232 Enable RSU support if you are using an Intel SoC FPGA with the RSU
233 feature enabled and you want Linux user space control.
234
235 Say Y here if you want Intel RSU support.
236
219config QCOM_SCM 237config QCOM_SCM
220 bool 238 bool
221 depends on ARM || ARM64 239 depends on ARM || ARM64
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 2b6e3a0be595..3fcb91975bdc 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_EDD) += edd.o
11obj-$(CONFIG_EFI_PCDP) += pcdp.o 11obj-$(CONFIG_EFI_PCDP) += pcdp.o
12obj-$(CONFIG_DMIID) += dmi-id.o 12obj-$(CONFIG_DMIID) += dmi-id.o
13obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o 13obj-$(CONFIG_INTEL_STRATIX10_SERVICE) += stratix10-svc.o
14obj-$(CONFIG_INTEL_STRATIX10_RSU) += stratix10-rsu.o
14obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o 15obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
15obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o 16obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
16obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o 17obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 0739f3b70347..db0812263d46 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -92,8 +92,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len)
92 return VPD_OK; 92 return VPD_OK;
93} 93}
94 94
95static int vpd_section_attrib_add(const u8 *key, s32 key_len, 95static int vpd_section_attrib_add(const u8 *key, u32 key_len,
96 const u8 *value, s32 value_len, 96 const u8 *value, u32 value_len,
97 void *arg) 97 void *arg)
98{ 98{
99 int ret; 99 int ret;
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
index 92e3258552fc..dda525c0f968 100644
--- a/drivers/firmware/google/vpd_decode.c
+++ b/drivers/firmware/google/vpd_decode.c
@@ -9,8 +9,8 @@
9 9
10#include "vpd_decode.h" 10#include "vpd_decode.h"
11 11
12static int vpd_decode_len(const s32 max_len, const u8 *in, 12static int vpd_decode_len(const u32 max_len, const u8 *in,
13 s32 *length, s32 *decoded_len) 13 u32 *length, u32 *decoded_len)
14{ 14{
15 u8 more; 15 u8 more;
16 int i = 0; 16 int i = 0;
@@ -30,18 +30,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in,
30 } while (more); 30 } while (more);
31 31
32 *decoded_len = i; 32 *decoded_len = i;
33 return VPD_OK;
34}
35
36static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
37 u32 *_consumed, const u8 **entry, u32 *entry_len)
38{
39 u32 decoded_len;
40 u32 consumed = *_consumed;
41
42 if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
43 entry_len, &decoded_len) != VPD_OK)
44 return VPD_FAIL;
45 if (max_len - consumed < decoded_len)
46 return VPD_FAIL;
47
48 consumed += decoded_len;
49 *entry = input_buf + consumed;
50
51 /* entry_len is untrusted data and must be checked again. */
52 if (max_len - consumed < *entry_len)
53 return VPD_FAIL;
33 54
55 consumed += *entry_len;
56 *_consumed = consumed;
34 return VPD_OK; 57 return VPD_OK;
35} 58}
36 59
37int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, 60int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
38 vpd_decode_callback callback, void *callback_arg) 61 vpd_decode_callback callback, void *callback_arg)
39{ 62{
40 int type; 63 int type;
41 int res; 64 u32 key_len;
42 s32 key_len; 65 u32 value_len;
43 s32 value_len;
44 s32 decoded_len;
45 const u8 *key; 66 const u8 *key;
46 const u8 *value; 67 const u8 *value;
47 68
@@ -56,26 +77,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
56 case VPD_TYPE_STRING: 77 case VPD_TYPE_STRING:
57 (*consumed)++; 78 (*consumed)++;
58 79
59 /* key */ 80 if (vpd_decode_entry(max_len, input_buf, consumed, &key,
60 res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed], 81 &key_len) != VPD_OK)
61 &key_len, &decoded_len);
62 if (res != VPD_OK || *consumed + decoded_len >= max_len)
63 return VPD_FAIL; 82 return VPD_FAIL;
64 83
65 *consumed += decoded_len; 84 if (vpd_decode_entry(max_len, input_buf, consumed, &value,
66 key = &input_buf[*consumed]; 85 &value_len) != VPD_OK)
67 *consumed += key_len;
68
69 /* value */
70 res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
71 &value_len, &decoded_len);
72 if (res != VPD_OK || *consumed + decoded_len > max_len)
73 return VPD_FAIL; 86 return VPD_FAIL;
74 87
75 *consumed += decoded_len;
76 value = &input_buf[*consumed];
77 *consumed += value_len;
78
79 if (type == VPD_TYPE_STRING) 88 if (type == VPD_TYPE_STRING)
80 return callback(key, key_len, value, value_len, 89 return callback(key, key_len, value, value_len,
81 callback_arg); 90 callback_arg);
diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
index cf8c2ace155a..8dbe41cac599 100644
--- a/drivers/firmware/google/vpd_decode.h
+++ b/drivers/firmware/google/vpd_decode.h
@@ -25,8 +25,8 @@ enum {
25}; 25};
26 26
27/* Callback for vpd_decode_string to invoke. */ 27/* Callback for vpd_decode_string to invoke. */
28typedef int vpd_decode_callback(const u8 *key, s32 key_len, 28typedef int vpd_decode_callback(const u8 *key, u32 key_len,
29 const u8 *value, s32 value_len, 29 const u8 *value, u32 value_len,
30 void *arg); 30 void *arg);
31 31
32/* 32/*
@@ -44,7 +44,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len,
44 * If one entry is successfully decoded, sends it to callback and returns the 44 * If one entry is successfully decoded, sends it to callback and returns the
45 * result. 45 * result.
46 */ 46 */
47int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, 47int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
48 vpd_decode_callback callback, void *callback_arg); 48 vpd_decode_callback callback, void *callback_arg);
49 49
50#endif /* __VPD_DECODE_H */ 50#endif /* __VPD_DECODE_H */
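The refactor above centralizes two ideas: lengths become unsigned (u32), and every bound is checked by subtracting on the side already known to be <= max_len, so an untrusted length can no longer wrap the old "consumed + len" sum. The core check, reduced to a sketch:

/* Assumes consumed <= max_len on entry, as at every call site above. */
static int example_fits(u32 max_len, u32 consumed, u32 len)
{
	/*
	 * "consumed + len > max_len" can wrap around when len is a huge
	 * attacker-controlled value; "max_len - consumed < len" cannot.
	 */
	if (max_len - consumed < len)
		return VPD_FAIL;
	return VPD_OK;
}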
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
new file mode 100644
index 000000000000..bb008c019920
--- /dev/null
+++ b/drivers/firmware/stratix10-rsu.c
@@ -0,0 +1,451 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018-2019, Intel Corporation
4 */
5
6#include <linux/arm-smccc.h>
7#include <linux/bitfield.h>
8#include <linux/completion.h>
9#include <linux/kobject.h>
10#include <linux/module.h>
11#include <linux/mutex.h>
12#include <linux/of.h>
13#include <linux/of_platform.h>
14#include <linux/platform_device.h>
15#include <linux/firmware/intel/stratix10-svc-client.h>
16#include <linux/string.h>
17#include <linux/sysfs.h>
18
19#define RSU_STATE_MASK GENMASK_ULL(31, 0)
20#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
21#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
22#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
23#define RSU_FW_VERSION_MASK GENMASK_ULL(15, 0)
24
25#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
26
27#define INVALID_RETRY_COUNTER 0xFFFFFFFF
28
29typedef void (*rsu_callback)(struct stratix10_svc_client *client,
30 struct stratix10_svc_cb_data *data);
31/**
32 * struct stratix10_rsu_priv - rsu data structure
33 * @chan: pointer to the allocated service channel
34 * @client: active service client
35 * @completion: state for callback completion
36 * @lock: a mutex to protect callback completion state
37 * @status.current_image: address of image currently running in flash
38 * @status.fail_image: address of failed image in flash
39 * @status.version: the version number of RSU firmware
40 * @status.state: the state of RSU system
41 * @status.error_details: error code
42 * @status.error_location: the error offset inside the image that failed
43 * @retry_counter: the current image's retry counter
44 */
45struct stratix10_rsu_priv {
46 struct stratix10_svc_chan *chan;
47 struct stratix10_svc_client client;
48 struct completion completion;
49 struct mutex lock;
50 struct {
51 unsigned long current_image;
52 unsigned long fail_image;
53 unsigned int version;
54 unsigned int state;
55 unsigned int error_details;
56 unsigned int error_location;
57 } status;
58 unsigned int retry_counter;
59};
60
61/**
62 * rsu_status_callback() - Status callback from Intel Service Layer
63 * @client: pointer to service client
64 * @data: pointer to callback data structure
65 *
66 * Callback from Intel service layer for RSU status request. Status is
67 * only updated after a system reboot, so a status query is
68 * made once during driver probe.
69 */
70static void rsu_status_callback(struct stratix10_svc_client *client,
71 struct stratix10_svc_cb_data *data)
72{
73 struct stratix10_rsu_priv *priv = client->priv;
74 struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
75
76 if (data->status == BIT(SVC_STATUS_RSU_OK)) {
77 priv->status.version = FIELD_GET(RSU_VERSION_MASK,
78 res->a2);
79 priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
80 priv->status.fail_image = res->a1;
81 priv->status.current_image = res->a0;
82 priv->status.error_location =
83 FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3);
84 priv->status.error_details =
85 FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3);
86 } else {
87 dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n",
88 res->a0);
89 priv->status.version = 0;
90 priv->status.state = 0;
91 priv->status.fail_image = 0;
92 priv->status.current_image = 0;
93 priv->status.error_location = 0;
94 priv->status.error_details = 0;
95 }
96
97 complete(&priv->completion);
98}
99
100/**
101 * rsu_command_callback() - Update callback from Intel Service Layer
102 * @client: pointer to client
103 * @data: pointer to callback data structure
104 *
105 * Callback from Intel service layer for RSU commands.
106 */
107static void rsu_command_callback(struct stratix10_svc_client *client,
108 struct stratix10_svc_cb_data *data)
109{
110 struct stratix10_rsu_priv *priv = client->priv;
111
112 if (data->status != BIT(SVC_STATUS_RSU_OK))
113 dev_err(client->dev, "RSU returned status %i\n",
114 data->status);
115 complete(&priv->completion);
116}
117
118/**
119 * rsu_retry_callback() - Callback from Intel service layer for getting
120 * the current image's retry counter from firmware
121 * @client: pointer to client
122 * @data: pointer to callback data structure
123 *
124 * Callback from Intel service layer for the retry counter, which tells
125 * the user how many times the image is still allowed to reload
126 * itself before giving up and starting the RSU fail-over flow.
127 */
128static void rsu_retry_callback(struct stratix10_svc_client *client,
129 struct stratix10_svc_cb_data *data)
130{
131 struct stratix10_rsu_priv *priv = client->priv;
132 unsigned int *counter = (unsigned int *)data->kaddr1;
133
134 if (data->status == BIT(SVC_STATUS_RSU_OK))
135 priv->retry_counter = *counter;
136 else
137 dev_err(client->dev, "Failed to get retry counter %i\n",
138 data->status);
139
140 complete(&priv->completion);
141}
142
143/**
144 * rsu_send_msg() - send a message to Intel service layer
145 * @priv: pointer to rsu private data
146 * @command: RSU status or update command
147 * @arg: the request argument, the bitstream address or notify status
148 * @callback: function pointer for the callback (status or update)
149 *
150 * Start an Intel service layer transaction to perform the SMC call that
151 * is necessary to get RSU boot log or set the address of bitstream to
152 * boot after reboot.
153 *
154 * Returns 0 on success or a negative error code (e.g. -ETIMEDOUT) on failure.
155 */
156static int rsu_send_msg(struct stratix10_rsu_priv *priv,
157 enum stratix10_svc_command_code command,
158 unsigned long arg,
159 rsu_callback callback)
160{
161 struct stratix10_svc_client_msg msg;
162 int ret;
163
164 mutex_lock(&priv->lock);
165 reinit_completion(&priv->completion);
166 priv->client.receive_cb = callback;
167
168 msg.command = command;
169 if (arg)
170 msg.arg[0] = arg;
171
172 ret = stratix10_svc_send(priv->chan, &msg);
173 if (ret < 0)
174 goto status_done;
175
176 ret = wait_for_completion_interruptible_timeout(&priv->completion,
177 RSU_TIMEOUT);
178 if (!ret) {
179 dev_err(priv->client.dev,
180 "timeout waiting for SMC call\n");
181 ret = -ETIMEDOUT;
182 goto status_done;
183 } else if (ret < 0) {
184 dev_err(priv->client.dev,
185 "error %d waiting for SMC call\n", ret);
186 goto status_done;
187 } else {
188 ret = 0;
189 }
190
191status_done:
192 stratix10_svc_done(priv->chan);
193 mutex_unlock(&priv->lock);
194 return ret;
195}
196
197/*
198 * This driver exposes some optional features of the Intel Stratix 10 SoC FPGA.
199 * The sysfs interfaces exposed here are FPGA Remote System Update (RSU)
200 * related. They allow user space software to query the configuration system
201 * status and to request optional reboot behavior specific to Intel FPGAs.
202 */
203
204static ssize_t current_image_show(struct device *dev,
205 struct device_attribute *attr, char *buf)
206{
207 struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
208
209 if (!priv)
210 return -ENODEV;
211
212 return sprintf(buf, "0x%08lx\n", priv->status.current_image);
213}
214
215static ssize_t fail_image_show(struct device *dev,
216 struct device_attribute *attr, char *buf)
217{
218 struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
219
220 if (!priv)
221 return -ENODEV;
222
223 return sprintf(buf, "0x%08lx\n", priv->status.fail_image);
224}
225
226static ssize_t version_show(struct device *dev, struct device_attribute *attr,
227 char *buf)
228{
229 struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
230
231 if (!priv)
232 return -ENODEV;
233
234 return sprintf(buf, "0x%08x\n", priv->status.version);
235}
236
237static ssize_t state_show(struct device *dev, struct device_attribute *attr,
238 char *buf)
239{
240 struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
241
242 if (!priv)
243 return -ENODEV;
244
245 return sprintf(buf, "0x%08x\n", priv->status.state);
246}
247
248static ssize_t error_location_show(struct device *dev,
249 struct device_attribute *attr, char *buf)
250{
251 struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
252
253 if (!priv)
254 return -ENODEV;
255
256 return sprintf(buf, "0x%08x\n", priv->status.error_location);
257}
258
259static ssize_t error_details_show(struct device *dev,
260 struct device_attribute *attr, char *buf)
261{
262 struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
263
264 if (!priv)
265 return -ENODEV;
266
267 return sprintf(buf, "0x%08x\n", priv->status.error_details);
268}
269
270static ssize_t retry_counter_show(struct device *dev,
271 struct device_attribute *attr, char *buf)
272{
273 struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
274
275 if (!priv)
276 return -ENODEV;
277
278 return sprintf(buf, "0x%08x\n", priv->retry_counter);
279}
280
281static ssize_t reboot_image_store(struct device *dev,
282 struct device_attribute *attr,
283 const char *buf, size_t count)
284{
285 struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
286 unsigned long address;
287 int ret;
288
289 if (!priv)
290 return -ENODEV;
291
292 ret = kstrtoul(buf, 0, &address);
293 if (ret)
294 return ret;
295
296 ret = rsu_send_msg(priv, COMMAND_RSU_UPDATE,
297 address, rsu_command_callback);
298 if (ret) {
299 dev_err(dev, "Error, RSU update returned %i\n", ret);
300 return ret;
301 }
302
303 return count;
304}
305
306static ssize_t notify_store(struct device *dev,
307 struct device_attribute *attr,
308 const char *buf, size_t count)
309{
310 struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
311 unsigned long status;
312 int ret;
313
314 if (!priv)
315 return -ENODEV;
316
317 ret = kstrtoul(buf, 0, &status);
318 if (ret)
319 return ret;
320
321 ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY,
322 status, rsu_command_callback);
323 if (ret) {
324 dev_err(dev, "Error, RSU notify returned %i\n", ret);
325 return ret;
326 }
327
328 /* to get the updated state */
329 ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
330 0, rsu_status_callback);
331 if (ret) {
332 dev_err(dev, "Error, getting RSU status %i\n", ret);
333 return ret;
334 }
335
336 /* only firmware version 19.3 or later supports the retry counter feature */
337 if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
338 ret = rsu_send_msg(priv, COMMAND_RSU_RETRY,
339 0, rsu_retry_callback);
340 if (ret) {
341 dev_err(dev,
342 "Error, getting RSU retry %i\n", ret);
343 return ret;
344 }
345 }
346
347 return count;
348}
349
350static DEVICE_ATTR_RO(current_image);
351static DEVICE_ATTR_RO(fail_image);
352static DEVICE_ATTR_RO(state);
353static DEVICE_ATTR_RO(version);
354static DEVICE_ATTR_RO(error_location);
355static DEVICE_ATTR_RO(error_details);
356static DEVICE_ATTR_RO(retry_counter);
357static DEVICE_ATTR_WO(reboot_image);
358static DEVICE_ATTR_WO(notify);
359
360static struct attribute *rsu_attrs[] = {
361 &dev_attr_current_image.attr,
362 &dev_attr_fail_image.attr,
363 &dev_attr_state.attr,
364 &dev_attr_version.attr,
365 &dev_attr_error_location.attr,
366 &dev_attr_error_details.attr,
367 &dev_attr_retry_counter.attr,
368 &dev_attr_reboot_image.attr,
369 &dev_attr_notify.attr,
370 NULL
371};
372
373ATTRIBUTE_GROUPS(rsu);
374
375static int stratix10_rsu_probe(struct platform_device *pdev)
376{
377 struct device *dev = &pdev->dev;
378 struct stratix10_rsu_priv *priv;
379 int ret;
380
381 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
382 if (!priv)
383 return -ENOMEM;
384
385 priv->client.dev = dev;
386 priv->client.receive_cb = NULL;
387 priv->client.priv = priv;
388 priv->status.current_image = 0;
389 priv->status.fail_image = 0;
390 priv->status.error_location = 0;
391 priv->status.error_details = 0;
392 priv->status.version = 0;
393 priv->status.state = 0;
394 priv->retry_counter = INVALID_RETRY_COUNTER;
395
396 mutex_init(&priv->lock);
397 priv->chan = stratix10_svc_request_channel_byname(&priv->client,
398 SVC_CLIENT_RSU);
399 if (IS_ERR(priv->chan)) {
400 dev_err(dev, "couldn't get service channel %s\n",
401 SVC_CLIENT_RSU);
402 return PTR_ERR(priv->chan);
403 }
404
405 init_completion(&priv->completion);
406 platform_set_drvdata(pdev, priv);
407
408 /* get the initial state from firmware */
409 ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
410 0, rsu_status_callback);
411 if (ret) {
412 dev_err(dev, "Error, getting RSU status %i\n", ret);
413 stratix10_svc_free_channel(priv->chan);
414 }
415
416 /* only 19.3 or later firmware supports the retry counter feature */
417 if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
418 ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0,
419 rsu_retry_callback);
420 if (ret) {
421 dev_err(dev,
422 "Error, getting RSU retry %i\n", ret);
423 stratix10_svc_free_channel(priv->chan);
424 }
425 }
426
427 return ret;
428}
429
430static int stratix10_rsu_remove(struct platform_device *pdev)
431{
432 struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev);
433
434 stratix10_svc_free_channel(priv->chan);
435 return 0;
436}
437
438static struct platform_driver stratix10_rsu_driver = {
439 .probe = stratix10_rsu_probe,
440 .remove = stratix10_rsu_remove,
441 .driver = {
442 .name = "stratix10-rsu",
443 .dev_groups = rsu_groups,
444 },
445};
446
447module_platform_driver(stratix10_rsu_driver);
448
449MODULE_LICENSE("GPL v2");
450MODULE_DESCRIPTION("Intel Remote System Update Driver");
451MODULE_AUTHOR("Richard Gong <richard.gong@intel.com>");
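The attributes above form the driver's entire user interface: the read-only files report the status words cached from the last COMMAND_RSU_STATUS, and reboot_image/notify accept a value parsed with kstrtoul(). A minimal userspace sketch follows; the device path stratix10-rsu.0 is an assumption for illustration, not taken from this patch:

/* Sketch: read the RSU state and send a notify value through sysfs.
 * The path below is assumed; the real name depends on how the platform
 * device is instantiated on the running system.
 */
#include <stdio.h>

#define RSU_SYSFS "/sys/devices/platform/stratix10-rsu.0"

int main(void)
{
	char state[32];
	FILE *f;

	f = fopen(RSU_SYSFS "/state", "r");
	if (!f || !fgets(state, sizeof(state), f)) {
		perror("state");
		return 1;
	}
	fclose(f);
	printf("RSU state: %s", state);

	/* notify_store() parses this with kstrtoul(buf, 0, ...) */
	f = fopen(RSU_SYSFS "/notify", "w");
	if (!f || fprintf(f, "0x0\n") < 0) {
		perror("notify");
		return 1;
	}
	fclose(f);
	return 0;
}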
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 6e6514825ad0..b485321189e1 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -38,6 +38,9 @@
38#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200 38#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
39#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30 39#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
40 40
41/* stratix10 service layer clients */
42#define STRATIX10_RSU "stratix10-rsu"
43
41typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long, 44typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
42 unsigned long, unsigned long, unsigned long, 45 unsigned long, unsigned long, unsigned long,
43 unsigned long, unsigned long, 46 unsigned long, unsigned long,
@@ -45,6 +48,14 @@ typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
45struct stratix10_svc_chan; 48struct stratix10_svc_chan;
46 49
47/** 50/**
51 * struct stratix10_svc - svc private data
52 * @stratix10_svc_rsu: pointer to stratix10 RSU device
53 */
54struct stratix10_svc {
55 struct platform_device *stratix10_svc_rsu;
56};
57
58/**
48 * struct stratix10_svc_sh_memory - service shared memory structure 59 * struct stratix10_svc_sh_memory - service shared memory structure
49 * @sync_complete: state for a completion 60 * @sync_complete: state for a completion
50 * @addr: physical address of shared memory block 61 * @addr: physical address of shared memory block
@@ -296,7 +307,12 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
296 cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED); 307 cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
297 break; 308 break;
298 case COMMAND_RSU_UPDATE: 309 case COMMAND_RSU_UPDATE:
310 case COMMAND_RSU_NOTIFY:
311 cb_data->status = BIT(SVC_STATUS_RSU_OK);
312 break;
313 case COMMAND_RSU_RETRY:
299 cb_data->status = BIT(SVC_STATUS_RSU_OK); 314 cb_data->status = BIT(SVC_STATUS_RSU_OK);
315 cb_data->kaddr1 = &res.a1;
300 break; 316 break;
301 default: 317 default:
302 pr_warn("it shouldn't happen\n"); 318 pr_warn("it shouldn't happen\n");
@@ -386,6 +402,16 @@ static int svc_normal_to_secure_thread(void *data)
386 a1 = pdata->arg[0]; 402 a1 = pdata->arg[0];
387 a2 = 0; 403 a2 = 0;
388 break; 404 break;
405 case COMMAND_RSU_NOTIFY:
406 a0 = INTEL_SIP_SMC_RSU_NOTIFY;
407 a1 = pdata->arg[0];
408 a2 = 0;
409 break;
410 case COMMAND_RSU_RETRY:
411 a0 = INTEL_SIP_SMC_RSU_RETRY_COUNTER;
412 a1 = 0;
413 a2 = 0;
414 break;
389 default: 415 default:
390 pr_warn("it shouldn't happen\n"); 416 pr_warn("it shouldn't happen\n");
391 break; 417 break;
@@ -438,7 +464,28 @@ static int svc_normal_to_secure_thread(void *data)
438 pr_debug("%s: STATUS_REJECTED\n", __func__); 464 pr_debug("%s: STATUS_REJECTED\n", __func__);
439 break; 465 break;
440 case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR: 466 case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
467 case INTEL_SIP_SMC_RSU_ERROR:
441 pr_err("%s: STATUS_ERROR\n", __func__); 468 pr_err("%s: STATUS_ERROR\n", __func__);
469 switch (pdata->command) {
470 /* for FPGA mgr */
471 case COMMAND_RECONFIG_DATA_CLAIM:
472 case COMMAND_RECONFIG:
473 case COMMAND_RECONFIG_DATA_SUBMIT:
474 case COMMAND_RECONFIG_STATUS:
475 cbdata->status =
476 BIT(SVC_STATUS_RECONFIG_ERROR);
477 break;
478
479 /* for RSU */
480 case COMMAND_RSU_STATUS:
481 case COMMAND_RSU_UPDATE:
482 case COMMAND_RSU_NOTIFY:
483 case COMMAND_RSU_RETRY:
484 cbdata->status =
485 BIT(SVC_STATUS_RSU_ERROR);
486 break;
487 }
488
442 cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR); 489 cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR);
443 cbdata->kaddr1 = NULL; 490 cbdata->kaddr1 = NULL;
444 cbdata->kaddr2 = NULL; 491 cbdata->kaddr2 = NULL;
@@ -530,7 +577,7 @@ static int svc_get_sh_memory(struct platform_device *pdev,
530 577
531 if (!sh_memory->addr || !sh_memory->size) { 578 if (!sh_memory->addr || !sh_memory->size) {
532 dev_err(dev, 579 dev_err(dev,
533 "fails to get shared memory info from secure world\n"); 580 "failed to get shared memory info from secure world\n");
534 return -ENOMEM; 581 return -ENOMEM;
535 } 582 }
536 583
@@ -768,7 +815,7 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
768 "svc_smc_hvc_thread"); 815 "svc_smc_hvc_thread");
769 if (IS_ERR(chan->ctrl->task)) { 816 if (IS_ERR(chan->ctrl->task)) {
770 dev_err(chan->ctrl->dev, 817 dev_err(chan->ctrl->dev,
771 "fails to create svc_smc_hvc_thread\n"); 818 "failed to create svc_smc_hvc_thread\n");
772 kfree(p_data); 819 kfree(p_data);
773 return -EINVAL; 820 return -EINVAL;
774 } 821 }
@@ -913,6 +960,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
913 struct stratix10_svc_chan *chans; 960 struct stratix10_svc_chan *chans;
914 struct gen_pool *genpool; 961 struct gen_pool *genpool;
915 struct stratix10_svc_sh_memory *sh_memory; 962 struct stratix10_svc_sh_memory *sh_memory;
963 struct stratix10_svc *svc;
964
916 svc_invoke_fn *invoke_fn; 965 svc_invoke_fn *invoke_fn;
917 size_t fifo_size; 966 size_t fifo_size;
918 int ret; 967 int ret;
@@ -957,7 +1006,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
957 fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO; 1006 fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
958 ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL); 1007 ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
959 if (ret) { 1008 if (ret) {
960 dev_err(dev, "fails to allocate FIFO\n"); 1009 dev_err(dev, "failed to allocate FIFO\n");
961 return ret; 1010 return ret;
962 } 1011 }
963 spin_lock_init(&controller->svc_fifo_lock); 1012 spin_lock_init(&controller->svc_fifo_lock);
@@ -975,6 +1024,24 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
975 list_add_tail(&controller->node, &svc_ctrl); 1024 list_add_tail(&controller->node, &svc_ctrl);
976 platform_set_drvdata(pdev, controller); 1025 platform_set_drvdata(pdev, controller);
977 1026
1027 /* add svc client device(s) */
1028 svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
1029 if (!svc)
1030 return -ENOMEM;
1031
1032 svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
1033 if (!svc->stratix10_svc_rsu) {
1034 dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
1035 return -ENOMEM;
1036 }
1037
1038 ret = platform_device_add(svc->stratix10_svc_rsu);
1039 if (ret) {
1040 platform_device_put(svc->stratix10_svc_rsu);
1041 return ret;
1042 }
1043 dev_set_drvdata(dev, svc);
1044
978 pr_info("Intel Service Layer Driver Initialized\n"); 1045 pr_info("Intel Service Layer Driver Initialized\n");
979 1046
980 return ret; 1047 return ret;
@@ -982,8 +1049,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
982 1049
983static int stratix10_svc_drv_remove(struct platform_device *pdev) 1050static int stratix10_svc_drv_remove(struct platform_device *pdev)
984{ 1051{
1052 struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
985 struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); 1053 struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
986 1054
1055 platform_device_unregister(svc->stratix10_svc_rsu);
1056
987 kfifo_free(&ctrl->svc_fifo); 1057 kfifo_free(&ctrl->svc_fifo);
988 if (ctrl->task) { 1058 if (ctrl->task) {
989 kthread_stop(ctrl->task); 1059 kthread_stop(ctrl->task);
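The probe/remove hunks above follow the standard lifecycle for a child platform device: alloc, add, drop the reference only if add fails (add takes ownership), unregister on teardown. A condensed sketch of that pattern; the helper name svc_spawn_client is made up for illustration:

#include <linux/err.h>
#include <linux/platform_device.h>

/* Condensed restatement of the child-device pattern added above. */
static struct platform_device *svc_spawn_client(const char *name)
{
	struct platform_device *child;
	int ret;

	child = platform_device_alloc(name, 0);
	if (!child)
		return ERR_PTR(-ENOMEM);

	ret = platform_device_add(child);
	if (ret) {
		platform_device_put(child);	/* allocated but never added */
		return ERR_PTR(ret);
	}

	return child;	/* teardown: platform_device_unregister(child) */
}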
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index cdd4f73b4869..73c779e920ed 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -46,11 +46,11 @@ config FPGA_MGR_ALTERA_PS_SPI
46 using the passive serial interface over SPI. 46 using the passive serial interface over SPI.
47 47
48config FPGA_MGR_ALTERA_CVP 48config FPGA_MGR_ALTERA_CVP
49 tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager" 49 tristate "Altera CvP FPGA Manager"
50 depends on PCI 50 depends on PCI
51 help 51 help
52 FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V 52 FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V,
53 and Arria 10 Altera FPGAs using the CvP interface over PCIe. 53 Arria 10 and Stratix10 Altera FPGAs using the CvP interface over PCIe.
54 54
55config FPGA_MGR_ZYNQ_FPGA 55config FPGA_MGR_ZYNQ_FPGA
56 tristate "Xilinx Zynq FPGA" 56 tristate "Xilinx Zynq FPGA"
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 312b9371742f..4865b74b00a4 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -39,8 +39,9 @@ obj-$(CONFIG_FPGA_DFL_FME_BRIDGE) += dfl-fme-br.o
39obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o 39obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
40obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o 40obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
41 41
42dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o 42dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
43dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o 43dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
44dfl-afu-objs += dfl-afu-error.o
44 45
45# Drivers for FPGAs which implement DFL 46# Drivers for FPGAs which implement DFL
46obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o 47obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 770915fb97f9..4e0edb60bfba 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -22,10 +22,10 @@
22#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */ 22#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
23 23
24/* Vendor Specific Extended Capability Registers */ 24/* Vendor Specific Extended Capability Registers */
25#define VSE_PCIE_EXT_CAP_ID 0x200 25#define VSE_PCIE_EXT_CAP_ID 0x0
26#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */ 26#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
27 27
28#define VSE_CVP_STATUS 0x21c /* 32bit */ 28#define VSE_CVP_STATUS 0x1c /* 32bit */
29#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */ 29#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
30#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */ 30#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
31#define VSE_CVP_STATUS_CVP_EN BIT(20) /* ctrl block is enabling CVP */ 31#define VSE_CVP_STATUS_CVP_EN BIT(20) /* ctrl block is enabling CVP */
@@ -33,41 +33,93 @@
33#define VSE_CVP_STATUS_CFG_DONE BIT(23) /* CVP_CONFIG_DONE */ 33#define VSE_CVP_STATUS_CFG_DONE BIT(23) /* CVP_CONFIG_DONE */
34#define VSE_CVP_STATUS_PLD_CLK_IN_USE BIT(24) /* PLD_CLK_IN_USE */ 34#define VSE_CVP_STATUS_PLD_CLK_IN_USE BIT(24) /* PLD_CLK_IN_USE */
35 35
36#define VSE_CVP_MODE_CTRL 0x220 /* 32bit */ 36#define VSE_CVP_MODE_CTRL 0x20 /* 32bit */
37#define VSE_CVP_MODE_CTRL_CVP_MODE BIT(0) /* CVP (1) or normal mode (0) */ 37#define VSE_CVP_MODE_CTRL_CVP_MODE BIT(0) /* CVP (1) or normal mode (0) */
38#define VSE_CVP_MODE_CTRL_HIP_CLK_SEL BIT(1) /* PMA (1) or fabric clock (0) */ 38#define VSE_CVP_MODE_CTRL_HIP_CLK_SEL BIT(1) /* PMA (1) or fabric clock (0) */
39#define VSE_CVP_MODE_CTRL_NUMCLKS_OFF 8 /* NUMCLKS bits offset */ 39#define VSE_CVP_MODE_CTRL_NUMCLKS_OFF 8 /* NUMCLKS bits offset */
40#define VSE_CVP_MODE_CTRL_NUMCLKS_MASK GENMASK(15, 8) 40#define VSE_CVP_MODE_CTRL_NUMCLKS_MASK GENMASK(15, 8)
41 41
42#define VSE_CVP_DATA 0x228 /* 32bit */ 42#define VSE_CVP_DATA 0x28 /* 32bit */
43#define VSE_CVP_PROG_CTRL 0x22c /* 32bit */ 43#define VSE_CVP_PROG_CTRL 0x2c /* 32bit */
44#define VSE_CVP_PROG_CTRL_CONFIG BIT(0) 44#define VSE_CVP_PROG_CTRL_CONFIG BIT(0)
45#define VSE_CVP_PROG_CTRL_START_XFER BIT(1) 45#define VSE_CVP_PROG_CTRL_START_XFER BIT(1)
46#define VSE_CVP_PROG_CTRL_MASK GENMASK(1, 0)
46 47
47#define VSE_UNCOR_ERR_STATUS 0x234 /* 32bit */ 48#define VSE_UNCOR_ERR_STATUS 0x34 /* 32bit */
48#define VSE_UNCOR_ERR_CVP_CFG_ERR BIT(5) /* CVP_CONFIG_ERROR_LATCHED */ 49#define VSE_UNCOR_ERR_CVP_CFG_ERR BIT(5) /* CVP_CONFIG_ERROR_LATCHED */
49 50
51#define V1_VSEC_OFFSET 0x200 /* Vendor Specific Offset V1 */
52/* V2 Defines */
53#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
54
55#define V2_CREDIT_TIMEOUT_US 20000
56#define V2_CHECK_CREDIT_US 10
57#define V2_POLL_TIMEOUT_US 1000000
58#define V2_USER_TIMEOUT_US 500000
59
60#define V1_POLL_TIMEOUT_US 10
61
50#define DRV_NAME "altera-cvp" 62#define DRV_NAME "altera-cvp"
51#define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager" 63#define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager"
52 64
65/* Write block sizes */
66#define ALTERA_CVP_V1_SIZE 4
67#define ALTERA_CVP_V2_SIZE 4096
68
53/* Optional CvP config error status check for debugging */ 69/* Optional CvP config error status check for debugging */
54static bool altera_cvp_chkcfg; 70static bool altera_cvp_chkcfg;
55 71
72struct cvp_priv;
73
56struct altera_cvp_conf { 74struct altera_cvp_conf {
57 struct fpga_manager *mgr; 75 struct fpga_manager *mgr;
58 struct pci_dev *pci_dev; 76 struct pci_dev *pci_dev;
59 void __iomem *map; 77 void __iomem *map;
60 void (*write_data)(struct altera_cvp_conf *, u32); 78 void (*write_data)(struct altera_cvp_conf *conf,
79 u32 data);
61 char mgr_name[64]; 80 char mgr_name[64];
62 u8 numclks; 81 u8 numclks;
82 u32 sent_packets;
83 u32 vsec_offset;
84 const struct cvp_priv *priv;
85};
86
87struct cvp_priv {
88 void (*switch_clk)(struct altera_cvp_conf *conf);
89 int (*clear_state)(struct altera_cvp_conf *conf);
90 int (*wait_credit)(struct fpga_manager *mgr, u32 blocks);
91 size_t block_size;
92 int poll_time_us;
93 int user_time_us;
63}; 94};
64 95
96static int altera_read_config_byte(struct altera_cvp_conf *conf,
97 int where, u8 *val)
98{
99 return pci_read_config_byte(conf->pci_dev, conf->vsec_offset + where,
100 val);
101}
102
103static int altera_read_config_dword(struct altera_cvp_conf *conf,
104 int where, u32 *val)
105{
106 return pci_read_config_dword(conf->pci_dev, conf->vsec_offset + where,
107 val);
108}
109
110static int altera_write_config_dword(struct altera_cvp_conf *conf,
111 int where, u32 val)
112{
113 return pci_write_config_dword(conf->pci_dev, conf->vsec_offset + where,
114 val);
115}
116
65static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr) 117static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
66{ 118{
67 struct altera_cvp_conf *conf = mgr->priv; 119 struct altera_cvp_conf *conf = mgr->priv;
68 u32 status; 120 u32 status;
69 121
70 pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &status); 122 altera_read_config_dword(conf, VSE_CVP_STATUS, &status);
71 123
72 if (status & VSE_CVP_STATUS_CFG_DONE) 124 if (status & VSE_CVP_STATUS_CFG_DONE)
73 return FPGA_MGR_STATE_OPERATING; 125 return FPGA_MGR_STATE_OPERATING;
@@ -85,7 +137,8 @@ static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
85 137
86static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val) 138static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
87{ 139{
88 pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val); 140 pci_write_config_dword(conf->pci_dev, conf->vsec_offset + VSE_CVP_DATA,
141 val);
89} 142}
90 143
91/* switches between CvP clock and internal clock */ 144/* switches between CvP clock and internal clock */
@@ -95,10 +148,10 @@ static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
95 u32 val; 148 u32 val;
96 149
97 /* set 1 CVP clock cycle for every CVP Data Register Write */ 150 /* set 1 CVP clock cycle for every CVP Data Register Write */
98 pci_read_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, &val); 151 altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
99 val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK; 152 val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
100 val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF; 153 val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
101 pci_write_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, val); 154 altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
102 155
103 for (i = 0; i < CVP_DUMMY_WR; i++) 156 for (i = 0; i < CVP_DUMMY_WR; i++)
104 conf->write_data(conf, 0); /* dummy data, could be any value */ 157 conf->write_data(conf, 0); /* dummy data, could be any value */
@@ -115,7 +168,7 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
115 retries++; 168 retries++;
116 169
117 do { 170 do {
118 pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val); 171 altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
119 if ((val & status_mask) == status_val) 172 if ((val & status_mask) == status_val)
120 return 0; 173 return 0;
121 174
@@ -126,32 +179,136 @@ static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
126 return -ETIMEDOUT; 179 return -ETIMEDOUT;
127} 180}
128 181
182static int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
183{
184 struct altera_cvp_conf *conf = mgr->priv;
185 u32 val;
186 int ret;
187
188 /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
189 ret = altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
190 if (ret || (val & VSE_CVP_STATUS_CFG_ERR)) {
191 dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
192 bytes);
193 return -EPROTO;
194 }
195 return 0;
196}
197
198/*
199 * CvP Version2 Functions
200 * Recent Intel FPGAs use a credit mechanism to throttle incoming
201 * bitstreams and a different method of clearing the state.
202 */
203
204static int altera_cvp_v2_clear_state(struct altera_cvp_conf *conf)
205{
206 u32 val;
207 int ret;
208
209 /* Clear the START_XFER and CVP_CONFIG bits */
210 ret = altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
211 if (ret) {
212 dev_err(&conf->pci_dev->dev,
213 "Error reading CVP Program Control Register\n");
214 return ret;
215 }
216
217 val &= ~VSE_CVP_PROG_CTRL_MASK;
218 ret = altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
219 if (ret) {
220 dev_err(&conf->pci_dev->dev,
221 "Error writing CVP Program Control Register\n");
222 return ret;
223 }
224
225 return altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
226 conf->priv->poll_time_us);
227}
228
229static int altera_cvp_v2_wait_for_credit(struct fpga_manager *mgr,
230 u32 blocks)
231{
232 u32 timeout = V2_CREDIT_TIMEOUT_US / V2_CHECK_CREDIT_US;
233 struct altera_cvp_conf *conf = mgr->priv;
234 int ret;
235 u8 val;
236
237 do {
238 ret = altera_read_config_byte(conf, VSE_CVP_TX_CREDITS, &val);
239 if (ret) {
240 dev_err(&conf->pci_dev->dev,
241 "Error reading CVP Credit Register\n");
242 return ret;
243 }
244
245 /* Return if there is space in FIFO */
246 if (val - (u8)conf->sent_packets)
247 return 0;
248
249 ret = altera_cvp_chk_error(mgr, blocks * ALTERA_CVP_V2_SIZE);
250 if (ret) {
251 dev_err(&conf->pci_dev->dev,
252 "CE Bit error credit reg[0x%x]:sent[0x%x]\n",
253 val, conf->sent_packets);
254 return -EAGAIN;
255 }
256
257 /* Limit config-space traffic while polling the credit byte */
258 usleep_range(V2_CHECK_CREDIT_US, V2_CHECK_CREDIT_US + 1);
259 } while (timeout--);
260
261 dev_err(&conf->pci_dev->dev, "Timeout waiting for credit\n");
262 return -ETIMEDOUT;
263}
264
265static int altera_cvp_send_block(struct altera_cvp_conf *conf,
266 const u32 *data, size_t len)
267{
268 u32 mask, words = len / sizeof(u32);
269 int i, remainder;
270
271 for (i = 0; i < words; i++)
272 conf->write_data(conf, *data++);
273
274 /* write up to 3 trailing bytes, if any */
275 remainder = len % sizeof(u32);
276 if (remainder) {
277 mask = BIT(remainder * 8) - 1;
278 if (mask)
279 conf->write_data(conf, *data & mask);
280 }
281
282 return 0;
283}
284
129static int altera_cvp_teardown(struct fpga_manager *mgr, 285static int altera_cvp_teardown(struct fpga_manager *mgr,
130 struct fpga_image_info *info) 286 struct fpga_image_info *info)
131{ 287{
132 struct altera_cvp_conf *conf = mgr->priv; 288 struct altera_cvp_conf *conf = mgr->priv;
133 struct pci_dev *pdev = conf->pci_dev;
134 int ret; 289 int ret;
135 u32 val; 290 u32 val;
136 291
137 /* STEP 12 - reset START_XFER bit */ 292 /* STEP 12 - reset START_XFER bit */
138 pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val); 293 altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
139 val &= ~VSE_CVP_PROG_CTRL_START_XFER; 294 val &= ~VSE_CVP_PROG_CTRL_START_XFER;
140 pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); 295 altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
141 296
142 /* STEP 13 - reset CVP_CONFIG bit */ 297 /* STEP 13 - reset CVP_CONFIG bit */
143 val &= ~VSE_CVP_PROG_CTRL_CONFIG; 298 val &= ~VSE_CVP_PROG_CTRL_CONFIG;
144 pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); 299 altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
145 300
146 /* 301 /*
147 * STEP 14 302 * STEP 14
148 * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy 303 * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
149 * writes to the HIP 304 * writes to the HIP
150 */ 305 */
151 altera_cvp_dummy_write(conf); /* from CVP clock to internal clock */ 306 if (conf->priv->switch_clk)
307 conf->priv->switch_clk(conf);
152 308
153 /* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */ 309 /* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
154 ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, 10); 310 ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
311 conf->priv->poll_time_us);
155 if (ret) 312 if (ret)
156 dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n"); 313 dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");
157 314
@@ -163,7 +320,6 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
163 const char *buf, size_t count) 320 const char *buf, size_t count)
164{ 321{
165 struct altera_cvp_conf *conf = mgr->priv; 322 struct altera_cvp_conf *conf = mgr->priv;
166 struct pci_dev *pdev = conf->pci_dev;
167 u32 iflags, val; 323 u32 iflags, val;
168 int ret; 324 int ret;
169 325
@@ -183,7 +339,7 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
183 conf->numclks = 1; /* for uncompressed and unencrypted images */ 339 conf->numclks = 1; /* for uncompressed and unencrypted images */
184 340
185 /* STEP 1 - read CVP status and check CVP_EN flag */ 341 /* STEP 1 - read CVP status and check CVP_EN flag */
186 pci_read_config_dword(pdev, VSE_CVP_STATUS, &val); 342 altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
187 if (!(val & VSE_CVP_STATUS_CVP_EN)) { 343 if (!(val & VSE_CVP_STATUS_CVP_EN)) {
188 dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val); 344 dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
189 return -ENODEV; 345 return -ENODEV;
@@ -201,30 +357,42 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
201 * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned) 357 * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
202 */ 358 */
203 /* switch from fabric to PMA clock */ 359 /* switch from fabric to PMA clock */
204 pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); 360 altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
205 val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL; 361 val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
206 pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); 362 altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
207 363
208 /* set CVP mode */ 364 /* set CVP mode */
209 pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); 365 altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
210 val |= VSE_CVP_MODE_CTRL_CVP_MODE; 366 val |= VSE_CVP_MODE_CTRL_CVP_MODE;
211 pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); 367 altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
212 368
213 /* 369 /*
214 * STEP 3 370 * STEP 3
215 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP 371 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
216 */ 372 */
217 altera_cvp_dummy_write(conf); 373 if (conf->priv->switch_clk)
374 conf->priv->switch_clk(conf);
375
376 if (conf->priv->clear_state) {
377 ret = conf->priv->clear_state(conf);
378 if (ret) {
379 dev_err(&mgr->dev, "Problem clearing out state\n");
380 return ret;
381 }
382 }
383
384 conf->sent_packets = 0;
218 385
219 /* STEP 4 - set CVP_CONFIG bit */ 386 /* STEP 4 - set CVP_CONFIG bit */
220 pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val); 387 altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
221 /* request control block to begin transfer using CVP */ 388 /* request control block to begin transfer using CVP */
222 val |= VSE_CVP_PROG_CTRL_CONFIG; 389 val |= VSE_CVP_PROG_CTRL_CONFIG;
223 pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); 390 altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
224 391
225 /* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */ 392 /* STEP 5 - poll CVP_CONFIG READY for 1 with timeout */
226 ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 393 ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
227 VSE_CVP_STATUS_CFG_RDY, 10); 394 VSE_CVP_STATUS_CFG_RDY,
395 conf->priv->poll_time_us);
228 if (ret) { 396 if (ret) {
229 dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n"); 397 dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
230 return ret; 398 return ret;
@@ -234,33 +402,28 @@ static int altera_cvp_write_init(struct fpga_manager *mgr,
234 * STEP 6 402 * STEP 6
235 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP 403 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
236 */ 404 */
237 altera_cvp_dummy_write(conf); 405 if (conf->priv->switch_clk)
406 conf->priv->switch_clk(conf);
407
408 if (altera_cvp_chkcfg) {
409 ret = altera_cvp_chk_error(mgr, 0);
410 if (ret) {
411 dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
412 return ret;
413 }
414 }
238 415
239 /* STEP 7 - set START_XFER */ 416 /* STEP 7 - set START_XFER */
240 pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val); 417 altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
241 val |= VSE_CVP_PROG_CTRL_START_XFER; 418 val |= VSE_CVP_PROG_CTRL_START_XFER;
242 pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val); 419 altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
243 420
244 /* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */ 421 /* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
245 pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); 422 if (conf->priv->switch_clk) {
246 val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK; 423 altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
247 val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF; 424 val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
248 pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); 425 val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
249 426 altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
250 return 0;
251}
252
253static inline int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
254{
255 struct altera_cvp_conf *conf = mgr->priv;
256 u32 val;
257
258 /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
259 pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
260 if (val & VSE_CVP_STATUS_CFG_ERR) {
261 dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
262 bytes);
263 return -EPROTO;
264 } 427 }
265 return 0; 428 return 0;
266} 429}
@@ -269,20 +432,32 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
269 size_t count) 432 size_t count)
270{ 433{
271 struct altera_cvp_conf *conf = mgr->priv; 434 struct altera_cvp_conf *conf = mgr->priv;
435 size_t done, remaining, len;
272 const u32 *data; 436 const u32 *data;
273 size_t done, remaining;
274 int status = 0; 437 int status = 0;
275 u32 mask;
276 438
277 /* STEP 9 - write 32-bit data from RBF file to CVP data register */ 439 /* STEP 9 - write 32-bit data from RBF file to CVP data register */
278 data = (u32 *)buf; 440 data = (u32 *)buf;
279 remaining = count; 441 remaining = count;
280 done = 0; 442 done = 0;
281 443
282 while (remaining >= 4) { 444 while (remaining) {
283 conf->write_data(conf, *data++); 445 /* Use credit throttling if available */
284 done += 4; 446 if (conf->priv->wait_credit) {
285 remaining -= 4; 447 status = conf->priv->wait_credit(mgr, done);
448 if (status) {
449 dev_err(&conf->pci_dev->dev,
450 "Wait Credit ERR: 0x%x\n", status);
451 return status;
452 }
453 }
454
455 len = min(conf->priv->block_size, remaining);
456 altera_cvp_send_block(conf, data, len);
457 data += len / sizeof(u32);
458 done += len;
459 remaining -= len;
460 conf->sent_packets++;
286 461
287 /* 462 /*
288 * STEP 10 (optional) and STEP 11 463 * STEP 10 (optional) and STEP 11
@@ -300,11 +475,6 @@ static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
300 } 475 }
301 } 476 }
302 477
303 /* write up to 3 trailing bytes, if any */
304 mask = BIT(remaining * 8) - 1;
305 if (mask)
306 conf->write_data(conf, *data & mask);
307
308 if (altera_cvp_chkcfg) 478 if (altera_cvp_chkcfg)
309 status = altera_cvp_chk_error(mgr, count); 479 status = altera_cvp_chk_error(mgr, count);
310 480
@@ -315,31 +485,30 @@ static int altera_cvp_write_complete(struct fpga_manager *mgr,
315 struct fpga_image_info *info) 485 struct fpga_image_info *info)
316{ 486{
317 struct altera_cvp_conf *conf = mgr->priv; 487 struct altera_cvp_conf *conf = mgr->priv;
318 struct pci_dev *pdev = conf->pci_dev; 488 u32 mask, val;
319 int ret; 489 int ret;
320 u32 mask;
321 u32 val;
322 490
323 ret = altera_cvp_teardown(mgr, info); 491 ret = altera_cvp_teardown(mgr, info);
324 if (ret) 492 if (ret)
325 return ret; 493 return ret;
326 494
327 /* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */ 495 /* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
328 pci_read_config_dword(pdev, VSE_UNCOR_ERR_STATUS, &val); 496 altera_read_config_dword(conf, VSE_UNCOR_ERR_STATUS, &val);
329 if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) { 497 if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
330 dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n"); 498 dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
331 return -EPROTO; 499 return -EPROTO;
332 } 500 }
333 501
334 /* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */ 502 /* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
335 pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val); 503 altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
336 val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL; 504 val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
337 val &= ~VSE_CVP_MODE_CTRL_CVP_MODE; 505 val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
338 pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val); 506 altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
339 507
340 /* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */ 508 /* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
341 mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE; 509 mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
342 ret = altera_cvp_wait_status(conf, mask, mask, TIMEOUT_US); 510 ret = altera_cvp_wait_status(conf, mask, mask,
511 conf->priv->user_time_us);
343 if (ret) 512 if (ret)
344 dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n"); 513 dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");
345 514
@@ -353,6 +522,21 @@ static const struct fpga_manager_ops altera_cvp_ops = {
353 .write_complete = altera_cvp_write_complete, 522 .write_complete = altera_cvp_write_complete,
354}; 523};
355 524
525static const struct cvp_priv cvp_priv_v1 = {
526 .switch_clk = altera_cvp_dummy_write,
527 .block_size = ALTERA_CVP_V1_SIZE,
528 .poll_time_us = V1_POLL_TIMEOUT_US,
529 .user_time_us = TIMEOUT_US,
530};
531
532static const struct cvp_priv cvp_priv_v2 = {
533 .clear_state = altera_cvp_v2_clear_state,
534 .wait_credit = altera_cvp_v2_wait_for_credit,
535 .block_size = ALTERA_CVP_V2_SIZE,
536 .poll_time_us = V2_POLL_TIMEOUT_US,
537 .user_time_us = V2_USER_TIMEOUT_US,
538};
539
356static ssize_t chkcfg_show(struct device_driver *dev, char *buf) 540static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
357{ 541{
358 return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg); 542 return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
@@ -394,22 +578,29 @@ static int altera_cvp_probe(struct pci_dev *pdev,
394{ 578{
395 struct altera_cvp_conf *conf; 579 struct altera_cvp_conf *conf;
396 struct fpga_manager *mgr; 580 struct fpga_manager *mgr;
581 int ret, offset;
397 u16 cmd, val; 582 u16 cmd, val;
398 u32 regval; 583 u32 regval;
399 int ret; 584
585 /* Discover the Vendor Specific Offset for this device */
586 offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
587 if (!offset) {
588 dev_err(&pdev->dev, "No Vendor Specific Offset.\n");
589 return -ENODEV;
590 }
400 591
401 /* 592 /*
402 * First check if this is the expected FPGA device. PCI config 593 * First check if this is the expected FPGA device. PCI config
403 * space access works without enabling the PCI device, memory 594 * space access works without enabling the PCI device, memory
404 * space access is enabled further down. 595 * space access is enabled further down.
405 */ 596 */
406 pci_read_config_word(pdev, VSE_PCIE_EXT_CAP_ID, &val); 597 pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
407 if (val != VSE_PCIE_EXT_CAP_ID_VAL) { 598 if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
408 dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val); 599 dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
409 return -ENODEV; 600 return -ENODEV;
410 } 601 }
411 602
412 pci_read_config_dword(pdev, VSE_CVP_STATUS, &regval); 603 pci_read_config_dword(pdev, offset + VSE_CVP_STATUS, &regval);
413 if (!(regval & VSE_CVP_STATUS_CVP_EN)) { 604 if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
414 dev_err(&pdev->dev, 605 dev_err(&pdev->dev,
415 "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n", 606 "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
@@ -421,6 +612,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
421 if (!conf) 612 if (!conf)
422 return -ENOMEM; 613 return -ENOMEM;
423 614
615 conf->vsec_offset = offset;
616
424 /* 617 /*
425 * Enable memory BAR access. We cannot use pci_enable_device() here 618 * Enable memory BAR access. We cannot use pci_enable_device() here
426 * because it will make the driver unusable with FPGA devices that 619 * because it will make the driver unusable with FPGA devices that
@@ -445,6 +638,11 @@ static int altera_cvp_probe(struct pci_dev *pdev,
445 conf->pci_dev = pdev; 638 conf->pci_dev = pdev;
446 conf->write_data = altera_cvp_write_data_iomem; 639 conf->write_data = altera_cvp_write_data_iomem;
447 640
641 if (conf->vsec_offset == V1_VSEC_OFFSET)
642 conf->priv = &cvp_priv_v1;
643 else
644 conf->priv = &cvp_priv_v2;
645
448 conf->map = pci_iomap(pdev, CVP_BAR, 0); 646 conf->map = pci_iomap(pdev, CVP_BAR, 0);
449 if (!conf->map) { 647 if (!conf->map) {
450 dev_warn(&pdev->dev, "Mapping CVP BAR failed\n"); 648 dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
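The trailing-byte handling added in altera_cvp_send_block() is easy to misread: for a block whose length is not a multiple of four, mask = BIT(remainder * 8) - 1 keeps exactly the remaining low-order bytes of the final little-endian word. A standalone userspace sketch of the same arithmetic (little-endian assumed, as the driver assumes when it casts the buffer to u32; the memcpy stands in for the driver's direct word read):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirror of the trailing-byte mask in altera_cvp_send_block(). */
int main(void)
{
	const uint8_t buf[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
	size_t len = sizeof(buf);		/* 7: one full word + 3 bytes */
	size_t words = len / sizeof(uint32_t);
	size_t remainder = len % sizeof(uint32_t);
	uint32_t last = 0, mask;

	memcpy(&last, buf + words * sizeof(uint32_t), remainder);
	mask = (1u << (remainder * 8)) - 1;	/* 0xff, 0xffff or 0xffffff */

	printf("full words: %zu, trailing write: 0x%08x\n",
	       words, last & mask);		/* prints 0x00776655 */
	return 0;
}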
diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c
index b293d83143f1..99b9cc0e70f0 100644
--- a/drivers/fpga/altera-pr-ip-core-plat.c
+++ b/drivers/fpga/altera-pr-ip-core-plat.c
@@ -32,7 +32,9 @@ static int alt_pr_platform_remove(struct platform_device *pdev)
32{ 32{
33 struct device *dev = &pdev->dev; 33 struct device *dev = &pdev->dev;
34 34
35 return alt_pr_unregister(dev); 35 alt_pr_unregister(dev);
36
37 return 0;
36} 38}
37 39
38static const struct of_device_id alt_pr_of_match[] = { 40static const struct of_device_id alt_pr_of_match[] = {
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index a7a3bf0b5202..2cf25fd5e897 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -201,15 +201,13 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
201} 201}
202EXPORT_SYMBOL_GPL(alt_pr_register); 202EXPORT_SYMBOL_GPL(alt_pr_register);
203 203
204int alt_pr_unregister(struct device *dev) 204void alt_pr_unregister(struct device *dev)
205{ 205{
206 struct fpga_manager *mgr = dev_get_drvdata(dev); 206 struct fpga_manager *mgr = dev_get_drvdata(dev);
207 207
208 dev_dbg(dev, "%s\n", __func__); 208 dev_dbg(dev, "%s\n", __func__);
209 209
210 fpga_mgr_unregister(mgr); 210 fpga_mgr_unregister(mgr);
211
212 return 0;
213} 211}
214EXPORT_SYMBOL_GPL(alt_pr_unregister); 212EXPORT_SYMBOL_GPL(alt_pr_unregister);
215 213
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
new file mode 100644
index 000000000000..c1467ae1a6b6
--- /dev/null
+++ b/drivers/fpga/dfl-afu-error.c
@@ -0,0 +1,230 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
4 *
5 * Copyright 2019 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <hao.wu@linux.intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Joseph Grecco <joe.grecco@intel.com>
11 * Enno Luebbers <enno.luebbers@intel.com>
12 * Tim Whisonant <tim.whisonant@intel.com>
13 * Ananda Ravuri <ananda.ravuri@intel.com>
14 * Mitchel Henry <henry.mitchel@intel.com>
15 */
16
17#include <linux/uaccess.h>
18
19#include "dfl-afu.h"
20
21#define PORT_ERROR_MASK 0x8
22#define PORT_ERROR 0x10
23#define PORT_FIRST_ERROR 0x18
24#define PORT_MALFORMED_REQ0 0x20
25#define PORT_MALFORMED_REQ1 0x28
26
27#define ERROR_MASK GENMASK_ULL(63, 0)
28
29/* mask or unmask port errors by the error mask register. */
30static void __afu_port_err_mask(struct device *dev, bool mask)
31{
32 void __iomem *base;
33
34 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
35
36 writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
37}
38
39static void afu_port_err_mask(struct device *dev, bool mask)
40{
41 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
42
43 mutex_lock(&pdata->lock);
44 __afu_port_err_mask(dev, mask);
45 mutex_unlock(&pdata->lock);
46}
47
48/* clear port errors. */
49static int afu_port_err_clear(struct device *dev, u64 err)
50{
51 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
52 struct platform_device *pdev = to_platform_device(dev);
53 void __iomem *base_err, *base_hdr;
54 int ret = -EBUSY;
55 u64 v;
56
57 base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
58 base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
59
60 mutex_lock(&pdata->lock);
61
62 /*
63 * clear Port Errors
64 *
65 * - Check for AP6 State
66 * - Halt Port by keeping Port in reset
67 * - Set PORT Error mask to all 1 to mask errors
68 * - Clear all errors
69 * - Set Port mask to all 0 to enable errors
70 * - Error registers start capturing new errors
71 * - Enable Port by pulling the port out of reset
72 */
73
74 /* if the device is still in the AP6 power state, errors cannot be cleared. */
75 v = readq(base_hdr + PORT_HDR_STS);
76 if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
77 dev_err(dev, "Could not clear errors, device in AP6 state.\n");
78 goto done;
79 }
80
81 /* Halt Port by keeping Port in reset */
82 ret = __afu_port_disable(pdev);
83 if (ret)
84 goto done;
85
86 /* Mask all errors */
87 __afu_port_err_mask(dev, true);
88
89 /* Clear errors only if the err input matches the current port errors. */
90 v = readq(base_err + PORT_ERROR);
91
92 if (v == err) {
93 writeq(v, base_err + PORT_ERROR);
94
95 v = readq(base_err + PORT_FIRST_ERROR);
96 writeq(v, base_err + PORT_FIRST_ERROR);
97 } else {
98 ret = -EINVAL;
99 }
100
101 /* Clear mask */
102 __afu_port_err_mask(dev, false);
103
104 /* Enable the Port by clearing the reset */
105 __afu_port_enable(pdev);
106
107done:
108 mutex_unlock(&pdata->lock);
109 return ret;
110}
111
112static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
113 char *buf)
114{
115 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
116 void __iomem *base;
117 u64 error;
118
119 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
120
121 mutex_lock(&pdata->lock);
122 error = readq(base + PORT_ERROR);
123 mutex_unlock(&pdata->lock);
124
125 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
126}
127
128static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
129 const char *buff, size_t count)
130{
131 u64 value;
132 int ret;
133
134 if (kstrtou64(buff, 0, &value))
135 return -EINVAL;
136
137 ret = afu_port_err_clear(dev, value);
138
139 return ret ? ret : count;
140}
141static DEVICE_ATTR_RW(errors);
142
143static ssize_t first_error_show(struct device *dev,
144 struct device_attribute *attr, char *buf)
145{
146 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
147 void __iomem *base;
148 u64 error;
149
150 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
151
152 mutex_lock(&pdata->lock);
153 error = readq(base + PORT_FIRST_ERROR);
154 mutex_unlock(&pdata->lock);
155
156 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
157}
158static DEVICE_ATTR_RO(first_error);
159
160static ssize_t first_malformed_req_show(struct device *dev,
161 struct device_attribute *attr,
162 char *buf)
163{
164 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
165 void __iomem *base;
166 u64 req0, req1;
167
168 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
169
170 mutex_lock(&pdata->lock);
171 req0 = readq(base + PORT_MALFORMED_REQ0);
172 req1 = readq(base + PORT_MALFORMED_REQ1);
173 mutex_unlock(&pdata->lock);
174
175 return sprintf(buf, "0x%016llx%016llx\n",
176 (unsigned long long)req1, (unsigned long long)req0);
177}
178static DEVICE_ATTR_RO(first_malformed_req);
179
180static struct attribute *port_err_attrs[] = {
181 &dev_attr_errors.attr,
182 &dev_attr_first_error.attr,
183 &dev_attr_first_malformed_req.attr,
184 NULL,
185};
186
187static umode_t port_err_attrs_visible(struct kobject *kobj,
188 struct attribute *attr, int n)
189{
190 struct device *dev = kobj_to_dev(kobj);
191
192 /*
193 * sysfs entries are visible only if the related private feature
194 * is enumerated.
195 */
196 if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
197 return 0;
198
199 return attr->mode;
200}
201
202const struct attribute_group port_err_group = {
203 .name = "errors",
204 .attrs = port_err_attrs,
205 .is_visible = port_err_attrs_visible,
206};
207
208static int port_err_init(struct platform_device *pdev,
209 struct dfl_feature *feature)
210{
211 afu_port_err_mask(&pdev->dev, false);
212
213 return 0;
214}
215
216static void port_err_uinit(struct platform_device *pdev,
217 struct dfl_feature *feature)
218{
219 afu_port_err_mask(&pdev->dev, true);
220}
221
222const struct dfl_feature_id port_err_id_table[] = {
223 {.id = PORT_FEATURE_ID_ERROR,},
224 {0,}
225};
226
227const struct dfl_feature_ops port_err_ops = {
228 .init = port_err_init,
229 .uinit = port_err_uinit,
230};
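afu_port_err_clear() only clears errors when the written value matches what is currently latched, so userspace is expected to read "errors" and echo the value back. A minimal sketch, assuming a port device named dfl-port.0 (the path is illustrative, not taken from this patch):

/* Sketch of the read-then-write-back protocol errors_store() expects;
 * a mismatched value makes the store fail with -EINVAL.
 */
#include <stdio.h>

#define PORT_ERR "/sys/bus/platform/devices/dfl-port.0/errors/errors"

int main(void)
{
	unsigned long long err;
	FILE *f;

	f = fopen(PORT_ERR, "r");
	if (!f || fscanf(f, "%llx", &err) != 1) {
		perror(PORT_ERR);
		return 1;
	}
	fclose(f);

	f = fopen(PORT_ERR, "w");
	if (!f || fprintf(f, "0x%llx\n", err) < 0) {
		perror(PORT_ERR);
		return 1;
	}
	fclose(f);
	return 0;
}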
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 02baa6a227c0..e4a34dc7947f 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -22,14 +22,17 @@
22#include "dfl-afu.h" 22#include "dfl-afu.h"
23 23
24/** 24/**
 25 * port_enable - enable a port 25 * __afu_port_enable - enable a port by clearing reset
26 * @pdev: port platform device. 26 * @pdev: port platform device.
27 * 27 *
 28 * Enable Port by clearing the port soft reset bit, which is set by default. 28 * Enable Port by clearing the port soft reset bit, which is set by default.
29 * The AFU is unable to respond to any MMIO access while in reset. 29 * The AFU is unable to respond to any MMIO access while in reset.
30 * port_enable function should only be used after port_disable function. 30 * __afu_port_enable function should only be used after __afu_port_disable
31 * function.
32 *
33 * The caller needs to hold lock for protection.
31 */ 34 */
32static void port_enable(struct platform_device *pdev) 35void __afu_port_enable(struct platform_device *pdev)
33{ 36{
34 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); 37 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
35 void __iomem *base; 38 void __iomem *base;
@@ -52,13 +55,14 @@ static void port_enable(struct platform_device *pdev)
52#define RST_POLL_TIMEOUT 1000 /* us */ 55#define RST_POLL_TIMEOUT 1000 /* us */
53 56
54/** 57/**
 55 * port_disable - disable a port 58 * __afu_port_disable - disable a port by holding reset
56 * @pdev: port platform device. 59 * @pdev: port platform device.
57 * 60 *
 58 * Disable Port by setting the port soft reset bit, it puts the port into 61 * Disable Port by setting the port soft reset bit, which puts the port into reset.
59 * reset. 62 *
63 * The caller needs to hold lock for protection.
60 */ 64 */
61static int port_disable(struct platform_device *pdev) 65int __afu_port_disable(struct platform_device *pdev)
62{ 66{
63 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); 67 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
64 void __iomem *base; 68 void __iomem *base;
@@ -104,9 +108,9 @@ static int __port_reset(struct platform_device *pdev)
104{ 108{
105 int ret; 109 int ret;
106 110
107 ret = port_disable(pdev); 111 ret = __afu_port_disable(pdev);
108 if (!ret) 112 if (!ret)
109 port_enable(pdev); 113 __afu_port_enable(pdev);
110 114
111 return ret; 115 return ret;
112} 116}
@@ -141,27 +145,267 @@ id_show(struct device *dev, struct device_attribute *attr, char *buf)
141} 145}
142static DEVICE_ATTR_RO(id); 146static DEVICE_ATTR_RO(id);
143 147
144static const struct attribute *port_hdr_attrs[] = { 148static ssize_t
149ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
150{
151 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
152 void __iomem *base;
153 u64 v;
154
155 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
156
157 mutex_lock(&pdata->lock);
158 v = readq(base + PORT_HDR_CTRL);
159 mutex_unlock(&pdata->lock);
160
161 return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
162}
163
164static ssize_t
165ltr_store(struct device *dev, struct device_attribute *attr,
166 const char *buf, size_t count)
167{
168 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
169 void __iomem *base;
170 bool ltr;
171 u64 v;
172
173 if (kstrtobool(buf, &ltr))
174 return -EINVAL;
175
176 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
177
178 mutex_lock(&pdata->lock);
179 v = readq(base + PORT_HDR_CTRL);
180 v &= ~PORT_CTRL_LATENCY;
181 v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
182 writeq(v, base + PORT_HDR_CTRL);
183 mutex_unlock(&pdata->lock);
184
185 return count;
186}
187static DEVICE_ATTR_RW(ltr);
188
189static ssize_t
190ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
191{
192 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
193 void __iomem *base;
194 u64 v;
195
196 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
197
198 mutex_lock(&pdata->lock);
199 v = readq(base + PORT_HDR_STS);
200 mutex_unlock(&pdata->lock);
201
202 return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
203}
204
205static ssize_t
206ap1_event_store(struct device *dev, struct device_attribute *attr,
207 const char *buf, size_t count)
208{
209 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
210 void __iomem *base;
211 bool clear;
212
213 if (kstrtobool(buf, &clear) || !clear)
214 return -EINVAL;
215
216 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
217
218 mutex_lock(&pdata->lock);
219 writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
220 mutex_unlock(&pdata->lock);
221
222 return count;
223}
224static DEVICE_ATTR_RW(ap1_event);
225
226static ssize_t
227ap2_event_show(struct device *dev, struct device_attribute *attr,
228 char *buf)
229{
230 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
231 void __iomem *base;
232 u64 v;
233
234 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
235
236 mutex_lock(&pdata->lock);
237 v = readq(base + PORT_HDR_STS);
238 mutex_unlock(&pdata->lock);
239
240 return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
241}
242
243static ssize_t
244ap2_event_store(struct device *dev, struct device_attribute *attr,
245 const char *buf, size_t count)
246{
247 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
248 void __iomem *base;
249 bool clear;
250
251 if (kstrtobool(buf, &clear) || !clear)
252 return -EINVAL;
253
254 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
255
256 mutex_lock(&pdata->lock);
257 writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
258 mutex_unlock(&pdata->lock);
259
260 return count;
261}
262static DEVICE_ATTR_RW(ap2_event);
263
264static ssize_t
265power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
266{
267 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
268 void __iomem *base;
269 u64 v;
270
271 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
272
273 mutex_lock(&pdata->lock);
274 v = readq(base + PORT_HDR_STS);
275 mutex_unlock(&pdata->lock);
276
277 return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
278}
279static DEVICE_ATTR_RO(power_state);
280
281static ssize_t
282userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
283 const char *buf, size_t count)
284{
285 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
286 u64 userclk_freq_cmd;
287 void __iomem *base;
288
289 if (kstrtou64(buf, 0, &userclk_freq_cmd))
290 return -EINVAL;
291
292 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
293
294 mutex_lock(&pdata->lock);
295 writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
296 mutex_unlock(&pdata->lock);
297
298 return count;
299}
300static DEVICE_ATTR_WO(userclk_freqcmd);
301
302static ssize_t
303userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
304 const char *buf, size_t count)
305{
306 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
307 u64 userclk_freqcntr_cmd;
308 void __iomem *base;
309
310 if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
311 return -EINVAL;
312
313 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
314
315 mutex_lock(&pdata->lock);
316 writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
317 mutex_unlock(&pdata->lock);
318
319 return count;
320}
321static DEVICE_ATTR_WO(userclk_freqcntrcmd);
322
323static ssize_t
324userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
325 char *buf)
326{
327 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
328 u64 userclk_freqsts;
329 void __iomem *base;
330
331 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
332
333 mutex_lock(&pdata->lock);
334 userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
335 mutex_unlock(&pdata->lock);
336
337 return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
338}
339static DEVICE_ATTR_RO(userclk_freqsts);
340
341static ssize_t
342userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
343 char *buf)
344{
345 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
346 u64 userclk_freqcntrsts;
347 void __iomem *base;
348
349 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
350
351 mutex_lock(&pdata->lock);
352 userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
353 mutex_unlock(&pdata->lock);
354
355 return sprintf(buf, "0x%llx\n",
356 (unsigned long long)userclk_freqcntrsts);
357}
358static DEVICE_ATTR_RO(userclk_freqcntrsts);
359
360static struct attribute *port_hdr_attrs[] = {
145 &dev_attr_id.attr, 361 &dev_attr_id.attr,
362 &dev_attr_ltr.attr,
363 &dev_attr_ap1_event.attr,
364 &dev_attr_ap2_event.attr,
365 &dev_attr_power_state.attr,
366 &dev_attr_userclk_freqcmd.attr,
367 &dev_attr_userclk_freqcntrcmd.attr,
368 &dev_attr_userclk_freqsts.attr,
369 &dev_attr_userclk_freqcntrsts.attr,
146 NULL, 370 NULL,
147}; 371};
148 372
149static int port_hdr_init(struct platform_device *pdev, 373static umode_t port_hdr_attrs_visible(struct kobject *kobj,
150 struct dfl_feature *feature) 374 struct attribute *attr, int n)
151{ 375{
152 dev_dbg(&pdev->dev, "PORT HDR Init.\n"); 376 struct device *dev = kobj_to_dev(kobj);
377 umode_t mode = attr->mode;
378 void __iomem *base;
153 379
154 port_reset(pdev); 380 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
381
382 if (dfl_feature_revision(base) > 0) {
383 /*
 384 * userclk sysfs interfaces are only visible when the port
 385 * revision is 0, as hardware with revision >0 doesn't
 386 * support them.
387 */
388 if (attr == &dev_attr_userclk_freqcmd.attr ||
389 attr == &dev_attr_userclk_freqcntrcmd.attr ||
390 attr == &dev_attr_userclk_freqsts.attr ||
391 attr == &dev_attr_userclk_freqcntrsts.attr)
392 mode = 0;
393 }
155 394
156 return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs); 395 return mode;
157} 396}
158 397
159static void port_hdr_uinit(struct platform_device *pdev, 398static const struct attribute_group port_hdr_group = {
160 struct dfl_feature *feature) 399 .attrs = port_hdr_attrs,
400 .is_visible = port_hdr_attrs_visible,
401};
402
403static int port_hdr_init(struct platform_device *pdev,
404 struct dfl_feature *feature)
161{ 405{
162 dev_dbg(&pdev->dev, "PORT HDR UInit.\n"); 406 port_reset(pdev);
163 407
164 sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs); 408 return 0;
165} 409}
166 410
167static long 411static long
@@ -185,9 +429,13 @@ port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
185 return ret; 429 return ret;
186} 430}
187 431
432static const struct dfl_feature_id port_hdr_id_table[] = {
433 {.id = PORT_FEATURE_ID_HEADER,},
434 {0,}
435};
436
188static const struct dfl_feature_ops port_hdr_ops = { 437static const struct dfl_feature_ops port_hdr_ops = {
189 .init = port_hdr_init, 438 .init = port_hdr_init,
190 .uinit = port_hdr_uinit,
191 .ioctl = port_hdr_ioctl, 439 .ioctl = port_hdr_ioctl,
192}; 440};
193 441
@@ -214,52 +462,91 @@ afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
214} 462}
215static DEVICE_ATTR_RO(afu_id); 463static DEVICE_ATTR_RO(afu_id);
216 464
217static const struct attribute *port_afu_attrs[] = { 465static struct attribute *port_afu_attrs[] = {
218 &dev_attr_afu_id.attr, 466 &dev_attr_afu_id.attr,
219 NULL 467 NULL
220}; 468};
221 469
470static umode_t port_afu_attrs_visible(struct kobject *kobj,
471 struct attribute *attr, int n)
472{
473 struct device *dev = kobj_to_dev(kobj);
474
475 /*
 476 * sysfs entries are visible only if the related private feature
 477 * is enumerated.
478 */
479 if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
480 return 0;
481
482 return attr->mode;
483}
484
485static const struct attribute_group port_afu_group = {
486 .attrs = port_afu_attrs,
487 .is_visible = port_afu_attrs_visible,
488};
489
222static int port_afu_init(struct platform_device *pdev, 490static int port_afu_init(struct platform_device *pdev,
223 struct dfl_feature *feature) 491 struct dfl_feature *feature)
224{ 492{
225 struct resource *res = &pdev->resource[feature->resource_index]; 493 struct resource *res = &pdev->resource[feature->resource_index];
226 int ret;
227 494
228 dev_dbg(&pdev->dev, "PORT AFU Init.\n"); 495 return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
496 DFL_PORT_REGION_INDEX_AFU,
497 resource_size(res), res->start,
498 DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
499 DFL_PORT_REGION_WRITE);
500}
229 501
230 ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev), 502static const struct dfl_feature_id port_afu_id_table[] = {
231 DFL_PORT_REGION_INDEX_AFU, resource_size(res), 503 {.id = PORT_FEATURE_ID_AFU,},
232 res->start, DFL_PORT_REGION_READ | 504 {0,}
233 DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP); 505};
234 if (ret)
235 return ret;
236 506
237 return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs); 507static const struct dfl_feature_ops port_afu_ops = {
238} 508 .init = port_afu_init,
509};
239 510
240static void port_afu_uinit(struct platform_device *pdev, 511static int port_stp_init(struct platform_device *pdev,
241 struct dfl_feature *feature) 512 struct dfl_feature *feature)
242{ 513{
243 dev_dbg(&pdev->dev, "PORT AFU UInit.\n"); 514 struct resource *res = &pdev->resource[feature->resource_index];
244 515
245 sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs); 516 return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
517 DFL_PORT_REGION_INDEX_STP,
518 resource_size(res), res->start,
519 DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
520 DFL_PORT_REGION_WRITE);
246} 521}
247 522
248static const struct dfl_feature_ops port_afu_ops = { 523static const struct dfl_feature_id port_stp_id_table[] = {
249 .init = port_afu_init, 524 {.id = PORT_FEATURE_ID_STP,},
250 .uinit = port_afu_uinit, 525 {0,}
526};
527
528static const struct dfl_feature_ops port_stp_ops = {
529 .init = port_stp_init,
251}; 530};
252 531
253static struct dfl_feature_driver port_feature_drvs[] = { 532static struct dfl_feature_driver port_feature_drvs[] = {
254 { 533 {
255 .id = PORT_FEATURE_ID_HEADER, 534 .id_table = port_hdr_id_table,
256 .ops = &port_hdr_ops, 535 .ops = &port_hdr_ops,
257 }, 536 },
258 { 537 {
259 .id = PORT_FEATURE_ID_AFU, 538 .id_table = port_afu_id_table,
260 .ops = &port_afu_ops, 539 .ops = &port_afu_ops,
261 }, 540 },
262 { 541 {
542 .id_table = port_err_id_table,
543 .ops = &port_err_ops,
544 },
545 {
546 .id_table = port_stp_id_table,
547 .ops = &port_stp_ops,
548 },
549 {
263 .ops = NULL, 550 .ops = NULL,
264 } 551 }
265}; 552};
@@ -545,9 +832,9 @@ static int port_enable_set(struct platform_device *pdev, bool enable)
545 832
546 mutex_lock(&pdata->lock); 833 mutex_lock(&pdata->lock);
547 if (enable) 834 if (enable)
548 port_enable(pdev); 835 __afu_port_enable(pdev);
549 else 836 else
550 ret = port_disable(pdev); 837 ret = __afu_port_disable(pdev);
551 mutex_unlock(&pdata->lock); 838 mutex_unlock(&pdata->lock);
552 839
553 return ret; 840 return ret;
@@ -599,9 +886,17 @@ static int afu_remove(struct platform_device *pdev)
599 return 0; 886 return 0;
600} 887}
601 888
889static const struct attribute_group *afu_dev_groups[] = {
890 &port_hdr_group,
891 &port_afu_group,
892 &port_err_group,
893 NULL
894};
895
602static struct platform_driver afu_driver = { 896static struct platform_driver afu_driver = {
603 .driver = { 897 .driver = {
604 .name = DFL_FPGA_FEATURE_DEV_PORT, 898 .name = DFL_FPGA_FEATURE_DEV_PORT,
899 .dev_groups = afu_dev_groups,
605 }, 900 },
606 .probe = afu_probe, 901 .probe = afu_probe,
607 .remove = afu_remove, 902 .remove = afu_remove,
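
The pattern above, repeated for the FME driver later in this series, replaces dynamic sysfs_create_files()/sysfs_remove_files() calls in the feature init/uinit callbacks with static attribute groups registered through the driver core's dev_groups pointer; an is_visible() callback then hides attributes the hardware doesn't support instead of creating them conditionally. A minimal sketch of the pattern, with hypothetical names (dev_attr_foo and the support check are assumptions, not part of this patch):

static struct attribute *demo_attrs[] = {
	&dev_attr_foo.attr,		/* hypothetical attribute */
	NULL,
};

static umode_t demo_attrs_visible(struct kobject *kobj,
				  struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/* return 0 to hide the attribute, attr->mode to expose it */
	if (!demo_device_supports_foo(dev))	/* hypothetical check */
		return 0;

	return attr->mode;
}

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attrs_visible,
};

static const struct attribute_group *demo_dev_groups[] = {
	&demo_group,
	NULL,
};

/*
 * Registered once at driver definition; the core creates and removes
 * the files around probe()/remove(), so no uinit cleanup is needed.
 */
static struct platform_driver demo_driver = {
	.driver = {
		.name		= "demo",
		.dev_groups	= demo_dev_groups,
	},
};

This is why port_hdr_init() above can simply reset the port and return 0.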
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 0c7630ae3cda..576e94960086 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -79,6 +79,10 @@ struct dfl_afu {
79 struct dfl_feature_platform_data *pdata; 79 struct dfl_feature_platform_data *pdata;
80}; 80};
81 81
 82/* hold pdata->lock when calling __afu_port_enable/disable */
83void __afu_port_enable(struct platform_device *pdev);
84int __afu_port_disable(struct platform_device *pdev);
85
82void afu_mmio_region_init(struct dfl_feature_platform_data *pdata); 86void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
83int afu_mmio_region_add(struct dfl_feature_platform_data *pdata, 87int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
84 u32 region_index, u64 region_size, u64 phys, u32 flags); 88 u32 region_index, u64 region_size, u64 phys, u32 flags);
@@ -97,4 +101,9 @@ int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
97struct dfl_afu_dma_region * 101struct dfl_afu_dma_region *
98afu_dma_region_find(struct dfl_feature_platform_data *pdata, 102afu_dma_region_find(struct dfl_feature_platform_data *pdata,
99 u64 iova, u64 size); 103 u64 iova, u64 size);
104
105extern const struct dfl_feature_ops port_err_ops;
106extern const struct dfl_feature_id port_err_id_table[];
107extern const struct attribute_group port_err_group;
108
100#endif /* __DFL_AFU_H */ 109#endif /* __DFL_AFU_H */
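
The comment above the __afu_port_enable()/__afu_port_disable() declarations encodes a locking contract: callers must hold pdata->lock across the call, as port_enable_set() in dfl-afu-main.c does. A minimal sketch of a conforming caller (the function name is hypothetical):

static int demo_toggle_port(struct platform_device *pdev, bool enable)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret = 0;

	mutex_lock(&pdata->lock);	/* required by the contract above */
	if (enable)
		__afu_port_enable(pdev);
	else
		ret = __afu_port_disable(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}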
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
new file mode 100644
index 000000000000..f897d414b923
--- /dev/null
+++ b/drivers/fpga/dfl-fme-error.c
@@ -0,0 +1,359 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Management Engine Error Management
4 *
5 * Copyright 2019 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Joseph Grecco <joe.grecco@intel.com>
12 * Enno Luebbers <enno.luebbers@intel.com>
13 * Tim Whisonant <tim.whisonant@intel.com>
14 * Ananda Ravuri <ananda.ravuri@intel.com>
15 * Mitchel, Henry <henry.mitchel@intel.com>
16 */
17
18#include <linux/uaccess.h>
19
20#include "dfl.h"
21#include "dfl-fme.h"
22
23#define FME_ERROR_MASK 0x8
24#define FME_ERROR 0x10
25#define MBP_ERROR BIT_ULL(6)
26#define PCIE0_ERROR_MASK 0x18
27#define PCIE0_ERROR 0x20
28#define PCIE1_ERROR_MASK 0x28
29#define PCIE1_ERROR 0x30
30#define FME_FIRST_ERROR 0x38
31#define FME_NEXT_ERROR 0x40
32#define RAS_NONFAT_ERROR_MASK 0x48
33#define RAS_NONFAT_ERROR 0x50
34#define RAS_CATFAT_ERROR_MASK 0x58
35#define RAS_CATFAT_ERROR 0x60
36#define RAS_ERROR_INJECT 0x68
37#define INJECT_ERROR_MASK GENMASK_ULL(2, 0)
38
39#define ERROR_MASK GENMASK_ULL(63, 0)
40
41static ssize_t pcie0_errors_show(struct device *dev,
42 struct device_attribute *attr, char *buf)
43{
44 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
45 void __iomem *base;
46 u64 value;
47
48 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
49
50 mutex_lock(&pdata->lock);
51 value = readq(base + PCIE0_ERROR);
52 mutex_unlock(&pdata->lock);
53
54 return sprintf(buf, "0x%llx\n", (unsigned long long)value);
55}
56
57static ssize_t pcie0_errors_store(struct device *dev,
58 struct device_attribute *attr,
59 const char *buf, size_t count)
60{
61 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
62 void __iomem *base;
63 int ret = 0;
64 u64 v, val;
65
66 if (kstrtou64(buf, 0, &val))
67 return -EINVAL;
68
69 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
70
71 mutex_lock(&pdata->lock);
72 writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
73
74 v = readq(base + PCIE0_ERROR);
75 if (val == v)
76 writeq(v, base + PCIE0_ERROR);
77 else
78 ret = -EINVAL;
79
80 writeq(0ULL, base + PCIE0_ERROR_MASK);
81 mutex_unlock(&pdata->lock);
82 return ret ? ret : count;
83}
84static DEVICE_ATTR_RW(pcie0_errors);
85
86static ssize_t pcie1_errors_show(struct device *dev,
87 struct device_attribute *attr, char *buf)
88{
89 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
90 void __iomem *base;
91 u64 value;
92
93 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
94
95 mutex_lock(&pdata->lock);
96 value = readq(base + PCIE1_ERROR);
97 mutex_unlock(&pdata->lock);
98
99 return sprintf(buf, "0x%llx\n", (unsigned long long)value);
100}
101
102static ssize_t pcie1_errors_store(struct device *dev,
103 struct device_attribute *attr,
104 const char *buf, size_t count)
105{
106 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
107 void __iomem *base;
108 int ret = 0;
109 u64 v, val;
110
111 if (kstrtou64(buf, 0, &val))
112 return -EINVAL;
113
114 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
115
116 mutex_lock(&pdata->lock);
117 writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
118
119 v = readq(base + PCIE1_ERROR);
120 if (val == v)
121 writeq(v, base + PCIE1_ERROR);
122 else
123 ret = -EINVAL;
124
125 writeq(0ULL, base + PCIE1_ERROR_MASK);
126 mutex_unlock(&pdata->lock);
127 return ret ? ret : count;
128}
129static DEVICE_ATTR_RW(pcie1_errors);
130
131static ssize_t nonfatal_errors_show(struct device *dev,
132 struct device_attribute *attr, char *buf)
133{
134 void __iomem *base;
135
136 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
137
138 return sprintf(buf, "0x%llx\n",
139 (unsigned long long)readq(base + RAS_NONFAT_ERROR));
140}
141static DEVICE_ATTR_RO(nonfatal_errors);
142
143static ssize_t catfatal_errors_show(struct device *dev,
144 struct device_attribute *attr, char *buf)
145{
146 void __iomem *base;
147
148 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
149
150 return sprintf(buf, "0x%llx\n",
151 (unsigned long long)readq(base + RAS_CATFAT_ERROR));
152}
153static DEVICE_ATTR_RO(catfatal_errors);
154
155static ssize_t inject_errors_show(struct device *dev,
156 struct device_attribute *attr, char *buf)
157{
158 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
159 void __iomem *base;
160 u64 v;
161
162 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
163
164 mutex_lock(&pdata->lock);
165 v = readq(base + RAS_ERROR_INJECT);
166 mutex_unlock(&pdata->lock);
167
168 return sprintf(buf, "0x%llx\n",
169 (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
170}
171
172static ssize_t inject_errors_store(struct device *dev,
173 struct device_attribute *attr,
174 const char *buf, size_t count)
175{
176 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
177 void __iomem *base;
178 u8 inject_error;
179 u64 v;
180
181 if (kstrtou8(buf, 0, &inject_error))
182 return -EINVAL;
183
184 if (inject_error & ~INJECT_ERROR_MASK)
185 return -EINVAL;
186
187 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
188
189 mutex_lock(&pdata->lock);
190 v = readq(base + RAS_ERROR_INJECT);
191 v &= ~INJECT_ERROR_MASK;
192 v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
193 writeq(v, base + RAS_ERROR_INJECT);
194 mutex_unlock(&pdata->lock);
195
196 return count;
197}
198static DEVICE_ATTR_RW(inject_errors);
199
200static ssize_t fme_errors_show(struct device *dev,
201 struct device_attribute *attr, char *buf)
202{
203 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
204 void __iomem *base;
205 u64 value;
206
207 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
208
209 mutex_lock(&pdata->lock);
210 value = readq(base + FME_ERROR);
211 mutex_unlock(&pdata->lock);
212
213 return sprintf(buf, "0x%llx\n", (unsigned long long)value);
214}
215
216static ssize_t fme_errors_store(struct device *dev,
217 struct device_attribute *attr,
218 const char *buf, size_t count)
219{
220 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
221 void __iomem *base;
222 u64 v, val;
223 int ret = 0;
224
225 if (kstrtou64(buf, 0, &val))
226 return -EINVAL;
227
228 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
229
230 mutex_lock(&pdata->lock);
231 writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
232
233 v = readq(base + FME_ERROR);
234 if (val == v)
235 writeq(v, base + FME_ERROR);
236 else
237 ret = -EINVAL;
238
239 /* Workaround: disable MBP_ERROR if feature revision is 0 */
240 writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
241 base + FME_ERROR_MASK);
242 mutex_unlock(&pdata->lock);
243 return ret ? ret : count;
244}
245static DEVICE_ATTR_RW(fme_errors);
246
247static ssize_t first_error_show(struct device *dev,
248 struct device_attribute *attr, char *buf)
249{
250 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
251 void __iomem *base;
252 u64 value;
253
254 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
255
256 mutex_lock(&pdata->lock);
257 value = readq(base + FME_FIRST_ERROR);
258 mutex_unlock(&pdata->lock);
259
260 return sprintf(buf, "0x%llx\n", (unsigned long long)value);
261}
262static DEVICE_ATTR_RO(first_error);
263
264static ssize_t next_error_show(struct device *dev,
265 struct device_attribute *attr, char *buf)
266{
267 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
268 void __iomem *base;
269 u64 value;
270
271 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
272
273 mutex_lock(&pdata->lock);
274 value = readq(base + FME_NEXT_ERROR);
275 mutex_unlock(&pdata->lock);
276
277 return sprintf(buf, "0x%llx\n", (unsigned long long)value);
278}
279static DEVICE_ATTR_RO(next_error);
280
281static struct attribute *fme_global_err_attrs[] = {
282 &dev_attr_pcie0_errors.attr,
283 &dev_attr_pcie1_errors.attr,
284 &dev_attr_nonfatal_errors.attr,
285 &dev_attr_catfatal_errors.attr,
286 &dev_attr_inject_errors.attr,
287 &dev_attr_fme_errors.attr,
288 &dev_attr_first_error.attr,
289 &dev_attr_next_error.attr,
290 NULL,
291};
292
293static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
294 struct attribute *attr, int n)
295{
296 struct device *dev = kobj_to_dev(kobj);
297
298 /*
 299	 * sysfs entries are visible only if the related private feature is
300 * enumerated.
301 */
302 if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
303 return 0;
304
305 return attr->mode;
306}
307
308const struct attribute_group fme_global_err_group = {
309 .name = "errors",
310 .attrs = fme_global_err_attrs,
311 .is_visible = fme_global_err_attrs_visible,
312};
313
314static void fme_err_mask(struct device *dev, bool mask)
315{
316 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
317 void __iomem *base;
318
319 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
320
321 mutex_lock(&pdata->lock);
322
323 /* Workaround: keep MBP_ERROR always masked if revision is 0 */
324 if (dfl_feature_revision(base))
325 writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
326 else
327 writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);
328
329 writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
330 writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
331 writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
332 writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
333
334 mutex_unlock(&pdata->lock);
335}
336
337static int fme_global_err_init(struct platform_device *pdev,
338 struct dfl_feature *feature)
339{
340 fme_err_mask(&pdev->dev, false);
341
342 return 0;
343}
344
345static void fme_global_err_uinit(struct platform_device *pdev,
346 struct dfl_feature *feature)
347{
348 fme_err_mask(&pdev->dev, true);
349}
350
351const struct dfl_feature_id fme_global_err_id_table[] = {
352 {.id = FME_FEATURE_ID_GLOBAL_ERR,},
353 {0,}
354};
355
356const struct dfl_feature_ops fme_global_err_ops = {
357 .init = fme_global_err_init,
358 .uinit = fme_global_err_uinit,
359};
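
A note on the store paths above (pcie0_errors, pcie1_errors and fme_errors): each one temporarily masks the whole register, re-reads the live error value, and writes it back to clear it only if it still equals the value userspace supplied, returning -EINVAL otherwise. Userspace therefore clears an error by echoing back exactly what it just read. A hypothetical userspace sketch; the sysfs path is illustrative, though the group is named "errors" per the code above:

#include <stdio.h>

int main(void)
{
	/* illustrative path */
	const char *path =
		"/sys/bus/platform/devices/dfl-fme.0/errors/pcie0_errors";
	char val[32];
	FILE *f = fopen(path, "r+");

	if (!f)
		return 1;
	if (!fgets(val, sizeof(val), f)) {
		fclose(f);
		return 1;
	}
	rewind(f);
	/* the kernel clears the bits only if this value still matches */
	if (fputs(val, f) == EOF) {
		fclose(f);
		return 1;
	}
	fclose(f);
	return 0;
}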
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 086ad2420ade..4d78e182878f 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/uaccess.h>
19#include <linux/fpga-dfl.h> 20#include <linux/fpga-dfl.h>
20 21
21#include "dfl.h" 22#include "dfl.h"
@@ -72,50 +73,126 @@ static ssize_t bitstream_metadata_show(struct device *dev,
72} 73}
73static DEVICE_ATTR_RO(bitstream_metadata); 74static DEVICE_ATTR_RO(bitstream_metadata);
74 75
75static const struct attribute *fme_hdr_attrs[] = { 76static ssize_t cache_size_show(struct device *dev,
77 struct device_attribute *attr, char *buf)
78{
79 void __iomem *base;
80 u64 v;
81
82 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
83
84 v = readq(base + FME_HDR_CAP);
85
86 return sprintf(buf, "%u\n",
87 (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
88}
89static DEVICE_ATTR_RO(cache_size);
90
91static ssize_t fabric_version_show(struct device *dev,
92 struct device_attribute *attr, char *buf)
93{
94 void __iomem *base;
95 u64 v;
96
97 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
98
99 v = readq(base + FME_HDR_CAP);
100
101 return sprintf(buf, "%u\n",
102 (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
103}
104static DEVICE_ATTR_RO(fabric_version);
105
106static ssize_t socket_id_show(struct device *dev,
107 struct device_attribute *attr, char *buf)
108{
109 void __iomem *base;
110 u64 v;
111
112 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
113
114 v = readq(base + FME_HDR_CAP);
115
116 return sprintf(buf, "%u\n",
117 (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
118}
119static DEVICE_ATTR_RO(socket_id);
120
121static struct attribute *fme_hdr_attrs[] = {
76 &dev_attr_ports_num.attr, 122 &dev_attr_ports_num.attr,
77 &dev_attr_bitstream_id.attr, 123 &dev_attr_bitstream_id.attr,
78 &dev_attr_bitstream_metadata.attr, 124 &dev_attr_bitstream_metadata.attr,
125 &dev_attr_cache_size.attr,
126 &dev_attr_fabric_version.attr,
127 &dev_attr_socket_id.attr,
79 NULL, 128 NULL,
80}; 129};
81 130
82static int fme_hdr_init(struct platform_device *pdev, 131static const struct attribute_group fme_hdr_group = {
83 struct dfl_feature *feature) 132 .attrs = fme_hdr_attrs,
133};
134
135static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
136 unsigned long arg)
84{ 137{
85 void __iomem *base = feature->ioaddr; 138 struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
86 int ret; 139 int port_id;
87 140
88 dev_dbg(&pdev->dev, "FME HDR Init.\n"); 141 if (get_user(port_id, (int __user *)arg))
89 dev_dbg(&pdev->dev, "FME cap %llx.\n", 142 return -EFAULT;
90 (unsigned long long)readq(base + FME_HDR_CAP));
91 143
92 ret = sysfs_create_files(&pdev->dev.kobj, fme_hdr_attrs); 144 return dfl_fpga_cdev_release_port(cdev, port_id);
93 if (ret) 145}
94 return ret;
95 146
96 return 0; 147static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
148 unsigned long arg)
149{
150 struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
151 int port_id;
152
153 if (get_user(port_id, (int __user *)arg))
154 return -EFAULT;
155
156 return dfl_fpga_cdev_assign_port(cdev, port_id);
97} 157}
98 158
99static void fme_hdr_uinit(struct platform_device *pdev, 159static long fme_hdr_ioctl(struct platform_device *pdev,
100 struct dfl_feature *feature) 160 struct dfl_feature *feature,
161 unsigned int cmd, unsigned long arg)
101{ 162{
102 dev_dbg(&pdev->dev, "FME HDR UInit.\n"); 163 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
103 sysfs_remove_files(&pdev->dev.kobj, fme_hdr_attrs); 164
165 switch (cmd) {
166 case DFL_FPGA_FME_PORT_RELEASE:
167 return fme_hdr_ioctl_release_port(pdata, arg);
168 case DFL_FPGA_FME_PORT_ASSIGN:
169 return fme_hdr_ioctl_assign_port(pdata, arg);
170 }
171
172 return -ENODEV;
104} 173}
105 174
175static const struct dfl_feature_id fme_hdr_id_table[] = {
176 {.id = FME_FEATURE_ID_HEADER,},
177 {0,}
178};
179
106static const struct dfl_feature_ops fme_hdr_ops = { 180static const struct dfl_feature_ops fme_hdr_ops = {
107 .init = fme_hdr_init, 181 .ioctl = fme_hdr_ioctl,
108 .uinit = fme_hdr_uinit,
109}; 182};
110 183
111static struct dfl_feature_driver fme_feature_drvs[] = { 184static struct dfl_feature_driver fme_feature_drvs[] = {
112 { 185 {
113 .id = FME_FEATURE_ID_HEADER, 186 .id_table = fme_hdr_id_table,
114 .ops = &fme_hdr_ops, 187 .ops = &fme_hdr_ops,
115 }, 188 },
116 { 189 {
117 .id = FME_FEATURE_ID_PR_MGMT, 190 .id_table = fme_pr_mgmt_id_table,
118 .ops = &pr_mgmt_ops, 191 .ops = &fme_pr_mgmt_ops,
192 },
193 {
194 .id_table = fme_global_err_id_table,
195 .ops = &fme_global_err_ops,
119 }, 196 },
120 { 197 {
121 .ops = NULL, 198 .ops = NULL,
@@ -263,9 +340,16 @@ static int fme_remove(struct platform_device *pdev)
263 return 0; 340 return 0;
264} 341}
265 342
343static const struct attribute_group *fme_dev_groups[] = {
344 &fme_hdr_group,
345 &fme_global_err_group,
346 NULL
347};
348
266static struct platform_driver fme_driver = { 349static struct platform_driver fme_driver = {
267 .driver = { 350 .driver = {
268 .name = DFL_FPGA_FEATURE_DEV_FME, 351 .name = DFL_FPGA_FEATURE_DEV_FME,
352 .dev_groups = fme_dev_groups,
269 }, 353 },
270 .probe = fme_probe, 354 .probe = fme_probe,
271 .remove = fme_remove, 355 .remove = fme_remove,
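
The new fme_hdr_ioctl() above gives userspace a way to detach and re-attach ports through the FME character device, using the DFL_FPGA_FME_PORT_RELEASE and DFL_FPGA_FME_PORT_ASSIGN ioctls this series adds to the uapi. A hypothetical sketch; the device path and error handling are illustrative:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	int port_id = 0;
	int fd = open("/dev/dfl-fme.0", O_RDWR);	/* illustrative path */

	if (fd < 0)
		return 1;

	/* release the port; a later DFL_FPGA_FME_PORT_ASSIGN reverses this */
	if (ioctl(fd, DFL_FPGA_FME_PORT_RELEASE, &port_id)) {
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}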
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index 3c71dc3faaf5..a233a53db708 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -470,7 +470,12 @@ static long fme_pr_ioctl(struct platform_device *pdev,
470 return ret; 470 return ret;
471} 471}
472 472
473const struct dfl_feature_ops pr_mgmt_ops = { 473const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
474 {.id = FME_FEATURE_ID_PR_MGMT,},
475 {0}
476};
477
478const struct dfl_feature_ops fme_pr_mgmt_ops = {
474 .init = pr_mgmt_init, 479 .init = pr_mgmt_init,
475 .uinit = pr_mgmt_uinit, 480 .uinit = pr_mgmt_uinit,
476 .ioctl = fme_pr_ioctl, 481 .ioctl = fme_pr_ioctl,
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 5394a216c5c0..6685c8ef965b 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -33,6 +33,10 @@ struct dfl_fme {
33 struct dfl_feature_platform_data *pdata; 33 struct dfl_feature_platform_data *pdata;
34}; 34};
35 35
36extern const struct dfl_feature_ops pr_mgmt_ops; 36extern const struct dfl_feature_ops fme_pr_mgmt_ops;
37extern const struct dfl_feature_id fme_pr_mgmt_id_table[];
38extern const struct dfl_feature_ops fme_global_err_ops;
39extern const struct dfl_feature_id fme_global_err_id_table[];
40extern const struct attribute_group fme_global_err_group;
37 41
38#endif /* __DFL_FME_H */ 42#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 66b5720582bb..89ca292236ad 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -223,8 +223,43 @@ disable_error_report_exit:
223 return ret; 223 return ret;
224} 224}
225 225
226static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
227{
228 struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
229 struct dfl_fpga_cdev *cdev = drvdata->cdev;
230 int ret = 0;
231
232 if (!num_vfs) {
233 /*
 234	 * disable SRIOV and then put the released ports back to the
 235	 * default PF access mode.
236 */
237 pci_disable_sriov(pcidev);
238
239 dfl_fpga_cdev_config_ports_pf(cdev);
240
241 } else {
242 /*
 243	 * before enabling SRIOV, first put the released ports into
 244	 * VF access mode.
245 */
246 ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
247 if (ret)
248 return ret;
249
250 ret = pci_enable_sriov(pcidev, num_vfs);
251 if (ret)
252 dfl_fpga_cdev_config_ports_pf(cdev);
253 }
254
255 return ret;
256}
257
226static void cci_pci_remove(struct pci_dev *pcidev) 258static void cci_pci_remove(struct pci_dev *pcidev)
227{ 259{
260 if (dev_is_pf(&pcidev->dev))
261 cci_pci_sriov_configure(pcidev, 0);
262
228 cci_remove_feature_devs(pcidev); 263 cci_remove_feature_devs(pcidev);
229 pci_disable_pcie_error_reporting(pcidev); 264 pci_disable_pcie_error_reporting(pcidev);
230} 265}
@@ -234,6 +269,7 @@ static struct pci_driver cci_pci_driver = {
234 .id_table = cci_pcie_id_tbl, 269 .id_table = cci_pcie_id_tbl,
235 .probe = cci_pci_probe, 270 .probe = cci_pci_probe,
236 .remove = cci_pci_remove, 271 .remove = cci_pci_remove,
272 .sriov_configure = cci_pci_sriov_configure,
237}; 273};
238 274
239module_pci_driver(cci_pci_driver); 275module_pci_driver(cci_pci_driver);
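
cci_pci_sriov_configure() is not called directly by userspace; the PCI core invokes it when a VF count is written to the device's standard sriov_numvfs sysfs attribute. A hypothetical sketch; the BDF is illustrative:

#include <stdio.h>

int main(void)
{
	/* illustrative BDF; writing 0 disables SR-IOV and restores PF mode */
	FILE *f = fopen("/sys/bus/pci/devices/0000:5e:00.0/sriov_numvfs", "w");

	if (!f)
		return 1;
	/* must equal the number of released ports, or -EINVAL is returned */
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}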
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 4b66aaa32b5a..96a2b8274a33 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -231,16 +231,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
231 */ 231 */
232int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id) 232int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
233{ 233{
234 struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev); 234 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
235 int port_id; 235 struct dfl_fpga_port_ops *port_ops;
236
237 if (pdata->id != FEATURE_DEV_ID_UNUSED)
238 return pdata->id == *(int *)pport_id;
236 239
240 port_ops = dfl_fpga_port_ops_get(pdev);
237 if (!port_ops || !port_ops->get_id) 241 if (!port_ops || !port_ops->get_id)
238 return 0; 242 return 0;
239 243
240 port_id = port_ops->get_id(pdev); 244 pdata->id = port_ops->get_id(pdev);
241 dfl_fpga_port_ops_put(port_ops); 245 dfl_fpga_port_ops_put(port_ops);
242 246
243 return port_id == *(int *)pport_id; 247 return pdata->id == *(int *)pport_id;
244} 248}
245EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id); 249EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
246 250
@@ -255,7 +259,8 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
255 259
256 dfl_fpga_dev_for_each_feature(pdata, feature) 260 dfl_fpga_dev_for_each_feature(pdata, feature)
257 if (feature->ops) { 261 if (feature->ops) {
258 feature->ops->uinit(pdev, feature); 262 if (feature->ops->uinit)
263 feature->ops->uinit(pdev, feature);
259 feature->ops = NULL; 264 feature->ops = NULL;
260 } 265 }
261} 266}
@@ -266,17 +271,34 @@ static int dfl_feature_instance_init(struct platform_device *pdev,
266 struct dfl_feature *feature, 271 struct dfl_feature *feature,
267 struct dfl_feature_driver *drv) 272 struct dfl_feature_driver *drv)
268{ 273{
269 int ret; 274 int ret = 0;
270 275
271 ret = drv->ops->init(pdev, feature); 276 if (drv->ops->init) {
272 if (ret) 277 ret = drv->ops->init(pdev, feature);
273 return ret; 278 if (ret)
279 return ret;
280 }
274 281
275 feature->ops = drv->ops; 282 feature->ops = drv->ops;
276 283
277 return ret; 284 return ret;
278} 285}
279 286
287static bool dfl_feature_drv_match(struct dfl_feature *feature,
288 struct dfl_feature_driver *driver)
289{
290 const struct dfl_feature_id *ids = driver->id_table;
291
292 if (ids) {
293 while (ids->id) {
294 if (ids->id == feature->id)
295 return true;
296 ids++;
297 }
298 }
299 return false;
300}
301
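
dfl_feature_drv_match() walks the driver's id_table, so one dfl_feature_driver can now bind several private feature ids, and dfl_feature_instance_init()/dfl_fpga_dev_feature_uinit() treat both init() and uinit() as optional. A minimal sketch with hypothetical feature ids and deliberately empty ops:

static const struct dfl_feature_id demo_id_table[] = {
	{.id = 0x10,},		/* hypothetical private feature ids */
	{.id = 0x11,},
	{0,}
};

/* all callbacks are optional now, so empty ops are legal */
static const struct dfl_feature_ops demo_ops = {};

static struct dfl_feature_driver demo_drv = {
	.id_table = demo_id_table,
	.ops = &demo_ops,
};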
280/** 302/**
281 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device 303 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
282 * @pdev: feature device. 304 * @pdev: feature device.
@@ -297,8 +319,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
297 319
298 while (drv->ops) { 320 while (drv->ops) {
299 dfl_fpga_dev_for_each_feature(pdata, feature) { 321 dfl_fpga_dev_for_each_feature(pdata, feature) {
300 /* match feature and drv using id */ 322 if (dfl_feature_drv_match(feature, drv)) {
301 if (feature->id == drv->id) {
302 ret = dfl_feature_instance_init(pdev, pdata, 323 ret = dfl_feature_instance_init(pdev, pdata,
303 feature, drv); 324 feature, drv);
304 if (ret) 325 if (ret)
@@ -474,6 +495,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
474 pdata->dev = fdev; 495 pdata->dev = fdev;
475 pdata->num = binfo->feature_num; 496 pdata->num = binfo->feature_num;
476 pdata->dfl_cdev = binfo->cdev; 497 pdata->dfl_cdev = binfo->cdev;
498 pdata->id = FEATURE_DEV_ID_UNUSED;
477 mutex_init(&pdata->lock); 499 mutex_init(&pdata->lock);
478 lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type], 500 lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
479 dfl_pdata_key_strings[type]); 501 dfl_pdata_key_strings[type]);
@@ -973,25 +995,27 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
973{ 995{
974 struct dfl_feature_platform_data *pdata, *ptmp; 996 struct dfl_feature_platform_data *pdata, *ptmp;
975 997
976 remove_feature_devs(cdev);
977
978 mutex_lock(&cdev->lock); 998 mutex_lock(&cdev->lock);
979 if (cdev->fme_dev) { 999 if (cdev->fme_dev)
980 /* the fme should be unregistered. */
981 WARN_ON(device_is_registered(cdev->fme_dev));
982 put_device(cdev->fme_dev); 1000 put_device(cdev->fme_dev);
983 }
984 1001
985 list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) { 1002 list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
986 struct platform_device *port_dev = pdata->dev; 1003 struct platform_device *port_dev = pdata->dev;
987 1004
988 /* the port should be unregistered. */ 1005 /* remove released ports */
989 WARN_ON(device_is_registered(&port_dev->dev)); 1006 if (!device_is_registered(&port_dev->dev)) {
1007 dfl_id_free(feature_dev_id_type(port_dev),
1008 port_dev->id);
1009 platform_device_put(port_dev);
1010 }
1011
990 list_del(&pdata->node); 1012 list_del(&pdata->node);
991 put_device(&port_dev->dev); 1013 put_device(&port_dev->dev);
992 } 1014 }
993 mutex_unlock(&cdev->lock); 1015 mutex_unlock(&cdev->lock);
994 1016
1017 remove_feature_devs(cdev);
1018
995 fpga_region_unregister(cdev->region); 1019 fpga_region_unregister(cdev->region);
996 devm_kfree(cdev->parent, cdev); 1020 devm_kfree(cdev->parent, cdev);
997} 1021}
@@ -1042,6 +1066,170 @@ static int __init dfl_fpga_init(void)
1042 return ret; 1066 return ret;
1043} 1067}
1044 1068
1069/**
1070 * dfl_fpga_cdev_release_port - release a port platform device
1071 *
1072 * @cdev: parent container device.
1073 * @port_id: id of the port platform device.
1074 *
 1075 * This function allows the user to release a port platform device. This is
 1076 * a mandatory step before turning a port from PF into VF for SRIOV support.
1077 *
1078 * Return: 0 on success, negative error code otherwise.
1079 */
1080int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
1081{
1082 struct platform_device *port_pdev;
1083 int ret = -ENODEV;
1084
1085 mutex_lock(&cdev->lock);
1086 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1087 dfl_fpga_check_port_id);
1088 if (!port_pdev)
1089 goto unlock_exit;
1090
1091 if (!device_is_registered(&port_pdev->dev)) {
1092 ret = -EBUSY;
1093 goto put_dev_exit;
1094 }
1095
1096 ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev));
1097 if (ret)
1098 goto put_dev_exit;
1099
1100 platform_device_del(port_pdev);
1101 cdev->released_port_num++;
1102put_dev_exit:
1103 put_device(&port_pdev->dev);
1104unlock_exit:
1105 mutex_unlock(&cdev->lock);
1106 return ret;
1107}
1108EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
1109
1110/**
1111 * dfl_fpga_cdev_assign_port - assign a port platform device back
1112 *
1113 * @cdev: parent container device.
1114 * @port_id: id of the port platform device.
1115 *
 1116 * This function allows the user to assign a port platform device back. This
 1117 * is a mandatory step after disabling SRIOV support.
1118 *
1119 * Return: 0 on success, negative error code otherwise.
1120 */
1121int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
1122{
1123 struct platform_device *port_pdev;
1124 int ret = -ENODEV;
1125
1126 mutex_lock(&cdev->lock);
1127 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1128 dfl_fpga_check_port_id);
1129 if (!port_pdev)
1130 goto unlock_exit;
1131
1132 if (device_is_registered(&port_pdev->dev)) {
1133 ret = -EBUSY;
1134 goto put_dev_exit;
1135 }
1136
1137 ret = platform_device_add(port_pdev);
1138 if (ret)
1139 goto put_dev_exit;
1140
1141 dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev));
1142 cdev->released_port_num--;
1143put_dev_exit:
1144 put_device(&port_pdev->dev);
1145unlock_exit:
1146 mutex_unlock(&cdev->lock);
1147 return ret;
1148}
1149EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
1150
1151static void config_port_access_mode(struct device *fme_dev, int port_id,
1152 bool is_vf)
1153{
1154 void __iomem *base;
1155 u64 v;
1156
1157 base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
1158
1159 v = readq(base + FME_HDR_PORT_OFST(port_id));
1160
1161 v &= ~FME_PORT_OFST_ACC_CTRL;
1162 v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
1163 is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
1164
1165 writeq(v, base + FME_HDR_PORT_OFST(port_id));
1166}
1167
1168#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
1169#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
1170
1171/**
1172 * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
1173 *
1174 * @cdev: parent container device.
1175 *
 1176 * This function is needed in the sriov configuration routine. It can be used
 1177 * to configure all released ports from VF access mode back to PF.
1178 */
1179void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
1180{
1181 struct dfl_feature_platform_data *pdata;
1182
1183 mutex_lock(&cdev->lock);
1184 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1185 if (device_is_registered(&pdata->dev->dev))
1186 continue;
1187
1188 config_port_pf_mode(cdev->fme_dev, pdata->id);
1189 }
1190 mutex_unlock(&cdev->lock);
1191}
1192EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
1193
1194/**
1195 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
1196 *
1197 * @cdev: parent container device.
1198 * @num_vfs: VF device number.
1199 *
 1200 * This function is needed in the sriov configuration routine. It can be used
 1201 * to configure the released ports from PF access mode to VF.
1202 *
1203 * Return: 0 on success, negative error code otherwise.
1204 */
1205int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
1206{
1207 struct dfl_feature_platform_data *pdata;
1208 int ret = 0;
1209
1210 mutex_lock(&cdev->lock);
1211 /*
 1212	 * Each VF device is backed by exactly one port; multiple ports can't
 1213	 * be combined into one VF device. If the released port number doesn't
 1214	 * match the requested VF device number, reject the request with
 1215	 * -EINVAL.
1215 */
1216 if (cdev->released_port_num != num_vfs) {
1217 ret = -EINVAL;
1218 goto done;
1219 }
1220
1221 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1222 if (device_is_registered(&pdata->dev->dev))
1223 continue;
1224
1225 config_port_vf_mode(cdev->fme_dev, pdata->id);
1226 }
1227done:
1228 mutex_unlock(&cdev->lock);
1229 return ret;
1230}
1231EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
1232
1045static void __exit dfl_fpga_exit(void) 1233static void __exit dfl_fpga_exit(void)
1046{ 1234{
1047 dfl_chardev_uinit(); 1235 dfl_chardev_uinit();
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index a8b869e9e5b7..9f0e656de720 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -30,8 +30,8 @@
30/* plus one for fme device */ 30/* plus one for fme device */
31#define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1) 31#define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1)
32 32
33/* Reserved 0x0 for Header Group Register and 0xff for AFU */ 33/* Reserved 0xfe for Header Group Register and 0xff for AFU */
34#define FEATURE_ID_FIU_HEADER 0x0 34#define FEATURE_ID_FIU_HEADER 0xfe
35#define FEATURE_ID_AFU 0xff 35#define FEATURE_ID_AFU 0xff
36 36
37#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER 37#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
@@ -119,6 +119,11 @@
119#define PORT_HDR_NEXT_AFU NEXT_AFU 119#define PORT_HDR_NEXT_AFU NEXT_AFU
120#define PORT_HDR_CAP 0x30 120#define PORT_HDR_CAP 0x30
121#define PORT_HDR_CTRL 0x38 121#define PORT_HDR_CTRL 0x38
122#define PORT_HDR_STS 0x40
123#define PORT_HDR_USRCLK_CMD0 0x50
124#define PORT_HDR_USRCLK_CMD1 0x58
125#define PORT_HDR_USRCLK_STS0 0x60
126#define PORT_HDR_USRCLK_STS1 0x68
122 127
123/* Port Capability Register Bitfield */ 128/* Port Capability Register Bitfield */
124#define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */ 129#define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */
@@ -130,6 +135,16 @@
130/* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/ 135/* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/
131#define PORT_CTRL_LATENCY BIT_ULL(2) 136#define PORT_CTRL_LATENCY BIT_ULL(2)
132#define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */ 137#define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */
138
139/* Port Status Register Bitfield */
140#define PORT_STS_AP2_EVT BIT_ULL(13) /* AP2 event detected */
141#define PORT_STS_AP1_EVT BIT_ULL(12) /* AP1 event detected */
142#define PORT_STS_PWR_STATE GENMASK_ULL(11, 8) /* AFU power states */
143#define PORT_STS_PWR_STATE_NORM 0
144#define PORT_STS_PWR_STATE_AP1 1 /* 50% throttling */
145#define PORT_STS_PWR_STATE_AP2 2 /* 90% throttling */
146#define PORT_STS_PWR_STATE_AP6 6 /* 100% throttling */
147
133/** 148/**
134 * struct dfl_fpga_port_ops - port ops 149 * struct dfl_fpga_port_ops - port ops
135 * 150 *
@@ -154,13 +169,22 @@ void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
154int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id); 169int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
155 170
156/** 171/**
157 * struct dfl_feature_driver - sub feature's driver 172 * struct dfl_feature_id - dfl private feature id
158 * 173 *
159 * @id: sub feature id. 174 * @id: unique dfl private feature id.
160 * @ops: ops of this sub feature.
161 */ 175 */
162struct dfl_feature_driver { 176struct dfl_feature_id {
163 u64 id; 177 u64 id;
178};
179
180/**
181 * struct dfl_feature_driver - dfl private feature driver
182 *
183 * @id_table: id_table for dfl private features supported by this driver.
184 * @ops: ops of this dfl private feature driver.
185 */
186struct dfl_feature_driver {
187 const struct dfl_feature_id *id_table;
164 const struct dfl_feature_ops *ops; 188 const struct dfl_feature_ops *ops;
165}; 189};
166 190
@@ -183,6 +207,8 @@ struct dfl_feature {
183 207
184#define DEV_STATUS_IN_USE 0 208#define DEV_STATUS_IN_USE 0
185 209
210#define FEATURE_DEV_ID_UNUSED (-1)
211
186/** 212/**
187 * struct dfl_feature_platform_data - platform data for feature devices 213 * struct dfl_feature_platform_data - platform data for feature devices
188 * 214 *
@@ -191,6 +217,7 @@ struct dfl_feature {
191 * @cdev: cdev of feature dev. 217 * @cdev: cdev of feature dev.
192 * @dev: ptr to platform device linked with this platform data. 218 * @dev: ptr to platform device linked with this platform data.
193 * @dfl_cdev: ptr to container device. 219 * @dfl_cdev: ptr to container device.
220 * @id: id used for this feature device.
194 * @disable_count: count for port disable. 221 * @disable_count: count for port disable.
195 * @num: number for sub features. 222 * @num: number for sub features.
196 * @dev_status: dev status (e.g. DEV_STATUS_IN_USE). 223 * @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
@@ -203,6 +230,7 @@ struct dfl_feature_platform_data {
203 struct cdev cdev; 230 struct cdev cdev;
204 struct platform_device *dev; 231 struct platform_device *dev;
205 struct dfl_fpga_cdev *dfl_cdev; 232 struct dfl_fpga_cdev *dfl_cdev;
233 int id;
206 unsigned int disable_count; 234 unsigned int disable_count;
207 unsigned long dev_status; 235 unsigned long dev_status;
208 void *private; 236 void *private;
@@ -331,6 +359,11 @@ static inline bool dfl_feature_is_port(void __iomem *base)
331 (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT); 359 (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT);
332} 360}
333 361
362static inline u8 dfl_feature_revision(void __iomem *base)
363{
364 return (u8)FIELD_GET(DFH_REVISION, readq(base + DFH));
365}
366
334/** 367/**
335 * struct dfl_fpga_enum_info - DFL FPGA enumeration information 368 * struct dfl_fpga_enum_info - DFL FPGA enumeration information
336 * 369 *
@@ -373,6 +406,7 @@ void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
373 * @fme_dev: FME feature device under this container device. 406 * @fme_dev: FME feature device under this container device.
374 * @lock: mutex lock to protect the port device list. 407 * @lock: mutex lock to protect the port device list.
375 * @port_dev_list: list of all port feature devices under this container device. 408 * @port_dev_list: list of all port feature devices under this container device.
409 * @released_port_num: released port number under this container device.
376 */ 410 */
377struct dfl_fpga_cdev { 411struct dfl_fpga_cdev {
378 struct device *parent; 412 struct device *parent;
@@ -380,6 +414,7 @@ struct dfl_fpga_cdev {
380 struct device *fme_dev; 414 struct device *fme_dev;
381 struct mutex lock; 415 struct mutex lock;
382 struct list_head port_dev_list; 416 struct list_head port_dev_list;
417 int released_port_num;
383}; 418};
384 419
385struct dfl_fpga_cdev * 420struct dfl_fpga_cdev *
@@ -407,4 +442,9 @@ dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
407 442
408 return pdev; 443 return pdev;
409} 444}
445
446int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
447int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id);
448void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev);
449int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf);
410#endif /* __FPGA_DFL_H */ 450#endif /* __FPGA_DFL_H */
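
The new PORT_HDR_STS and PORT_STS_* definitions back the port's power_state sysfs attribute seen earlier in dfl-afu-main.c. A hedged sketch of decoding the field with the bitfield helpers; the function name is hypothetical:

static u64 demo_port_power_state(struct device *dev)
{
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
	v = readq(base + PORT_HDR_STS);

	/* 0 = normal, 1 = AP1 (50%), 2 = AP2 (90%), 6 = AP6 (100%) */
	return FIELD_GET(PORT_STS_PWR_STATE, v);
}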
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 2463aa7ab4f6..96544b348c27 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -646,24 +646,23 @@ static int debug_remove(struct amba_device *adev)
646 return 0; 646 return 0;
647} 647}
648 648
649static const struct amba_cs_uci_id uci_id_debug[] = {
650 {
651 /* CPU Debug UCI data */
652 .devarch = 0x47706a15,
653 .devarch_mask = 0xfff0ffff,
654 .devtype = 0x00000015,
655 }
656};
657
649static const struct amba_id debug_ids[] = { 658static const struct amba_id debug_ids[] = {
650 { /* Debug for Cortex-A53 */ 659 CS_AMBA_ID(0x000bbd03), /* Cortex-A53 */
651 .id = 0x000bbd03, 660 CS_AMBA_ID(0x000bbd07), /* Cortex-A57 */
652 .mask = 0x000fffff, 661 CS_AMBA_ID(0x000bbd08), /* Cortex-A72 */
653 }, 662 CS_AMBA_ID(0x000bbd09), /* Cortex-A73 */
654 { /* Debug for Cortex-A57 */ 663 CS_AMBA_UCI_ID(0x000f0205, uci_id_debug), /* Qualcomm Kryo */
655 .id = 0x000bbd07, 664 CS_AMBA_UCI_ID(0x000f0211, uci_id_debug), /* Qualcomm Kryo */
656 .mask = 0x000fffff, 665 {},
657 },
658 { /* Debug for Cortex-A72 */
659 .id = 0x000bbd08,
660 .mask = 0x000fffff,
661 },
662 { /* Debug for Cortex-A73 */
663 .id = 0x000bbd09,
664 .mask = 0x000fffff,
665 },
666 { 0, 0 },
667}; 666};
668 667
669static struct amba_driver debug_driver = { 668static struct amba_driver debug_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index a0365e23678e..219c10eb752c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -296,11 +296,8 @@ static ssize_t mode_store(struct device *dev,
296 296
297 spin_lock(&drvdata->spinlock); 297 spin_lock(&drvdata->spinlock);
298 config->mode = val & ETMv4_MODE_ALL; 298 config->mode = val & ETMv4_MODE_ALL;
299 299 etm4_set_mode_exclude(drvdata,
300 if (config->mode & ETM_MODE_EXCLUDE) 300 config->mode & ETM_MODE_EXCLUDE ? true : false);
301 etm4_set_mode_exclude(drvdata, true);
302 else
303 etm4_set_mode_exclude(drvdata, false);
304 301
305 if (drvdata->instrp0 == true) { 302 if (drvdata->instrp0 == true) {
306 /* start by clearing instruction P0 field */ 303 /* start by clearing instruction P0 field */
@@ -999,10 +996,8 @@ static ssize_t addr_range_store(struct device *dev,
999 * Program include or exclude control bits for vinst or vdata 996 * Program include or exclude control bits for vinst or vdata
1000 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE 997 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1001 */ 998 */
1002 if (config->mode & ETM_MODE_EXCLUDE) 999 etm4_set_mode_exclude(drvdata,
1003 etm4_set_mode_exclude(drvdata, true); 1000 config->mode & ETM_MODE_EXCLUDE ? true : false);
1004 else
1005 etm4_set_mode_exclude(drvdata, false);
1006 1001
1007 spin_unlock(&drvdata->spinlock); 1002 spin_unlock(&drvdata->spinlock);
1008 return size; 1003 return size;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 7bcac8896fc1..a128b5063f46 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -34,7 +34,8 @@
34#include "coresight-etm-perf.h" 34#include "coresight-etm-perf.h"
35 35
36static int boot_enable; 36static int boot_enable;
37module_param_named(boot_enable, boot_enable, int, S_IRUGO); 37module_param(boot_enable, int, 0444);
38MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
38 39
39/* The number of ETMv4 currently registered */ 40/* The number of ETMv4 currently registered */
40static int etm4_count; 41static int etm4_count;
@@ -47,7 +48,7 @@ static enum cpuhp_state hp_online;
47 48
48static void etm4_os_unlock(struct etmv4_drvdata *drvdata) 49static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
49{ 50{
50 /* Writing any value to ETMOSLAR unlocks the trace registers */ 51 /* Writing 0 to TRCOSLAR unlocks the trace registers */
51 writel_relaxed(0x0, drvdata->base + TRCOSLAR); 52 writel_relaxed(0x0, drvdata->base + TRCOSLAR);
52 drvdata->os_unlock = true; 53 drvdata->os_unlock = true;
53 isb(); 54 isb();
@@ -188,6 +189,13 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
188 dev_err(etm_dev, 189 dev_err(etm_dev,
189 "timeout while waiting for Idle Trace Status\n"); 190 "timeout while waiting for Idle Trace Status\n");
190 191
192 /*
193 * As recommended by section 4.3.7 ("Synchronization when using the
194 * memory-mapped interface") of ARM IHI 0064D
195 */
196 dsb(sy);
197 isb();
198
191done: 199done:
192 CS_LOCK(drvdata->base); 200 CS_LOCK(drvdata->base);
193 201
@@ -453,8 +461,12 @@ static void etm4_disable_hw(void *info)
453 /* EN, bit[0] Trace unit enable bit */ 461 /* EN, bit[0] Trace unit enable bit */
454 control &= ~0x1; 462 control &= ~0x1;
455 463
456 /* make sure everything completes before disabling */ 464 /*
457 mb(); 465 * Make sure everything completes before disabling, as recommended
466 * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
467 * SSTATUS") of ARM IHI 0064D
468 */
469 dsb(sy);
458 isb(); 470 isb();
459 writel_relaxed(control, drvdata->base + TRCPRGCTLR); 471 writel_relaxed(control, drvdata->base + TRCPRGCTLR);
460 472
@@ -1047,10 +1059,8 @@ static int etm4_starting_cpu(unsigned int cpu)
1047 return 0; 1059 return 0;
1048 1060
1049 spin_lock(&etmdrvdata[cpu]->spinlock); 1061 spin_lock(&etmdrvdata[cpu]->spinlock);
1050 if (!etmdrvdata[cpu]->os_unlock) { 1062 if (!etmdrvdata[cpu]->os_unlock)
1051 etm4_os_unlock(etmdrvdata[cpu]); 1063 etm4_os_unlock(etmdrvdata[cpu]);
1052 etmdrvdata[cpu]->os_unlock = true;
1053 }
1054 1064
1055 if (local_read(&etmdrvdata[cpu]->mode)) 1065 if (local_read(&etmdrvdata[cpu]->mode))
1056 etm4_enable_hw(etmdrvdata[cpu]); 1066 etm4_enable_hw(etmdrvdata[cpu]);
@@ -1192,11 +1202,15 @@ static struct amba_cs_uci_id uci_id_etm4[] = {
1192}; 1202};
1193 1203
1194static const struct amba_id etm4_ids[] = { 1204static const struct amba_id etm4_ids[] = {
1195 CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */ 1205 CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
1196 CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */ 1206 CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
1197 CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */ 1207 CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
1198 CS_AMBA_ID(0x000bb959), /* Cortex-A73 */ 1208 CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
1199 CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4), /* Cortex-A35 */ 1209 CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
1210 CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
1211 CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
1212 CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
1213 CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
1200 {}, 1214 {},
1201}; 1215};
1202 1216
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index fa97cb9ab4f9..05f7896c3a01 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -5,6 +5,7 @@
5 * Description: CoreSight Funnel driver 5 * Description: CoreSight Funnel driver
6 */ 6 */
7 7
8#include <linux/acpi.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/init.h> 10#include <linux/init.h>
10#include <linux/types.h> 11#include <linux/types.h>
@@ -192,7 +193,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
192 193
193 if (is_of_node(dev_fwnode(dev)) && 194 if (is_of_node(dev_fwnode(dev)) &&
194 of_device_is_compatible(dev->of_node, "arm,coresight-funnel")) 195 of_device_is_compatible(dev->of_node, "arm,coresight-funnel"))
195 pr_warn_once("Uses OBSOLETE CoreSight funnel binding\n"); 196 dev_warn_once(dev, "Uses OBSOLETE CoreSight funnel binding\n");
196 197
197 desc.name = coresight_alloc_device_name(&funnel_devs, dev); 198 desc.name = coresight_alloc_device_name(&funnel_devs, dev);
198 if (!desc.name) 199 if (!desc.name)
@@ -302,11 +303,19 @@ static const struct of_device_id static_funnel_match[] = {
302 {} 303 {}
303}; 304};
304 305
306#ifdef CONFIG_ACPI
307static const struct acpi_device_id static_funnel_ids[] = {
308 {"ARMHC9FE", 0},
309 {},
310};
311#endif
312
305static struct platform_driver static_funnel_driver = { 313static struct platform_driver static_funnel_driver = {
306 .probe = static_funnel_probe, 314 .probe = static_funnel_probe,
307 .driver = { 315 .driver = {
308 .name = "coresight-static-funnel", 316 .name = "coresight-static-funnel",
309 .of_match_table = static_funnel_match, 317 .of_match_table = static_funnel_match,
318 .acpi_match_table = ACPI_PTR(static_funnel_ids),
310 .pm = &funnel_dev_pm_ops, 319 .pm = &funnel_dev_pm_ops,
311 .suppress_bind_attrs = true, 320 .suppress_bind_attrs = true,
312 }, 321 },
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 61d7f9ff054d..82e563cdc879 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -185,11 +185,11 @@ static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
185 } 185 }
186 186
187/* coresight AMBA ID, full UCI structure: id table entry. */ 187/* coresight AMBA ID, full UCI structure: id table entry. */
188#define CS_AMBA_UCI_ID(pid, uci_ptr) \ 188#define CS_AMBA_UCI_ID(pid, uci_ptr) \
189 { \ 189 { \
190 .id = pid, \ 190 .id = pid, \
191 .mask = 0x000fffff, \ 191 .mask = 0x000fffff, \
192 .data = uci_ptr \ 192 .data = (void *)uci_ptr \
193 } 193 }
194 194
195/* extract the data value from a UCI structure given amba_id pointer. */ 195/* extract the data value from a UCI structure given amba_id pointer. */
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b7d6d59d56db..b29ba640eb25 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -184,7 +184,8 @@ static int replicator_probe(struct device *dev, struct resource *res)
184 184
185 if (is_of_node(dev_fwnode(dev)) && 185 if (is_of_node(dev_fwnode(dev)) &&
186 of_device_is_compatible(dev->of_node, "arm,coresight-replicator")) 186 of_device_is_compatible(dev->of_node, "arm,coresight-replicator"))
187 pr_warn_once("Uses OBSOLETE CoreSight replicator binding\n"); 187 dev_warn_once(dev,
188 "Uses OBSOLETE CoreSight replicator binding\n");
188 189
189 desc.name = coresight_alloc_device_name(&replicator_devs, dev); 190 desc.name = coresight_alloc_device_name(&replicator_devs, dev);
190 if (!desc.name) 191 if (!desc.name)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 23b7ff00af5c..807416b75ecc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -479,30 +479,11 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
479 * traces. 479 * traces.
480 */ 480 */
481 if (!buf->snapshot && to_read > handle->size) { 481 if (!buf->snapshot && to_read > handle->size) {
482 u32 mask = 0; 482 u32 mask = tmc_get_memwidth_mask(drvdata);
483
484 /*
485 * The value written to RRP must be byte-address aligned to
486 * the width of the trace memory databus _and_ to a frame
487 * boundary (16 byte), whichever is the biggest. For example,
488 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
489 * LSBs must be 0s. For 256-bit wide trace memory, the five
490 * LSBs must be 0s.
491 */
492 switch (drvdata->memwidth) {
493 case TMC_MEM_INTF_WIDTH_32BITS:
494 case TMC_MEM_INTF_WIDTH_64BITS:
495 case TMC_MEM_INTF_WIDTH_128BITS:
496 mask = GENMASK(31, 4);
497 break;
498 case TMC_MEM_INTF_WIDTH_256BITS:
499 mask = GENMASK(31, 5);
500 break;
501 }
502 483
503 /* 484 /*
504 * Make sure the new size is aligned in accordance with the 485 * Make sure the new size is aligned in accordance with the
505 * requirement explained above. 486 * requirement explained in function tmc_get_memwidth_mask().
506 */ 487 */
507 to_read = handle->size & mask; 488 to_read = handle->size & mask;
508 /* Move the RAM read pointer up */ 489 /* Move the RAM read pointer up */
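
The open-coded switch removed above was factored out into tmc_get_memwidth_mask(); judging from the removed logic, the helper presumably reduces to a sketch like this:

static u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
	u32 mask = 0;

	/*
	 * The RRP/RWP pointers must be aligned to the trace memory bus
	 * width and to a 16-byte frame boundary, whichever is larger.
	 */
	switch (drvdata->memwidth) {
	case TMC_MEM_INTF_WIDTH_32BITS:
	case TMC_MEM_INTF_WIDTH_64BITS:
	case TMC_MEM_INTF_WIDTH_128BITS:
		mask = GENMASK(31, 4);
		break;
	case TMC_MEM_INTF_WIDTH_256BITS:
		mask = GENMASK(31, 5);
		break;
	}

	return mask;
}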
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 17006705287a..625882bc8b08 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -871,6 +871,7 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
871 return ERR_PTR(rc); 871 return ERR_PTR(rc);
872 } 872 }
873 873
874 refcount_set(&etr_buf->refcount, 1);
874 dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n", 875 dev_dbg(dev, "allocated buffer of size %ldKB in mode %d\n",
875 (unsigned long)size >> 10, etr_buf->mode); 876 (unsigned long)size >> 10, etr_buf->mode);
876 return etr_buf; 877 return etr_buf;
@@ -927,15 +928,24 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
927 rrp = tmc_read_rrp(drvdata); 928 rrp = tmc_read_rrp(drvdata);
928 rwp = tmc_read_rwp(drvdata); 929 rwp = tmc_read_rwp(drvdata);
929 status = readl_relaxed(drvdata->base + TMC_STS); 930 status = readl_relaxed(drvdata->base + TMC_STS);
931
932 /*
933 * If there were memory errors in the session, truncate the
934 * buffer.
935 */
936 if (WARN_ON_ONCE(status & TMC_STS_MEMERR)) {
937 dev_dbg(&drvdata->csdev->dev,
938 "tmc memory error detected, truncating buffer\n");
939 etr_buf->len = 0;
940 etr_buf->full = 0;
941 return;
942 }
943
930 etr_buf->full = status & TMC_STS_FULL; 944 etr_buf->full = status & TMC_STS_FULL;
931 945
932 WARN_ON(!etr_buf->ops || !etr_buf->ops->sync); 946 WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
933 947
934 etr_buf->ops->sync(etr_buf, rrp, rwp); 948 etr_buf->ops->sync(etr_buf, rrp, rwp);
935
936 /* Insert barrier packets at the beginning, if there was an overflow */
937 if (etr_buf->full)
938 tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
939} 949}
940 950
941static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata) 951static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
@@ -1072,6 +1082,13 @@ static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
1072 drvdata->sysfs_buf = NULL; 1082 drvdata->sysfs_buf = NULL;
1073 } else { 1083 } else {
1074 tmc_sync_etr_buf(drvdata); 1084 tmc_sync_etr_buf(drvdata);
1085 /*
1086 * Insert barrier packets at the beginning, if there was
1087 * an overflow.
1088 */
1089 if (etr_buf->full)
1090 tmc_etr_buf_insert_barrier_packet(etr_buf,
1091 etr_buf->offset);
1075 } 1092 }
1076} 1093}
1077 1094
@@ -1263,8 +1280,6 @@ retry:
1263 if (IS_ERR(etr_buf)) 1280 if (IS_ERR(etr_buf))
1264 return etr_buf; 1281 return etr_buf;
1265 1282
1266 refcount_set(&etr_buf->refcount, 1);
1267
1268 /* Now that we have a buffer, add it to the IDR. */ 1283 /* Now that we have a buffer, add it to the IDR. */
1269 mutex_lock(&drvdata->idr_mutex); 1284 mutex_lock(&drvdata->idr_mutex);
1270 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL); 1285 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
@@ -1291,19 +1306,11 @@ get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
1291 struct perf_event *event, int nr_pages, 1306 struct perf_event *event, int nr_pages,
1292 void **pages, bool snapshot) 1307 void **pages, bool snapshot)
1293{ 1308{
1294 struct etr_buf *etr_buf;
1295
1296 /* 1309 /*
1297 * In per-thread mode the etr_buf isn't shared, so just go ahead 1310 * In per-thread mode the etr_buf isn't shared, so just go ahead
1298 * with memory allocation. 1311 * with memory allocation.
1299 */ 1312 */
1300 etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); 1313 return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1301 if (IS_ERR(etr_buf))
1302 goto out;
1303
1304 refcount_set(&etr_buf->refcount, 1);
1305out:
1306 return etr_buf;
1307} 1314}
1308 1315
1309static struct etr_buf * 1316static struct etr_buf *
@@ -1410,10 +1417,12 @@ free_etr_perf_buffer:
1410 * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware 1417 * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
1411 * buffer to the perf ring buffer. 1418 * buffer to the perf ring buffer.
1412 */ 1419 */
1413static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf) 1420static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
1421 unsigned long src_offset,
1422 unsigned long to_copy)
1414{ 1423{
1415 long bytes, to_copy; 1424 long bytes;
1416 long pg_idx, pg_offset, src_offset; 1425 long pg_idx, pg_offset;
1417 unsigned long head = etr_perf->head; 1426 unsigned long head = etr_perf->head;
1418 char **dst_pages, *src_buf; 1427 char **dst_pages, *src_buf;
1419 struct etr_buf *etr_buf = etr_perf->etr_buf; 1428 struct etr_buf *etr_buf = etr_perf->etr_buf;
@@ -1422,8 +1431,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
1422 pg_idx = head >> PAGE_SHIFT; 1431 pg_idx = head >> PAGE_SHIFT;
1423 pg_offset = head & (PAGE_SIZE - 1); 1432 pg_offset = head & (PAGE_SIZE - 1);
1424 dst_pages = (char **)etr_perf->pages; 1433 dst_pages = (char **)etr_perf->pages;
1425 src_offset = etr_buf->offset;
1426 to_copy = etr_buf->len;
1427 1434
1428 while (to_copy > 0) { 1435 while (to_copy > 0) {
1429 /* 1436 /*
@@ -1434,6 +1441,8 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
1434 * 3) what is available in the destination page. 1441 * 3) what is available in the destination page.
1435 * in one iteration. 1442 * in one iteration.
1436 */ 1443 */
1444 if (src_offset >= etr_buf->size)
1445 src_offset -= etr_buf->size;
1437 bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy, 1446 bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
1438 &src_buf); 1447 &src_buf);
1439 if (WARN_ON_ONCE(bytes <= 0)) 1448 if (WARN_ON_ONCE(bytes <= 0))
@@ -1454,8 +1463,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
1454 1463
1455 /* Move source pointers */ 1464 /* Move source pointers */
1456 src_offset += bytes; 1465 src_offset += bytes;
1457 if (src_offset >= etr_buf->size)
1458 src_offset -= etr_buf->size;
1459 } 1466 }
1460} 1467}
1461 1468
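The hunk above hoists the wrap check to the top of the copy loop, so callers may now pass a source offset that already lies past the end of the circular ETR buffer. A minimal sketch of the same pattern in isolation (plain C; the names are illustrative, not the driver's helpers):

#include <stddef.h>
#include <string.h>

/*
 * Copy to_copy bytes out of a circular buffer, folding the source
 * offset back into range at the top of each pass, as the patch does.
 */
static void ring_copy(char *dst, const char *ring, size_t ring_size,
		      size_t src_offset, size_t to_copy)
{
	while (to_copy > 0) {
		size_t chunk;

		if (src_offset >= ring_size)
			src_offset -= ring_size;

		/* never read past the physical end of the ring */
		chunk = ring_size - src_offset;
		if (chunk > to_copy)
			chunk = to_copy;

		memcpy(dst, ring + src_offset, chunk);
		dst += chunk;
		src_offset += chunk;
		to_copy -= chunk;
	}
}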
@@ -1471,7 +1478,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
1471 void *config) 1478 void *config)
1472{ 1479{
1473 bool lost = false; 1480 bool lost = false;
1474 unsigned long flags, size = 0; 1481 unsigned long flags, offset, size = 0;
1475 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 1482 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1476 struct etr_perf_buffer *etr_perf = config; 1483 struct etr_perf_buffer *etr_perf = config;
1477 struct etr_buf *etr_buf = etr_perf->etr_buf; 1484 struct etr_buf *etr_buf = etr_perf->etr_buf;
@@ -1484,7 +1491,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
1484 goto out; 1491 goto out;
1485 } 1492 }
1486 1493
1487 if (WARN_ON(drvdata->perf_data != etr_perf)) { 1494 if (WARN_ON(drvdata->perf_buf != etr_buf)) {
1488 lost = true; 1495 lost = true;
1489 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1496 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1490 goto out; 1497 goto out;
@@ -1496,12 +1503,38 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
1496 tmc_sync_etr_buf(drvdata); 1503 tmc_sync_etr_buf(drvdata);
1497 1504
1498 CS_LOCK(drvdata->base); 1505 CS_LOCK(drvdata->base);
1499 /* Reset perf specific data */
1500 drvdata->perf_data = NULL;
1501 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1506 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1502 1507
1508 lost = etr_buf->full;
1509 offset = etr_buf->offset;
1503 size = etr_buf->len; 1510 size = etr_buf->len;
1504 tmc_etr_sync_perf_buffer(etr_perf); 1511
1512 /*
1513 * The ETR buffer may be bigger than the space available in the
1514 * perf ring buffer (handle->size). If so advance the offset so that we
1515 * get the latest trace data. In snapshot mode none of that matters
1516 * since we are expected to clobber stale data in favour of the latest
1517 * traces.
1518 */
1519 if (!etr_perf->snapshot && size > handle->size) {
1520 u32 mask = tmc_get_memwidth_mask(drvdata);
1521
1522 /*
1523 * Make sure the new size is aligned in accordance with the
1524 * requirement explained in function tmc_get_memwidth_mask().
1525 */
1526 size = handle->size & mask;
1527 offset = etr_buf->offset + etr_buf->len - size;
1528
1529 if (offset >= etr_buf->size)
1530 offset -= etr_buf->size;
1531 lost = true;
1532 }
1533
1534 /* Insert barrier packets at the beginning, if there was an overflow */
1535 if (lost)
1536 tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
1537 tmc_etr_sync_perf_buffer(etr_perf, offset, size);
1505 1538
1506 /* 1539 /*
1507	 * In snapshot mode we simply increment the head by the number of bytes	1540	 * In snapshot mode we simply increment the head by the number of bytes
@@ -1511,8 +1544,6 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
1511 */ 1544 */
1512 if (etr_perf->snapshot) 1545 if (etr_perf->snapshot)
1513 handle->head += size; 1546 handle->head += size;
1514
1515 lost |= etr_buf->full;
1516out: 1547out:
1517 /* 1548 /*
1518 * Don't set the TRUNCATED flag in snapshot mode because 1) the 1549 * Don't set the TRUNCATED flag in snapshot mode because 1) the
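The offset arithmetic added above reduces to a single modular step: keep only the newest, mask-aligned size bytes and fold the result back into the ring. A standalone sketch of that calculation, assuming size <= buf_len (the function name is illustrative):

/*
 * Circular buffer of buf_size bytes; valid data starts at buf_offset
 * and spans buf_len bytes. Return where the newest 'size' bytes begin.
 */
static unsigned long newest_data_offset(unsigned long buf_offset,
					unsigned long buf_len,
					unsigned long buf_size,
					unsigned long size)
{
	unsigned long offset = buf_offset + buf_len - size;

	if (offset >= buf_size)
		offset -= buf_size;

	return offset;
}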
@@ -1556,7 +1587,6 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
1556 } 1587 }
1557 1588
1558 etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf); 1589 etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
1559 drvdata->perf_data = etr_perf;
1560 1590
1561 /* 1591 /*
1562 * No HW configuration is needed if the sink is already in 1592 * No HW configuration is needed if the sink is already in
@@ -1572,6 +1602,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
1572 /* Associate with monitored process. */ 1602 /* Associate with monitored process. */
1573 drvdata->pid = pid; 1603 drvdata->pid = pid;
1574 drvdata->mode = CS_MODE_PERF; 1604 drvdata->mode = CS_MODE_PERF;
1605 drvdata->perf_buf = etr_perf->etr_buf;
1575 atomic_inc(csdev->refcnt); 1606 atomic_inc(csdev->refcnt);
1576 } 1607 }
1577 1608
@@ -1617,6 +1648,8 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
1617 /* Dissociate from monitored process. */ 1648 /* Dissociate from monitored process. */
1618 drvdata->pid = -1; 1649 drvdata->pid = -1;
1619 drvdata->mode = CS_MODE_DISABLED; 1650 drvdata->mode = CS_MODE_DISABLED;
1651 /* Reset perf specific data */
1652 drvdata->perf_buf = NULL;
1620 1653
1621 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1654 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1622 1655
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index be37aff573b4..1cf82fa58289 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -70,6 +70,34 @@ void tmc_disable_hw(struct tmc_drvdata *drvdata)
70 writel_relaxed(0x0, drvdata->base + TMC_CTL); 70 writel_relaxed(0x0, drvdata->base + TMC_CTL);
71} 71}
72 72
73u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
74{
75 u32 mask = 0;
76
77 /*
78 * When moving RRP or an offset address forward, the new values must
79 * be byte-address aligned to the width of the trace memory databus
80 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
81 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
82 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
83 * be 0s.
84 */
85 switch (drvdata->memwidth) {
86 case TMC_MEM_INTF_WIDTH_32BITS:
87 /* fallthrough */
88 case TMC_MEM_INTF_WIDTH_64BITS:
89 /* fallthrough */
90 case TMC_MEM_INTF_WIDTH_128BITS:
91 mask = GENMASK(31, 4);
92 break;
93 case TMC_MEM_INTF_WIDTH_256BITS:
94 mask = GENMASK(31, 5);
95 break;
96 }
97
98 return mask;
99}
100
73static int tmc_read_prepare(struct tmc_drvdata *drvdata) 101static int tmc_read_prepare(struct tmc_drvdata *drvdata)
74{ 102{
75 int ret = 0; 103 int ret = 0;
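To make the masks concrete, a hedged worked example of trimming a read size with the value returned above (the 0x1ffff input is arbitrary):

#include <linux/bits.h>

/*
 * GENMASK(31, 4) == 0xfffffff0: 0x1ffff & 0xfffffff0 == 0x1fff0,
 * i.e. rounded down to a 16-byte boundary (32/64/128-bit databus).
 * GENMASK(31, 5) == 0xffffffe0: 0x1ffff & 0xffffffe0 == 0x1ffe0,
 * i.e. rounded down to 32 bytes (256-bit databus).
 */
static u32 tmc_align_read_size(u32 size, u32 mask)
{
	return size & mask;	/* mask from tmc_get_memwidth_mask() */
}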
@@ -236,6 +264,7 @@ coresight_tmc_reg(ffcr, TMC_FFCR);
236coresight_tmc_reg(mode, TMC_MODE); 264coresight_tmc_reg(mode, TMC_MODE);
237coresight_tmc_reg(pscr, TMC_PSCR); 265coresight_tmc_reg(pscr, TMC_PSCR);
238coresight_tmc_reg(axictl, TMC_AXICTL); 266coresight_tmc_reg(axictl, TMC_AXICTL);
267coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
239coresight_tmc_reg(devid, CORESIGHT_DEVID); 268coresight_tmc_reg(devid, CORESIGHT_DEVID);
240coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI); 269coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
241coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI); 270coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
@@ -255,6 +284,7 @@ static struct attribute *coresight_tmc_mgmt_attrs[] = {
255 &dev_attr_devid.attr, 284 &dev_attr_devid.attr,
256 &dev_attr_dba.attr, 285 &dev_attr_dba.attr,
257 &dev_attr_axictl.attr, 286 &dev_attr_axictl.attr,
287 &dev_attr_authstatus.attr,
258 NULL, 288 NULL,
259}; 289};
260 290
@@ -342,6 +372,13 @@ static inline bool tmc_etr_can_use_sg(struct device *dev)
342 return fwnode_property_present(dev->fwnode, "arm,scatter-gather"); 372 return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
343} 373}
344 374
375static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
376{
377 u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
378
379 return (auth & TMC_AUTH_NSID_MASK) == 0x3;
380}
381
345/* Detect and initialise the capabilities of a TMC ETR */ 382/* Detect and initialise the capabilities of a TMC ETR */
346static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps) 383static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
347{ 384{
@@ -349,6 +386,9 @@ static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
349 u32 dma_mask = 0; 386 u32 dma_mask = 0;
350 struct tmc_drvdata *drvdata = dev_get_drvdata(parent); 387 struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
351 388
389 if (!tmc_etr_has_non_secure_access(drvdata))
390 return -EACCES;
391
352 /* Set the unadvertised capabilities */ 392 /* Set the unadvertised capabilities */
353 tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps); 393 tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
354 394
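The 0x3 tested in tmc_etr_has_non_secure_access() is the two-bit non-secure invasive debug (NSID) field of TMC_AUTHSTATUS reading back as "implemented and enabled". An equivalent hedged spelling with the bitfield helpers; since NSID occupies the lowest two bits, FIELD_GET() and the open-coded mask are interchangeable here:

#include <linux/bitfield.h>

static bool tmc_nsid_enabled(u32 authstatus)
{
	/* 0b11: non-secure invasive debug implemented and enabled */
	return FIELD_GET(TMC_AUTH_NSID_MASK, authstatus) == 0x3;
}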
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 1ed50411cc3c..71de978575f3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -39,6 +39,7 @@
39#define TMC_ITATBCTR2 0xef0 39#define TMC_ITATBCTR2 0xef0
40#define TMC_ITATBCTR1 0xef4 40#define TMC_ITATBCTR1 0xef4
41#define TMC_ITATBCTR0 0xef8 41#define TMC_ITATBCTR0 0xef8
42#define TMC_AUTHSTATUS 0xfb8
42 43
43/* register description */ 44/* register description */
44/* TMC_CTL - 0x020 */ 45/* TMC_CTL - 0x020 */
@@ -47,6 +48,7 @@
47#define TMC_STS_TMCREADY_BIT 2 48#define TMC_STS_TMCREADY_BIT 2
48#define TMC_STS_FULL BIT(0) 49#define TMC_STS_FULL BIT(0)
49#define TMC_STS_TRIGGERED BIT(1) 50#define TMC_STS_TRIGGERED BIT(1)
51#define TMC_STS_MEMERR BIT(5)
50/* 52/*
51 * TMC_AXICTL - 0x110 53 * TMC_AXICTL - 0x110
52 * 54 *
@@ -89,6 +91,8 @@
89#define TMC_DEVID_AXIAW_SHIFT 17 91#define TMC_DEVID_AXIAW_SHIFT 17
90#define TMC_DEVID_AXIAW_MASK 0x7f 92#define TMC_DEVID_AXIAW_MASK 0x7f
91 93
94#define TMC_AUTH_NSID_MASK GENMASK(1, 0)
95
92enum tmc_config_type { 96enum tmc_config_type {
93 TMC_CONFIG_TYPE_ETB, 97 TMC_CONFIG_TYPE_ETB,
94 TMC_CONFIG_TYPE_ETR, 98 TMC_CONFIG_TYPE_ETR,
@@ -178,8 +182,8 @@ struct etr_buf {
178 * device configuration register (DEVID) 182 * device configuration register (DEVID)
179 * @idr: Holds etr_bufs allocated for this ETR. 183 * @idr: Holds etr_bufs allocated for this ETR.
180 * @idr_mutex: Access serialisation for idr. 184 * @idr_mutex: Access serialisation for idr.
181 * @perf_data: PERF buffer for ETR. 185 * @sysfs_buf: SYSFS buffer for ETR.
182 * @sysfs_data: SYSFS buffer for ETR. 186 * @perf_buf: PERF buffer for ETR.
183 */ 187 */
184struct tmc_drvdata { 188struct tmc_drvdata {
185 void __iomem *base; 189 void __iomem *base;
@@ -202,7 +206,7 @@ struct tmc_drvdata {
202 struct idr idr; 206 struct idr idr;
203 struct mutex idr_mutex; 207 struct mutex idr_mutex;
204 struct etr_buf *sysfs_buf; 208 struct etr_buf *sysfs_buf;
205 void *perf_data; 209 struct etr_buf *perf_buf;
206}; 210};
207 211
208struct etr_buf_operations { 212struct etr_buf_operations {
@@ -251,6 +255,7 @@ void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
251void tmc_flush_and_stop(struct tmc_drvdata *drvdata); 255void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
252void tmc_enable_hw(struct tmc_drvdata *drvdata); 256void tmc_enable_hw(struct tmc_drvdata *drvdata);
253void tmc_disable_hw(struct tmc_drvdata *drvdata); 257void tmc_disable_hw(struct tmc_drvdata *drvdata);
258u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata);
254 259
255/* ETB/ETF functions */ 260/* ETB/ETF functions */
256int tmc_read_prepare_etb(struct tmc_drvdata *drvdata); 261int tmc_read_prepare_etb(struct tmc_drvdata *drvdata);
diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile
index d9252fa8d9ca..b63eb8f309ad 100644
--- a/drivers/hwtracing/intel_th/Makefile
+++ b/drivers/hwtracing/intel_th/Makefile
@@ -20,3 +20,6 @@ intel_th_msu-y := msu.o
20 20
21obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o 21obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o
22intel_th_pti-y := pti.o 22intel_th_pti-y := pti.o
23
24obj-$(CONFIG_INTEL_TH_MSU) += intel_th_msu_sink.o
25intel_th_msu_sink-y := msu-sink.o
diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c
new file mode 100644
index 000000000000..2c7f5116be12
--- /dev/null
+++ b/drivers/hwtracing/intel_th/msu-sink.c
@@ -0,0 +1,116 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * An example software sink buffer for Intel TH MSU.
4 *
5 * Copyright (C) 2019 Intel Corporation.
6 */
7
8#include <linux/intel_th.h>
9#include <linux/module.h>
10#include <linux/slab.h>
11#include <linux/device.h>
12#include <linux/dma-mapping.h>
13
14#define MAX_SGTS 16
15
16struct msu_sink_private {
17 struct device *dev;
18 struct sg_table **sgts;
19 unsigned int nr_sgts;
20};
21
22static void *msu_sink_assign(struct device *dev, int *mode)
23{
24 struct msu_sink_private *priv;
25
26 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
27 if (!priv)
28 return NULL;
29
30 priv->sgts = kcalloc(MAX_SGTS, sizeof(void *), GFP_KERNEL);
31 if (!priv->sgts) {
32 kfree(priv);
33 return NULL;
34 }
35
36 priv->dev = dev;
37 *mode = MSC_MODE_MULTI;
38
39 return priv;
40}
41
42static void msu_sink_unassign(void *data)
43{
44 struct msu_sink_private *priv = data;
45
46 kfree(priv->sgts);
47 kfree(priv);
48}
49
50/* See also: msc.c: __msc_buffer_win_alloc() */
51static int msu_sink_alloc_window(void *data, struct sg_table **sgt, size_t size)
52{
53 struct msu_sink_private *priv = data;
54 unsigned int nents;
55 struct scatterlist *sg_ptr;
56 void *block;
57 int ret, i;
58
59 if (priv->nr_sgts == MAX_SGTS)
60 return -ENOMEM;
61
62 nents = DIV_ROUND_UP(size, PAGE_SIZE);
63
64 ret = sg_alloc_table(*sgt, nents, GFP_KERNEL);
65 if (ret)
66 return -ENOMEM;
67
68 priv->sgts[priv->nr_sgts++] = *sgt;
69
70 for_each_sg((*sgt)->sgl, sg_ptr, nents, i) {
71 block = dma_alloc_coherent(priv->dev->parent->parent,
72 PAGE_SIZE, &sg_dma_address(sg_ptr),
73 GFP_KERNEL);
74 sg_set_buf(sg_ptr, block, PAGE_SIZE);
75 }
76
77 return nents;
78}
79
80/* See also: msc.c: __msc_buffer_win_free() */
81static void msu_sink_free_window(void *data, struct sg_table *sgt)
82{
83 struct msu_sink_private *priv = data;
84 struct scatterlist *sg_ptr;
85 int i;
86
87 for_each_sg(sgt->sgl, sg_ptr, sgt->nents, i) {
88 dma_free_coherent(priv->dev->parent->parent, PAGE_SIZE,
89 sg_virt(sg_ptr), sg_dma_address(sg_ptr));
90 }
91
92 sg_free_table(sgt);
93 priv->nr_sgts--;
94}
95
96static int msu_sink_ready(void *data, struct sg_table *sgt, size_t bytes)
97{
98 struct msu_sink_private *priv = data;
99
100 intel_th_msc_window_unlock(priv->dev, sgt);
101
102 return 0;
103}
104
105static const struct msu_buffer sink_mbuf = {
106 .name = "sink",
107 .assign = msu_sink_assign,
108 .unassign = msu_sink_unassign,
109 .alloc_window = msu_sink_alloc_window,
110 .free_window = msu_sink_free_window,
111 .ready = msu_sink_ready,
112};
113
114module_intel_th_msu_buffer(sink_mbuf);
115
116MODULE_LICENSE("GPL v2");
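Once a module like this is loaded, mode_store() in msu.c (reworked later in this diff) looks unknown mode strings up in the buffer registry, so the sink is engaged from user space by writing its name to the MSC's mode attribute. A hypothetical illustration; the sysfs path follows the usual intel_th device naming but is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/intel_th/devices/0-msc0/mode";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* matched against registered msu_buffer names, e.g. "sink" */
	if (write(fd, "sink", 4) != 4)
		perror("write");
	close(fd);
	return 0;
}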
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 8ab28e5fb366..fc9f15f36ad4 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -17,21 +17,48 @@
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/workqueue.h>
20#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
21 22
22#ifdef CONFIG_X86 23#ifdef CONFIG_X86
23#include <asm/set_memory.h> 24#include <asm/set_memory.h>
24#endif 25#endif
25 26
27#include <linux/intel_th.h>
26#include "intel_th.h" 28#include "intel_th.h"
27#include "msu.h" 29#include "msu.h"
28 30
29#define msc_dev(x) (&(x)->thdev->dev) 31#define msc_dev(x) (&(x)->thdev->dev)
30 32
33/*
34 * Lockout state transitions:
35 * READY -> INUSE -+-> LOCKED -+-> READY -> etc.
36	 *                 \-----------/
37 * WIN_READY: window can be used by HW
38 * WIN_INUSE: window is in use
39 * WIN_LOCKED: window is filled up and is being processed by the buffer
40 * handling code
41 *
42 * All state transitions happen automatically, except for the LOCKED->READY,
43 * which needs to be signalled by the buffer code by calling
44 * intel_th_msc_window_unlock().
45 *
46 * When the interrupt handler has to switch to the next window, it checks
47 * whether it's READY, and if it is, it performs the switch and tracing
48 * continues. If it's LOCKED, it stops the trace.
49 */
50enum lockout_state {
51 WIN_READY = 0,
52 WIN_INUSE,
53 WIN_LOCKED
54};
55
31/** 56/**
32 * struct msc_window - multiblock mode window descriptor 57 * struct msc_window - multiblock mode window descriptor
33 * @entry: window list linkage (msc::win_list) 58 * @entry: window list linkage (msc::win_list)
34 * @pgoff: page offset into the buffer that this window starts at 59 * @pgoff: page offset into the buffer that this window starts at
60	 * @lockout:	lockout state, see comment above
61 * @lo_lock: lockout state serialization
35 * @nr_blocks: number of blocks (pages) in this window 62 * @nr_blocks: number of blocks (pages) in this window
36 * @nr_segs: number of segments in this window (<= @nr_blocks) 63 * @nr_segs: number of segments in this window (<= @nr_blocks)
37 * @_sgt: array of block descriptors 64 * @_sgt: array of block descriptors
@@ -40,6 +67,8 @@
40struct msc_window { 67struct msc_window {
41 struct list_head entry; 68 struct list_head entry;
42 unsigned long pgoff; 69 unsigned long pgoff;
70 enum lockout_state lockout;
71 spinlock_t lo_lock;
43 unsigned int nr_blocks; 72 unsigned int nr_blocks;
44 unsigned int nr_segs; 73 unsigned int nr_segs;
45 struct msc *msc; 74 struct msc *msc;
@@ -66,8 +95,8 @@ struct msc_iter {
66 struct msc_window *start_win; 95 struct msc_window *start_win;
67 struct msc_window *win; 96 struct msc_window *win;
68 unsigned long offset; 97 unsigned long offset;
69 int start_block; 98 struct scatterlist *start_block;
70 int block; 99 struct scatterlist *block;
71 unsigned int block_off; 100 unsigned int block_off;
72 unsigned int wrap_count; 101 unsigned int wrap_count;
73 unsigned int eof; 102 unsigned int eof;
@@ -77,6 +106,8 @@ struct msc_iter {
77 * struct msc - MSC device representation 106 * struct msc - MSC device representation
78 * @reg_base: register window base address 107 * @reg_base: register window base address
79 * @thdev: intel_th_device pointer 108 * @thdev: intel_th_device pointer
109 * @mbuf: MSU buffer, if assigned
110	 * @mbuf_priv:	MSU buffer's private data, if @mbuf
80 * @win_list: list of windows in multiblock mode 111 * @win_list: list of windows in multiblock mode
81 * @single_sgt: single mode buffer 112 * @single_sgt: single mode buffer
82 * @cur_win: current window 113 * @cur_win: current window
@@ -100,6 +131,10 @@ struct msc {
100 void __iomem *msu_base; 131 void __iomem *msu_base;
101 struct intel_th_device *thdev; 132 struct intel_th_device *thdev;
102 133
134 const struct msu_buffer *mbuf;
135 void *mbuf_priv;
136
137 struct work_struct work;
103 struct list_head win_list; 138 struct list_head win_list;
104 struct sg_table single_sgt; 139 struct sg_table single_sgt;
105 struct msc_window *cur_win; 140 struct msc_window *cur_win;
@@ -108,6 +143,8 @@ struct msc {
108 unsigned int single_wrap : 1; 143 unsigned int single_wrap : 1;
109 void *base; 144 void *base;
110 dma_addr_t base_addr; 145 dma_addr_t base_addr;
146 u32 orig_addr;
147 u32 orig_sz;
111 148
112 /* <0: no buffer, 0: no users, >0: active users */ 149 /* <0: no buffer, 0: no users, >0: active users */
113 atomic_t user_count; 150 atomic_t user_count;
@@ -126,6 +163,101 @@ struct msc {
126 unsigned int index; 163 unsigned int index;
127}; 164};
128 165
166static LIST_HEAD(msu_buffer_list);
167static DEFINE_MUTEX(msu_buffer_mutex);
168
169/**
170 * struct msu_buffer_entry - internal MSU buffer bookkeeping
171 * @entry: link to msu_buffer_list
172 * @mbuf: MSU buffer object
173 * @owner: module that provides this MSU buffer
174 */
175struct msu_buffer_entry {
176 struct list_head entry;
177 const struct msu_buffer *mbuf;
178 struct module *owner;
179};
180
181static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
182{
183 struct msu_buffer_entry *mbe;
184
185 lockdep_assert_held(&msu_buffer_mutex);
186
187 list_for_each_entry(mbe, &msu_buffer_list, entry) {
188 if (!strcmp(mbe->mbuf->name, name))
189 return mbe;
190 }
191
192 return NULL;
193}
194
195static const struct msu_buffer *
196msu_buffer_get(const char *name)
197{
198 struct msu_buffer_entry *mbe;
199
200 mutex_lock(&msu_buffer_mutex);
201 mbe = __msu_buffer_entry_find(name);
202 if (mbe && !try_module_get(mbe->owner))
203 mbe = NULL;
204 mutex_unlock(&msu_buffer_mutex);
205
206 return mbe ? mbe->mbuf : NULL;
207}
208
209static void msu_buffer_put(const struct msu_buffer *mbuf)
210{
211 struct msu_buffer_entry *mbe;
212
213 mutex_lock(&msu_buffer_mutex);
214 mbe = __msu_buffer_entry_find(mbuf->name);
215 if (mbe)
216 module_put(mbe->owner);
217 mutex_unlock(&msu_buffer_mutex);
218}
219
220int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
221 struct module *owner)
222{
223 struct msu_buffer_entry *mbe;
224 int ret = 0;
225
226 mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
227 if (!mbe)
228 return -ENOMEM;
229
230 mutex_lock(&msu_buffer_mutex);
231 if (__msu_buffer_entry_find(mbuf->name)) {
232 ret = -EEXIST;
233 kfree(mbe);
234 goto unlock;
235 }
236
237 mbe->mbuf = mbuf;
238 mbe->owner = owner;
239 list_add_tail(&mbe->entry, &msu_buffer_list);
240unlock:
241 mutex_unlock(&msu_buffer_mutex);
242
243 return ret;
244}
245EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);
246
247void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
248{
249 struct msu_buffer_entry *mbe;
250
251 mutex_lock(&msu_buffer_mutex);
252 mbe = __msu_buffer_entry_find(mbuf->name);
253 if (mbe) {
254 list_del(&mbe->entry);
255 kfree(mbe);
256 }
257 mutex_unlock(&msu_buffer_mutex);
258}
259EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
260
129static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) 261static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
130{ 262{
131 /* header hasn't been written */ 263 /* header hasn't been written */
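The register/unregister pair above is what the module_intel_th_msu_buffer() one-liner used by msu-sink.c wraps. A hedged sketch of the expanded form for a hypothetical buffer (my_mbuf and its name are illustrative):

#include <linux/intel_th.h>
#include <linux/module.h>

static const struct msu_buffer my_mbuf = {
	.name = "mybuf",
	/* .assign, .unassign, .alloc_window, ... as in msu-sink.c */
};

static int __init my_mbuf_init(void)
{
	return intel_th_msu_buffer_register(&my_mbuf, THIS_MODULE);
}
module_init(my_mbuf_init);

static void __exit my_mbuf_exit(void)
{
	intel_th_msu_buffer_unregister(&my_mbuf);
}
module_exit(my_mbuf_exit);

Passing THIS_MODULE lets msu_buffer_get() pin the providing module with try_module_get() for as long as an MSC has the buffer assigned.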
@@ -139,28 +271,25 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
139 return false; 271 return false;
140} 272}
141 273
142static inline struct msc_block_desc * 274static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
143msc_win_block(struct msc_window *win, unsigned int block)
144{ 275{
145 return sg_virt(&win->sgt->sgl[block]); 276 return win->sgt->sgl;
146} 277}
147 278
148static inline size_t 279static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
149msc_win_actual_bsz(struct msc_window *win, unsigned int block)
150{ 280{
151 return win->sgt->sgl[block].length; 281 return sg_virt(msc_win_base_sg(win));
152} 282}
153 283
154static inline dma_addr_t 284static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
155msc_win_baddr(struct msc_window *win, unsigned int block)
156{ 285{
157 return sg_dma_address(&win->sgt->sgl[block]); 286 return sg_dma_address(msc_win_base_sg(win));
158} 287}
159 288
160static inline unsigned long 289static inline unsigned long
161msc_win_bpfn(struct msc_window *win, unsigned int block) 290msc_win_base_pfn(struct msc_window *win)
162{ 291{
163 return msc_win_baddr(win, block) >> PAGE_SHIFT; 292 return PFN_DOWN(msc_win_base_dma(win));
164} 293}
165 294
166/** 295/**
@@ -188,6 +317,26 @@ static struct msc_window *msc_next_window(struct msc_window *win)
188 return list_next_entry(win, entry); 317 return list_next_entry(win, entry);
189} 318}
190 319
320static size_t msc_win_total_sz(struct msc_window *win)
321{
322 struct scatterlist *sg;
323 unsigned int blk;
324 size_t size = 0;
325
326 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
327 struct msc_block_desc *bdesc = sg_virt(sg);
328
329 if (msc_block_wrapped(bdesc))
330 return win->nr_blocks << PAGE_SHIFT;
331
332 size += msc_total_sz(bdesc);
333 if (msc_block_last_written(bdesc))
334 break;
335 }
336
337 return size;
338}
339
191/** 340/**
192 * msc_find_window() - find a window matching a given sg_table 341 * msc_find_window() - find a window matching a given sg_table
193 * @msc: MSC device 342 * @msc: MSC device
@@ -216,7 +365,7 @@ msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
216 found++; 365 found++;
217 366
218 /* skip the empty ones */ 367 /* skip the empty ones */
219 if (nonempty && msc_block_is_empty(msc_win_block(win, 0))) 368 if (nonempty && msc_block_is_empty(msc_win_base(win)))
220 continue; 369 continue;
221 370
222 if (found) 371 if (found)
@@ -250,44 +399,38 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
250} 399}
251 400
252/** 401/**
253 * msc_win_oldest_block() - locate the oldest block in a given window 402 * msc_win_oldest_sg() - locate the oldest block in a given window
254 * @win: window to look at 403 * @win: window to look at
255 * 404 *
256	 * Return:	index of the block with the oldest data	405	 * Return:	sg element holding the block with the oldest data
257 */ 406 */
258static unsigned int msc_win_oldest_block(struct msc_window *win) 407static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
259{ 408{
260 unsigned int blk; 409 unsigned int blk;
261 struct msc_block_desc *bdesc = msc_win_block(win, 0); 410 struct scatterlist *sg;
411 struct msc_block_desc *bdesc = msc_win_base(win);
262 412
263 /* without wrapping, first block is the oldest */ 413 /* without wrapping, first block is the oldest */
264 if (!msc_block_wrapped(bdesc)) 414 if (!msc_block_wrapped(bdesc))
265 return 0; 415 return msc_win_base_sg(win);
266 416
267 /* 417 /*
268 * with wrapping, last written block contains both the newest and the 418 * with wrapping, last written block contains both the newest and the
269 * oldest data for this window. 419 * oldest data for this window.
270 */ 420 */
271 for (blk = 0; blk < win->nr_segs; blk++) { 421 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
272 bdesc = msc_win_block(win, blk); 422 struct msc_block_desc *bdesc = sg_virt(sg);
273 423
274 if (msc_block_last_written(bdesc)) 424 if (msc_block_last_written(bdesc))
275 return blk; 425 return sg;
276 } 426 }
277 427
278 return 0; 428 return msc_win_base_sg(win);
279} 429}
280 430
281static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter) 431static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
282{ 432{
283 return msc_win_block(iter->win, iter->block); 433 return sg_virt(iter->block);
284}
285
286static void msc_iter_init(struct msc_iter *iter)
287{
288 memset(iter, 0, sizeof(*iter));
289 iter->start_block = -1;
290 iter->block = -1;
291} 434}
292 435
293static struct msc_iter *msc_iter_install(struct msc *msc) 436static struct msc_iter *msc_iter_install(struct msc *msc)
@@ -312,7 +455,6 @@ static struct msc_iter *msc_iter_install(struct msc *msc)
312 goto unlock; 455 goto unlock;
313 } 456 }
314 457
315 msc_iter_init(iter);
316 iter->msc = msc; 458 iter->msc = msc;
317 459
318 list_add_tail(&iter->entry, &msc->iter_list); 460 list_add_tail(&iter->entry, &msc->iter_list);
@@ -333,10 +475,10 @@ static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
333 475
334static void msc_iter_block_start(struct msc_iter *iter) 476static void msc_iter_block_start(struct msc_iter *iter)
335{ 477{
336 if (iter->start_block != -1) 478 if (iter->start_block)
337 return; 479 return;
338 480
339 iter->start_block = msc_win_oldest_block(iter->win); 481 iter->start_block = msc_win_oldest_sg(iter->win);
340 iter->block = iter->start_block; 482 iter->block = iter->start_block;
341 iter->wrap_count = 0; 483 iter->wrap_count = 0;
342 484
@@ -360,7 +502,7 @@ static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
360 return -EINVAL; 502 return -EINVAL;
361 503
362 iter->win = iter->start_win; 504 iter->win = iter->start_win;
363 iter->start_block = -1; 505 iter->start_block = NULL;
364 506
365 msc_iter_block_start(iter); 507 msc_iter_block_start(iter);
366 508
@@ -370,7 +512,7 @@ static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
370static int msc_iter_win_advance(struct msc_iter *iter) 512static int msc_iter_win_advance(struct msc_iter *iter)
371{ 513{
372 iter->win = msc_next_window(iter->win); 514 iter->win = msc_next_window(iter->win);
373 iter->start_block = -1; 515 iter->start_block = NULL;
374 516
375 if (iter->win == iter->start_win) { 517 if (iter->win == iter->start_win) {
376 iter->eof++; 518 iter->eof++;
@@ -400,8 +542,10 @@ static int msc_iter_block_advance(struct msc_iter *iter)
400 return msc_iter_win_advance(iter); 542 return msc_iter_win_advance(iter);
401 543
402 /* block advance */ 544 /* block advance */
403 if (++iter->block == iter->win->nr_segs) 545 if (sg_is_last(iter->block))
404 iter->block = 0; 546 iter->block = msc_win_base_sg(iter->win);
547 else
548 iter->block = sg_next(iter->block);
405 549
406 /* no wrapping, sanity check in case there is no last written block */ 550 /* no wrapping, sanity check in case there is no last written block */
407 if (!iter->wrap_count && iter->block == iter->start_block) 551 if (!iter->wrap_count && iter->block == iter->start_block)
@@ -506,14 +650,15 @@ next_block:
506static void msc_buffer_clear_hw_header(struct msc *msc) 650static void msc_buffer_clear_hw_header(struct msc *msc)
507{ 651{
508 struct msc_window *win; 652 struct msc_window *win;
653 struct scatterlist *sg;
509 654
510 list_for_each_entry(win, &msc->win_list, entry) { 655 list_for_each_entry(win, &msc->win_list, entry) {
511 unsigned int blk; 656 unsigned int blk;
512 size_t hw_sz = sizeof(struct msc_block_desc) - 657 size_t hw_sz = sizeof(struct msc_block_desc) -
513 offsetof(struct msc_block_desc, hw_tag); 658 offsetof(struct msc_block_desc, hw_tag);
514 659
515 for (blk = 0; blk < win->nr_segs; blk++) { 660 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
516 struct msc_block_desc *bdesc = msc_win_block(win, blk); 661 struct msc_block_desc *bdesc = sg_virt(sg);
517 662
518 memset(&bdesc->hw_tag, 0, hw_sz); 663 memset(&bdesc->hw_tag, 0, hw_sz);
519 } 664 }
@@ -527,6 +672,9 @@ static int intel_th_msu_init(struct msc *msc)
527 if (!msc->do_irq) 672 if (!msc->do_irq)
528 return 0; 673 return 0;
529 674
675 if (!msc->mbuf)
676 return 0;
677
530 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); 678 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
531 mintctl |= msc->index ? M1BLIE : M0BLIE; 679 mintctl |= msc->index ? M1BLIE : M0BLIE;
532 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); 680 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
@@ -554,6 +702,49 @@ static void intel_th_msu_deinit(struct msc *msc)
554 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); 702 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
555} 703}
556 704
705static int msc_win_set_lockout(struct msc_window *win,
706 enum lockout_state expect,
707 enum lockout_state new)
708{
709 enum lockout_state old;
710 unsigned long flags;
711 int ret = 0;
712
713 if (!win->msc->mbuf)
714 return 0;
715
716 spin_lock_irqsave(&win->lo_lock, flags);
717 old = win->lockout;
718
719 if (old != expect) {
720 ret = -EINVAL;
721 dev_warn_ratelimited(msc_dev(win->msc),
722 "expected lockout state %d, got %d\n",
723 expect, old);
724 goto unlock;
725 }
726
727 win->lockout = new;
728
729 if (old == expect && new == WIN_LOCKED)
730 atomic_inc(&win->msc->user_count);
731 else if (old == expect && old == WIN_LOCKED)
732 atomic_dec(&win->msc->user_count);
733
734unlock:
735 spin_unlock_irqrestore(&win->lo_lock, flags);
736
737 if (ret) {
738 if (expect == WIN_READY && old == WIN_LOCKED)
739 return -EBUSY;
740
741 /* from intel_th_msc_window_unlock(), don't warn if not locked */
742 if (expect == WIN_LOCKED && old == new)
743 return 0;
744 }
745
746 return ret;
747}
557/** 748/**
558 * msc_configure() - set up MSC hardware 749 * msc_configure() - set up MSC hardware
559 * @msc: the MSC device to configure 750 * @msc: the MSC device to configure
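Tying msc_win_set_lockout() back to the state comment near the top of the file, one full round trip of a window under an external buffer looks roughly like this (a call-sequence sketch, not new driver code):

static void msc_window_round_trip(struct msc_window *win)
{
	/* hardware starts filling the window (msc_configure()/IRQ path) */
	msc_win_set_lockout(win, WIN_READY, WIN_INUSE);

	/* window filled up: hand it off to the buffer driver */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	/* the buffer driver signals completion through
	 * intel_th_msc_window_unlock(), which performs this transition
	 * and puts the window back into rotation */
	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
}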
@@ -571,8 +762,15 @@ static int msc_configure(struct msc *msc)
571 if (msc->mode > MSC_MODE_MULTI) 762 if (msc->mode > MSC_MODE_MULTI)
572 return -ENOTSUPP; 763 return -ENOTSUPP;
573 764
574 if (msc->mode == MSC_MODE_MULTI) 765 if (msc->mode == MSC_MODE_MULTI) {
766 if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
767 return -EBUSY;
768
575 msc_buffer_clear_hw_header(msc); 769 msc_buffer_clear_hw_header(msc);
770 }
771
772 msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
773 msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);
576 774
577 reg = msc->base_addr >> PAGE_SHIFT; 775 reg = msc->base_addr >> PAGE_SHIFT;
578 iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR); 776 iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);
@@ -594,10 +792,14 @@ static int msc_configure(struct msc *msc)
594 792
595 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); 793 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
596 794
795 intel_th_msu_init(msc);
796
597 msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; 797 msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
598 intel_th_trace_enable(msc->thdev); 798 intel_th_trace_enable(msc->thdev);
599 msc->enabled = 1; 799 msc->enabled = 1;
600 800
801 if (msc->mbuf && msc->mbuf->activate)
802 msc->mbuf->activate(msc->mbuf_priv);
601 803
602 return 0; 804 return 0;
603} 805}
@@ -611,10 +813,17 @@ static int msc_configure(struct msc *msc)
611 */ 813 */
612static void msc_disable(struct msc *msc) 814static void msc_disable(struct msc *msc)
613{ 815{
816 struct msc_window *win = msc->cur_win;
614 u32 reg; 817 u32 reg;
615 818
616 lockdep_assert_held(&msc->buf_mutex); 819 lockdep_assert_held(&msc->buf_mutex);
617 820
821 if (msc->mode == MSC_MODE_MULTI)
822 msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
823
824 if (msc->mbuf && msc->mbuf->deactivate)
825 msc->mbuf->deactivate(msc->mbuf_priv);
826 intel_th_msu_deinit(msc);
618 intel_th_trace_disable(msc->thdev); 827 intel_th_trace_disable(msc->thdev);
619 828
620 if (msc->mode == MSC_MODE_SINGLE) { 829 if (msc->mode == MSC_MODE_SINGLE) {
@@ -630,16 +839,25 @@ static void msc_disable(struct msc *msc)
630 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); 839 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
631 reg &= ~MSC_EN; 840 reg &= ~MSC_EN;
632 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); 841 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
842
843 if (msc->mbuf && msc->mbuf->ready)
844 msc->mbuf->ready(msc->mbuf_priv, win->sgt,
845 msc_win_total_sz(win));
846
633 msc->enabled = 0; 847 msc->enabled = 0;
634 848
635 iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR); 849 iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
636 iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE); 850 iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);
637 851
638 dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n", 852 dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
639 ioread32(msc->reg_base + REG_MSU_MSC0NWSA)); 853 ioread32(msc->reg_base + REG_MSU_MSC0NWSA));
640 854
641 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); 855 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
642 dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg); 856 dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
857
858 reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
859 reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
860 iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
643} 861}
644 862
645static int intel_th_msc_activate(struct intel_th_device *thdev) 863static int intel_th_msc_activate(struct intel_th_device *thdev)
@@ -791,10 +1009,9 @@ static int __msc_buffer_win_alloc(struct msc_window *win,
791 return nr_segs; 1009 return nr_segs;
792 1010
793err_nomem: 1011err_nomem:
794 for (i--; i >= 0; i--) 1012 for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
795 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, 1013 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
796 msc_win_block(win, i), 1014 sg_virt(sg_ptr), sg_dma_address(sg_ptr));
797 msc_win_baddr(win, i));
798 1015
799 sg_free_table(win->sgt); 1016 sg_free_table(win->sgt);
800 1017
@@ -804,20 +1021,26 @@ err_nomem:
804#ifdef CONFIG_X86 1021#ifdef CONFIG_X86
805static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) 1022static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
806{ 1023{
1024 struct scatterlist *sg_ptr;
807 int i; 1025 int i;
808 1026
809 for (i = 0; i < nr_segs; i++) 1027 for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
810 /* Set the page as uncached */ 1028 /* Set the page as uncached */
811 set_memory_uc((unsigned long)msc_win_block(win, i), 1); 1029 set_memory_uc((unsigned long)sg_virt(sg_ptr),
1030 PFN_DOWN(sg_ptr->length));
1031 }
812} 1032}
813 1033
814static void msc_buffer_set_wb(struct msc_window *win) 1034static void msc_buffer_set_wb(struct msc_window *win)
815{ 1035{
1036 struct scatterlist *sg_ptr;
816 int i; 1037 int i;
817 1038
818 for (i = 0; i < win->nr_segs; i++) 1039 for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
819 /* Reset the page to write-back */ 1040 /* Reset the page to write-back */
820 set_memory_wb((unsigned long)msc_win_block(win, i), 1); 1041 set_memory_wb((unsigned long)sg_virt(sg_ptr),
1042 PFN_DOWN(sg_ptr->length));
1043 }
821} 1044}
822#else /* !X86 */ 1045#else /* !X86 */
823static inline void 1046static inline void
@@ -843,19 +1066,14 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
843 if (!nr_blocks) 1066 if (!nr_blocks)
844 return 0; 1067 return 0;
845 1068
846 /*
847 * This limitation hold as long as we need random access to the
848 * block. When that changes, this can go away.
849 */
850 if (nr_blocks > SG_MAX_SINGLE_ALLOC)
851 return -EINVAL;
852
853 win = kzalloc(sizeof(*win), GFP_KERNEL); 1069 win = kzalloc(sizeof(*win), GFP_KERNEL);
854 if (!win) 1070 if (!win)
855 return -ENOMEM; 1071 return -ENOMEM;
856 1072
857 win->msc = msc; 1073 win->msc = msc;
858 win->sgt = &win->_sgt; 1074 win->sgt = &win->_sgt;
1075 win->lockout = WIN_READY;
1076 spin_lock_init(&win->lo_lock);
859 1077
860 if (!list_empty(&msc->win_list)) { 1078 if (!list_empty(&msc->win_list)) {
861 struct msc_window *prev = list_last_entry(&msc->win_list, 1079 struct msc_window *prev = list_last_entry(&msc->win_list,
@@ -865,8 +1083,13 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
865 win->pgoff = prev->pgoff + prev->nr_blocks; 1083 win->pgoff = prev->pgoff + prev->nr_blocks;
866 } 1084 }
867 1085
868 ret = __msc_buffer_win_alloc(win, nr_blocks); 1086 if (msc->mbuf && msc->mbuf->alloc_window)
869 if (ret < 0) 1087 ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
1088 nr_blocks << PAGE_SHIFT);
1089 else
1090 ret = __msc_buffer_win_alloc(win, nr_blocks);
1091
1092 if (ret <= 0)
870 goto err_nomem; 1093 goto err_nomem;
871 1094
872 msc_buffer_set_uc(win, ret); 1095 msc_buffer_set_uc(win, ret);
@@ -875,8 +1098,8 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
875 win->nr_blocks = nr_blocks; 1098 win->nr_blocks = nr_blocks;
876 1099
877 if (list_empty(&msc->win_list)) { 1100 if (list_empty(&msc->win_list)) {
878 msc->base = msc_win_block(win, 0); 1101 msc->base = msc_win_base(win);
879 msc->base_addr = msc_win_baddr(win, 0); 1102 msc->base_addr = msc_win_base_dma(win);
880 msc->cur_win = win; 1103 msc->cur_win = win;
881 } 1104 }
882 1105
@@ -893,14 +1116,15 @@ err_nomem:
893 1116
894static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) 1117static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
895{ 1118{
1119 struct scatterlist *sg;
896 int i; 1120 int i;
897 1121
898 for (i = 0; i < win->nr_segs; i++) { 1122 for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
899 struct page *page = sg_page(&win->sgt->sgl[i]); 1123 struct page *page = sg_page(sg);
900 1124
901 page->mapping = NULL; 1125 page->mapping = NULL;
902 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, 1126 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
903 msc_win_block(win, i), msc_win_baddr(win, i)); 1127 sg_virt(sg), sg_dma_address(sg));
904 } 1128 }
905 sg_free_table(win->sgt); 1129 sg_free_table(win->sgt);
906} 1130}
@@ -925,7 +1149,10 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
925 1149
926 msc_buffer_set_wb(win); 1150 msc_buffer_set_wb(win);
927 1151
928 __msc_buffer_win_free(msc, win); 1152 if (msc->mbuf && msc->mbuf->free_window)
1153 msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
1154 else
1155 __msc_buffer_win_free(msc, win);
929 1156
930 kfree(win); 1157 kfree(win);
931} 1158}
@@ -943,6 +1170,7 @@ static void msc_buffer_relink(struct msc *msc)
943 1170
944 /* call with msc::mutex locked */ 1171 /* call with msc::mutex locked */
945 list_for_each_entry(win, &msc->win_list, entry) { 1172 list_for_each_entry(win, &msc->win_list, entry) {
1173 struct scatterlist *sg;
946 unsigned int blk; 1174 unsigned int blk;
947 u32 sw_tag = 0; 1175 u32 sw_tag = 0;
948 1176
@@ -958,12 +1186,12 @@ static void msc_buffer_relink(struct msc *msc)
958 next_win = list_next_entry(win, entry); 1186 next_win = list_next_entry(win, entry);
959 } 1187 }
960 1188
961 for (blk = 0; blk < win->nr_segs; blk++) { 1189 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
962 struct msc_block_desc *bdesc = msc_win_block(win, blk); 1190 struct msc_block_desc *bdesc = sg_virt(sg);
963 1191
964 memset(bdesc, 0, sizeof(*bdesc)); 1192 memset(bdesc, 0, sizeof(*bdesc));
965 1193
966 bdesc->next_win = msc_win_bpfn(next_win, 0); 1194 bdesc->next_win = msc_win_base_pfn(next_win);
967 1195
968 /* 1196 /*
969 * Similarly to last window, last block should point 1197 * Similarly to last window, last block should point
@@ -971,13 +1199,15 @@ static void msc_buffer_relink(struct msc *msc)
971 */ 1199 */
972 if (blk == win->nr_segs - 1) { 1200 if (blk == win->nr_segs - 1) {
973 sw_tag |= MSC_SW_TAG_LASTBLK; 1201 sw_tag |= MSC_SW_TAG_LASTBLK;
974 bdesc->next_blk = msc_win_bpfn(win, 0); 1202 bdesc->next_blk = msc_win_base_pfn(win);
975 } else { 1203 } else {
976 bdesc->next_blk = msc_win_bpfn(win, blk + 1); 1204 dma_addr_t addr = sg_dma_address(sg_next(sg));
1205
1206 bdesc->next_blk = PFN_DOWN(addr);
977 } 1207 }
978 1208
979 bdesc->sw_tag = sw_tag; 1209 bdesc->sw_tag = sw_tag;
980 bdesc->block_sz = msc_win_actual_bsz(win, blk) / 64; 1210 bdesc->block_sz = sg->length / 64;
981 } 1211 }
982 } 1212 }
983 1213
@@ -1136,6 +1366,7 @@ static int msc_buffer_free_unless_used(struct msc *msc)
1136static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff) 1366static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
1137{ 1367{
1138 struct msc_window *win; 1368 struct msc_window *win;
1369 struct scatterlist *sg;
1139 unsigned int blk; 1370 unsigned int blk;
1140 1371
1141 if (msc->mode == MSC_MODE_SINGLE) 1372 if (msc->mode == MSC_MODE_SINGLE)
@@ -1150,9 +1381,9 @@ static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
1150found: 1381found:
1151 pgoff -= win->pgoff; 1382 pgoff -= win->pgoff;
1152 1383
1153 for (blk = 0; blk < win->nr_segs; blk++) { 1384 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
1154 struct page *page = sg_page(&win->sgt->sgl[blk]); 1385 struct page *page = sg_page(sg);
1155 size_t pgsz = PFN_DOWN(msc_win_actual_bsz(win, blk)); 1386 size_t pgsz = PFN_DOWN(sg->length);
1156 1387
1157 if (pgoff < pgsz) 1388 if (pgoff < pgsz)
1158 return page + pgoff; 1389 return page + pgoff;
@@ -1456,24 +1687,83 @@ static void msc_win_switch(struct msc *msc)
1456 else 1687 else
1457 msc->cur_win = list_next_entry(msc->cur_win, entry); 1688 msc->cur_win = list_next_entry(msc->cur_win, entry);
1458 1689
1459 msc->base = msc_win_block(msc->cur_win, 0); 1690 msc->base = msc_win_base(msc->cur_win);
1460 msc->base_addr = msc_win_baddr(msc->cur_win, 0); 1691 msc->base_addr = msc_win_base_dma(msc->cur_win);
1461 1692
1462 intel_th_trace_switch(msc->thdev); 1693 intel_th_trace_switch(msc->thdev);
1463} 1694}
1464 1695
1696/**
1697 * intel_th_msc_window_unlock - put the window back in rotation
1698 * @dev: MSC device to which this relates
1699 * @sgt: buffer's sg_table for the window, does nothing if NULL
1700 */
1701void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
1702{
1703 struct msc *msc = dev_get_drvdata(dev);
1704 struct msc_window *win;
1705
1706 if (!sgt)
1707 return;
1708
1709 win = msc_find_window(msc, sgt, false);
1710 if (!win)
1711 return;
1712
1713 msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
1714}
1715EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
1716
1717static void msc_work(struct work_struct *work)
1718{
1719 struct msc *msc = container_of(work, struct msc, work);
1720
1721 intel_th_msc_deactivate(msc->thdev);
1722}
1723
1465static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev) 1724static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
1466{ 1725{
1467 struct msc *msc = dev_get_drvdata(&thdev->dev); 1726 struct msc *msc = dev_get_drvdata(&thdev->dev);
1468 u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); 1727 u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
1469 u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; 1728 u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
1729 struct msc_window *win, *next_win;
1730
1731 if (!msc->do_irq || !msc->mbuf)
1732 return IRQ_NONE;
1733
1734 msusts &= mask;
1735
1736 if (!msusts)
1737 return msc->enabled ? IRQ_HANDLED : IRQ_NONE;
1738
1739 iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
1470 1740
1471 if (!(msusts & mask)) { 1741 if (!msc->enabled)
1472 if (msc->enabled)
1473 return IRQ_HANDLED;
1474 return IRQ_NONE; 1742 return IRQ_NONE;
1743
1744 /* grab the window before we do the switch */
1745 win = msc->cur_win;
1746 if (!win)
1747 return IRQ_HANDLED;
1748 next_win = msc_next_window(win);
1749 if (!next_win)
1750 return IRQ_HANDLED;
1751
1752 /* next window: if READY, proceed, if LOCKED, stop the trace */
1753 if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
1754 schedule_work(&msc->work);
1755 return IRQ_HANDLED;
1475 } 1756 }
1476 1757
1758 /* current window: INUSE -> LOCKED */
1759 msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
1760
1761 msc_win_switch(msc);
1762
1763 if (msc->mbuf && msc->mbuf->ready)
1764 msc->mbuf->ready(msc->mbuf_priv, win->sgt,
1765 msc_win_total_sz(win));
1766
1477 return IRQ_HANDLED; 1767 return IRQ_HANDLED;
1478} 1768}
1479 1769
@@ -1511,21 +1801,43 @@ wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
1511 1801
1512static DEVICE_ATTR_RW(wrap); 1802static DEVICE_ATTR_RW(wrap);
1513 1803
1804static void msc_buffer_unassign(struct msc *msc)
1805{
1806 lockdep_assert_held(&msc->buf_mutex);
1807
1808 if (!msc->mbuf)
1809 return;
1810
1811 msc->mbuf->unassign(msc->mbuf_priv);
1812 msu_buffer_put(msc->mbuf);
1813 msc->mbuf_priv = NULL;
1814 msc->mbuf = NULL;
1815}
1816
1514static ssize_t 1817static ssize_t
1515mode_show(struct device *dev, struct device_attribute *attr, char *buf) 1818mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1516{ 1819{
1517 struct msc *msc = dev_get_drvdata(dev); 1820 struct msc *msc = dev_get_drvdata(dev);
1821 const char *mode = msc_mode[msc->mode];
1822 ssize_t ret;
1823
1824 mutex_lock(&msc->buf_mutex);
1825 if (msc->mbuf)
1826 mode = msc->mbuf->name;
1827 ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
1828 mutex_unlock(&msc->buf_mutex);
1518 1829
1519 return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]); 1830 return ret;
1520} 1831}
1521 1832
1522static ssize_t 1833static ssize_t
1523mode_store(struct device *dev, struct device_attribute *attr, const char *buf, 1834mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
1524 size_t size) 1835 size_t size)
1525{ 1836{
1837 const struct msu_buffer *mbuf = NULL;
1526 struct msc *msc = dev_get_drvdata(dev); 1838 struct msc *msc = dev_get_drvdata(dev);
1527 size_t len = size; 1839 size_t len = size;
1528 char *cp; 1840 char *cp, *mode;
1529 int i, ret; 1841 int i, ret;
1530 1842
1531 if (!capable(CAP_SYS_RAWIO)) 1843 if (!capable(CAP_SYS_RAWIO))
@@ -1535,17 +1847,59 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
1535 if (cp) 1847 if (cp)
1536 len = cp - buf; 1848 len = cp - buf;
1537 1849
1538 for (i = 0; i < ARRAY_SIZE(msc_mode); i++) 1850 mode = kstrndup(buf, len, GFP_KERNEL);
1539 if (!strncmp(msc_mode[i], buf, len)) 1851 i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
1540 goto found; 1852 if (i >= 0)
1853 goto found;
1854
1855 /* Buffer sinks only work with a usable IRQ */
1856 if (!msc->do_irq) {
1857 kfree(mode);
1858 return -EINVAL;
1859 }
1860
1861 mbuf = msu_buffer_get(mode);
1862 kfree(mode);
1863 if (mbuf)
1864 goto found;
1541 1865
1542 return -EINVAL; 1866 return -EINVAL;
1543 1867
1544found: 1868found:
1545 mutex_lock(&msc->buf_mutex); 1869 mutex_lock(&msc->buf_mutex);
1870 ret = 0;
1871
1872 /* Same buffer: do nothing */
1873 if (mbuf && mbuf == msc->mbuf) {
1874 /* put the extra reference we just got */
1875 msu_buffer_put(mbuf);
1876 goto unlock;
1877 }
1878
1546 ret = msc_buffer_unlocked_free_unless_used(msc); 1879 ret = msc_buffer_unlocked_free_unless_used(msc);
1547 if (!ret) 1880 if (ret)
1548 msc->mode = i; 1881 goto unlock;
1882
1883 if (mbuf) {
1884 void *mbuf_priv = mbuf->assign(dev, &i);
1885
1886 if (!mbuf_priv) {
1887 ret = -ENOMEM;
1888 goto unlock;
1889 }
1890
1891 msc_buffer_unassign(msc);
1892 msc->mbuf_priv = mbuf_priv;
1893 msc->mbuf = mbuf;
1894 } else {
1895 msc_buffer_unassign(msc);
1896 }
1897
1898 msc->mode = i;
1899
1900unlock:
1901 if (ret && mbuf)
1902 msu_buffer_put(mbuf);
1549 mutex_unlock(&msc->buf_mutex); 1903 mutex_unlock(&msc->buf_mutex);
1550 1904
1551 return ret ? ret : size; 1905 return ret ? ret : size;
@@ -1667,7 +2021,12 @@ win_switch_store(struct device *dev, struct device_attribute *attr,
1667 return -EINVAL; 2021 return -EINVAL;
1668 2022
1669 mutex_lock(&msc->buf_mutex); 2023 mutex_lock(&msc->buf_mutex);
1670 if (msc->mode != MSC_MODE_MULTI) 2024 /*
2025 * Window switch can only happen in the "multi" mode.
2026	 * If an external buffer is engaged, it has full
2027	 * control over window switching.
2028 */
2029 if (msc->mode != MSC_MODE_MULTI || msc->mbuf)
1671 ret = -ENOTSUPP; 2030 ret = -ENOTSUPP;
1672 else 2031 else
1673 msc_win_switch(msc); 2032 msc_win_switch(msc);
@@ -1720,10 +2079,7 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
1720 msc->reg_base = base + msc->index * 0x100; 2079 msc->reg_base = base + msc->index * 0x100;
1721 msc->msu_base = base; 2080 msc->msu_base = base;
1722 2081
1723 err = intel_th_msu_init(msc); 2082 INIT_WORK(&msc->work, msc_work);
1724 if (err)
1725 return err;
1726
1727 err = intel_th_msc_init(msc); 2083 err = intel_th_msc_init(msc);
1728 if (err) 2084 if (err)
1729 return err; 2085 return err;
@@ -1739,7 +2095,6 @@ static void intel_th_msc_remove(struct intel_th_device *thdev)
1739 int ret; 2095 int ret;
1740 2096
1741 intel_th_msc_deactivate(thdev); 2097 intel_th_msc_deactivate(thdev);
1742 intel_th_msu_deinit(msc);
1743 2098
1744 /* 2099 /*
1745 * Buffers should not be used at this point except if the 2100 * Buffers should not be used at this point except if the
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 13d9b141daaa..e771f509bd02 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -44,14 +44,6 @@ enum {
44#define M0BLIE BIT(16) 44#define M0BLIE BIT(16)
45#define M1BLIE BIT(24) 45#define M1BLIE BIT(24)
46 46
47/* MSC operating modes (MSC_MODE) */
48enum {
49 MSC_MODE_SINGLE = 0,
50 MSC_MODE_MULTI,
51 MSC_MODE_EXI,
52 MSC_MODE_DEBUG,
53};
54
55/* MSCnSTS bits */ 47/* MSCnSTS bits */
56#define MSCSTS_WRAPSTAT BIT(1) /* Wrap occurred */ 48#define MSCSTS_WRAPSTAT BIT(1) /* Wrap occurred */
57#define MSCSTS_PLE BIT(2) /* Pipeline Empty */ 49#define MSCSTS_PLE BIT(2) /* Pipeline Empty */
@@ -93,6 +85,16 @@ static inline unsigned long msc_data_sz(struct msc_block_desc *bdesc)
93 return bdesc->valid_dw * 4 - MSC_BDESC; 85 return bdesc->valid_dw * 4 - MSC_BDESC;
94} 86}
95 87
88static inline unsigned long msc_total_sz(struct msc_block_desc *bdesc)
89{
90 return bdesc->valid_dw * 4;
91}
92
93static inline unsigned long msc_block_sz(struct msc_block_desc *bdesc)
94{
95 return bdesc->block_sz * 64 - MSC_BDESC;
96}
97
96static inline bool msc_block_wrapped(struct msc_block_desc *bdesc) 98static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
97{ 99{
98 if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP)) 100 if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP))
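A hedged worked example of the unit conversions above, assuming a 4 KiB block and a 64-byte descriptor (MSC_BDESC is sizeof(struct msc_block_desc); 64 is an assumption made for the arithmetic):

/*
 * bdesc->block_sz counts 64-byte chunks including the descriptor:
 *   block_sz       = 4096 / 64    = 64
 *   msc_block_sz() = 64 * 64 - 64 = 4032 usable data bytes
 *   msc_data_sz()  = valid_dw * 4 - 64, bytes actually written
 *
 * msc_block_last_written() below flags any block where the two
 * differ, i.e. a block the hardware did not completely fill.
 */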
@@ -104,7 +106,7 @@ static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
104static inline bool msc_block_last_written(struct msc_block_desc *bdesc) 106static inline bool msc_block_last_written(struct msc_block_desc *bdesc)
105{ 107{
106 if ((bdesc->hw_tag & MSC_HW_TAG_ENDBIT) || 108 if ((bdesc->hw_tag & MSC_HW_TAG_ENDBIT) ||
107 (msc_data_sz(bdesc) != DATA_IN_PAGE)) 109 (msc_data_sz(bdesc) != msc_block_sz(bdesc)))
108 return true; 110 return true;
109 111
110 return false; 112 return false;
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 871eb4bc4efc..7b971228df38 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -29,6 +29,7 @@ static struct dentry *icc_debugfs_dir;
29 * @req_node: entry in list of requests for the particular @node 29 * @req_node: entry in list of requests for the particular @node
30 * @node: the interconnect node to which this constraint applies 30 * @node: the interconnect node to which this constraint applies
31 * @dev: reference to the device that sets the constraints 31 * @dev: reference to the device that sets the constraints
32 * @tag: path tag (optional)
32 * @avg_bw: an integer describing the average bandwidth in kBps 33 * @avg_bw: an integer describing the average bandwidth in kBps
33 * @peak_bw: an integer describing the peak bandwidth in kBps 34 * @peak_bw: an integer describing the peak bandwidth in kBps
34 */ 35 */
@@ -36,6 +37,7 @@ struct icc_req {
36 struct hlist_node req_node; 37 struct hlist_node req_node;
37 struct icc_node *node; 38 struct icc_node *node;
38 struct device *dev; 39 struct device *dev;
40 u32 tag;
39 u32 avg_bw; 41 u32 avg_bw;
40 u32 peak_bw; 42 u32 peak_bw;
41}; 43};
@@ -203,8 +205,11 @@ static int aggregate_requests(struct icc_node *node)
203 node->avg_bw = 0; 205 node->avg_bw = 0;
204 node->peak_bw = 0; 206 node->peak_bw = 0;
205 207
208 if (p->pre_aggregate)
209 p->pre_aggregate(node);
210
206 hlist_for_each_entry(r, &node->req_list, req_node) 211 hlist_for_each_entry(r, &node->req_list, req_node)
207 p->aggregate(node, r->avg_bw, r->peak_bw, 212 p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
208 &node->avg_bw, &node->peak_bw); 213 &node->avg_bw, &node->peak_bw);
209 214
210 return 0; 215 return 0;
@@ -386,6 +391,26 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
386EXPORT_SYMBOL_GPL(of_icc_get); 391EXPORT_SYMBOL_GPL(of_icc_get);
387 392
388/** 393/**
394 * icc_set_tag() - set an optional tag on a path
395 * @path: the path we want to tag
396 * @tag: the tag value
397 *
398 * This function allows consumers to append a tag to the requests associated
399 * with a path, so that a different aggregation can be done based on this tag.
400 */
401void icc_set_tag(struct icc_path *path, u32 tag)
402{
403 int i;
404
405 if (!path)
406 return;
407
408 for (i = 0; i < path->num_nodes; i++)
409 path->reqs[i].tag = tag;
410}
411EXPORT_SYMBOL_GPL(icc_set_tag);
412
413/**
389 * icc_set_bw() - set bandwidth constraints on an interconnect path 414 * icc_set_bw() - set bandwidth constraints on an interconnect path
390 * @path: reference to the path returned by icc_get() 415 * @path: reference to the path returned by icc_get()
391 * @avg_bw: average bandwidth in kilobytes per second 416 * @avg_bw: average bandwidth in kilobytes per second
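
On the consumer side, the tag set by the new icc_set_tag() travels with every request on the path and is handed to the provider's aggregate callback. A hedged usage sketch; the path name "cpu-mem" and the APPS_TAG value are made up here, and tag semantics are provider-specific:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

#define APPS_TAG 0x1	/* hypothetical provider-specific tag value */

static int request_bandwidth(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "cpu-mem");	/* hypothetical path name */
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Tag every request on the path before casting the bandwidth vote. */
	icc_set_tag(path, APPS_TAG);

	ret = icc_set_bw(path, 100000, 200000);	/* avg/peak in kBps */

	/* A real consumer would keep the path for later updates. */
	icc_put(path);

	return ret;
}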
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index d5e70ebc2410..6ab4012a059a 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@ config INTERCONNECT_QCOM
5 help 5 help
6 Support for Qualcomm's Network-on-Chip interconnect hardware. 6 Support for Qualcomm's Network-on-Chip interconnect hardware.
7 7
8config INTERCONNECT_QCOM_QCS404
9 tristate "Qualcomm QCS404 interconnect driver"
10 depends on INTERCONNECT_QCOM
11 depends on QCOM_SMD_RPM
12 select INTERCONNECT_QCOM_SMD_RPM
13 help
14 This is a driver for the Qualcomm Network-on-Chip on qcs404-based
15 platforms.
16
8config INTERCONNECT_QCOM_SDM845 17config INTERCONNECT_QCOM_SDM845
9 tristate "Qualcomm SDM845 interconnect driver" 18 tristate "Qualcomm SDM845 interconnect driver"
10 depends on INTERCONNECT_QCOM 19 depends on INTERCONNECT_QCOM
@@ -12,3 +21,6 @@ config INTERCONNECT_QCOM_SDM845
12 help 21 help
13 This is a driver for the Qualcomm Network-on-Chip on sdm845-based 22 This is a driver for the Qualcomm Network-on-Chip on sdm845-based
14 platforms. 23 platforms.
24
25config INTERCONNECT_QCOM_SMD_RPM
26 tristate
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 1c1cea690f92..67dafb783dec 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,5 +1,9 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2 2
3qnoc-qcs404-objs := qcs404.o
3qnoc-sdm845-objs := sdm845.o 4qnoc-sdm845-objs := sdm845.o
5icc-smd-rpm-objs := smd-rpm.o
4 6
7obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
5obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o 8obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
9obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
new file mode 100644
index 000000000000..910081d6ddc0
--- /dev/null
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -0,0 +1,539 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2019 Linaro Ltd
4 */
5
6#include <dt-bindings/interconnect/qcom,qcs404.h>
7#include <linux/clk.h>
8#include <linux/device.h>
9#include <linux/interconnect-provider.h>
10#include <linux/io.h>
11#include <linux/module.h>
12#include <linux/of_device.h>
13#include <linux/of_platform.h>
14#include <linux/platform_device.h>
15#include <linux/slab.h>
16
17#include "smd-rpm.h"
18
19#define RPM_BUS_MASTER_REQ 0x73616d62
20#define RPM_BUS_SLAVE_REQ 0x766c7362
21
22enum {
23 QCS404_MASTER_AMPSS_M0 = 1,
24 QCS404_MASTER_GRAPHICS_3D,
25 QCS404_MASTER_MDP_PORT0,
26 QCS404_SNOC_BIMC_1_MAS,
27 QCS404_MASTER_TCU_0,
28 QCS404_MASTER_SPDM,
29 QCS404_MASTER_BLSP_1,
30 QCS404_MASTER_BLSP_2,
31 QCS404_MASTER_XM_USB_HS1,
32 QCS404_MASTER_CRYPTO_CORE0,
33 QCS404_MASTER_SDCC_1,
34 QCS404_MASTER_SDCC_2,
35 QCS404_SNOC_PNOC_MAS,
36 QCS404_MASTER_QPIC,
37 QCS404_MASTER_QDSS_BAM,
38 QCS404_BIMC_SNOC_MAS,
39 QCS404_PNOC_SNOC_MAS,
40 QCS404_MASTER_QDSS_ETR,
41 QCS404_MASTER_EMAC,
42 QCS404_MASTER_PCIE,
43 QCS404_MASTER_USB3,
44 QCS404_PNOC_INT_0,
45 QCS404_PNOC_INT_2,
46 QCS404_PNOC_INT_3,
47 QCS404_PNOC_SLV_0,
48 QCS404_PNOC_SLV_1,
49 QCS404_PNOC_SLV_2,
50 QCS404_PNOC_SLV_3,
51 QCS404_PNOC_SLV_4,
52 QCS404_PNOC_SLV_6,
53 QCS404_PNOC_SLV_7,
54 QCS404_PNOC_SLV_8,
55 QCS404_PNOC_SLV_9,
56 QCS404_PNOC_SLV_10,
57 QCS404_PNOC_SLV_11,
58 QCS404_SNOC_QDSS_INT,
59 QCS404_SNOC_INT_0,
60 QCS404_SNOC_INT_1,
61 QCS404_SNOC_INT_2,
62 QCS404_SLAVE_EBI_CH0,
63 QCS404_BIMC_SNOC_SLV,
64 QCS404_SLAVE_SPDM_WRAPPER,
65 QCS404_SLAVE_PDM,
66 QCS404_SLAVE_PRNG,
67 QCS404_SLAVE_TCSR,
68 QCS404_SLAVE_SNOC_CFG,
69 QCS404_SLAVE_MESSAGE_RAM,
70 QCS404_SLAVE_DISPLAY_CFG,
71 QCS404_SLAVE_GRAPHICS_3D_CFG,
72 QCS404_SLAVE_BLSP_1,
73 QCS404_SLAVE_TLMM_NORTH,
74 QCS404_SLAVE_PCIE_1,
75 QCS404_SLAVE_EMAC_CFG,
76 QCS404_SLAVE_BLSP_2,
77 QCS404_SLAVE_TLMM_EAST,
78 QCS404_SLAVE_TCU,
79 QCS404_SLAVE_PMIC_ARB,
80 QCS404_SLAVE_SDCC_1,
81 QCS404_SLAVE_SDCC_2,
82 QCS404_SLAVE_TLMM_SOUTH,
83 QCS404_SLAVE_USB_HS,
84 QCS404_SLAVE_USB3,
85 QCS404_SLAVE_CRYPTO_0_CFG,
86 QCS404_PNOC_SNOC_SLV,
87 QCS404_SLAVE_APPSS,
88 QCS404_SLAVE_WCSS,
89 QCS404_SNOC_BIMC_1_SLV,
90 QCS404_SLAVE_OCIMEM,
91 QCS404_SNOC_PNOC_SLV,
92 QCS404_SLAVE_QDSS_STM,
93 QCS404_SLAVE_CATS_128,
94 QCS404_SLAVE_OCMEM_64,
95 QCS404_SLAVE_LPASS,
96};
97
98#define to_qcom_provider(_provider) \
99 container_of(_provider, struct qcom_icc_provider, provider)
100
101static const struct clk_bulk_data bus_clocks[] = {
102 { .id = "bus" },
103 { .id = "bus_a" },
104};
105
106/**
107 * struct qcom_icc_provider - Qualcomm specific interconnect provider
108 * @provider: generic interconnect provider
109 * @bus_clks: the clk_bulk_data table of bus clocks
110 * @num_clks: the total number of clk_bulk_data entries
111 */
112struct qcom_icc_provider {
113 struct icc_provider provider;
114 struct clk_bulk_data *bus_clks;
115 int num_clks;
116};
117
118#define QCS404_MAX_LINKS 12
119
120/**
121 * struct qcom_icc_node - Qualcomm specific interconnect nodes
122 * @name: the node name used in debugfs
123 * @id: a unique node identifier
124 * @links: an array of nodes where we can go next while traversing
125 * @num_links: the total number of @links
126 * @buswidth: width of the interconnect between a node and the bus (bytes)
127 * @mas_rpm_id: RPM id for devices that are bus masters
128 * @slv_rpm_id: RPM id for devices that are bus slaves
129 * @rate: current bus clock rate in Hz
130 */
131struct qcom_icc_node {
132 unsigned char *name;
133 u16 id;
134 u16 links[QCS404_MAX_LINKS];
135 u16 num_links;
136 u16 buswidth;
137 int mas_rpm_id;
138 int slv_rpm_id;
139 u64 rate;
140};
141
142struct qcom_icc_desc {
143 struct qcom_icc_node **nodes;
144 size_t num_nodes;
145};
146
147#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
148 ...) \
149 static struct qcom_icc_node _name = { \
150 .name = #_name, \
151 .id = _id, \
152 .buswidth = _buswidth, \
153 .mas_rpm_id = _mas_rpm_id, \
154 .slv_rpm_id = _slv_rpm_id, \
155 .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
156 .links = { __VA_ARGS__ }, \
157 }
158
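/*
 * Illustration only, not part of the patch: the first DEFINE_QNODE()
 * use below expands to roughly the following static definition, with
 * num_links derived via ARRAY_SIZE() over the trailing arguments.
 */
static struct qcom_icc_node mas_apps_proc = {
	.name = "mas_apps_proc",
	.id = QCS404_MASTER_AMPSS_M0,
	.buswidth = 8,
	.mas_rpm_id = 0,
	.slv_rpm_id = -1,
	.num_links = 2,
	.links = { QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV },
};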
159DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
160DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, 6, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
161DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, 8, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
162DEFINE_QNODE(mas_snoc_bimc_1, QCS404_SNOC_BIMC_1_MAS, 8, 76, -1, QCS404_SLAVE_EBI_CH0);
163DEFINE_QNODE(mas_tcu_0, QCS404_MASTER_TCU_0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
164DEFINE_QNODE(mas_spdm, QCS404_MASTER_SPDM, 4, -1, -1, QCS404_PNOC_INT_3);
165DEFINE_QNODE(mas_blsp_1, QCS404_MASTER_BLSP_1, 4, 41, -1, QCS404_PNOC_INT_3);
166DEFINE_QNODE(mas_blsp_2, QCS404_MASTER_BLSP_2, 4, 39, -1, QCS404_PNOC_INT_3);
167DEFINE_QNODE(mas_xi_usb_hs1, QCS404_MASTER_XM_USB_HS1, 8, 138, -1, QCS404_PNOC_INT_0);
168DEFINE_QNODE(mas_crypto, QCS404_MASTER_CRYPTO_CORE0, 8, 23, -1, QCS404_PNOC_SNOC_SLV, QCS404_PNOC_INT_2);
169DEFINE_QNODE(mas_sdcc_1, QCS404_MASTER_SDCC_1, 8, 33, -1, QCS404_PNOC_INT_0);
170DEFINE_QNODE(mas_sdcc_2, QCS404_MASTER_SDCC_2, 8, 35, -1, QCS404_PNOC_INT_0);
171DEFINE_QNODE(mas_snoc_pcnoc, QCS404_SNOC_PNOC_MAS, 8, 77, -1, QCS404_PNOC_INT_2);
172DEFINE_QNODE(mas_qpic, QCS404_MASTER_QPIC, 4, -1, -1, QCS404_PNOC_INT_0);
173DEFINE_QNODE(mas_qdss_bam, QCS404_MASTER_QDSS_BAM, 4, -1, -1, QCS404_SNOC_QDSS_INT);
174DEFINE_QNODE(mas_bimc_snoc, QCS404_BIMC_SNOC_MAS, 8, 21, -1, QCS404_SLAVE_OCMEM_64, QCS404_SLAVE_CATS_128, QCS404_SNOC_INT_0, QCS404_SNOC_INT_1);
175DEFINE_QNODE(mas_pcnoc_snoc, QCS404_PNOC_SNOC_MAS, 8, 29, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_2, QCS404_SNOC_INT_0);
176DEFINE_QNODE(mas_qdss_etr, QCS404_MASTER_QDSS_ETR, 8, -1, -1, QCS404_SNOC_QDSS_INT);
177DEFINE_QNODE(mas_emac, QCS404_MASTER_EMAC, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
178DEFINE_QNODE(mas_pcie, QCS404_MASTER_PCIE, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
179DEFINE_QNODE(mas_usb3, QCS404_MASTER_USB3, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
180DEFINE_QNODE(pcnoc_int_0, QCS404_PNOC_INT_0, 8, 85, 114, QCS404_PNOC_SNOC_SLV, QCS404_PNOC_INT_2);
181DEFINE_QNODE(pcnoc_int_2, QCS404_PNOC_INT_2, 8, 124, 184, QCS404_PNOC_SLV_10, QCS404_SLAVE_TCU, QCS404_PNOC_SLV_11, QCS404_PNOC_SLV_2, QCS404_PNOC_SLV_3, QCS404_PNOC_SLV_0, QCS404_PNOC_SLV_1, QCS404_PNOC_SLV_6, QCS404_PNOC_SLV_7, QCS404_PNOC_SLV_4, QCS404_PNOC_SLV_8, QCS404_PNOC_SLV_9);
182DEFINE_QNODE(pcnoc_int_3, QCS404_PNOC_INT_3, 8, 125, 185, QCS404_PNOC_SNOC_SLV);
183DEFINE_QNODE(pcnoc_s_0, QCS404_PNOC_SLV_0, 4, 89, 118, QCS404_SLAVE_PRNG, QCS404_SLAVE_SPDM_WRAPPER, QCS404_SLAVE_PDM);
184DEFINE_QNODE(pcnoc_s_1, QCS404_PNOC_SLV_1, 4, 90, 119, QCS404_SLAVE_TCSR);
185DEFINE_QNODE(pcnoc_s_2, QCS404_PNOC_SLV_2, 4, -1, -1, QCS404_SLAVE_GRAPHICS_3D_CFG);
186DEFINE_QNODE(pcnoc_s_3, QCS404_PNOC_SLV_3, 4, 92, 121, QCS404_SLAVE_MESSAGE_RAM);
187DEFINE_QNODE(pcnoc_s_4, QCS404_PNOC_SLV_4, 4, 93, 122, QCS404_SLAVE_SNOC_CFG);
188DEFINE_QNODE(pcnoc_s_6, QCS404_PNOC_SLV_6, 4, 94, 123, QCS404_SLAVE_BLSP_1, QCS404_SLAVE_TLMM_NORTH, QCS404_SLAVE_EMAC_CFG);
189DEFINE_QNODE(pcnoc_s_7, QCS404_PNOC_SLV_7, 4, 95, 124, QCS404_SLAVE_TLMM_SOUTH, QCS404_SLAVE_DISPLAY_CFG, QCS404_SLAVE_SDCC_1, QCS404_SLAVE_PCIE_1, QCS404_SLAVE_SDCC_2);
190DEFINE_QNODE(pcnoc_s_8, QCS404_PNOC_SLV_8, 4, 96, 125, QCS404_SLAVE_CRYPTO_0_CFG);
191DEFINE_QNODE(pcnoc_s_9, QCS404_PNOC_SLV_9, 4, 97, 126, QCS404_SLAVE_BLSP_2, QCS404_SLAVE_TLMM_EAST, QCS404_SLAVE_PMIC_ARB);
192DEFINE_QNODE(pcnoc_s_10, QCS404_PNOC_SLV_10, 4, 157, -1, QCS404_SLAVE_USB_HS);
193DEFINE_QNODE(pcnoc_s_11, QCS404_PNOC_SLV_11, 4, 158, 246, QCS404_SLAVE_USB3);
194DEFINE_QNODE(qdss_int, QCS404_SNOC_QDSS_INT, 8, -1, -1, QCS404_SNOC_BIMC_1_SLV, QCS404_SNOC_INT_1);
195DEFINE_QNODE(snoc_int_0, QCS404_SNOC_INT_0, 8, 99, 130, QCS404_SLAVE_LPASS, QCS404_SLAVE_APPSS, QCS404_SLAVE_WCSS);
196DEFINE_QNODE(snoc_int_1, QCS404_SNOC_INT_1, 8, 100, 131, QCS404_SNOC_PNOC_SLV, QCS404_SNOC_INT_2);
197DEFINE_QNODE(snoc_int_2, QCS404_SNOC_INT_2, 8, 134, 197, QCS404_SLAVE_QDSS_STM, QCS404_SLAVE_OCIMEM);
198DEFINE_QNODE(slv_ebi, QCS404_SLAVE_EBI_CH0, 8, -1, 0, 0);
199DEFINE_QNODE(slv_bimc_snoc, QCS404_BIMC_SNOC_SLV, 8, -1, 2, QCS404_BIMC_SNOC_MAS);
200DEFINE_QNODE(slv_spdm, QCS404_SLAVE_SPDM_WRAPPER, 4, -1, -1, 0);
201DEFINE_QNODE(slv_pdm, QCS404_SLAVE_PDM, 4, -1, 41, 0);
202DEFINE_QNODE(slv_prng, QCS404_SLAVE_PRNG, 4, -1, 44, 0);
203DEFINE_QNODE(slv_tcsr, QCS404_SLAVE_TCSR, 4, -1, 50, 0);
204DEFINE_QNODE(slv_snoc_cfg, QCS404_SLAVE_SNOC_CFG, 4, -1, 70, 0);
205DEFINE_QNODE(slv_message_ram, QCS404_SLAVE_MESSAGE_RAM, 4, -1, 55, 0);
206DEFINE_QNODE(slv_disp_ss_cfg, QCS404_SLAVE_DISPLAY_CFG, 4, -1, -1, 0);
207DEFINE_QNODE(slv_gpu_cfg, QCS404_SLAVE_GRAPHICS_3D_CFG, 4, -1, -1, 0);
208DEFINE_QNODE(slv_blsp_1, QCS404_SLAVE_BLSP_1, 4, -1, 39, 0);
209DEFINE_QNODE(slv_tlmm_north, QCS404_SLAVE_TLMM_NORTH, 4, -1, 214, 0);
210DEFINE_QNODE(slv_pcie, QCS404_SLAVE_PCIE_1, 4, -1, -1, 0);
211DEFINE_QNODE(slv_ethernet, QCS404_SLAVE_EMAC_CFG, 4, -1, -1, 0);
212DEFINE_QNODE(slv_blsp_2, QCS404_SLAVE_BLSP_2, 4, -1, 37, 0);
213DEFINE_QNODE(slv_tlmm_east, QCS404_SLAVE_TLMM_EAST, 4, -1, 213, 0);
214DEFINE_QNODE(slv_tcu, QCS404_SLAVE_TCU, 8, -1, -1, 0);
215DEFINE_QNODE(slv_pmic_arb, QCS404_SLAVE_PMIC_ARB, 4, -1, 59, 0);
216DEFINE_QNODE(slv_sdcc_1, QCS404_SLAVE_SDCC_1, 4, -1, 31, 0);
217DEFINE_QNODE(slv_sdcc_2, QCS404_SLAVE_SDCC_2, 4, -1, 33, 0);
218DEFINE_QNODE(slv_tlmm_south, QCS404_SLAVE_TLMM_SOUTH, 4, -1, -1, 0);
219DEFINE_QNODE(slv_usb_hs, QCS404_SLAVE_USB_HS, 4, -1, 40, 0);
220DEFINE_QNODE(slv_usb3, QCS404_SLAVE_USB3, 4, -1, 22, 0);
221DEFINE_QNODE(slv_crypto_0_cfg, QCS404_SLAVE_CRYPTO_0_CFG, 4, -1, 52, 0);
222DEFINE_QNODE(slv_pcnoc_snoc, QCS404_PNOC_SNOC_SLV, 8, -1, 45, QCS404_PNOC_SNOC_MAS);
223DEFINE_QNODE(slv_kpss_ahb, QCS404_SLAVE_APPSS, 4, -1, -1, 0);
224DEFINE_QNODE(slv_wcss, QCS404_SLAVE_WCSS, 4, -1, 23, 0);
225DEFINE_QNODE(slv_snoc_bimc_1, QCS404_SNOC_BIMC_1_SLV, 8, -1, 104, QCS404_SNOC_BIMC_1_MAS);
226DEFINE_QNODE(slv_imem, QCS404_SLAVE_OCIMEM, 8, -1, 26, 0);
227DEFINE_QNODE(slv_snoc_pcnoc, QCS404_SNOC_PNOC_SLV, 8, -1, 28, QCS404_SNOC_PNOC_MAS);
228DEFINE_QNODE(slv_qdss_stm, QCS404_SLAVE_QDSS_STM, 4, -1, 30, 0);
229DEFINE_QNODE(slv_cats_0, QCS404_SLAVE_CATS_128, 16, -1, -1, 0);
230DEFINE_QNODE(slv_cats_1, QCS404_SLAVE_OCMEM_64, 8, -1, -1, 0);
231DEFINE_QNODE(slv_lpass, QCS404_SLAVE_LPASS, 4, -1, -1, 0);
232
233static struct qcom_icc_node *qcs404_bimc_nodes[] = {
234 [MASTER_AMPSS_M0] = &mas_apps_proc,
235 [MASTER_OXILI] = &mas_oxili,
236 [MASTER_MDP_PORT0] = &mas_mdp,
237 [MASTER_SNOC_BIMC_1] = &mas_snoc_bimc_1,
238 [MASTER_TCU_0] = &mas_tcu_0,
239 [SLAVE_EBI_CH0] = &slv_ebi,
240 [SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
241};
242
243static struct qcom_icc_desc qcs404_bimc = {
244 .nodes = qcs404_bimc_nodes,
245 .num_nodes = ARRAY_SIZE(qcs404_bimc_nodes),
246};
247
248static struct qcom_icc_node *qcs404_pcnoc_nodes[] = {
249 [MASTER_SPDM] = &mas_spdm,
250 [MASTER_BLSP_1] = &mas_blsp_1,
251 [MASTER_BLSP_2] = &mas_blsp_2,
252 [MASTER_XI_USB_HS1] = &mas_xi_usb_hs1,
253 [MASTER_CRYPT0] = &mas_crypto,
254 [MASTER_SDCC_1] = &mas_sdcc_1,
255 [MASTER_SDCC_2] = &mas_sdcc_2,
256 [MASTER_SNOC_PCNOC] = &mas_snoc_pcnoc,
257 [MASTER_QPIC] = &mas_qpic,
258 [PCNOC_INT_0] = &pcnoc_int_0,
259 [PCNOC_INT_2] = &pcnoc_int_2,
260 [PCNOC_INT_3] = &pcnoc_int_3,
261 [PCNOC_S_0] = &pcnoc_s_0,
262 [PCNOC_S_1] = &pcnoc_s_1,
263 [PCNOC_S_2] = &pcnoc_s_2,
264 [PCNOC_S_3] = &pcnoc_s_3,
265 [PCNOC_S_4] = &pcnoc_s_4,
266 [PCNOC_S_6] = &pcnoc_s_6,
267 [PCNOC_S_7] = &pcnoc_s_7,
268 [PCNOC_S_8] = &pcnoc_s_8,
269 [PCNOC_S_9] = &pcnoc_s_9,
270 [PCNOC_S_10] = &pcnoc_s_10,
271 [PCNOC_S_11] = &pcnoc_s_11,
272 [SLAVE_SPDM] = &slv_spdm,
273 [SLAVE_PDM] = &slv_pdm,
274 [SLAVE_PRNG] = &slv_prng,
275 [SLAVE_TCSR] = &slv_tcsr,
276 [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
277 [SLAVE_MESSAGE_RAM] = &slv_message_ram,
278 [SLAVE_DISP_SS_CFG] = &slv_disp_ss_cfg,
279 [SLAVE_GPU_CFG] = &slv_gpu_cfg,
280 [SLAVE_BLSP_1] = &slv_blsp_1,
281 [SLAVE_BLSP_2] = &slv_blsp_2,
282 [SLAVE_TLMM_NORTH] = &slv_tlmm_north,
283 [SLAVE_PCIE] = &slv_pcie,
284 [SLAVE_ETHERNET] = &slv_ethernet,
285 [SLAVE_TLMM_EAST] = &slv_tlmm_east,
286 [SLAVE_TCU] = &slv_tcu,
287 [SLAVE_PMIC_ARB] = &slv_pmic_arb,
288 [SLAVE_SDCC_1] = &slv_sdcc_1,
289 [SLAVE_SDCC_2] = &slv_sdcc_2,
290 [SLAVE_TLMM_SOUTH] = &slv_tlmm_south,
291 [SLAVE_USB_HS] = &slv_usb_hs,
292 [SLAVE_USB3] = &slv_usb3,
293 [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
294 [SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc,
295};
296
297static struct qcom_icc_desc qcs404_pcnoc = {
298 .nodes = qcs404_pcnoc_nodes,
299 .num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes),
300};
301
302static struct qcom_icc_node *qcs404_snoc_nodes[] = {
303 [MASTER_QDSS_BAM] = &mas_qdss_bam,
304 [MASTER_BIMC_SNOC] = &mas_bimc_snoc,
305 [MASTER_PCNOC_SNOC] = &mas_pcnoc_snoc,
306 [MASTER_QDSS_ETR] = &mas_qdss_etr,
307 [MASTER_EMAC] = &mas_emac,
308 [MASTER_PCIE] = &mas_pcie,
309 [MASTER_USB3] = &mas_usb3,
310 [QDSS_INT] = &qdss_int,
311 [SNOC_INT_0] = &snoc_int_0,
312 [SNOC_INT_1] = &snoc_int_1,
313 [SNOC_INT_2] = &snoc_int_2,
314 [SLAVE_KPSS_AHB] = &slv_kpss_ahb,
315 [SLAVE_WCSS] = &slv_wcss,
316 [SLAVE_SNOC_BIMC_1] = &slv_snoc_bimc_1,
317 [SLAVE_IMEM] = &slv_imem,
318 [SLAVE_SNOC_PCNOC] = &slv_snoc_pcnoc,
319 [SLAVE_QDSS_STM] = &slv_qdss_stm,
320 [SLAVE_CATS_0] = &slv_cats_0,
321 [SLAVE_CATS_1] = &slv_cats_1,
322 [SLAVE_LPASS] = &slv_lpass,
323};
324
325static struct qcom_icc_desc qcs404_snoc = {
326 .nodes = qcs404_snoc_nodes,
327 .num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
328};
329
330static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
331 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
332{
333 *agg_avg += avg_bw;
334 *agg_peak = max(*agg_peak, peak_bw);
335
336 return 0;
337}
338
339static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
340{
341 struct qcom_icc_provider *qp;
342 struct qcom_icc_node *qn;
343 struct icc_provider *provider;
344 struct icc_node *n;
345 u64 sum_bw;
346 u64 max_peak_bw;
347 u64 rate;
348 u32 agg_avg = 0;
349 u32 agg_peak = 0;
350 int ret, i;
351
352 qn = src->data;
353 provider = src->provider;
354 qp = to_qcom_provider(provider);
355
356 list_for_each_entry(n, &provider->nodes, node_list)
357 qcom_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
358 &agg_avg, &agg_peak);
359
360 sum_bw = icc_units_to_bps(agg_avg);
361 max_peak_bw = icc_units_to_bps(agg_peak);
362
363 /* send bandwidth request message to the RPM processor */
364 if (qn->mas_rpm_id != -1) {
365 ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
366 RPM_BUS_MASTER_REQ,
367 qn->mas_rpm_id,
368 sum_bw);
369 if (ret) {
370 pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
371 qn->mas_rpm_id, ret);
372 return ret;
373 }
374 }
375
376 if (qn->slv_rpm_id != -1) {
377 ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
378 RPM_BUS_SLAVE_REQ,
379 qn->slv_rpm_id,
380 sum_bw);
381 if (ret) {
382 pr_err("qcom_icc_rpm_smd_send slv error %d\n",
383 ret);
384 return ret;
385 }
386 }
387
388 rate = max(sum_bw, max_peak_bw);
389
390 do_div(rate, qn->buswidth);
391
392 if (qn->rate == rate)
393 return 0;
394
395 for (i = 0; i < qp->num_clks; i++) {
396 ret = clk_set_rate(qp->bus_clks[i].clk, rate);
397 if (ret) {
398 pr_err("%s clk_set_rate error: %d\n",
399 qp->bus_clks[i].id, ret);
400 return ret;
401 }
402 }
403
404 qn->rate = rate;
405
406 return 0;
407}
408
409static int qnoc_probe(struct platform_device *pdev)
410{
411 struct device *dev = &pdev->dev;
412 const struct qcom_icc_desc *desc;
413 struct icc_onecell_data *data;
414 struct icc_provider *provider;
415 struct qcom_icc_node **qnodes;
416 struct qcom_icc_provider *qp;
417 struct icc_node *node;
418 size_t num_nodes, i;
419 int ret;
420
421 /* wait for the RPM proxy */
422 if (!qcom_icc_rpm_smd_available())
423 return -EPROBE_DEFER;
424
425 desc = of_device_get_match_data(dev);
426 if (!desc)
427 return -EINVAL;
428
429 qnodes = desc->nodes;
430 num_nodes = desc->num_nodes;
431
432 qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
433 if (!qp)
434 return -ENOMEM;
435
436 data = devm_kcalloc(dev, num_nodes, sizeof(*node), GFP_KERNEL);
437 if (!data)
438 return -ENOMEM;
439
440 qp->bus_clks = devm_kmemdup(dev, bus_clocks, sizeof(bus_clocks),
441 GFP_KERNEL);
442 if (!qp->bus_clks)
443 return -ENOMEM;
444
445 qp->num_clks = ARRAY_SIZE(bus_clocks);
446 ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
447 if (ret)
448 return ret;
449
450 ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
451 if (ret)
452 return ret;
453
454 provider = &qp->provider;
455 INIT_LIST_HEAD(&provider->nodes);
456 provider->dev = dev;
457 provider->set = qcom_icc_set;
458 provider->aggregate = qcom_icc_aggregate;
459 provider->xlate = of_icc_xlate_onecell;
460 provider->data = data;
461
462 ret = icc_provider_add(provider);
463 if (ret) {
464 dev_err(dev, "error adding interconnect provider: %d\n", ret);
465 clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
466 return ret;
467 }
468
469 for (i = 0; i < num_nodes; i++) {
470 size_t j;
471
472 node = icc_node_create(qnodes[i]->id);
473 if (IS_ERR(node)) {
474 ret = PTR_ERR(node);
475 goto err;
476 }
477
478 node->name = qnodes[i]->name;
479 node->data = qnodes[i];
480 icc_node_add(node, provider);
481
482 dev_dbg(dev, "registered node %s\n", node->name);
483
484 /* populate links */
485 for (j = 0; j < qnodes[i]->num_links; j++)
486 icc_link_create(node, qnodes[i]->links[j]);
487
488 data->nodes[i] = node;
489 }
490 data->num_nodes = num_nodes;
491
492 platform_set_drvdata(pdev, qp);
493
494 return 0;
495err:
496 list_for_each_entry(node, &provider->nodes, node_list) {
497 icc_node_del(node);
498 icc_node_destroy(node->id);
499 }
500 clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
501 icc_provider_del(provider);
502
503 return ret;
504}
505
506static int qnoc_remove(struct platform_device *pdev)
507{
508 struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
509 struct icc_provider *provider = &qp->provider;
510 struct icc_node *n;
511
512 list_for_each_entry(n, &provider->nodes, node_list) {
513 icc_node_del(n);
514 icc_node_destroy(n->id);
515 }
516 clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
517
518 return icc_provider_del(provider);
519}
520
521static const struct of_device_id qcs404_noc_of_match[] = {
522 { .compatible = "qcom,qcs404-bimc", .data = &qcs404_bimc },
523 { .compatible = "qcom,qcs404-pcnoc", .data = &qcs404_pcnoc },
524 { .compatible = "qcom,qcs404-snoc", .data = &qcs404_snoc },
525 { },
526};
527MODULE_DEVICE_TABLE(of, qcs404_noc_of_match);
528
529static struct platform_driver qcs404_noc_driver = {
530 .probe = qnoc_probe,
531 .remove = qnoc_remove,
532 .driver = {
533 .name = "qnoc-qcs404",
534 .of_match_table = qcs404_noc_of_match,
535 },
536};
537module_platform_driver(qcs404_noc_driver);
538MODULE_DESCRIPTION("Qualcomm QCS404 NoC driver");
539MODULE_LICENSE("GPL v2");
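
qcom_icc_set() above turns bandwidth votes into a bus clock rate: it sums the average votes across the provider's nodes, takes the largest peak vote, converts kBps to bytes per second (icc_units_to_bps() multiplies by 1000), and divides by the node's bus width in bytes. A self-contained sketch of that arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long agg_avg = 800000;	/* summed average votes, kBps */
	unsigned long long agg_peak = 1200000;	/* largest peak vote, kBps */
	unsigned int buswidth = 8;		/* bytes moved per clock cycle */

	unsigned long long sum_bw = agg_avg * 1000;	/* icc_units_to_bps() */
	unsigned long long peak_bw = agg_peak * 1000;
	unsigned long long rate =
		(sum_bw > peak_bw ? sum_bw : peak_bw) / buswidth;

	/* 1200000 kBps / 8 bytes-per-cycle -> 150000000 Hz (150 MHz) */
	printf("rate = %llu Hz\n", rate);
	return 0;
}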
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 4915b78da673..57955596bb59 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
4 * 4 *
5 */ 5 */
6 6
@@ -20,23 +20,6 @@
20#include <soc/qcom/rpmh.h> 20#include <soc/qcom/rpmh.h>
21#include <soc/qcom/tcs.h> 21#include <soc/qcom/tcs.h>
22 22
23#define BCM_TCS_CMD_COMMIT_SHFT 30
24#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
25#define BCM_TCS_CMD_VALID_SHFT 29
26#define BCM_TCS_CMD_VALID_MASK 0x20000000
27#define BCM_TCS_CMD_VOTE_X_SHFT 14
28#define BCM_TCS_CMD_VOTE_MASK 0x3fff
29#define BCM_TCS_CMD_VOTE_Y_SHFT 0
30#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
31
32#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
33 (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
34 ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
35 ((cpu_to_le32(vote_x) & \
36 BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
37 ((cpu_to_le32(vote_y) & \
38 BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
39
40#define to_qcom_provider(_provider) \ 23#define to_qcom_provider(_provider) \
41 container_of(_provider, struct qcom_icc_provider, provider) 24 container_of(_provider, struct qcom_icc_provider, provider)
42 25
@@ -66,6 +49,22 @@ struct bcm_db {
66#define SDM845_MAX_BCM_PER_NODE 2 49#define SDM845_MAX_BCM_PER_NODE 2
67#define SDM845_MAX_VCD 10 50#define SDM845_MAX_VCD 10
68 51
52/*
53 * The AMC bucket denotes constraints that are applied to hardware when
54 * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
55 * when the execution environment transitions between active and low power mode.
56 */
57#define QCOM_ICC_BUCKET_AMC 0
58#define QCOM_ICC_BUCKET_WAKE 1
59#define QCOM_ICC_BUCKET_SLEEP 2
60#define QCOM_ICC_NUM_BUCKETS 3
61#define QCOM_ICC_TAG_AMC BIT(QCOM_ICC_BUCKET_AMC)
62#define QCOM_ICC_TAG_WAKE BIT(QCOM_ICC_BUCKET_WAKE)
63#define QCOM_ICC_TAG_SLEEP BIT(QCOM_ICC_BUCKET_SLEEP)
64#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
65#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
66 QCOM_ICC_TAG_SLEEP)
67
69/** 68/**
70 * struct qcom_icc_node - Qualcomm specific interconnect nodes 69 * struct qcom_icc_node - Qualcomm specific interconnect nodes
71 * @name: the node name used in debugfs 70 * @name: the node name used in debugfs
@@ -86,8 +85,8 @@ struct qcom_icc_node {
86 u16 num_links; 85 u16 num_links;
87 u16 channels; 86 u16 channels;
88 u16 buswidth; 87 u16 buswidth;
89 u64 sum_avg; 88 u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
90 u64 max_peak; 89 u64 max_peak[QCOM_ICC_NUM_BUCKETS];
91 struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE]; 90 struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
92 size_t num_bcms; 91 size_t num_bcms;
93}; 92};
@@ -112,8 +111,8 @@ struct qcom_icc_bcm {
112 const char *name; 111 const char *name;
113 u32 type; 112 u32 type;
114 u32 addr; 113 u32 addr;
115 u64 vote_x; 114 u64 vote_x[QCOM_ICC_NUM_BUCKETS];
116 u64 vote_y; 115 u64 vote_y[QCOM_ICC_NUM_BUCKETS];
117 bool dirty; 116 bool dirty;
118 bool keepalive; 117 bool keepalive;
119 struct bcm_db aux_data; 118 struct bcm_db aux_data;
@@ -555,7 +554,7 @@ inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
555 cmd->wait = true; 554 cmd->wait = true;
556} 555}
557 556
558static void tcs_list_gen(struct list_head *bcm_list, 557static void tcs_list_gen(struct list_head *bcm_list, int bucket,
559 struct tcs_cmd tcs_list[SDM845_MAX_VCD], 558 struct tcs_cmd tcs_list[SDM845_MAX_VCD],
560 int n[SDM845_MAX_VCD]) 559 int n[SDM845_MAX_VCD])
561{ 560{
@@ -573,8 +572,8 @@ static void tcs_list_gen(struct list_head *bcm_list,
573 commit = true; 572 commit = true;
574 cur_vcd_size = 0; 573 cur_vcd_size = 0;
575 } 574 }
576 tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y, 575 tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
577 bcm->addr, commit); 576 bcm->vote_y[bucket], bcm->addr, commit);
578 idx++; 577 idx++;
579 n[batch]++; 578 n[batch]++;
580 /* 579 /*
@@ -595,38 +594,56 @@ static void tcs_list_gen(struct list_head *bcm_list,
595 594
596static void bcm_aggregate(struct qcom_icc_bcm *bcm) 595static void bcm_aggregate(struct qcom_icc_bcm *bcm)
597{ 596{
598 size_t i; 597 size_t i, bucket;
599 u64 agg_avg = 0; 598 u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
600 u64 agg_peak = 0; 599 u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
601 u64 temp; 600 u64 temp;
602 601
603 for (i = 0; i < bcm->num_nodes; i++) { 602 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
604 temp = bcm->nodes[i]->sum_avg * bcm->aux_data.width; 603 for (i = 0; i < bcm->num_nodes; i++) {
605 do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels); 604 temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
606 agg_avg = max(agg_avg, temp); 605 do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
606 agg_avg[bucket] = max(agg_avg[bucket], temp);
607 607
608 temp = bcm->nodes[i]->max_peak * bcm->aux_data.width; 608 temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
609 do_div(temp, bcm->nodes[i]->buswidth); 609 do_div(temp, bcm->nodes[i]->buswidth);
610 agg_peak = max(agg_peak, temp); 610 agg_peak[bucket] = max(agg_peak[bucket], temp);
611 } 611 }
612 612
613 temp = agg_avg * 1000ULL; 613 temp = agg_avg[bucket] * 1000ULL;
614 do_div(temp, bcm->aux_data.unit); 614 do_div(temp, bcm->aux_data.unit);
615 bcm->vote_x = temp; 615 bcm->vote_x[bucket] = temp;
616 616
617 temp = agg_peak * 1000ULL; 617 temp = agg_peak[bucket] * 1000ULL;
618 do_div(temp, bcm->aux_data.unit); 618 do_div(temp, bcm->aux_data.unit);
619 bcm->vote_y = temp; 619 bcm->vote_y[bucket] = temp;
620 }
620 621
621 if (bcm->keepalive && bcm->vote_x == 0 && bcm->vote_y == 0) { 622 if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
622 bcm->vote_x = 1; 623 bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
623 bcm->vote_y = 1; 624 bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
625 bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
626 bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
627 bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
624 } 628 }
625 629
626 bcm->dirty = false; 630 bcm->dirty = false;
627} 631}
628 632
629static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw, 633static void qcom_icc_pre_aggregate(struct icc_node *node)
634{
635 size_t i;
636 struct qcom_icc_node *qn;
637
638 qn = node->data;
639
640 for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
641 qn->sum_avg[i] = 0;
642 qn->max_peak[i] = 0;
643 }
644}
645
646static int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
630 u32 peak_bw, u32 *agg_avg, u32 *agg_peak) 647 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
631{ 648{
632 size_t i; 649 size_t i;
@@ -634,12 +651,19 @@ static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
634 651
635 qn = node->data; 652 qn = node->data;
636 653
654 if (!tag)
655 tag = QCOM_ICC_TAG_ALWAYS;
656
657 for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
658 if (tag & BIT(i)) {
659 qn->sum_avg[i] += avg_bw;
660 qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
661 }
662 }
663
637 *agg_avg += avg_bw; 664 *agg_avg += avg_bw;
638 *agg_peak = max_t(u32, *agg_peak, peak_bw); 665 *agg_peak = max_t(u32, *agg_peak, peak_bw);
639 666
640 qn->sum_avg = *agg_avg;
641 qn->max_peak = *agg_peak;
642
643 for (i = 0; i < qn->num_bcms; i++) 667 for (i = 0; i < qn->num_bcms; i++)
644 qn->bcms[i]->dirty = true; 668 qn->bcms[i]->dirty = true;
645 669
@@ -675,7 +699,7 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
675 * Construct the command list based on a pre ordered list of BCMs 699 * Construct the command list based on a pre ordered list of BCMs
676 * based on VCD. 700 * based on VCD.
677 */ 701 */
678 tcs_list_gen(&commit_list, cmds, commit_idx); 702 tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
679 703
680 if (!commit_idx[0]) 704 if (!commit_idx[0])
681 return ret; 705 return ret;
@@ -693,6 +717,41 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
693 return ret; 717 return ret;
694 } 718 }
695 719
720 INIT_LIST_HEAD(&commit_list);
721
722 for (i = 0; i < qp->num_bcms; i++) {
723 /*
724 * Only generate WAKE and SLEEP commands if a resource's
725 * requirements change as the execution environment transitions
726 * between different power states.
727 */
728 if (qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_WAKE] !=
729 qp->bcms[i]->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
730 qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_WAKE] !=
731 qp->bcms[i]->vote_y[QCOM_ICC_BUCKET_SLEEP]) {
732 list_add_tail(&qp->bcms[i]->list, &commit_list);
733 }
734 }
735
736 if (list_empty(&commit_list))
737 return ret;
738
739 tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
740
741 ret = rpmh_write_batch(qp->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
742 if (ret) {
743 pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
744 return ret;
745 }
746
747 tcs_list_gen(&commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
748
749 ret = rpmh_write_batch(qp->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
750 if (ret) {
751 pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
752 return ret;
753 }
754
696 return ret; 755 return ret;
697} 756}
698 757
@@ -738,6 +797,7 @@ static int qnoc_probe(struct platform_device *pdev)
738 provider = &qp->provider; 797 provider = &qp->provider;
739 provider->dev = &pdev->dev; 798 provider->dev = &pdev->dev;
740 provider->set = qcom_icc_set; 799 provider->set = qcom_icc_set;
800 provider->pre_aggregate = qcom_icc_pre_aggregate;
741 provider->aggregate = qcom_icc_aggregate; 801 provider->aggregate = qcom_icc_aggregate;
742 provider->xlate = of_icc_xlate_onecell; 802 provider->xlate = of_icc_xlate_onecell;
743 INIT_LIST_HEAD(&provider->nodes); 803 INIT_LIST_HEAD(&provider->nodes);
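
The sdm845 rework above fans each request out into per-bucket sums: the tag is a bitmask of buckets, an untagged request defaults to QCOM_ICC_TAG_ALWAYS, and the WAKE and SLEEP command lists are only generated when those two buckets disagree. A standalone model of the fan-out, with names abbreviated for illustration only:

#include <stdint.h>
#include <stdio.h>

enum { BUCKET_AMC, BUCKET_WAKE, BUCKET_SLEEP, NUM_BUCKETS };

#define TAG_ALWAYS	((1u << BUCKET_AMC) | (1u << BUCKET_WAKE) | \
			 (1u << BUCKET_SLEEP))
#define TAG_ACTIVE_ONLY	((1u << BUCKET_AMC) | (1u << BUCKET_WAKE))

static void aggregate(uint64_t sum_avg[NUM_BUCKETS], uint32_t tag,
		      uint32_t avg_bw)
{
	if (!tag)
		tag = TAG_ALWAYS;	/* untagged requests vote everywhere */

	for (int i = 0; i < NUM_BUCKETS; i++)
		if (tag & (1u << i))
			sum_avg[i] += avg_bw;
}

int main(void)
{
	uint64_t sum_avg[NUM_BUCKETS] = {0};

	aggregate(sum_avg, TAG_ACTIVE_ONLY, 100);	/* dropped in sleep */
	aggregate(sum_avg, 0, 50);			/* kept in all states */

	printf("AMC=%llu WAKE=%llu SLEEP=%llu\n",
	       (unsigned long long)sum_avg[BUCKET_AMC],
	       (unsigned long long)sum_avg[BUCKET_WAKE],
	       (unsigned long long)sum_avg[BUCKET_SLEEP]);
	return 0;
}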
diff --git a/drivers/interconnect/qcom/smd-rpm.c b/drivers/interconnect/qcom/smd-rpm.c
new file mode 100644
index 000000000000..dc8ff8d133a9
--- /dev/null
+++ b/drivers/interconnect/qcom/smd-rpm.c
@@ -0,0 +1,77 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * RPM over SMD communication wrapper for interconnects
4 *
5 * Copyright (C) 2019 Linaro Ltd
6 * Author: Georgi Djakov <georgi.djakov@linaro.org>
7 */
8
9#include <linux/interconnect-provider.h>
10#include <linux/module.h>
11#include <linux/of.h>
12#include <linux/of_platform.h>
13#include <linux/platform_device.h>
14#include <linux/soc/qcom/smd-rpm.h>
15
16#include "smd-rpm.h"
17
18#define RPM_KEY_BW 0x00007762
19
20static struct qcom_smd_rpm *icc_smd_rpm;
21
22struct icc_rpm_smd_req {
23 __le32 key;
24 __le32 nbytes;
25 __le32 value;
26};
27
28bool qcom_icc_rpm_smd_available(void)
29{
30 return !!icc_smd_rpm;
31}
32EXPORT_SYMBOL_GPL(qcom_icc_rpm_smd_available);
33
34int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val)
35{
36 struct icc_rpm_smd_req req = {
37 .key = cpu_to_le32(RPM_KEY_BW),
38 .nbytes = cpu_to_le32(sizeof(u32)),
39 .value = cpu_to_le32(val),
40 };
41
42 return qcom_rpm_smd_write(icc_smd_rpm, ctx, rsc_type, id, &req,
43 sizeof(req));
44}
45EXPORT_SYMBOL_GPL(qcom_icc_rpm_smd_send);
46
47static int qcom_icc_rpm_smd_remove(struct platform_device *pdev)
48{
49 icc_smd_rpm = NULL;
50
51 return 0;
52}
53
54static int qcom_icc_rpm_smd_probe(struct platform_device *pdev)
55{
56 icc_smd_rpm = dev_get_drvdata(pdev->dev.parent);
57
58 if (!icc_smd_rpm) {
59 dev_err(&pdev->dev, "unable to retrieve handle to RPM\n");
60 return -ENODEV;
61 }
62
63 return 0;
64}
65
66static struct platform_driver qcom_interconnect_rpm_smd_driver = {
67 .driver = {
68 .name = "icc_smd_rpm",
69 },
70 .probe = qcom_icc_rpm_smd_probe,
71 .remove = qcom_icc_rpm_smd_remove,
72};
73module_platform_driver(qcom_interconnect_rpm_smd_driver);
74MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
75MODULE_DESCRIPTION("Qualcomm SMD RPM interconnect proxy driver");
76MODULE_LICENSE("GPL v2");
77MODULE_ALIAS("platform:icc_smd_rpm");
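
The magic numbers in this file and in qcs404.c above are little-endian ASCII tags rather than arbitrary constants, which this small decoder makes visible (it assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void decode(uint32_t v)
{
	char s[5] = {0};

	memcpy(s, &v, 4);	/* byte order matches a little-endian host */
	printf("0x%08x -> \"%s\"\n", v, s);
}

int main(void)
{
	decode(0x00007762);	/* RPM_KEY_BW         -> "bw"   */
	decode(0x73616d62);	/* RPM_BUS_MASTER_REQ -> "bmas" */
	decode(0x766c7362);	/* RPM_BUS_SLAVE_REQ  -> "bslv" */
	return 0;
}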
diff --git a/drivers/interconnect/qcom/smd-rpm.h b/drivers/interconnect/qcom/smd-rpm.h
new file mode 100644
index 000000000000..ca9d0327b8ac
--- /dev/null
+++ b/drivers/interconnect/qcom/smd-rpm.h
@@ -0,0 +1,15 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2019, Linaro Ltd.
4 * Author: Georgi Djakov <georgi.djakov@linaro.org>
5 */
6
7#ifndef __DRIVERS_INTERCONNECT_QCOM_SMD_RPM_H
8#define __DRIVERS_INTERCONNECT_QCOM_SMD_RPM_H
9
10#include <linux/soc/qcom/smd-rpm.h>
11
12bool qcom_icc_rpm_smd_available(void);
13int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val);
14
15#endif
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 45b61b8d8442..c55b63750757 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -362,15 +362,6 @@ config DS1682
362 This driver can also be built as a module. If so, the module 362 This driver can also be built as a module. If so, the module
363 will be called ds1682. 363 will be called ds1682.
364 364
365config SPEAR13XX_PCIE_GADGET
366 bool "PCIe gadget support for SPEAr13XX platform"
367 depends on ARCH_SPEAR13XX && BROKEN
368 help
369 This option enables gadget support for PCIe controller. If
370 board file defines any controller as PCIe endpoint then a sysfs
371 entry will be created for that controller. User can use these
372 sysfs node to configure PCIe EP as per his requirements.
373
374config VMWARE_BALLOON 365config VMWARE_BALLOON
375 tristate "VMware Balloon Driver" 366 tristate "VMware Balloon Driver"
376 depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST 367 depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8dae0a976200..c1860d35dc7e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_C2PORT) += c2port/
36obj-$(CONFIG_HMC6352) += hmc6352.o 36obj-$(CONFIG_HMC6352) += hmc6352.o
37obj-y += eeprom/ 37obj-y += eeprom/
38obj-y += cb710/ 38obj-y += cb710/
39obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
40obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o 39obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
41obj-$(CONFIG_PCH_PHUB) += pch_phub.o 40obj-$(CONFIG_PCH_PHUB) += pch_phub.o
42obj-y += ti-st/ 41obj-y += ti-st/
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index bcb10fa4bc3a..259fe1dfec03 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -334,8 +334,7 @@ static void alcor_pci_remove(struct pci_dev *pdev)
334#ifdef CONFIG_PM_SLEEP 334#ifdef CONFIG_PM_SLEEP
335static int alcor_suspend(struct device *dev) 335static int alcor_suspend(struct device *dev)
336{ 336{
337 struct pci_dev *pdev = to_pci_dev(dev); 337 struct alcor_pci_priv *priv = dev_get_drvdata(dev);
338 struct alcor_pci_priv *priv = pci_get_drvdata(pdev);
339 338
340 alcor_pci_aspm_ctrl(priv, 1); 339 alcor_pci_aspm_ctrl(priv, 1);
341 return 0; 340 return 0;
@@ -344,8 +343,7 @@ static int alcor_suspend(struct device *dev)
344static int alcor_resume(struct device *dev) 343static int alcor_resume(struct device *dev)
345{ 344{
346 345
347 struct pci_dev *pdev = to_pci_dev(dev); 346 struct alcor_pci_priv *priv = dev_get_drvdata(dev);
348 struct alcor_pci_priv *priv = pci_get_drvdata(pdev);
349 347
350 alcor_pci_aspm_ctrl(priv, 0); 348 alcor_pci_aspm_ctrl(priv, 0);
351 return 0; 349 return 0;
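
The alcor_pci simplification above relies on pci_get_drvdata() being a thin wrapper over the generic device drvdata, so the PM callbacks can skip the to_pci_dev() round-trip. A one-function sketch of the equivalence (alcor_drvdata() is a hypothetical helper name):

#include <linux/pci.h>

static inline void *alcor_drvdata(struct pci_dev *pdev)
{
	/* same result as pci_get_drvdata(pdev) */
	return dev_get_drvdata(&pdev->dev);
}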
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index f2abe27010ef..0f791bfdc1f5 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -45,13 +45,16 @@ config EEPROM_AT25
45 will be called at25. 45 will be called at25.
46 46
47config EEPROM_LEGACY 47config EEPROM_LEGACY
48 tristate "Old I2C EEPROM reader" 48 tristate "Old I2C EEPROM reader (DEPRECATED)"
49 depends on I2C && SYSFS 49 depends on I2C && SYSFS
50 help 50 help
51 If you say yes here you get read-only access to the EEPROM data 51 If you say yes here you get read-only access to the EEPROM data
52 available on modern memory DIMMs and Sony Vaio laptops via I2C. Such 52 available on modern memory DIMMs and Sony Vaio laptops via I2C. Such
53 EEPROMs could theoretically be available on other devices as well. 53 EEPROMs could theoretically be available on other devices as well.
54 54
55 This driver is deprecated and will be removed soon; please use the
56 better at24 driver instead.
57
55 This driver can also be built as a module. If so, the module 58 This driver can also be built as a module. If so, the module
56 will be called eeprom. 59 will be called eeprom.
57 60
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 6f00c33cfe22..b081c67416d7 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -195,13 +195,13 @@ static int ee1004_probe(struct i2c_client *client,
195 mutex_lock(&ee1004_bus_lock); 195 mutex_lock(&ee1004_bus_lock);
196 if (++ee1004_dev_count == 1) { 196 if (++ee1004_dev_count == 1) {
197 for (cnr = 0; cnr < 2; cnr++) { 197 for (cnr = 0; cnr < 2; cnr++) {
198 ee1004_set_page[cnr] = i2c_new_dummy(client->adapter, 198 ee1004_set_page[cnr] = i2c_new_dummy_device(client->adapter,
199 EE1004_ADDR_SET_PAGE + cnr); 199 EE1004_ADDR_SET_PAGE + cnr);
200 if (!ee1004_set_page[cnr]) { 200 if (IS_ERR(ee1004_set_page[cnr])) {
201 dev_err(&client->dev, 201 dev_err(&client->dev,
202 "address 0x%02x unavailable\n", 202 "address 0x%02x unavailable\n",
203 EE1004_ADDR_SET_PAGE + cnr); 203 EE1004_ADDR_SET_PAGE + cnr);
204 err = -EADDRINUSE; 204 err = PTR_ERR(ee1004_set_page[cnr]);
205 goto err_clients; 205 goto err_clients;
206 } 206 }
207 } 207 }
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 4d0cb90f4aeb..9da81f6d4a1c 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -150,9 +150,9 @@ static int max6875_probe(struct i2c_client *client,
150 return -ENOMEM; 150 return -ENOMEM;
151 151
152 /* A fake client is created on the odd address */ 152 /* A fake client is created on the odd address */
153 data->fake_client = i2c_new_dummy(client->adapter, client->addr + 1); 153 data->fake_client = i2c_new_dummy_device(client->adapter, client->addr + 1);
154 if (!data->fake_client) { 154 if (IS_ERR(data->fake_client)) {
155 err = -ENOMEM; 155 err = PTR_ERR(data->fake_client);
156 goto exit_kfree; 156 goto exit_kfree;
157 } 157 }
158 158
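
Both EEPROM conversions above follow the same pattern: i2c_new_dummy() returned NULL on failure, while i2c_new_dummy_device() returns an ERR_PTR, so callers switch to IS_ERR()/PTR_ERR() and propagate the real errno instead of a guessed one. A sketch of the pattern; attach_shadow_client() is a hypothetical helper:

#include <linux/err.h>
#include <linux/i2c.h>

static int attach_shadow_client(struct i2c_client *client,
				struct i2c_client **out)
{
	struct i2c_client *dummy;

	/* New API: ERR_PTR(-errno) on failure instead of NULL. */
	dummy = i2c_new_dummy_device(client->adapter, client->addr + 1);
	if (IS_ERR(dummy))
		return PTR_ERR(dummy);	/* report the real reason */

	*out = dummy;
	return 0;
}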
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 98603e235cf0..47ae84afac2e 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -33,7 +33,6 @@
33#define FASTRPC_INIT_HANDLE 1 33#define FASTRPC_INIT_HANDLE 1
34#define FASTRPC_CTXID_MASK (0xFF0) 34#define FASTRPC_CTXID_MASK (0xFF0)
35#define INIT_FILELEN_MAX (64 * 1024 * 1024) 35#define INIT_FILELEN_MAX (64 * 1024 * 1024)
36#define INIT_MEMLEN_MAX (8 * 1024 * 1024)
37#define FASTRPC_DEVICE_NAME "fastrpc" 36#define FASTRPC_DEVICE_NAME "fastrpc"
38 37
39/* Retrieves number of input buffers from the scalars parameter */ 38/* Retrieves number of input buffers from the scalars parameter */
@@ -186,6 +185,7 @@ struct fastrpc_channel_ctx {
186 struct idr ctx_idr; 185 struct idr ctx_idr;
187 struct list_head users; 186 struct list_head users;
188 struct miscdevice miscdev; 187 struct miscdevice miscdev;
188 struct kref refcount;
189}; 189};
190 190
191struct fastrpc_user { 191struct fastrpc_user {
@@ -279,8 +279,11 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
279 279
280 buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, 280 buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
281 GFP_KERNEL); 281 GFP_KERNEL);
282 if (!buf->virt) 282 if (!buf->virt) {
283 mutex_destroy(&buf->lock);
284 kfree(buf);
283 return -ENOMEM; 285 return -ENOMEM;
286 }
284 287
285 if (fl->sctx && fl->sctx->sid) 288 if (fl->sctx && fl->sctx->sid)
286 buf->phys += ((u64)fl->sctx->sid << 32); 289 buf->phys += ((u64)fl->sctx->sid << 32);
@@ -290,6 +293,25 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
290 return 0; 293 return 0;
291} 294}
292 295
296static void fastrpc_channel_ctx_free(struct kref *ref)
297{
298 struct fastrpc_channel_ctx *cctx;
299
300 cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
301
302 kfree(cctx);
303}
304
305static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
306{
307 kref_get(&cctx->refcount);
308}
309
310static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
311{
312 kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
313}
314
293static void fastrpc_context_free(struct kref *ref) 315static void fastrpc_context_free(struct kref *ref)
294{ 316{
295 struct fastrpc_invoke_ctx *ctx; 317 struct fastrpc_invoke_ctx *ctx;
@@ -313,6 +335,8 @@ static void fastrpc_context_free(struct kref *ref)
313 kfree(ctx->maps); 335 kfree(ctx->maps);
314 kfree(ctx->olaps); 336 kfree(ctx->olaps);
315 kfree(ctx); 337 kfree(ctx);
338
339 fastrpc_channel_ctx_put(cctx);
316} 340}
317 341
318static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx) 342static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
@@ -419,6 +443,9 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
419 fastrpc_get_buff_overlaps(ctx); 443 fastrpc_get_buff_overlaps(ctx);
420 } 444 }
421 445
446 /* Released in fastrpc_context_put() */
447 fastrpc_channel_ctx_get(cctx);
448
422 ctx->sc = sc; 449 ctx->sc = sc;
423 ctx->retval = -1; 450 ctx->retval = -1;
424 ctx->pid = current->pid; 451 ctx->pid = current->pid;
@@ -448,6 +475,7 @@ err_idr:
448 spin_lock(&user->lock); 475 spin_lock(&user->lock);
449 list_del(&ctx->node); 476 list_del(&ctx->node);
450 spin_unlock(&user->lock); 477 spin_unlock(&user->lock);
478 fastrpc_channel_ctx_put(cctx);
451 kfree(ctx->maps); 479 kfree(ctx->maps);
452 kfree(ctx->olaps); 480 kfree(ctx->olaps);
453 kfree(ctx); 481 kfree(ctx);
@@ -522,6 +550,7 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
522 mutex_lock(&buffer->lock); 550 mutex_lock(&buffer->lock);
523 list_del(&a->node); 551 list_del(&a->node);
524 mutex_unlock(&buffer->lock); 552 mutex_unlock(&buffer->lock);
553 sg_free_table(&a->sgt);
525 kfree(a); 554 kfree(a);
526} 555}
527 556
@@ -884,6 +913,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
884 if (!fl->sctx) 913 if (!fl->sctx)
885 return -EINVAL; 914 return -EINVAL;
886 915
916 if (!fl->cctx->rpdev)
917 return -EPIPE;
918
887 ctx = fastrpc_context_alloc(fl, kernel, sc, args); 919 ctx = fastrpc_context_alloc(fl, kernel, sc, args);
888 if (IS_ERR(ctx)) 920 if (IS_ERR(ctx))
889 return PTR_ERR(ctx); 921 return PTR_ERR(ctx);
@@ -1120,6 +1152,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
1120 } 1152 }
1121 1153
1122 fastrpc_session_free(cctx, fl->sctx); 1154 fastrpc_session_free(cctx, fl->sctx);
1155 fastrpc_channel_ctx_put(cctx);
1123 1156
1124 mutex_destroy(&fl->mutex); 1157 mutex_destroy(&fl->mutex);
1125 kfree(fl); 1158 kfree(fl);
@@ -1138,6 +1171,9 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
1138 if (!fl) 1171 if (!fl)
1139 return -ENOMEM; 1172 return -ENOMEM;
1140 1173
1174 /* Released in fastrpc_device_release() */
1175 fastrpc_channel_ctx_get(cctx);
1176
1141 filp->private_data = fl; 1177 filp->private_data = fl;
1142 spin_lock_init(&fl->lock); 1178 spin_lock_init(&fl->lock);
1143 mutex_init(&fl->mutex); 1179 mutex_init(&fl->mutex);
@@ -1163,26 +1199,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
1163 return 0; 1199 return 0;
1164} 1200}
1165 1201
1166static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
1167{
1168 struct dma_buf *buf;
1169 int info;
1170
1171 if (copy_from_user(&info, argp, sizeof(info)))
1172 return -EFAULT;
1173
1174 buf = dma_buf_get(info);
1175 if (IS_ERR_OR_NULL(buf))
1176 return -EINVAL;
1177 /*
1178 * one for the last get and other for the ALLOC_DMA_BUFF ioctl
1179 */
1180 dma_buf_put(buf);
1181 dma_buf_put(buf);
1182
1183 return 0;
1184}
1185
1186static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) 1202static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
1187{ 1203{
1188 struct fastrpc_alloc_dma_buf bp; 1204 struct fastrpc_alloc_dma_buf bp;
@@ -1218,8 +1234,6 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
1218 return -EFAULT; 1234 return -EFAULT;
1219 } 1235 }
1220 1236
1221 get_dma_buf(buf->dmabuf);
1222
1223 return 0; 1237 return 0;
1224} 1238}
1225 1239
@@ -1287,9 +1301,6 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
1287 case FASTRPC_IOCTL_INIT_CREATE: 1301 case FASTRPC_IOCTL_INIT_CREATE:
1288 err = fastrpc_init_create_process(fl, argp); 1302 err = fastrpc_init_create_process(fl, argp);
1289 break; 1303 break;
1290 case FASTRPC_IOCTL_FREE_DMA_BUFF:
1291 err = fastrpc_dmabuf_free(fl, argp);
1292 break;
1293 case FASTRPC_IOCTL_ALLOC_DMA_BUFF: 1304 case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
1294 err = fastrpc_dmabuf_alloc(fl, argp); 1305 err = fastrpc_dmabuf_alloc(fl, argp);
1295 break; 1306 break;
@@ -1395,10 +1406,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
1395 int i, err, domain_id = -1; 1406 int i, err, domain_id = -1;
1396 const char *domain; 1407 const char *domain;
1397 1408
1398 data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL);
1399 if (!data)
1400 return -ENOMEM;
1401
1402 err = of_property_read_string(rdev->of_node, "label", &domain); 1409 err = of_property_read_string(rdev->of_node, "label", &domain);
1403 if (err) { 1410 if (err) {
1404 dev_info(rdev, "FastRPC Domain not specified in DT\n"); 1411 dev_info(rdev, "FastRPC Domain not specified in DT\n");
@@ -1417,6 +1424,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
1417 return -EINVAL; 1424 return -EINVAL;
1418 } 1425 }
1419 1426
1427 data = kzalloc(sizeof(*data), GFP_KERNEL);
1428 if (!data)
1429 return -ENOMEM;
1430
1420 data->miscdev.minor = MISC_DYNAMIC_MINOR; 1431 data->miscdev.minor = MISC_DYNAMIC_MINOR;
1421 data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s", 1432 data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
1422 domains[domain_id]); 1433 domains[domain_id]);
@@ -1425,6 +1436,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
1425 if (err) 1436 if (err)
1426 return err; 1437 return err;
1427 1438
1439 kref_init(&data->refcount);
1440
1428 dev_set_drvdata(&rpdev->dev, data); 1441 dev_set_drvdata(&rpdev->dev, data);
1429 dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32)); 1442 dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
1430 INIT_LIST_HEAD(&data->users); 1443 INIT_LIST_HEAD(&data->users);
@@ -1459,7 +1472,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
1459 1472
1460 misc_deregister(&cctx->miscdev); 1473 misc_deregister(&cctx->miscdev);
1461 of_platform_depopulate(&rpdev->dev); 1474 of_platform_depopulate(&rpdev->dev);
1462 kfree(cctx); 1475
1476 cctx->rpdev = NULL;
1477 fastrpc_channel_ctx_put(cctx);
1463} 1478}
1464 1479
1465static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data, 1480static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
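
The fastrpc changes above make the channel context reference-counted: the rpmsg probe, every open file, and every in-flight invoke context each hold a reference; remove() clears rpdev so new invocations fail with -EPIPE, and the last put frees the structure. The generic shape of that pattern, as a hedged sketch rather than the driver code:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct channel {
	struct kref refcount;
	void *rpdev;	/* cleared on remove; users test it before sending */
};

static void channel_free(struct kref *ref)
{
	kfree(container_of(ref, struct channel, refcount));
}

/* Every long-lived user (open fd, in-flight ctx) takes a reference... */
static void channel_get(struct channel *c)
{
	kref_get(&c->refcount);
}

/* ...and the object is freed only when the last user drops its reference. */
static void channel_put(struct channel *c)
{
	kref_put(&c->refcount, channel_free);
}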
diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/asid.c
index 2c01461701a3..a2fdf31cf27c 100644
--- a/drivers/misc/habanalabs/asid.c
+++ b/drivers/misc/habanalabs/asid.c
@@ -18,7 +18,7 @@ int hl_asid_init(struct hl_device *hdev)
18 18
19 mutex_init(&hdev->asid_mutex); 19 mutex_init(&hdev->asid_mutex);
20 20
21 /* ASID 0 is reserved for KMD and device CPU */ 21 /* ASID 0 is reserved for the kernel driver and device CPU */
22 set_bit(0, hdev->asid_bitmap); 22 set_bit(0, hdev->asid_bitmap);
23 23
24 return 0; 24 return 0;
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
index e495f44064fa..53fddbd8e693 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -397,7 +397,8 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
397 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle, 397 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
398 HL_KERNEL_ASID_ID); 398 HL_KERNEL_ASID_ID);
399 if (rc) { 399 if (rc) {
400 dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc); 400 dev_err(hdev->dev,
401 "Failed to allocate CB for the kernel driver %d\n", rc);
401 return NULL; 402 return NULL;
402 } 403 }
403 404
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index f00d1c32f6d6..a9ac045dcfde 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -178,11 +178,23 @@ static void cs_do_release(struct kref *ref)
178 178
179 /* We also need to update CI for internal queues */ 179 /* We also need to update CI for internal queues */
180 if (cs->submitted) { 180 if (cs->submitted) {
181 int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt); 181 hdev->asic_funcs->hw_queues_lock(hdev);
182 182
183 WARN_ONCE((cs_cnt < 0), 183 hdev->cs_active_cnt--;
184 "hl%d: error in CS active cnt %d\n", 184 if (!hdev->cs_active_cnt) {
185 hdev->id, cs_cnt); 185 struct hl_device_idle_busy_ts *ts;
186
187 ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
188 ts->busy_to_idle_ts = ktime_get();
189
190 if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
191 hdev->idle_busy_ts_idx = 0;
192 } else if (hdev->cs_active_cnt < 0) {
193 dev_crit(hdev->dev, "CS active cnt %d is negative\n",
194 hdev->cs_active_cnt);
195 }
196
197 hdev->asic_funcs->hw_queues_unlock(hdev);
186 198
187 hl_int_hw_queue_update_ci(cs); 199 hl_int_hw_queue_update_ci(cs);
188 200
@@ -305,6 +317,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
305 other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)]; 317 other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
306 if ((other) && (!dma_fence_is_signaled(other))) { 318 if ((other) && (!dma_fence_is_signaled(other))) {
307 spin_unlock(&ctx->cs_lock); 319 spin_unlock(&ctx->cs_lock);
320 dev_dbg(hdev->dev,
321 "Rejecting CS because of too many in-flights CS\n");
308 rc = -EAGAIN; 322 rc = -EAGAIN;
309 goto free_fence; 323 goto free_fence;
310 } 324 }
@@ -395,8 +409,9 @@ static struct hl_cb *validate_queue_index(struct hl_device *hdev,
395 return NULL; 409 return NULL;
396 } 410 }
397 411
398 if (hw_queue_prop->kmd_only) { 412 if (hw_queue_prop->driver_only) {
399 dev_err(hdev->dev, "Queue index %d is restricted for KMD\n", 413 dev_err(hdev->dev,
414 "Queue index %d is restricted for the kernel driver\n",
400 chunk->queue_index); 415 chunk->queue_index);
401 return NULL; 416 return NULL;
402 } else if (hw_queue_prop->type == QUEUE_TYPE_INT) { 417 } else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
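
The command_submission.c hunk replaces a WARN on a negative atomic counter with a counter updated under the queue lock, recording a timestamp in a small circular array each time the device goes busy-to-idle. A toy model of that ring, with sizes made up and the locking assumed to be held by the caller:

#include <stdio.h>
#include <time.h>

#define TS_ARR_SIZE 4	/* stands in for HL_IDLE_BUSY_TS_ARR_SIZE */

struct idle_busy_ts { time_t busy_to_idle_ts; };

static struct idle_busy_ts ts_arr[TS_ARR_SIZE];
static int ts_idx;
static int cs_active_cnt;

static void cs_release(void)
{
	/* Caller is assumed to hold the queue lock. */
	if (--cs_active_cnt == 0) {
		ts_arr[ts_idx++].busy_to_idle_ts = time(NULL);
		if (ts_idx == TS_ARR_SIZE)
			ts_idx = 0;	/* overwrite the oldest sample */
	} else if (cs_active_cnt < 0) {
		fprintf(stderr, "CS active cnt %d is negative\n",
			cs_active_cnt);
	}
}

int main(void)
{
	cs_active_cnt = 2;
	cs_release();
	cs_release();	/* second release records a busy->idle timestamp */
	printf("idle at %ld, next slot %d\n",
	       (long)ts_arr[0].busy_to_idle_ts, ts_idx);
	return 0;
}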
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 8682590e3f6e..17db7b3dfb4c 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -26,12 +26,13 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
26 dma_fence_put(ctx->cs_pending[i]); 26 dma_fence_put(ctx->cs_pending[i]);
27 27
28 if (ctx->asid != HL_KERNEL_ASID_ID) { 28 if (ctx->asid != HL_KERNEL_ASID_ID) {
29 /* 29 /* The engines are stopped as there is no executing CS, but the
30 * The engines are stopped as there is no executing CS, but the
31 * Coresight might be still working by accessing addresses 30 * Coresight might be still working by accessing addresses
32 * related to the stopped engines. Hence stop it explicitly. 31 * related to the stopped engines. Hence stop it explicitly.
32 * Stop only if this is the compute context, as there can be
33 * only one compute context
33 */ 34 */
34 if (hdev->in_debug) 35 if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
35 hl_device_set_debug_mode(hdev, false); 36 hl_device_set_debug_mode(hdev, false);
36 37
37 hl_vm_ctx_fini(ctx); 38 hl_vm_ctx_fini(ctx);
@@ -67,29 +68,36 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
67 goto out_err; 68 goto out_err;
68 } 69 }
69 70
71 mutex_lock(&mgr->ctx_lock);
72 rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
73 mutex_unlock(&mgr->ctx_lock);
74
75 if (rc < 0) {
76 dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
77 goto free_ctx;
78 }
79
80 ctx->handle = rc;
81
70 rc = hl_ctx_init(hdev, ctx, false); 82 rc = hl_ctx_init(hdev, ctx, false);
71 if (rc) 83 if (rc)
72 goto free_ctx; 84 goto remove_from_idr;
73 85
74 hl_hpriv_get(hpriv); 86 hl_hpriv_get(hpriv);
75 ctx->hpriv = hpriv; 87 ctx->hpriv = hpriv;
76 88
77 /* TODO: remove for multiple contexts */ 89 /* TODO: remove for multiple contexts per process */
78 hpriv->ctx = ctx; 90 hpriv->ctx = ctx;
79 hdev->user_ctx = ctx;
80 91
81 mutex_lock(&mgr->ctx_lock); 92 /* TODO: remove the following line for multiple process support */
82 rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); 93 hdev->compute_ctx = ctx;
83 mutex_unlock(&mgr->ctx_lock);
84
85 if (rc < 0) {
86 dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
87 hl_ctx_free(hdev, ctx);
88 goto out_err;
89 }
90 94
91 return 0; 95 return 0;
92 96
97remove_from_idr:
98 mutex_lock(&mgr->ctx_lock);
99 idr_remove(&mgr->ctx_handles, ctx->handle);
100 mutex_unlock(&mgr->ctx_lock);
93free_ctx: 101free_ctx:
94 kfree(ctx); 102 kfree(ctx);
95out_err: 103out_err:
@@ -120,7 +128,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
120 ctx->thread_ctx_switch_wait_token = 0; 128 ctx->thread_ctx_switch_wait_token = 0;
121 129
122 if (is_kernel_ctx) { 130 if (is_kernel_ctx) {
123 ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */ 131 ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
124 rc = hl_mmu_ctx_init(ctx); 132 rc = hl_mmu_ctx_init(ctx);
125 if (rc) { 133 if (rc) {
126 dev_err(hdev->dev, "Failed to init mmu ctx module\n"); 134 dev_err(hdev->dev, "Failed to init mmu ctx module\n");
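
The context.c reordering above allocates the IDR handle before hl_ctx_init() runs, so the error paths can unwind strictly in reverse order of acquisition. The general shape of that idiom, with stand-in helpers (allocate_id(), initialize_object(), release_id() are hypothetical):

static int allocate_id(void);
static int initialize_object(int id);
static void release_id(int id);

static int create_object(void)
{
	int id, rc;

	id = allocate_id();		/* step 1: reserve the handle */
	if (id < 0)
		return id;

	rc = initialize_object(id);	/* step 2: init knowing the handle */
	if (rc)
		goto remove_id;		/* undo step 1 only */

	return 0;

remove_id:
	release_id(id);
	return rc;
}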
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 18e499c900c7..87f37ac31ccd 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -29,7 +29,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
 
 	memset(&pkt, 0, sizeof(pkt));
 
-	pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_RD <<
+	pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_RD <<
 				ARMCP_PKT_CTL_OPCODE_SHIFT);
 	pkt.i2c_bus = i2c_bus;
 	pkt.i2c_addr = i2c_addr;
@@ -55,12 +55,12 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
 
 	memset(&pkt, 0, sizeof(pkt));
 
-	pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_WR <<
+	pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_WR <<
 				ARMCP_PKT_CTL_OPCODE_SHIFT);
 	pkt.i2c_bus = i2c_bus;
 	pkt.i2c_addr = i2c_addr;
 	pkt.i2c_reg = i2c_reg;
-	pkt.value = __cpu_to_le64(val);
+	pkt.value = cpu_to_le64(val);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 						HL_DEVICE_TIMEOUT_USEC, NULL);
@@ -81,10 +81,10 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
 
 	memset(&pkt, 0, sizeof(pkt));
 
-	pkt.ctl = __cpu_to_le32(ARMCP_PACKET_LED_SET <<
+	pkt.ctl = cpu_to_le32(ARMCP_PACKET_LED_SET <<
 				ARMCP_PKT_CTL_OPCODE_SHIFT);
-	pkt.led_index = __cpu_to_le32(led);
-	pkt.value = __cpu_to_le64(state);
+	pkt.led_index = cpu_to_le32(led);
+	pkt.value = cpu_to_le64(state);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 						HL_DEVICE_TIMEOUT_USEC, NULL);
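These hunks swap the double-underscore byte-order helpers for the plain ones. The two are functionally identical, but cpu_to_le32()/cpu_to_le64() are the interfaces kernel code is expected to use, and they work with the sparse __le32/__le64 annotations so endianness mistakes are caught by static checking. A kernel-style sketch (the struct name is hypothetical):

struct my_pkt {
	__le32 ctl;	/* sparse-annotated little-endian fields */
	__le64 value;
};

static void my_pkt_fill(struct my_pkt *p, u32 ctl, u64 val)
{
	p->ctl = cpu_to_le32(ctl);	/* u32 -> __le32, type-checked */
	p->value = cpu_to_le64(val);	/* u64 -> __le64 */
}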
@@ -370,7 +370,7 @@ static int mmu_show(struct seq_file *s, void *data)
 	if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
 		ctx = hdev->kernel_ctx;
 	else
-		ctx = hdev->user_ctx;
+		ctx = hdev->compute_ctx;
 
 	if (!ctx) {
 		dev_err(hdev->dev, "no ctx available\n");
@@ -533,7 +533,7 @@ out:
 static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
 				u64 *phys_addr)
 {
-	struct hl_ctx *ctx = hdev->user_ctx;
+	struct hl_ctx *ctx = hdev->compute_ctx;
 	u64 hop_addr, hop_pte_addr, hop_pte;
 	u64 offset_mask = HOP4_MASK | OFFSET_MASK;
 	int rc = 0;
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 7a8f9d0b71b5..459fee70a597 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -42,10 +42,12 @@ static void hpriv_release(struct kref *ref)
 {
 	struct hl_fpriv *hpriv;
 	struct hl_device *hdev;
+	struct hl_ctx *ctx;
 
 	hpriv = container_of(ref, struct hl_fpriv, refcount);
 
 	hdev = hpriv->hdev;
+	ctx = hpriv->ctx;
 
 	put_pid(hpriv->taskpid);
 
@@ -53,13 +55,12 @@ static void hpriv_release(struct kref *ref)
 
 	mutex_destroy(&hpriv->restore_phase_mutex);
 
-	kfree(hpriv);
-
-	/* Now the FD is really closed */
-	atomic_dec(&hdev->fd_open_cnt);
+	mutex_lock(&hdev->fpriv_list_lock);
+	list_del(&hpriv->dev_node);
+	hdev->compute_ctx = NULL;
+	mutex_unlock(&hdev->fpriv_list_lock);
 
-	/* This allows a new user context to open the device */
-	hdev->user_ctx = NULL;
+	kfree(hpriv);
 }
 
 void hl_hpriv_get(struct hl_fpriv *hpriv)
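The release path now unlinks the file-private data from hdev->fpriv_list and clears compute_ctx inside one fpriv_list_lock critical section, replacing the old fd_open_cnt atomic counter; list emptiness becomes the "is the device open?" test used throughout the rest of this series. A compact userspace analogue of that ownership model (illustrative only):

#include <pthread.h>
#include <stdlib.h>

struct fpriv { struct fpriv *next; };

static struct fpriv *open_list;		/* analogue of hdev->fpriv_list */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void fpriv_release(struct fpriv *p)
{
	pthread_mutex_lock(&list_lock);
	for (struct fpriv **q = &open_list; *q; q = &(*q)->next)
		if (*q == p) {
			*q = p->next;	/* unlink under the same lock that */
			break;		/* readers use to test emptiness */
		}
	pthread_mutex_unlock(&list_lock);
	free(p);
}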
@@ -94,6 +95,24 @@ static int hl_device_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
+{
+	struct hl_fpriv *hpriv = filp->private_data;
+	struct hl_device *hdev;
+
+	filp->private_data = NULL;
+
+	hdev = hpriv->hdev;
+
+	mutex_lock(&hdev->fpriv_list_lock);
+	list_del(&hpriv->dev_node);
+	mutex_unlock(&hdev->fpriv_list_lock);
+
+	kfree(hpriv);
+
+	return 0;
+}
+
 /*
  * hl_mmap - mmap function for habanalabs device
  *
@@ -124,55 +143,102 @@ static const struct file_operations hl_ops = {
 	.compat_ioctl = hl_ioctl
 };
 
+static const struct file_operations hl_ctrl_ops = {
+	.owner = THIS_MODULE,
+	.open = hl_device_open_ctrl,
+	.release = hl_device_release_ctrl,
+	.unlocked_ioctl = hl_ioctl_control,
+	.compat_ioctl = hl_ioctl_control
+};
+
+static void device_release_func(struct device *dev)
+{
+	kfree(dev);
+}
+
 /*
- * device_setup_cdev - setup cdev and device for habanalabs device
+ * device_init_cdev - Initialize cdev and device for habanalabs device
  *
  * @hdev: pointer to habanalabs device structure
  * @hclass: pointer to the class object of the device
  * @minor: minor number of the specific device
- * @fpos : file operations to install for this device
+ * @fpos: file operations to install for this device
+ * @name: name of the device as it will appear in the filesystem
+ * @cdev: pointer to the char device object that will be initialized
+ * @dev: pointer to the device object that will be initialized
  *
- * Create a cdev and a Linux device for habanalabs's device. Need to be
- * called at the end of the habanalabs device initialization process,
- * because this function exposes the device to the user
+ * Initialize a cdev and a Linux device for habanalabs's device.
  */
-static int device_setup_cdev(struct hl_device *hdev, struct class *hclass,
-				int minor, const struct file_operations *fops)
+static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
+				int minor, const struct file_operations *fops,
+				char *name, struct cdev *cdev,
+				struct device **dev)
 {
-	int err, devno = MKDEV(hdev->major, minor);
-	struct cdev *hdev_cdev = &hdev->cdev;
-	char *name;
+	cdev_init(cdev, fops);
+	cdev->owner = THIS_MODULE;
 
-	name = kasprintf(GFP_KERNEL, "hl%d", hdev->id);
-	if (!name)
+	*dev = kzalloc(sizeof(**dev), GFP_KERNEL);
+	if (!*dev)
 		return -ENOMEM;
 
-	cdev_init(hdev_cdev, fops);
-	hdev_cdev->owner = THIS_MODULE;
-	err = cdev_add(hdev_cdev, devno, 1);
-	if (err) {
-		pr_err("Failed to add char device %s\n", name);
-		goto err_cdev_add;
+	device_initialize(*dev);
+	(*dev)->devt = MKDEV(hdev->major, minor);
+	(*dev)->class = hclass;
+	(*dev)->release = device_release_func;
+	dev_set_drvdata(*dev, hdev);
+	dev_set_name(*dev, "%s", name);
+
+	return 0;
+}
+
+static int device_cdev_sysfs_add(struct hl_device *hdev)
+{
+	int rc;
+
+	rc = cdev_device_add(&hdev->cdev, hdev->dev);
+	if (rc) {
+		dev_err(hdev->dev,
+			"failed to add a char device to the system\n");
+		return rc;
 	}
 
-	hdev->dev = device_create(hclass, NULL, devno, NULL, "%s", name);
-	if (IS_ERR(hdev->dev)) {
-		pr_err("Failed to create device %s\n", name);
-		err = PTR_ERR(hdev->dev);
-		goto err_device_create;
+	rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
+	if (rc) {
+		dev_err(hdev->dev,
+			"failed to add a control char device to the system\n");
+		goto delete_cdev_device;
 	}
 
-	dev_set_drvdata(hdev->dev, hdev);
+	/* hl_sysfs_init() must be done after adding the device to the system */
+	rc = hl_sysfs_init(hdev);
+	if (rc) {
+		dev_err(hdev->dev, "failed to initialize sysfs\n");
+		goto delete_ctrl_cdev_device;
+	}
 
-	kfree(name);
+	hdev->cdev_sysfs_created = true;
 
 	return 0;
 
-err_device_create:
-	cdev_del(hdev_cdev);
-err_cdev_add:
-	kfree(name);
-	return err;
+delete_ctrl_cdev_device:
+	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
+delete_cdev_device:
+	cdev_device_del(&hdev->cdev, hdev->dev);
+	return rc;
+}
+
+static void device_cdev_sysfs_del(struct hl_device *hdev)
+{
+	/* device_release() won't be called so must free devices explicitly */
+	if (!hdev->cdev_sysfs_created) {
+		kfree(hdev->dev_ctrl);
+		kfree(hdev->dev);
+		return;
+	}
+
+	hl_sysfs_fini(hdev);
+	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
+	cdev_device_del(&hdev->cdev, hdev->dev);
 }
 
 /*
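The rework splits construction from exposure: device_init_cdev() only prepares the cdev and a refcounted struct device, and the later cdev_device_add() call (a stock kernel helper that registers a cdev together with its struct device) is what actually publishes the node to user space. That is what lets hl_device_init() defer exposing the /dev nodes until the ASIC is fully initialized. A hedged kernel-style sketch of the split (struct my_dev is hypothetical):

static int my_dev_init(struct my_dev *d, const struct file_operations *fops)
{
	cdev_init(&d->cdev, fops);	/* prepare, but do not publish */
	d->cdev.owner = THIS_MODULE;
	device_initialize(&d->dev);	/* refcounted; ->release() frees */
	return 0;
}

static int my_dev_expose(struct my_dev *d)
{
	/* one call makes both the char device and its device node visible */
	return cdev_device_add(&d->cdev, &d->dev);
}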
@@ -227,20 +293,29 @@ static int device_early_init(struct hl_device *hdev)
 		goto free_eq_wq;
 	}
 
+	hdev->idle_busy_ts_arr = kmalloc_array(HL_IDLE_BUSY_TS_ARR_SIZE,
+					sizeof(struct hl_device_idle_busy_ts),
+					(GFP_KERNEL | __GFP_ZERO));
+	if (!hdev->idle_busy_ts_arr) {
+		rc = -ENOMEM;
+		goto free_chip_info;
+	}
+
 	hl_cb_mgr_init(&hdev->kernel_cb_mgr);
 
-	mutex_init(&hdev->fd_open_cnt_lock);
 	mutex_init(&hdev->send_cpu_message_lock);
 	mutex_init(&hdev->debug_lock);
 	mutex_init(&hdev->mmu_cache_lock);
 	INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
 	spin_lock_init(&hdev->hw_queues_mirror_lock);
+	INIT_LIST_HEAD(&hdev->fpriv_list);
+	mutex_init(&hdev->fpriv_list_lock);
 	atomic_set(&hdev->in_reset, 0);
-	atomic_set(&hdev->fd_open_cnt, 0);
-	atomic_set(&hdev->cs_active_cnt, 0);
 
 	return 0;
 
+free_chip_info:
+	kfree(hdev->hl_chip_info);
 free_eq_wq:
 	destroy_workqueue(hdev->eq_wq);
 free_cq_wq:
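The new allocation uses kmalloc_array() with __GFP_ZERO, which both zeroes the buffer and guards the count * size multiplication against overflow. kcalloc() is the idiomatic spelling of exactly that combination, so an equivalent form would be:

	hdev->idle_busy_ts_arr = kcalloc(HL_IDLE_BUSY_TS_ARR_SIZE,
					 sizeof(struct hl_device_idle_busy_ts),
					 GFP_KERNEL);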
@@ -266,8 +341,11 @@ static void device_early_fini(struct hl_device *hdev)
 	mutex_destroy(&hdev->debug_lock);
 	mutex_destroy(&hdev->send_cpu_message_lock);
 
+	mutex_destroy(&hdev->fpriv_list_lock);
+
 	hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
 
+	kfree(hdev->idle_busy_ts_arr);
 	kfree(hdev->hl_chip_info);
 
 	destroy_workqueue(hdev->eq_wq);
@@ -277,8 +355,6 @@ static void device_early_fini(struct hl_device *hdev)
 
 	if (hdev->asic_funcs->early_fini)
 		hdev->asic_funcs->early_fini(hdev);
-
-	mutex_destroy(&hdev->fd_open_cnt_lock);
 }
 
 static void set_freq_to_low_job(struct work_struct *work)
@@ -286,9 +362,13 @@ static void set_freq_to_low_job(struct work_struct *work)
 	struct hl_device *hdev = container_of(work, struct hl_device,
 						work_freq.work);
 
-	if (atomic_read(&hdev->fd_open_cnt) == 0)
+	mutex_lock(&hdev->fpriv_list_lock);
+
+	if (!hdev->compute_ctx)
 		hl_device_set_frequency(hdev, PLL_LOW);
 
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	schedule_delayed_work(&hdev->work_freq,
 			usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
 }
@@ -338,7 +418,7 @@ static int device_late_init(struct hl_device *hdev)
 	hdev->high_pll = hdev->asic_prop.high_pll;
 
 	/* force setting to low frequency */
-	atomic_set(&hdev->curr_pll_profile, PLL_LOW);
+	hdev->curr_pll_profile = PLL_LOW;
 
 	if (hdev->pm_mng_profile == PM_AUTO)
 		hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
@@ -381,44 +461,128 @@ static void device_late_fini(struct hl_device *hdev)
 	hdev->late_init_done = false;
 }
 
+uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms)
+{
+	struct hl_device_idle_busy_ts *ts;
+	ktime_t zero_ktime, curr = ktime_get();
+	u32 overlap_cnt = 0, last_index = hdev->idle_busy_ts_idx;
+	s64 period_us, last_start_us, last_end_us, last_busy_time_us,
+		total_busy_time_us = 0, total_busy_time_ms;
+
+	zero_ktime = ktime_set(0, 0);
+	period_us = period_ms * USEC_PER_MSEC;
+	ts = &hdev->idle_busy_ts_arr[last_index];
+
+	/* check case that device is currently in idle */
+	if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime) &&
+			!ktime_compare(ts->idle_to_busy_ts, zero_ktime)) {
+
+		last_index--;
+		/* Handle case idle_busy_ts_idx was 0 */
+		if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
+			last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
+
+		ts = &hdev->idle_busy_ts_arr[last_index];
+	}
+
+	while (overlap_cnt < HL_IDLE_BUSY_TS_ARR_SIZE) {
+		/* Check if we are in last sample case. i.e. if the sample
+		 * begun before the sampling period. This could be a real
+		 * sample or 0 so need to handle both cases
+		 */
+		last_start_us = ktime_to_us(
+				ktime_sub(curr, ts->idle_to_busy_ts));
+
+		if (last_start_us > period_us) {
+
+			/* First check two cases:
+			 * 1. If the device is currently busy
+			 * 2. If the device was idle during the whole sampling
+			 *    period
+			 */
+
+			if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime)) {
+				/* Check if the device is currently busy */
+				if (ktime_compare(ts->idle_to_busy_ts,
+						zero_ktime))
+					return 100;
+
+				/* We either didn't have any activity or we
+				 * reached an entry which is 0. Either way,
+				 * exit and return what was accumulated so far
+				 */
+				break;
+			}
+
+			/* If sample has finished, check it is relevant */
+			last_end_us = ktime_to_us(
+					ktime_sub(curr, ts->busy_to_idle_ts));
+
+			if (last_end_us > period_us)
+				break;
+
+			/* It is relevant so add it but with adjustment */
+			last_busy_time_us = ktime_to_us(
+						ktime_sub(ts->busy_to_idle_ts,
+						ts->idle_to_busy_ts));
+			total_busy_time_us += last_busy_time_us -
+					(last_start_us - period_us);
+			break;
+		}
+
+		/* Check if the sample is finished or still open */
+		if (ktime_compare(ts->busy_to_idle_ts, zero_ktime))
+			last_busy_time_us = ktime_to_us(
+						ktime_sub(ts->busy_to_idle_ts,
+						ts->idle_to_busy_ts));
+		else
+			last_busy_time_us = ktime_to_us(
+						ktime_sub(curr,
+						ts->idle_to_busy_ts));
+
+		total_busy_time_us += last_busy_time_us;
+
+		last_index--;
+		/* Handle case idle_busy_ts_idx was 0 */
+		if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE)
+			last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1;
+
+		ts = &hdev->idle_busy_ts_arr[last_index];
+
+		overlap_cnt++;
+	}
+
+	total_busy_time_ms = DIV_ROUND_UP_ULL(total_busy_time_us,
+						USEC_PER_MSEC);
+
+	return DIV_ROUND_UP_ULL(total_busy_time_ms * 100, period_ms);
+}
+
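hl_device_utilization() walks the idle/busy timestamp pairs backwards from the most recent ring-buffer slot, clips the sample that straddles the start of the requested window, and returns busy time as a percentage (with an early 100% exit when the device is busy right now). A standalone model of the core accumulation, simplified to closed (start, end) samples so it can be compiled and tested directly:

#include <stdio.h>

struct sample { long long start_us, end_us; };

/* Walk samples newest-to-oldest, clip the one straddling the window
 * start, stop at the first sample that ends before the window.
 */
static int utilization(const struct sample *s, int n,
		       long long now_us, long long period_us)
{
	long long busy = 0, win_start = now_us - period_us;

	for (int i = n - 1; i >= 0; i--) {
		if (s[i].end_us <= win_start)
			break;			/* older than the window */
		long long from = s[i].start_us > win_start ?
					s[i].start_us : win_start;
		busy += s[i].end_us - from;	/* clipped busy time */
	}
	return (int)((busy * 100 + period_us - 1) / period_us);
}

int main(void)
{
	struct sample s[] = { {0, 400}, {500, 800}, {900, 950} };
	/* 350 us busy inside the last 500 us window -> 70 */
	printf("%d%%\n", utilization(s, 3, 1000, 500));
	return 0;
}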
 /*
  * hl_device_set_frequency - set the frequency of the device
  *
  * @hdev: pointer to habanalabs device structure
  * @freq: the new frequency value
  *
- * Change the frequency if needed.
- * We allose to set PLL to low only if there is no user process
- * Returns 0 if no change was done, otherwise returns 1;
+ * Change the frequency if needed. This function has no protection against
+ * concurrency, therefore it is assumed that the calling function has protected
+ * itself against the case of calling this function from multiple threads with
+ * different values
+ *
+ * Returns 0 if no change was done, otherwise returns 1
  */
 int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
 {
-	enum hl_pll_frequency old_freq =
-			(freq == PLL_HIGH) ? PLL_LOW : PLL_HIGH;
-	int ret;
-
-	if (hdev->pm_mng_profile == PM_MANUAL)
-		return 0;
-
-	ret = atomic_cmpxchg(&hdev->curr_pll_profile, old_freq, freq);
-	if (ret == freq)
+	if ((hdev->pm_mng_profile == PM_MANUAL) ||
+			(hdev->curr_pll_profile == freq))
 		return 0;
 
-	/*
-	 * in case we want to lower frequency, check if device is not
-	 * opened. We must have a check here to workaround race condition with
-	 * hl_device_open
-	 */
-	if ((freq == PLL_LOW) && (atomic_read(&hdev->fd_open_cnt) > 0)) {
-		atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
-		return 0;
-	}
-
 	dev_dbg(hdev->dev, "Changing device frequency to %s\n",
 		freq == PLL_HIGH ? "high" : "low");
 
 	hdev->asic_funcs->set_pll_profile(hdev, freq);
 
+	hdev->curr_pll_profile = freq;
+
 	return 1;
 }
 
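hl_device_set_frequency() drops the atomic_cmpxchg() dance: curr_pll_profile is now a plain field and, per the new comment, serialization is the caller's responsibility. In this series the callers hold fpriv_list_lock around the call, as in the reworked work item earlier in this diff:

	mutex_lock(&hdev->fpriv_list_lock);

	if (!hdev->compute_ctx)
		hl_device_set_frequency(hdev, PLL_LOW);

	mutex_unlock(&hdev->fpriv_list_lock);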
@@ -449,19 +613,8 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
 		goto out;
 	}
 
-	mutex_lock(&hdev->fd_open_cnt_lock);
-
-	if (atomic_read(&hdev->fd_open_cnt) > 1) {
-		dev_err(hdev->dev,
-			"Failed to enable debug mode. More then a single user is using the device\n");
-		rc = -EPERM;
-		goto unlock_fd_open_lock;
-	}
-
 	hdev->in_debug = 1;
 
-unlock_fd_open_lock:
-	mutex_unlock(&hdev->fd_open_cnt_lock);
 out:
 	mutex_unlock(&hdev->debug_lock);
 
@@ -568,6 +721,7 @@ disable_device:
 static void device_kill_open_processes(struct hl_device *hdev)
 {
 	u16 pending_total, pending_cnt;
+	struct hl_fpriv *hpriv;
 	struct task_struct *task = NULL;
 
 	if (hdev->pldm)
@@ -575,32 +729,31 @@ static void device_kill_open_processes(struct hl_device *hdev)
 	else
 		pending_total = HL_PENDING_RESET_PER_SEC;
 
-	pending_cnt = pending_total;
-
-	/* Flush all processes that are inside hl_open */
-	mutex_lock(&hdev->fd_open_cnt_lock);
-
-	while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
-
-		pending_cnt--;
-
-		dev_info(hdev->dev,
-			"Can't HARD reset, waiting for user to close FD\n");
+	/* Giving time for user to close FD, and for processes that are inside
+	 * hl_device_open to finish
+	 */
+	if (!list_empty(&hdev->fpriv_list))
 		ssleep(1);
-	}
 
-	if (atomic_read(&hdev->fd_open_cnt)) {
-		task = get_pid_task(hdev->user_ctx->hpriv->taskpid,
-					PIDTYPE_PID);
+	mutex_lock(&hdev->fpriv_list_lock);
+
+	/* This section must be protected because we are dereferencing
+	 * pointers that are freed if the process exits
+	 */
+	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+		task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
 		if (task) {
-			dev_info(hdev->dev, "Killing user processes\n");
+			dev_info(hdev->dev, "Killing user process pid=%d\n",
+				task_pid_nr(task));
 			send_sig(SIGKILL, task, 1);
-			msleep(100);
+			usleep_range(1000, 10000);
 
 			put_task_struct(task);
 		}
 	}
 
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	/* We killed the open users, but because the driver cleans up after the
 	 * user contexts are closed (e.g. mmu mappings), we need to wait again
 	 * to make sure the cleaning phase is finished before continuing with
@@ -609,19 +762,18 @@ static void device_kill_open_processes(struct hl_device *hdev)
 
 	pending_cnt = pending_total;
 
-	while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+	while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
+		dev_info(hdev->dev,
+			"Waiting for all unmap operations to finish before hard reset\n");
 
 		pending_cnt--;
 
 		ssleep(1);
 	}
 
-	if (atomic_read(&hdev->fd_open_cnt))
+	if (!list_empty(&hdev->fpriv_list))
 		dev_crit(hdev->dev,
 			"Going to hard reset with open user contexts\n");
-
-	mutex_unlock(&hdev->fd_open_cnt_lock);
-
 }
 
 static void device_hard_reset_pending(struct work_struct *work)
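device_kill_open_processes() now iterates every entry on fpriv_list rather than assuming a single user context, sends SIGKILL to each owner, and then polls until the list drains. A userspace analogue of that kill-then-wait flow (n_still_open() is a hypothetical stand-in for the list-emptiness check):

#include <signal.h>
#include <unistd.h>

extern int n_still_open(void);	/* hypothetical: how many FDs remain */

static int kill_and_wait(const pid_t *pids, int n, int timeout_s)
{
	for (int i = 0; i < n; i++)
		kill(pids[i], SIGKILL);	/* ask everyone to go away */

	while (timeout_s-- > 0 && n_still_open())
		sleep(1);		/* give cleanup time to finish */

	return n_still_open() ? -1 : 0;
}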
@@ -630,8 +782,6 @@ static void device_hard_reset_pending(struct work_struct *work)
 			container_of(work, struct hl_device_reset_work, reset_work);
 	struct hl_device *hdev = device_reset_work->hdev;
 
-	device_kill_open_processes(hdev);
-
 	hl_device_reset(hdev, true, true);
 
 	kfree(device_reset_work);
@@ -679,13 +829,16 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
 	/* This also blocks future CS/VM/JOB completion operations */
 	hdev->disabled = true;
 
-	/*
-	 * Flush anyone that is inside the critical section of enqueue
+	/* Flush anyone that is inside the critical section of enqueue
 	 * jobs to the H/W
 	 */
 	hdev->asic_funcs->hw_queues_lock(hdev);
 	hdev->asic_funcs->hw_queues_unlock(hdev);
 
+	/* Flush anyone that is inside device open */
+	mutex_lock(&hdev->fpriv_list_lock);
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	dev_err(hdev->dev, "Going to RESET device!\n");
 }
 
@@ -736,6 +889,13 @@ again:
 	/* Go over all the queues, release all CS and their jobs */
 	hl_cs_rollback_all(hdev);
 
+	/* Kill processes here after CS rollback. This is because the process
+	 * can't really exit until all its CSs are done, which is what we
+	 * do in cs rollback
+	 */
+	if (from_hard_reset_thread)
+		device_kill_open_processes(hdev);
+
 	/* Release kernel context */
 	if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
 		hdev->kernel_ctx = NULL;
@@ -754,12 +914,24 @@ again:
 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
 		hl_cq_reset(hdev, &hdev->completion_queue[i]);
 
+	hdev->idle_busy_ts_idx = 0;
+	hdev->idle_busy_ts_arr[0].busy_to_idle_ts = ktime_set(0, 0);
+	hdev->idle_busy_ts_arr[0].idle_to_busy_ts = ktime_set(0, 0);
+
+	if (hdev->cs_active_cnt)
+		dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n",
+			hdev->cs_active_cnt);
+
+	mutex_lock(&hdev->fpriv_list_lock);
+
 	/* Make sure the context switch phase will run again */
-	if (hdev->user_ctx) {
-		atomic_set(&hdev->user_ctx->thread_ctx_switch_token, 1);
-		hdev->user_ctx->thread_ctx_switch_wait_token = 0;
+	if (hdev->compute_ctx) {
+		atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
+		hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
 	}
 
+	mutex_unlock(&hdev->fpriv_list_lock);
+
 	/* Finished tear-down, starting to re-initialize */
 
 	if (hard_reset) {
@@ -788,7 +960,7 @@ again:
 		goto out_err;
 	}
 
-	hdev->user_ctx = NULL;
+	hdev->compute_ctx = NULL;
 
 	rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
 	if (rc) {
@@ -849,6 +1021,8 @@ again:
 	else
 		hdev->soft_reset_cnt++;
 
+	dev_warn(hdev->dev, "Successfully finished resetting the device\n");
+
 	return 0;
 
 out_err:
@@ -883,17 +1057,43 @@ out_err:
 int hl_device_init(struct hl_device *hdev, struct class *hclass)
 {
 	int i, rc, cq_ready_cnt;
+	char *name;
+	bool add_cdev_sysfs_on_err = false;
 
-	/* Create device */
-	rc = device_setup_cdev(hdev, hclass, hdev->id, &hl_ops);
+	name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
+	if (!name) {
+		rc = -ENOMEM;
+		goto out_disabled;
+	}
+
+	/* Initialize cdev and device structures */
+	rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
+				&hdev->cdev, &hdev->dev);
+
+	kfree(name);
 
 	if (rc)
 		goto out_disabled;
 
+	name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
+	if (!name) {
+		rc = -ENOMEM;
+		goto free_dev;
+	}
+
+	/* Initialize cdev and device structures for control device */
+	rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
+				name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
+
+	kfree(name);
+
+	if (rc)
+		goto free_dev;
+
 	/* Initialize ASIC function pointers and perform early init */
 	rc = device_early_init(hdev);
 	if (rc)
-		goto release_device;
+		goto free_dev_ctrl;
 
 	/*
 	 * Start calling ASIC initialization. First S/W then H/W and finally
@@ -965,7 +1165,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 		goto mmu_fini;
 	}
 
-	hdev->user_ctx = NULL;
+	hdev->compute_ctx = NULL;
 
 	rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
 	if (rc) {
@@ -980,12 +1180,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 		goto release_ctx;
 	}
 
-	rc = hl_sysfs_init(hdev);
-	if (rc) {
-		dev_err(hdev->dev, "failed to initialize sysfs\n");
-		goto free_cb_pool;
-	}
-
 	hl_debugfs_add_device(hdev);
 
 	if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
@@ -994,6 +1188,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 		hdev->asic_funcs->hw_fini(hdev, true);
 	}
 
+	/*
+	 * From this point, in case of an error, add char devices and create
+	 * sysfs nodes as part of the error flow, to allow debugging.
+	 */
+	add_cdev_sysfs_on_err = true;
+
 	rc = hdev->asic_funcs->hw_init(hdev);
 	if (rc) {
 		dev_err(hdev->dev, "failed to initialize the H/W\n");
@@ -1030,9 +1230,24 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 	}
 
 	/*
-	 * hl_hwmon_init must be called after device_late_init, because only
+	 * Expose devices and sysfs nodes to user.
+	 * From here there is no need to add char devices and create sysfs nodes
+	 * in case of an error.
+	 */
+	add_cdev_sysfs_on_err = false;
+	rc = device_cdev_sysfs_add(hdev);
+	if (rc) {
+		dev_err(hdev->dev,
+			"Failed to add char devices and sysfs nodes\n");
+		rc = 0;
+		goto out_disabled;
+	}
+
+	/*
+	 * hl_hwmon_init() must be called after device_late_init(), because only
 	 * there we get the information from the device about which
-	 * hwmon-related sensors the device supports
+	 * hwmon-related sensors the device supports.
+	 * Furthermore, it must be done after adding the device to the system.
 	 */
 	rc = hl_hwmon_init(hdev);
 	if (rc) {
@@ -1048,8 +1263,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 
 	return 0;
 
-free_cb_pool:
-	hl_cb_pool_fini(hdev);
 release_ctx:
 	if (hl_ctx_put(hdev->kernel_ctx) != 1)
 		dev_err(hdev->dev,
@@ -1068,18 +1281,21 @@ sw_fini:
 	hdev->asic_funcs->sw_fini(hdev);
 early_fini:
 	device_early_fini(hdev);
-release_device:
-	device_destroy(hclass, hdev->dev->devt);
-	cdev_del(&hdev->cdev);
+free_dev_ctrl:
+	kfree(hdev->dev_ctrl);
+free_dev:
+	kfree(hdev->dev);
 out_disabled:
 	hdev->disabled = true;
+	if (add_cdev_sysfs_on_err)
+		device_cdev_sysfs_add(hdev);
 	if (hdev->pdev)
 		dev_err(&hdev->pdev->dev,
 			"Failed to initialize hl%d. Device is NOT usable !\n",
-			hdev->id);
+			hdev->id / 2);
 	else
 		pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
-			hdev->id);
+			hdev->id / 2);
 
 	return rc;
 }
@@ -1120,16 +1336,17 @@ void hl_device_fini(struct hl_device *hdev)
 	/* Mark device as disabled */
 	hdev->disabled = true;
 
-	/*
-	 * Flush anyone that is inside the critical section of enqueue
+	/* Flush anyone that is inside the critical section of enqueue
 	 * jobs to the H/W
 	 */
 	hdev->asic_funcs->hw_queues_lock(hdev);
 	hdev->asic_funcs->hw_queues_unlock(hdev);
 
-	hdev->hard_reset_pending = true;
+	/* Flush anyone that is inside device open */
+	mutex_lock(&hdev->fpriv_list_lock);
+	mutex_unlock(&hdev->fpriv_list_lock);
 
-	device_kill_open_processes(hdev);
+	hdev->hard_reset_pending = true;
 
 	hl_hwmon_fini(hdev);
 
@@ -1137,8 +1354,6 @@ void hl_device_fini(struct hl_device *hdev)
 
 	hl_debugfs_remove_device(hdev);
 
-	hl_sysfs_fini(hdev);
-
 	/*
 	 * Halt the engines and disable interrupts so we won't get any more
 	 * completions from H/W and we won't have any accesses from the
@@ -1149,6 +1364,12 @@ void hl_device_fini(struct hl_device *hdev)
 	/* Go over all the queues, release all CS and their jobs */
 	hl_cs_rollback_all(hdev);
 
+	/* Kill processes here after CS rollback. This is because the process
+	 * can't really exit until all its CSs are done, which is what we
+	 * do in cs rollback
+	 */
+	device_kill_open_processes(hdev);
+
 	hl_cb_pool_fini(hdev);
 
 	/* Release kernel context */
@@ -1175,9 +1396,8 @@ void hl_device_fini(struct hl_device *hdev)
 
 	device_early_fini(hdev);
 
-	/* Hide device from user */
-	device_destroy(hdev->dev->class, hdev->dev->devt);
-	cdev_del(&hdev->cdev);
+	/* Hide devices and sysfs nodes from user */
+	device_cdev_sysfs_del(hdev);
 
 	pr_info("removed device successfully\n");
 }
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 271c5c8f53b4..6fba14b81f90 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -9,6 +9,7 @@
 #include "include/hw_ip/mmu/mmu_general.h"
 #include "include/hw_ip/mmu/mmu_v1_0.h"
 #include "include/goya/asic_reg/goya_masks.h"
+#include "include/goya/goya_reg_map.h"
 
 #include <linux/pci.h>
 #include <linux/genalloc.h>
@@ -41,8 +42,8 @@
  * PQ, CQ and CP are not secured.
  * PQ, CB and the data are on the SRAM/DRAM.
  *
- * Since QMAN DMA is secured, KMD is parsing the DMA CB:
- *     - KMD checks DMA pointer
+ * Since QMAN DMA is secured, the driver is parsing the DMA CB:
+ *     - checks DMA pointer
  *     - WREG, MSG_PROT are not allowed.
  *     - MSG_LONG/SHORT are allowed.
  *
@@ -55,15 +56,15 @@
  * QMAN DMA: PQ, CQ and CP are secured.
  * MMU is set to bypass on the Secure props register of the QMAN.
  * The reasons we don't enable MMU for PQ, CQ and CP are:
- *     - PQ entry is in kernel address space and KMD doesn't map it.
+ *     - PQ entry is in kernel address space and the driver doesn't map it.
  *     - CP writes to MSIX register and to kernel address space (completion
  *       queue).
  *
- * DMA is not secured but because CP is secured, KMD still needs to parse the
- * CB, but doesn't need to check the DMA addresses.
+ * DMA is not secured but because CP is secured, the driver still needs to parse
+ * the CB, but doesn't need to check the DMA addresses.
  *
- * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
- * doesn't map memory in MMU.
+ * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and
+ * the driver doesn't map memory in MMU.
  *
  * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
  *
@@ -335,18 +336,18 @@ void goya_get_fixed_properties(struct hl_device *hdev)
 
 	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
 		prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
-		prop->hw_queues_props[i].kmd_only = 0;
+		prop->hw_queues_props[i].driver_only = 0;
 	}
 
 	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
 		prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
-		prop->hw_queues_props[i].kmd_only = 1;
+		prop->hw_queues_props[i].driver_only = 1;
 	}
 
 	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
 			NUMBER_OF_INT_HW_QUEUES; i++) {
 		prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
-		prop->hw_queues_props[i].kmd_only = 0;
+		prop->hw_queues_props[i].driver_only = 0;
 	}
 
 	for (; i < HL_MAX_QUEUES; i++)
@@ -1006,36 +1007,34 @@ int goya_init_cpu_queues(struct hl_device *hdev)
 
 	eq = &hdev->event_queue;
 
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0,
-			lower_32_bits(cpu_pq->bus_address));
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1,
-			upper_32_bits(cpu_pq->bus_address));
+	WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
+	WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
 
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(eq->bus_address));
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(eq->bus_address));
+	WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
+	WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
 
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8,
+	WREG32(mmCPU_CQ_BASE_ADDR_LOW,
 			lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9,
+	WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
 			upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
 
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, HL_CPU_ACCESSIBLE_MEM_SIZE);
+	WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
+	WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
+	WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
 
 	/* Used for EQ CI */
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
+	WREG32(mmCPU_EQ_CI, 0);
 
 	WREG32(mmCPU_IF_PF_PQ_PI, 0);
 
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);
+	WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);
 
 	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
 		GOYA_ASYNC_EVENT_ID_PI_UPDATE);
 
 	err = hl_poll_timeout(
 		hdev,
-		mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
+		mmCPU_PQ_INIT_STATUS,
 		status,
 		(status == PQ_INIT_STATUS_READY_FOR_HOST),
 		1000,
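The scratch-pad registers are retired in favor of descriptive aliases from the new goya_reg_map.h. The diff itself pairs each old register with its new name, so the header presumably reduces to a set of defines along these lines (illustrative; the header's actual contents are not part of this hunk):

#define mmCPU_PQ_BASE_ADDR_LOW	mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
#define mmCPU_PQ_BASE_ADDR_HIGH	mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
#define mmCPU_EQ_BASE_ADDR_LOW	mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
#define mmCPU_EQ_BASE_ADDR_HIGH	mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
#define mmCPU_EQ_CI		mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
#define mmCPU_PQ_INIT_STATUS	mmPSOC_GLOBAL_CONF_SCRATCHPAD_7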
@@ -2063,6 +2062,25 @@ static void goya_disable_msix(struct hl_device *hdev)
 	goya->hw_cap_initialized &= ~HW_CAP_MSIX;
 }
 
+static void goya_enable_timestamp(struct hl_device *hdev)
+{
+	/* Disable the timestamp counter */
+	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+
+	/* Zero the lower/upper parts of the 64-bit counter */
+	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
+	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
+
+	/* Enable the counter */
+	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
+}
+
+static void goya_disable_timestamp(struct hl_device *hdev)
+{
+	/* Disable the timestamp counter */
+	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+}
+
 static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
 {
 	u32 wait_timeout_ms, cpu_timeout_ms;
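With goya_enable_timestamp() called once from hw_init and goya_disable_timestamp() from halt_engines, the 64-bit counter now runs for the device's whole lifetime ("perpetual") instead of being toggled per debug session, so Coresight timestamps stay monotonic across sessions. If the two 32-bit halves ever need to be read back, the usual high/low/high dance avoids a torn read at rollover — a hedged sketch only, with the offsets taken from the enable sequence above (the driver does not read the counter anywhere in this diff):

static u64 read_timestamp(struct hl_device *hdev)
{
	u32 hi, lo, hi2;

	do {
		hi  = RREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8);
		lo  = RREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC);
		hi2 = RREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8);
	} while (hi != hi2);	/* retry if the high half rolled over */

	return ((u64)hi << 32) | lo;
}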
@@ -2103,6 +2121,8 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
 	goya_disable_external_queues(hdev);
 	goya_disable_internal_queues(hdev);
 
+	goya_disable_timestamp(hdev);
+
 	if (hard_reset) {
 		goya_disable_msix(hdev);
 		goya_mmu_remove_device_cpu_mappings(hdev);
@@ -2205,12 +2225,12 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
 
 	switch (fwc) {
 	case FW_COMP_UBOOT:
-		ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
+		ver_off = RREG32(mmUBOOT_VER_OFFSET);
 		dest = hdev->asic_prop.uboot_ver;
 		name = "U-Boot";
 		break;
 	case FW_COMP_PREBOOT:
-		ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
+		ver_off = RREG32(mmPREBOOT_VER_OFFSET);
 		dest = hdev->asic_prop.preboot_ver;
 		name = "Preboot";
 		break;
@@ -2469,7 +2489,7 @@ static int goya_hw_init(struct hl_device *hdev)
 	 * we need to reset the chip before doing H/W init. This register is
 	 * cleared by the H/W upon H/W reset
 	 */
-	WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
+	WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
 
 	rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
 	if (rc) {
@@ -2505,6 +2525,8 @@ static int goya_hw_init(struct hl_device *hdev)
 
 	goya_init_tpc_qmans(hdev);
 
+	goya_enable_timestamp(hdev);
+
 	/* MSI-X must be enabled before CPU queues are initialized */
 	rc = goya_enable_msix(hdev);
 	if (rc)
@@ -2831,7 +2853,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
 
 	if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
 		dev_err_ratelimited(hdev->dev,
-			"Can't send KMD job on QMAN0 because the device is not idle\n");
+			"Can't send driver job on QMAN0 because the device is not idle\n");
 		return -EBUSY;
 	}
 
@@ -3949,7 +3971,7 @@ void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
 
 void goya_update_eq_ci(struct hl_device *hdev, u32 val)
 {
-	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
+	WREG32(mmCPU_EQ_CI, val);
 }
 
 void goya_restore_phase_topology(struct hl_device *hdev)
@@ -4447,6 +4469,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
 	struct goya_device *goya = hdev->asic_specific;
 
 	goya->events_stat[event_type]++;
+	goya->events_stat_aggregate[event_type]++;
 
 	switch (event_type) {
 	case GOYA_ASYNC_EVENT_ID_PCIE_IF:
@@ -4528,12 +4551,16 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
 	}
 }
 
-void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
+void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
 {
 	struct goya_device *goya = hdev->asic_specific;
 
-	*size = (u32) sizeof(goya->events_stat);
+	if (aggregate) {
+		*size = (u32) sizeof(goya->events_stat_aggregate);
+		return goya->events_stat_aggregate;
+	}
 
+	*size = (u32) sizeof(goya->events_stat);
 	return goya->events_stat;
 }
 
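Two counter arrays now coexist: events_stat, which can be cleared per reset, and events_stat_aggregate, which only grows and backs the new "retrieve aggregate H/W events" uapi. A caller selects a view via the new bool parameter; assuming the hook keeps this name in hl_asic_funcs (an assumption — the ops table itself is not shown in this diff), usage would look like:

	u32 size;
	void *stat;

	stat = hdev->asic_funcs->get_events_stat(hdev, true /* aggregate */,
						 &size);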
@@ -4934,6 +4961,10 @@ int goya_armcp_info_get(struct hl_device *hdev)
 		prop->dram_end_address = prop->dram_base_address + dram_size;
 	}
 
+	if (!strlen(prop->armcp_info.card_name))
+		strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+				CARD_NAME_MAX_LEN);
+
 	return 0;
 }
 
@@ -5047,7 +5078,7 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
 
 static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
 {
-	return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
+	return RREG32(mmHW_STATE);
 }
 
 static const struct hl_asic_funcs goya_funcs = {
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index d7f48c9c41cd..89b6574f8e4f 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -55,6 +55,8 @@
 
 #define DRAM_PHYS_DEFAULT_SIZE		0x100000000ull	/* 4GB */
 
+#define GOYA_DEFAULT_CARD_NAME		"HL1000"
+
 /* DRAM Memory Map */
 
 #define CPU_FW_IMAGE_SIZE		0x10000000	/* 256MB */
@@ -68,19 +70,19 @@
 						MMU_PAGE_TABLES_SIZE)
 #define MMU_CACHE_MNG_ADDR		(MMU_DRAM_DEFAULT_PAGE_ADDR + \
 						MMU_DRAM_DEFAULT_PAGE_SIZE)
-#define DRAM_KMD_END_ADDR		(MMU_CACHE_MNG_ADDR + \
+#define DRAM_DRIVER_END_ADDR		(MMU_CACHE_MNG_ADDR + \
 						MMU_CACHE_MNG_SIZE)
 
 #define DRAM_BASE_ADDR_USER		0x20000000
 
-#if (DRAM_KMD_END_ADDR > DRAM_BASE_ADDR_USER)
-#error "KMD must reserve no more than 512MB"
+#if (DRAM_DRIVER_END_ADDR > DRAM_BASE_ADDR_USER)
+#error "Driver must reserve no more than 512MB"
 #endif
 
 /*
- * SRAM Memory Map for KMD
+ * SRAM Memory Map for Driver
  *
- * KMD occupies KMD_SRAM_SIZE bytes from the start of SRAM. It is used for
+ * Driver occupies DRIVER_SRAM_SIZE bytes from the start of SRAM. It is used for
  * MME/TPC QMANs
  *
  */
@@ -106,10 +108,10 @@
 #define TPC7_QMAN_BASE_OFFSET	(TPC6_QMAN_BASE_OFFSET + \
 				(TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
 
-#define SRAM_KMD_RES_OFFSET	(TPC7_QMAN_BASE_OFFSET + \
+#define SRAM_DRIVER_RES_OFFSET	(TPC7_QMAN_BASE_OFFSET + \
 				(TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE))
 
-#if (SRAM_KMD_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START)
+#if (SRAM_DRIVER_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START)
 #error "MME/TPC QMANs SRAM space exceeds limit"
 #endif
 
@@ -162,6 +164,7 @@ struct goya_device {
 
 	u64		ddr_bar_cur_addr;
 	u32		events_stat[GOYA_ASYNC_EVENT_ID_SIZE];
+	u32		events_stat_aggregate[GOYA_ASYNC_EVENT_ID_SIZE];
 	u32		hw_cap_initialized;
 	u8		device_cpu_mmu_mappings_done;
 };
@@ -215,7 +218,7 @@ int goya_suspend(struct hl_device *hdev);
 int goya_resume(struct hl_device *hdev);
 
 void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
-void *goya_get_events_stat(struct hl_device *hdev, u32 *size);
+void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size);
 
 void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
 		u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index d7ec7ad84cc6..b4d406af1bed 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -15,6 +15,10 @@
 
 #define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC	(CORESIGHT_TIMEOUT_USEC * 100)
 
+#define SPMU_SECTION_SIZE		DMA_CH_0_CS_SPMU_MAX_OFFSET
+#define SPMU_EVENT_TYPES_OFFSET		0x400
+#define SPMU_MAX_COUNTERS		6
+
 static u64 debug_stm_regs[GOYA_STM_LAST + 1] = {
 	[GOYA_STM_CPU]		= mmCPU_STM_BASE,
 	[GOYA_STM_DMA_CH_0_CS]	= mmDMA_CH_0_CS_STM_BASE,
@@ -226,9 +230,16 @@ static int goya_config_stm(struct hl_device *hdev,
 				struct hl_debug_params *params)
 {
 	struct hl_debug_params_stm *input;
-	u64 base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+	u64 base_reg;
 	int rc;
 
+	if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
+		dev_err(hdev->dev, "Invalid register index in STM\n");
+		return -EINVAL;
+	}
+
+	base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+
 	WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
 
 	if (params->enable) {
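Each Coresight config helper now validates the user-controlled reg_idx against its lookup table before indexing, closing an out-of-bounds read reachable from the Debug IOCTL; the same guard is repeated for ETF, FUNNEL, BMON and SPMU below. The shared shape could be factored into one helper, sketched here (hypothetical; the driver keeps the checks inline):

static int goya_coresight_base(struct hl_device *hdev, const u64 *regs,
			       size_t nregs, u32 idx, u64 *base)
{
	if (idx >= nregs) {		/* reject the untrusted index first */
		dev_err(hdev->dev, "Invalid register index\n");
		return -EINVAL;
	}

	*base = regs[idx] - CFG_BASE;	/* only then touch the table */
	return 0;
}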
@@ -288,10 +299,17 @@ static int goya_config_etf(struct hl_device *hdev,
 				struct hl_debug_params *params)
 {
 	struct hl_debug_params_etf *input;
-	u64 base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+	u64 base_reg;
 	u32 val;
 	int rc;
 
+	if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) {
+		dev_err(hdev->dev, "Invalid register index in ETF\n");
+		return -EINVAL;
+	}
+
+	base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+
 	WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
 
 	val = RREG32(base_reg + 0x304);
@@ -445,11 +463,18 @@ static int goya_config_etr(struct hl_device *hdev,
 static int goya_config_funnel(struct hl_device *hdev,
 		struct hl_debug_params *params)
 {
-	WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE + 0xFB0,
-			CORESIGHT_UNLOCK);
+	u64 base_reg;
+
+	if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) {
+		dev_err(hdev->dev, "Invalid register index in FUNNEL\n");
+		return -EINVAL;
+	}
+
+	base_reg = debug_funnel_regs[params->reg_idx] - CFG_BASE;
+
+	WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
 
-	WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE,
-			params->enable ? 0x33F : 0);
+	WREG32(base_reg, params->enable ? 0x33F : 0);
 
 	return 0;
 }
@@ -458,9 +483,16 @@ static int goya_config_bmon(struct hl_device *hdev,
 				struct hl_debug_params *params)
 {
 	struct hl_debug_params_bmon *input;
-	u64 base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+	u64 base_reg;
 	u32 pcie_base = 0;
 
+	if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) {
+		dev_err(hdev->dev, "Invalid register index in BMON\n");
+		return -EINVAL;
+	}
+
+	base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+
 	WREG32(base_reg + 0x104, 1);
 
 	if (params->enable) {
@@ -522,7 +554,7 @@ static int goya_config_bmon(struct hl_device *hdev,
 static int goya_config_spmu(struct hl_device *hdev,
 		struct hl_debug_params *params)
 {
-	u64 base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+	u64 base_reg;
 	struct hl_debug_params_spmu *input = params->input;
 	u64 *output;
 	u32 output_arr_len;
@@ -531,6 +563,13 @@ static int goya_config_spmu(struct hl_device *hdev,
 	u32 cycle_cnt_idx;
 	int i;
 
+	if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) {
+		dev_err(hdev->dev, "Invalid register index in SPMU\n");
+		return -EINVAL;
+	}
+
+	base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+
 	if (params->enable) {
 		input = params->input;
 
@@ -539,7 +578,13 @@ static int goya_config_spmu(struct hl_device *hdev,
 
 		if (input->event_types_num < 3) {
 			dev_err(hdev->dev,
-				"not enough values for SPMU enable\n");
+				"not enough event types values for SPMU enable\n");
+			return -EINVAL;
+		}
+
+		if (input->event_types_num > SPMU_MAX_COUNTERS) {
+			dev_err(hdev->dev,
+				"too many event types values for SPMU enable\n");
 			return -EINVAL;
 		}
 
@@ -547,7 +592,8 @@ static int goya_config_spmu(struct hl_device *hdev,
 		WREG32(base_reg + 0xE04, 0x41013040);
 
 		for (i = 0 ; i < input->event_types_num ; i++)
-			WREG32(base_reg + 0x400 + i * 4, input->event_types[i]);
+			WREG32(base_reg + SPMU_EVENT_TYPES_OFFSET + i * 4,
+				input->event_types[i]);
 
 		WREG32(base_reg + 0xE04, 0x41013041);
 		WREG32(base_reg + 0xC00, 0x8000003F);
@@ -567,6 +613,12 @@ static int goya_config_spmu(struct hl_device *hdev,
 			return -EINVAL;
 		}
 
+		if (events_num > SPMU_MAX_COUNTERS) {
+			dev_err(hdev->dev,
+				"too many events values for SPMU disable\n");
+			return -EINVAL;
+		}
+
 		WREG32(base_reg + 0xE04, 0x41013040);
 
 		for (i = 0 ; i < events_num ; i++)
@@ -584,24 +636,11 @@ static int goya_config_spmu(struct hl_device *hdev,
 	return 0;
 }
 
-static int goya_config_timestamp(struct hl_device *hdev,
-		struct hl_debug_params *params)
-{
-	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
-	if (params->enable) {
-		WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
-		WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
-		WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
-	}
-
-	return 0;
-}
-
 int goya_debug_coresight(struct hl_device *hdev, void *data)
 {
 	struct hl_debug_params *params = data;
 	u32 val;
-	int rc;
+	int rc = 0;
 
 	switch (params->op) {
 	case HL_DEBUG_OP_STM:
@@ -623,7 +662,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
623 rc = goya_config_spmu(hdev, params); 662 rc = goya_config_spmu(hdev, params);
624 break; 663 break;
625 case HL_DEBUG_OP_TIMESTAMP: 664 case HL_DEBUG_OP_TIMESTAMP:
626 rc = goya_config_timestamp(hdev, params); 665 /* Do nothing as this opcode is deprecated */
627 break; 666 break;
628 667
629 default: 668 default:
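
The recurring fix in the goya_coresight.c hunks above is one pattern applied to each debug block: a user-controlled reg_idx must be checked against the size of the static register table before it is used as an index. A minimal standalone sketch of that pattern (the table contents are placeholders, not real Goya addresses):

	#include <errno.h>
	#include <stdio.h>

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	/* placeholder standing in for debug_bmon_regs / debug_spmu_regs */
	static const unsigned long long debug_regs[] = { 0x1000, 0x2000, 0x3000 };

	static int lookup_base_reg(unsigned int reg_idx, unsigned long long *base)
	{
		/* reject the index before dereferencing the table; an
		 * unchecked value coming from the Debug IOCTL would read
		 * out of bounds
		 */
		if (reg_idx >= ARRAY_SIZE(debug_regs))
			return -EINVAL;

		*base = debug_regs[reg_idx];
		return 0;
	}

	int main(void)
	{
		unsigned long long base;

		if (lookup_base_reg(7, &base))
			printf("index rejected, as expected\n");
		return 0;
	}
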
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 088692c852b6..a2a700c3d597 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -230,18 +230,127 @@ static ssize_t ic_clk_curr_show(struct device *dev,
230 return sprintf(buf, "%lu\n", value); 230 return sprintf(buf, "%lu\n", value);
231} 231}
232 232
233static ssize_t pm_mng_profile_show(struct device *dev,
234 struct device_attribute *attr, char *buf)
235{
236 struct hl_device *hdev = dev_get_drvdata(dev);
237
238 if (hl_device_disabled_or_in_reset(hdev))
239 return -ENODEV;
240
241 return sprintf(buf, "%s\n",
242 (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
243 (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
244 "unknown");
245}
246
247static ssize_t pm_mng_profile_store(struct device *dev,
248 struct device_attribute *attr, const char *buf, size_t count)
249{
250 struct hl_device *hdev = dev_get_drvdata(dev);
251
252 if (hl_device_disabled_or_in_reset(hdev)) {
253 count = -ENODEV;
254 goto out;
255 }
256
257 mutex_lock(&hdev->fpriv_list_lock);
258
259 if (hdev->compute_ctx) {
260 dev_err(hdev->dev,
261 "Can't change PM profile while compute context is opened on the device\n");
262 count = -EPERM;
263 goto unlock_mutex;
264 }
265
266 if (strncmp("auto", buf, strlen("auto")) == 0) {
267 /* Make sure we are in LOW PLL when changing modes */
268 if (hdev->pm_mng_profile == PM_MANUAL) {
269 hdev->curr_pll_profile = PLL_HIGH;
270 hl_device_set_frequency(hdev, PLL_LOW);
271 hdev->pm_mng_profile = PM_AUTO;
272 }
273 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
274 if (hdev->pm_mng_profile == PM_AUTO) {
275 /* Must release the lock because the work thread also
276 * takes this lock. But before we release it, set
277 * the mode to manual so nothing will change if a user
278 * suddenly opens the device
279 */
280 hdev->pm_mng_profile = PM_MANUAL;
281
282 mutex_unlock(&hdev->fpriv_list_lock);
283
284 /* Flush the current work so we can return to the user
285 * knowing that he is the only one changing frequencies
286 */
287 flush_delayed_work(&hdev->work_freq);
288
289 return count;
290 }
291 } else {
292 dev_err(hdev->dev, "value should be auto or manual\n");
293 count = -EINVAL;
294 }
295
296unlock_mutex:
297 mutex_unlock(&hdev->fpriv_list_lock);
298out:
299 return count;
300}
301
302static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
303 char *buf)
304{
305 struct hl_device *hdev = dev_get_drvdata(dev);
306
307 if (hl_device_disabled_or_in_reset(hdev))
308 return -ENODEV;
309
310 return sprintf(buf, "%u\n", hdev->high_pll);
311}
312
313static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
314 const char *buf, size_t count)
315{
316 struct hl_device *hdev = dev_get_drvdata(dev);
317 unsigned long value;
318 int rc;
319
320 if (hl_device_disabled_or_in_reset(hdev)) {
321 count = -ENODEV;
322 goto out;
323 }
324
325 rc = kstrtoul(buf, 0, &value);
326
327 if (rc) {
328 count = -EINVAL;
329 goto out;
330 }
331
332 hdev->high_pll = value;
333
334out:
335 return count;
336}
337
338static DEVICE_ATTR_RW(high_pll);
233static DEVICE_ATTR_RW(ic_clk); 339static DEVICE_ATTR_RW(ic_clk);
234static DEVICE_ATTR_RO(ic_clk_curr); 340static DEVICE_ATTR_RO(ic_clk_curr);
235static DEVICE_ATTR_RW(mme_clk); 341static DEVICE_ATTR_RW(mme_clk);
236static DEVICE_ATTR_RO(mme_clk_curr); 342static DEVICE_ATTR_RO(mme_clk_curr);
343static DEVICE_ATTR_RW(pm_mng_profile);
237static DEVICE_ATTR_RW(tpc_clk); 344static DEVICE_ATTR_RW(tpc_clk);
238static DEVICE_ATTR_RO(tpc_clk_curr); 345static DEVICE_ATTR_RO(tpc_clk_curr);
239 346
240static struct attribute *goya_dev_attrs[] = { 347static struct attribute *goya_dev_attrs[] = {
348 &dev_attr_high_pll.attr,
241 &dev_attr_ic_clk.attr, 349 &dev_attr_ic_clk.attr,
242 &dev_attr_ic_clk_curr.attr, 350 &dev_attr_ic_clk_curr.attr,
243 &dev_attr_mme_clk.attr, 351 &dev_attr_mme_clk.attr,
244 &dev_attr_mme_clk_curr.attr, 352 &dev_attr_mme_clk_curr.attr,
353 &dev_attr_pm_mng_profile.attr,
245 &dev_attr_tpc_clk.attr, 354 &dev_attr_tpc_clk.attr,
246 &dev_attr_tpc_clk_curr.attr, 355 &dev_attr_tpc_clk_curr.attr,
247 NULL, 356 NULL,
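
The two new attributes land under the device's sysfs directory, typically /sys/class/habanalabs/hl<N>/ (the exact node name depends on enumeration order). A hypothetical userspace snippet that flips the PM profile, which pm_mng_profile_store() above refuses while a compute context is open:

	#include <stdio.h>

	/* assumed path; substitute the actual device index on your system */
	#define PM_PROFILE "/sys/class/habanalabs/hl0/pm_mng_profile"

	int main(void)
	{
		FILE *f = fopen(PM_PROFILE, "w");

		if (!f) {
			perror(PM_PROFILE);
			return 1;
		}

		/* valid values are "auto" and "manual", per the store handler */
		if (fputs("manual\n", f) < 0)
			perror("write");

		fclose(f);
		return 0;
	}
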
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index ce83adafcf2d..75862be53c60 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -36,6 +36,8 @@
36 36
37#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */ 37#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
38 38
39#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
40
39#define HL_MAX_QUEUES 128 41#define HL_MAX_QUEUES 128
40 42
41#define HL_MAX_JOBS_PER_CS 64 43#define HL_MAX_JOBS_PER_CS 64
@@ -43,6 +45,8 @@
43/* MUST BE POWER OF 2 and larger than 1 */ 45/* MUST BE POWER OF 2 and larger than 1 */
44#define HL_MAX_PENDING_CS 64 46#define HL_MAX_PENDING_CS 64
45 47
48#define HL_IDLE_BUSY_TS_ARR_SIZE 4096
49
46/* Memory */ 50/* Memory */
47#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */ 51#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
48 52
@@ -92,12 +96,12 @@ enum hl_queue_type {
92/** 96/**
93 * struct hw_queue_properties - queue information. 97 * struct hw_queue_properties - queue information.
94 * @type: queue type. 98 * @type: queue type.
95 * @kmd_only: true if only KMD is allowed to send a job to this queue, false 99 * @driver_only: true if only the driver is allowed to send a job to this queue,
96 * otherwise. 100 * false otherwise.
97 */ 101 */
98struct hw_queue_properties { 102struct hw_queue_properties {
99 enum hl_queue_type type; 103 enum hl_queue_type type;
100 u8 kmd_only; 104 u8 driver_only;
101}; 105};
102 106
103/** 107/**
@@ -320,7 +324,7 @@ struct hl_cs_job;
320#define HL_EQ_LENGTH 64 324#define HL_EQ_LENGTH 64
321#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE) 325#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
322 326
323/* KMD <-> ArmCP shared memory size */ 327/* Host <-> ArmCP shared memory size */
324#define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M 328#define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M
325 329
326/** 330/**
@@ -401,7 +405,7 @@ struct hl_cs_parser;
401 405
402/** 406/**
403 * enum hl_pm_mng_profile - power management profile. 407 * enum hl_pm_mng_profile - power management profile.
404 * @PM_AUTO: internal clock is set by KMD. 408 * @PM_AUTO: internal clock is set by the Linux driver.
405 * @PM_MANUAL: internal clock is set by the user. 409 * @PM_MANUAL: internal clock is set by the user.
406 * @PM_LAST: last power management type. 410 * @PM_LAST: last power management type.
407 */ 411 */
@@ -554,7 +558,8 @@ struct hl_asic_funcs {
554 struct hl_eq_entry *eq_entry); 558 struct hl_eq_entry *eq_entry);
555 void (*set_pll_profile)(struct hl_device *hdev, 559 void (*set_pll_profile)(struct hl_device *hdev,
556 enum hl_pll_frequency freq); 560 enum hl_pll_frequency freq);
557 void* (*get_events_stat)(struct hl_device *hdev, u32 *size); 561 void* (*get_events_stat)(struct hl_device *hdev, bool aggregate,
562 u32 *size);
558 u64 (*read_pte)(struct hl_device *hdev, u64 addr); 563 u64 (*read_pte)(struct hl_device *hdev, u64 addr);
559 void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val); 564 void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
560 void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard); 565 void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
@@ -608,7 +613,7 @@ struct hl_va_range {
608 * descriptor (hl_vm_phys_pg_list or hl_userptr). 613 * descriptor (hl_vm_phys_pg_list or hl_userptr).
609 * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure. 614 * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
610 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure. 615 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
611 * @hpriv: pointer to the private (KMD) data of the process (fd). 616 * @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
612 * @hdev: pointer to the device structure. 617 * @hdev: pointer to the device structure.
613 * @refcount: reference counter for the context. Context is released only when 618 * @refcount: reference counter for the context. Context is released only when
614 * this hits 0. It is incremented on CS and CS_WAIT. 619 * this hits 0. It is incremented on CS and CS_WAIT.
@@ -634,6 +639,7 @@ struct hl_va_range {
634 * execution phase before the context switch phase 639 * execution phase before the context switch phase
635 * has finished. 640 * has finished.
636 * @asid: context's unique address space ID in the device's MMU. 641 * @asid: context's unique address space ID in the device's MMU.
642 * @handle: context's opaque handle for user
637 */ 643 */
638struct hl_ctx { 644struct hl_ctx {
639 DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS); 645 DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
@@ -655,6 +661,7 @@ struct hl_ctx {
655 atomic_t thread_ctx_switch_token; 661 atomic_t thread_ctx_switch_token;
656 u32 thread_ctx_switch_wait_token; 662 u32 thread_ctx_switch_wait_token;
657 u32 asid; 663 u32 asid;
664 u32 handle;
658}; 665};
659 666
660/** 667/**
@@ -906,23 +913,27 @@ struct hl_debug_params {
906 * @hdev: habanalabs device structure. 913 * @hdev: habanalabs device structure.
907 * @filp: pointer to the given file structure. 914 * @filp: pointer to the given file structure.
908 * @taskpid: current process ID. 915 * @taskpid: current process ID.
909 * @ctx: current executing context. 916 * @ctx: current executing context. TODO: remove for multiple ctx per process
910 * @ctx_mgr: context manager to handle multiple context for this FD. 917 * @ctx_mgr: context manager to handle multiple context for this FD.
911 * @cb_mgr: command buffer manager to handle multiple buffers for this FD. 918 * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
912 * @debugfs_list: list of relevant ASIC debugfs. 919 * @debugfs_list: list of relevant ASIC debugfs.
920 * @dev_node: node in the device list of file private data
913 * @refcount: number of related contexts. 921 * @refcount: number of related contexts.
914 * @restore_phase_mutex: lock for context switch and restore phase. 922 * @restore_phase_mutex: lock for context switch and restore phase.
923 * @is_control: true for control device, false otherwise
915 */ 924 */
916struct hl_fpriv { 925struct hl_fpriv {
917 struct hl_device *hdev; 926 struct hl_device *hdev;
918 struct file *filp; 927 struct file *filp;
919 struct pid *taskpid; 928 struct pid *taskpid;
920 struct hl_ctx *ctx; /* TODO: remove for multiple ctx */ 929 struct hl_ctx *ctx;
921 struct hl_ctx_mgr ctx_mgr; 930 struct hl_ctx_mgr ctx_mgr;
922 struct hl_cb_mgr cb_mgr; 931 struct hl_cb_mgr cb_mgr;
923 struct list_head debugfs_list; 932 struct list_head debugfs_list;
933 struct list_head dev_node;
924 struct kref refcount; 934 struct kref refcount;
925 struct mutex restore_phase_mutex; 935 struct mutex restore_phase_mutex;
936 u8 is_control;
926}; 937};
927 938
928 939
@@ -1009,7 +1020,7 @@ struct hl_dbg_device_entry {
1009 */ 1020 */
1010 1021
1011/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe 1022/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
1012 * x16 cards. In extereme cases, there are hosts that can accommodate 16 cards 1023 * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
1013 */ 1024 */
1014#define HL_MAX_MINORS 256 1025#define HL_MAX_MINORS 256
1015 1026
@@ -1041,14 +1052,18 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
1041 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \ 1052 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
1042 (val) << REG_FIELD_SHIFT(reg, field)) 1053 (val) << REG_FIELD_SHIFT(reg, field))
1043 1054
1055/* Timeout should be longer when working with simulator but cap the
1056 * increased timeout to some maximum
1057 */
1044#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \ 1058#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
1045({ \ 1059({ \
1046 ktime_t __timeout; \ 1060 ktime_t __timeout; \
1047 /* timeout should be longer when working with simulator */ \
1048 if (hdev->pdev) \ 1061 if (hdev->pdev) \
1049 __timeout = ktime_add_us(ktime_get(), timeout_us); \ 1062 __timeout = ktime_add_us(ktime_get(), timeout_us); \
1050 else \ 1063 else \
1051 __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \ 1064 __timeout = ktime_add_us(ktime_get(),\
1065 min((u64)(timeout_us * 10), \
1066 (u64) HL_SIM_MAX_TIMEOUT_US)); \
1052 might_sleep_if(sleep_us); \ 1067 might_sleep_if(sleep_us); \
1053 for (;;) { \ 1068 for (;;) { \
1054 (val) = RREG32(addr); \ 1069 (val) = RREG32(addr); \
@@ -1080,24 +1095,25 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
1080 mem_written_by_device) \ 1095 mem_written_by_device) \
1081({ \ 1096({ \
1082 ktime_t __timeout; \ 1097 ktime_t __timeout; \
1083 /* timeout should be longer when working with simulator */ \
1084 if (hdev->pdev) \ 1098 if (hdev->pdev) \
1085 __timeout = ktime_add_us(ktime_get(), timeout_us); \ 1099 __timeout = ktime_add_us(ktime_get(), timeout_us); \
1086 else \ 1100 else \
1087 __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \ 1101 __timeout = ktime_add_us(ktime_get(),\
1102 min((u64)(timeout_us * 10), \
1103 (u64) HL_SIM_MAX_TIMEOUT_US)); \
1088 might_sleep_if(sleep_us); \ 1104 might_sleep_if(sleep_us); \
1089 for (;;) { \ 1105 for (;;) { \
1090 /* Verify we read updates done by other cores or by device */ \ 1106 /* Verify we read updates done by other cores or by device */ \
1091 mb(); \ 1107 mb(); \
1092 (val) = *((u32 *) (uintptr_t) (addr)); \ 1108 (val) = *((u32 *) (uintptr_t) (addr)); \
1093 if (mem_written_by_device) \ 1109 if (mem_written_by_device) \
1094 (val) = le32_to_cpu(val); \ 1110 (val) = le32_to_cpu(*(__le32 *) &(val)); \
1095 if (cond) \ 1111 if (cond) \
1096 break; \ 1112 break; \
1097 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \ 1113 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1098 (val) = *((u32 *) (uintptr_t) (addr)); \ 1114 (val) = *((u32 *) (uintptr_t) (addr)); \
1099 if (mem_written_by_device) \ 1115 if (mem_written_by_device) \
1100 (val) = le32_to_cpu(val); \ 1116 (val) = le32_to_cpu(*(__le32 *) &(val)); \
1101 break; \ 1117 break; \
1102 } \ 1118 } \
1103 if (sleep_us) \ 1119 if (sleep_us) \
@@ -1110,11 +1126,12 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
1110 timeout_us) \ 1126 timeout_us) \
1111({ \ 1127({ \
1112 ktime_t __timeout; \ 1128 ktime_t __timeout; \
1113 /* timeout should be longer when working with simulator */ \
1114 if (hdev->pdev) \ 1129 if (hdev->pdev) \
1115 __timeout = ktime_add_us(ktime_get(), timeout_us); \ 1130 __timeout = ktime_add_us(ktime_get(), timeout_us); \
1116 else \ 1131 else \
1117 __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \ 1132 __timeout = ktime_add_us(ktime_get(),\
1133 min((u64)(timeout_us * 10), \
1134 (u64) HL_SIM_MAX_TIMEOUT_US)); \
1118 might_sleep_if(sleep_us); \ 1135 might_sleep_if(sleep_us); \
1119 for (;;) { \ 1136 for (;;) { \
1120 (val) = readl(addr); \ 1137 (val) = readl(addr); \
@@ -1143,12 +1160,24 @@ struct hl_device_reset_work {
1143}; 1160};
1144 1161
1145/** 1162/**
1163 * struct hl_device_idle_busy_ts - used for calculating device utilization rate.
1164 * @idle_to_busy_ts: timestamp when device changed from idle to busy.
1165 * @busy_to_idle_ts: timestamp when device changed from busy to idle.
1166 */
1167struct hl_device_idle_busy_ts {
1168 ktime_t idle_to_busy_ts;
1169 ktime_t busy_to_idle_ts;
1170};
1171
1172/**
1146 * struct hl_device - habanalabs device structure. 1173 * struct hl_device - habanalabs device structure.
1147 * @pdev: pointer to PCI device, can be NULL in case of simulator device. 1174 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
1148 * @pcie_bar: array of available PCIe bars. 1175 * @pcie_bar: array of available PCIe bars.
1149 * @rmmio: configuration area address on SRAM. 1176 * @rmmio: configuration area address on SRAM.
1150 * @cdev: related char device. 1177 * @cdev: related char device.
1151 * @dev: realted kernel basic device structure. 1178 * @cdev_ctrl: char device for control operations only (INFO IOCTL)
1179 * @dev: related kernel basic device structure.
1180 * @dev_ctrl: related kernel device structure for the control device
1152 * @work_freq: delayed work to lower device frequency if possible. 1181 * @work_freq: delayed work to lower device frequency if possible.
1153 * @work_heartbeat: delayed work for ArmCP is-alive check. 1182 * @work_heartbeat: delayed work for ArmCP is-alive check.
1154 * @asic_name: ASIC specific name. 1183 * @asic_name: ASIC specific name.
@@ -1156,25 +1185,19 @@ struct hl_device_reset_work {
1156 * @completion_queue: array of hl_cq. 1185 * @completion_queue: array of hl_cq.
1157 * @cq_wq: work queue of completion queues for executing work in process context 1186 * @cq_wq: work queue of completion queues for executing work in process context
1158 * @eq_wq: work queue of event queue for executing work in process context. 1187 * @eq_wq: work queue of event queue for executing work in process context.
1159 * @kernel_ctx: KMD context structure. 1188 * @kernel_ctx: Kernel driver context structure.
1160 * @kernel_queues: array of hl_hw_queue. 1189 * @kernel_queues: array of hl_hw_queue.
1161 * @hw_queues_mirror_list: CS mirror list for TDR. 1190 * @hw_queues_mirror_list: CS mirror list for TDR.
1162 * @hw_queues_mirror_lock: protects hw_queues_mirror_list. 1191 * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
1163 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs. 1192 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs.
1164 * @event_queue: event queue for IRQ from ArmCP. 1193 * @event_queue: event queue for IRQ from ArmCP.
1165 * @dma_pool: DMA pool for small allocations. 1194 * @dma_pool: DMA pool for small allocations.
1166 * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address. 1195 * @cpu_accessible_dma_mem: Host <-> ArmCP shared memory CPU address.
1167 * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address. 1196 * @cpu_accessible_dma_address: Host <-> ArmCP shared memory DMA address.
1168 * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool. 1197 * @cpu_accessible_dma_pool: Host <-> ArmCP shared memory pool.
1169 * @asid_bitmap: holds used/available ASIDs. 1198 * @asid_bitmap: holds used/available ASIDs.
1170 * @asid_mutex: protects asid_bitmap. 1199 * @asid_mutex: protects asid_bitmap.
1171 * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although 1200 * @send_cpu_message_lock: enforces only one message in Host <-> ArmCP queue.
1172 * fd_open_cnt is atomic, we need this lock to serialize
1173 * the open function because the driver currently supports
1174 * only a single process at a time. In addition, we need a
1175 * lock here so we can flush user processes which are opening
1176 * the device while we are trying to hard reset it
1177 * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
1178 * @debug_lock: protects critical section of setting debug mode for device 1201 * @debug_lock: protects critical section of setting debug mode for device
1179 * @asic_prop: ASIC specific immutable properties. 1202 * @asic_prop: ASIC specific immutable properties.
1180 * @asic_funcs: ASIC specific functions. 1203 * @asic_funcs: ASIC specific functions.
@@ -1189,22 +1212,28 @@ struct hl_device_reset_work {
1189 * @hl_debugfs: device's debugfs manager. 1212 * @hl_debugfs: device's debugfs manager.
1190 * @cb_pool: list of preallocated CBs. 1213 * @cb_pool: list of preallocated CBs.
1191 * @cb_pool_lock: protects the CB pool. 1214 * @cb_pool_lock: protects the CB pool.
1192 * @user_ctx: current user context executing. 1215 * @fpriv_list: list of file private data structures. Each structure is created
1216 * when a user opens the device
1217 * @fpriv_list_lock: protects the fpriv_list
1218 * @compute_ctx: current compute context executing.
1219 * @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
1220 * and vice-versa
1193 * @dram_used_mem: current DRAM memory consumption. 1221 * @dram_used_mem: current DRAM memory consumption.
1194 * @timeout_jiffies: device CS timeout value. 1222 * @timeout_jiffies: device CS timeout value.
1195 * @max_power: the max power of the device, as configured by the sysadmin. This 1223 * @max_power: the max power of the device, as configured by the sysadmin. This
1196 * value is saved so in case of hard-reset, KMD will restore this 1224 * value is saved so in case of hard-reset, the driver will restore
1197 * value and update the F/W after the re-initialization 1225 * this value and update the F/W after the re-initialization
1198 * @in_reset: is device in reset flow. 1226 * @in_reset: is device in reset flow.
1199 * @curr_pll_profile: current PLL profile. 1227 * @curr_pll_profile: current PLL profile.
1200 * @fd_open_cnt: number of open user processes.
1201 * @cs_active_cnt: number of active command submissions on this device (active 1228 * @cs_active_cnt: number of active command submissions on this device (active
1202 * means already in H/W queues) 1229 * means already in H/W queues)
1203 * @major: habanalabs KMD major. 1230 * @major: habanalabs kernel driver major.
1204 * @high_pll: high PLL profile frequency. 1231 * @high_pll: high PLL profile frequency.
1205 * @soft_reset_cnt: number of soft reset since KMD loading. 1232 * @soft_reset_cnt: number of soft resets since the driver was loaded.
1206 * @hard_reset_cnt: number of hard reset since KMD loading. 1233 * @hard_reset_cnt: number of hard resets since the driver was loaded.
1234 * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
1207 * @id: device minor. 1235 * @id: device minor.
1236 * @id_control: minor of the control device
1208 * @disabled: is device disabled. 1237 * @disabled: is device disabled.
1209 * @late_init_done: is late init stage was done during initialization. 1238 * @late_init_done: is late init stage was done during initialization.
1210 * @hwmon_initialized: is H/W monitor sensors was initialized. 1239 * @hwmon_initialized: is H/W monitor sensors was initialized.
@@ -1218,15 +1247,18 @@ struct hl_device_reset_work {
1218 * @mmu_enable: is MMU enabled. 1247 * @mmu_enable: is MMU enabled.
1219 * @device_cpu_disabled: is the device CPU disabled (due to timeouts) 1248 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
1220 * @dma_mask: the dma mask that was set for this device 1249 * @dma_mask: the dma mask that was set for this device
1221 * @in_debug: is device under debug. This, together with fd_open_cnt, enforces 1250 * @in_debug: is device under debug. This, together with fpriv_list, enforces
1222 * that only a single user is configuring the debug infrastructure. 1251 * that only a single user is configuring the debug infrastructure.
1252 * @cdev_sysfs_created: were char devices and sysfs nodes created.
1223 */ 1253 */
1224struct hl_device { 1254struct hl_device {
1225 struct pci_dev *pdev; 1255 struct pci_dev *pdev;
1226 void __iomem *pcie_bar[6]; 1256 void __iomem *pcie_bar[6];
1227 void __iomem *rmmio; 1257 void __iomem *rmmio;
1228 struct cdev cdev; 1258 struct cdev cdev;
1259 struct cdev cdev_ctrl;
1229 struct device *dev; 1260 struct device *dev;
1261 struct device *dev_ctrl;
1230 struct delayed_work work_freq; 1262 struct delayed_work work_freq;
1231 struct delayed_work work_heartbeat; 1263 struct delayed_work work_heartbeat;
1232 char asic_name[16]; 1264 char asic_name[16];
@@ -1246,8 +1278,6 @@ struct hl_device {
1246 struct gen_pool *cpu_accessible_dma_pool; 1278 struct gen_pool *cpu_accessible_dma_pool;
1247 unsigned long *asid_bitmap; 1279 unsigned long *asid_bitmap;
1248 struct mutex asid_mutex; 1280 struct mutex asid_mutex;
1249 /* TODO: remove fd_open_cnt_lock for multiple process support */
1250 struct mutex fd_open_cnt_lock;
1251 struct mutex send_cpu_message_lock; 1281 struct mutex send_cpu_message_lock;
1252 struct mutex debug_lock; 1282 struct mutex debug_lock;
1253 struct asic_fixed_properties asic_prop; 1283 struct asic_fixed_properties asic_prop;
@@ -1266,21 +1296,26 @@ struct hl_device {
1266 struct list_head cb_pool; 1296 struct list_head cb_pool;
1267 spinlock_t cb_pool_lock; 1297 spinlock_t cb_pool_lock;
1268 1298
1269 /* TODO: remove user_ctx for multiple process support */ 1299 struct list_head fpriv_list;
1270 struct hl_ctx *user_ctx; 1300 struct mutex fpriv_list_lock;
1301
1302 struct hl_ctx *compute_ctx;
1303
1304 struct hl_device_idle_busy_ts *idle_busy_ts_arr;
1271 1305
1272 atomic64_t dram_used_mem; 1306 atomic64_t dram_used_mem;
1273 u64 timeout_jiffies; 1307 u64 timeout_jiffies;
1274 u64 max_power; 1308 u64 max_power;
1275 atomic_t in_reset; 1309 atomic_t in_reset;
1276 atomic_t curr_pll_profile; 1310 enum hl_pll_frequency curr_pll_profile;
1277 atomic_t fd_open_cnt; 1311 int cs_active_cnt;
1278 atomic_t cs_active_cnt;
1279 u32 major; 1312 u32 major;
1280 u32 high_pll; 1313 u32 high_pll;
1281 u32 soft_reset_cnt; 1314 u32 soft_reset_cnt;
1282 u32 hard_reset_cnt; 1315 u32 hard_reset_cnt;
1316 u32 idle_busy_ts_idx;
1283 u16 id; 1317 u16 id;
1318 u16 id_control;
1284 u8 disabled; 1319 u8 disabled;
1285 u8 late_init_done; 1320 u8 late_init_done;
1286 u8 hwmon_initialized; 1321 u8 hwmon_initialized;
@@ -1293,6 +1328,7 @@ struct hl_device {
1293 u8 device_cpu_disabled; 1328 u8 device_cpu_disabled;
1294 u8 dma_mask; 1329 u8 dma_mask;
1295 u8 in_debug; 1330 u8 in_debug;
1331 u8 cdev_sysfs_created;
1296 1332
1297 /* Parameters for bring-up */ 1333 /* Parameters for bring-up */
1298 u8 mmu_enable; 1334 u8 mmu_enable;
@@ -1386,6 +1422,7 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
1386} 1422}
1387 1423
1388int hl_device_open(struct inode *inode, struct file *filp); 1424int hl_device_open(struct inode *inode, struct file *filp);
1425int hl_device_open_ctrl(struct inode *inode, struct file *filp);
1389bool hl_device_disabled_or_in_reset(struct hl_device *hdev); 1426bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
1390enum hl_device_status hl_device_status(struct hl_device *hdev); 1427enum hl_device_status hl_device_status(struct hl_device *hdev);
1391int hl_device_set_debug_mode(struct hl_device *hdev, bool enable); 1428int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
@@ -1439,6 +1476,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
1439void hl_hpriv_get(struct hl_fpriv *hpriv); 1476void hl_hpriv_get(struct hl_fpriv *hpriv);
1440void hl_hpriv_put(struct hl_fpriv *hpriv); 1477void hl_hpriv_put(struct hl_fpriv *hpriv);
1441int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq); 1478int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
1479uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms);
1442 1480
1443int hl_build_hwmon_channel_info(struct hl_device *hdev, 1481int hl_build_hwmon_channel_info(struct hl_device *hdev,
1444 struct armcp_sensor *sensors_arr); 1482 struct armcp_sensor *sensors_arr);
@@ -1625,6 +1663,7 @@ static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
1625 1663
1626/* IOCTLs */ 1664/* IOCTLs */
1627long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); 1665long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
1666long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
1628int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data); 1667int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
1629int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data); 1668int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
1630int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data); 1669int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
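
The HL_SIM_MAX_TIMEOUT_US change is the same in all three polling macros above: simulator runs still get the 10x scaled timeout, but the scaled value is clamped so a caller passing a large timeout_us cannot stretch a poll into minutes. The arithmetic, pulled out of the macro into a plain function for readability (a sketch, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define HL_SIM_MAX_TIMEOUT_US 10000000ULL /* 10s, matching the new define */

	static uint64_t effective_timeout_us(uint64_t timeout_us, int is_real_device)
	{
		uint64_t scaled = timeout_us * 10;

		if (is_real_device)
			return timeout_us;

		/* simulator: scale by 10 but never beyond the cap */
		return scaled < HL_SIM_MAX_TIMEOUT_US ?
			scaled : HL_SIM_MAX_TIMEOUT_US;
	}

	int main(void)
	{
		/* a 5s request on a simulator would have become 50s; now 10s */
		printf("%llu\n",
		       (unsigned long long)effective_timeout_us(5000000, 0));
		return 0;
	}
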
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index 6f6dbe93f1df..8c342fb499ca 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -95,80 +95,127 @@ int hl_device_open(struct inode *inode, struct file *filp)
95 return -ENXIO; 95 return -ENXIO;
96 } 96 }
97 97
98 mutex_lock(&hdev->fd_open_cnt_lock); 98 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
99 if (!hpriv)
100 return -ENOMEM;
101
102 hpriv->hdev = hdev;
103 filp->private_data = hpriv;
104 hpriv->filp = filp;
105 mutex_init(&hpriv->restore_phase_mutex);
106 kref_init(&hpriv->refcount);
107 nonseekable_open(inode, filp);
108
109 hl_cb_mgr_init(&hpriv->cb_mgr);
110 hl_ctx_mgr_init(&hpriv->ctx_mgr);
111
112 hpriv->taskpid = find_get_pid(current->pid);
113
114 mutex_lock(&hdev->fpriv_list_lock);
99 115
100 if (hl_device_disabled_or_in_reset(hdev)) { 116 if (hl_device_disabled_or_in_reset(hdev)) {
101 dev_err_ratelimited(hdev->dev, 117 dev_err_ratelimited(hdev->dev,
102 "Can't open %s because it is disabled or in reset\n", 118 "Can't open %s because it is disabled or in reset\n",
103 dev_name(hdev->dev)); 119 dev_name(hdev->dev));
104 mutex_unlock(&hdev->fd_open_cnt_lock); 120 rc = -EPERM;
105 return -EPERM; 121 goto out_err;
106 } 122 }
107 123
108 if (hdev->in_debug) { 124 if (hdev->in_debug) {
109 dev_err_ratelimited(hdev->dev, 125 dev_err_ratelimited(hdev->dev,
110 "Can't open %s because it is being debugged by another user\n", 126 "Can't open %s because it is being debugged by another user\n",
111 dev_name(hdev->dev)); 127 dev_name(hdev->dev));
112 mutex_unlock(&hdev->fd_open_cnt_lock); 128 rc = -EPERM;
113 return -EPERM; 129 goto out_err;
114 } 130 }
115 131
116 if (atomic_read(&hdev->fd_open_cnt)) { 132 if (hdev->compute_ctx) {
117 dev_info_ratelimited(hdev->dev, 133 dev_dbg_ratelimited(hdev->dev,
118 "Can't open %s because another user is working on it\n", 134 "Can't open %s because another user is working on it\n",
119 dev_name(hdev->dev)); 135 dev_name(hdev->dev));
120 mutex_unlock(&hdev->fd_open_cnt_lock); 136 rc = -EBUSY;
121 return -EBUSY; 137 goto out_err;
122 }
123
124 atomic_inc(&hdev->fd_open_cnt);
125
126 mutex_unlock(&hdev->fd_open_cnt_lock);
127
128 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
129 if (!hpriv) {
130 rc = -ENOMEM;
131 goto close_device;
132 } 138 }
133 139
134 hpriv->hdev = hdev;
135 filp->private_data = hpriv;
136 hpriv->filp = filp;
137 mutex_init(&hpriv->restore_phase_mutex);
138 kref_init(&hpriv->refcount);
139 nonseekable_open(inode, filp);
140
141 hl_cb_mgr_init(&hpriv->cb_mgr);
142 hl_ctx_mgr_init(&hpriv->ctx_mgr);
143
144 rc = hl_ctx_create(hdev, hpriv); 140 rc = hl_ctx_create(hdev, hpriv);
145 if (rc) { 141 if (rc) {
146 dev_err(hdev->dev, "Failed to open FD (CTX fail)\n"); 142 dev_err(hdev->dev, "Failed to create context %d\n", rc);
147 goto out_err; 143 goto out_err;
148 } 144 }
149 145
150 hpriv->taskpid = find_get_pid(current->pid); 146 /* Device is IDLE at this point so it is legal to change PLLs.
151 147 * There is no need to check anything because if the PLL is
152 /* 148 * already HIGH, the set function will return without doing
153 * Device is IDLE at this point so it is legal to change PLLs. There 149 * anything
154 * is no need to check anything because if the PLL is already HIGH, the
155 * set function will return without doing anything
156 */ 150 */
157 hl_device_set_frequency(hdev, PLL_HIGH); 151 hl_device_set_frequency(hdev, PLL_HIGH);
158 152
153 list_add(&hpriv->dev_node, &hdev->fpriv_list);
154 mutex_unlock(&hdev->fpriv_list_lock);
155
159 hl_debugfs_add_file(hpriv); 156 hl_debugfs_add_file(hpriv);
160 157
161 return 0; 158 return 0;
162 159
163out_err: 160out_err:
164 filp->private_data = NULL; 161 mutex_unlock(&hdev->fpriv_list_lock);
165 hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); 162
166 hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); 163 hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
164 hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
165 filp->private_data = NULL;
167 mutex_destroy(&hpriv->restore_phase_mutex); 166 mutex_destroy(&hpriv->restore_phase_mutex);
167 put_pid(hpriv->taskpid);
168
168 kfree(hpriv); 169 kfree(hpriv);
170 return rc;
171}
172
173int hl_device_open_ctrl(struct inode *inode, struct file *filp)
174{
175 struct hl_device *hdev;
176 struct hl_fpriv *hpriv;
177 int rc;
178
179 mutex_lock(&hl_devs_idr_lock);
180 hdev = idr_find(&hl_devs_idr, iminor(inode));
181 mutex_unlock(&hl_devs_idr_lock);
182
183 if (!hdev) {
184 pr_err("Couldn't find device %d:%d\n",
185 imajor(inode), iminor(inode));
186 return -ENXIO;
187 }
188
189 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
190 if (!hpriv)
191 return -ENOMEM;
192
193 mutex_lock(&hdev->fpriv_list_lock);
194
195 if (hl_device_disabled_or_in_reset(hdev)) {
196 dev_err_ratelimited(hdev->dev_ctrl,
197 "Can't open %s because it is disabled or in reset\n",
198 dev_name(hdev->dev_ctrl));
199 rc = -EPERM;
200 goto out_err;
201 }
169 202
170close_device: 203 list_add(&hpriv->dev_node, &hdev->fpriv_list);
171 atomic_dec(&hdev->fd_open_cnt); 204 mutex_unlock(&hdev->fpriv_list_lock);
205
206 hpriv->hdev = hdev;
207 filp->private_data = hpriv;
208 hpriv->filp = filp;
209 hpriv->is_control = true;
210 nonseekable_open(inode, filp);
211
212 hpriv->taskpid = find_get_pid(current->pid);
213
214 return 0;
215
216out_err:
217 mutex_unlock(&hdev->fpriv_list_lock);
218 kfree(hpriv);
172 return rc; 219 return rc;
173} 220}
174 221
@@ -199,7 +246,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
199 enum hl_asic_type asic_type, int minor) 246 enum hl_asic_type asic_type, int minor)
200{ 247{
201 struct hl_device *hdev; 248 struct hl_device *hdev;
202 int rc; 249 int rc, main_id, ctrl_id = 0;
203 250
204 *dev = NULL; 251 *dev = NULL;
205 252
@@ -240,33 +287,34 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
240 287
241 mutex_lock(&hl_devs_idr_lock); 288 mutex_lock(&hl_devs_idr_lock);
242 289
243 if (minor == -1) { 290 /* Always save 2 numbers, 1 for main device and 1 for control.
244 rc = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, 291 * They must be consecutive
292 */
293 main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
245 GFP_KERNEL); 294 GFP_KERNEL);
246 } else {
247 void *old_idr = idr_replace(&hl_devs_idr, hdev, minor);
248 295
249 if (IS_ERR_VALUE(old_idr)) { 296 if (main_id >= 0)
250 rc = PTR_ERR(old_idr); 297 ctrl_id = idr_alloc(&hl_devs_idr, hdev, main_id + 1,
251 pr_err("Error %d when trying to replace minor %d\n", 298 main_id + 2, GFP_KERNEL);
252 rc, minor);
253 mutex_unlock(&hl_devs_idr_lock);
254 goto free_hdev;
255 }
256 rc = minor;
257 }
258 299
259 mutex_unlock(&hl_devs_idr_lock); 300 mutex_unlock(&hl_devs_idr_lock);
260 301
261 if (rc < 0) { 302 if ((main_id < 0) || (ctrl_id < 0)) {
262 if (rc == -ENOSPC) { 303 if ((main_id == -ENOSPC) || (ctrl_id == -ENOSPC))
263 pr_err("too many devices in the system\n"); 304 pr_err("too many devices in the system\n");
264 rc = -EBUSY; 305
306 if (main_id >= 0) {
307 mutex_lock(&hl_devs_idr_lock);
308 idr_remove(&hl_devs_idr, main_id);
309 mutex_unlock(&hl_devs_idr_lock);
265 } 310 }
311
312 rc = -EBUSY;
266 goto free_hdev; 313 goto free_hdev;
267 } 314 }
268 315
269 hdev->id = rc; 316 hdev->id = main_id;
317 hdev->id_control = ctrl_id;
270 318
271 *dev = hdev; 319 *dev = hdev;
272 320
@@ -288,6 +336,7 @@ void destroy_hdev(struct hl_device *hdev)
288 /* Remove device from the device list */ 336 /* Remove device from the device list */
289 mutex_lock(&hl_devs_idr_lock); 337 mutex_lock(&hl_devs_idr_lock);
290 idr_remove(&hl_devs_idr, hdev->id); 338 idr_remove(&hl_devs_idr, hdev->id);
339 idr_remove(&hl_devs_idr, hdev->id_control);
291 mutex_unlock(&hl_devs_idr_lock); 340 mutex_unlock(&hl_devs_idr_lock);
292 341
293 kfree(hdev); 342 kfree(hdev);
@@ -295,8 +344,7 @@ void destroy_hdev(struct hl_device *hdev)
295 344
296static int hl_pmops_suspend(struct device *dev) 345static int hl_pmops_suspend(struct device *dev)
297{ 346{
298 struct pci_dev *pdev = to_pci_dev(dev); 347 struct hl_device *hdev = dev_get_drvdata(dev);
299 struct hl_device *hdev = pci_get_drvdata(pdev);
300 348
301 pr_debug("Going to suspend PCI device\n"); 349 pr_debug("Going to suspend PCI device\n");
302 350
@@ -310,8 +358,7 @@ static int hl_pmops_suspend(struct device *dev)
310 358
311static int hl_pmops_resume(struct device *dev) 359static int hl_pmops_resume(struct device *dev)
312{ 360{
313 struct pci_dev *pdev = to_pci_dev(dev); 361 struct hl_device *hdev = dev_get_drvdata(dev);
314 struct hl_device *hdev = pci_get_drvdata(pdev);
315 362
316 pr_debug("Going to resume PCI device\n"); 363 pr_debug("Going to resume PCI device\n");
317 364
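
The reworked minor allocation in create_hdev() can be read as a small helper: grab any free main minor, then pin the control minor to exactly main_id + 1 by handing idr_alloc() a one-slot range. Both entries point at the same hl_device, so open() on either char device resolves it with a single idr_find() on iminor(inode). A condensed kernel-style sketch of that flow (not the literal driver code):

	static int alloc_consecutive_minors(struct idr *idr, struct hl_device *hdev,
					    int *main_id, int *ctrl_id)
	{
		*main_id = idr_alloc(idr, hdev, 0, HL_MAX_MINORS, GFP_KERNEL);
		if (*main_id < 0)
			return *main_id;

		/* [main_id + 1, main_id + 2) is a single slot: either the
		 * very next minor is free or this fails with -ENOSPC
		 */
		*ctrl_id = idr_alloc(idr, hdev, *main_id + 1, *main_id + 2,
				     GFP_KERNEL);
		if (*ctrl_id < 0) {
			idr_remove(idr, *main_id); /* roll back the main minor */
			return *ctrl_id;
		}

		return 0;
	}
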
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 07127576b3e8..66d9c710073c 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -65,7 +65,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
65 hw_ip.num_of_events = prop->num_of_events; 65 hw_ip.num_of_events = prop->num_of_events;
66 memcpy(hw_ip.armcp_version, 66 memcpy(hw_ip.armcp_version,
67 prop->armcp_info.armcp_version, VERSION_MAX_LEN); 67 prop->armcp_info.armcp_version, VERSION_MAX_LEN);
68 hw_ip.armcp_cpld_version = __le32_to_cpu(prop->armcp_info.cpld_version); 68 hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
69 hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr; 69 hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
70 hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf; 70 hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
71 hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od; 71 hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
@@ -75,7 +75,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
75 min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0; 75 min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
76} 76}
77 77
78static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args) 78static int hw_events_info(struct hl_device *hdev, bool aggregate,
79 struct hl_info_args *args)
79{ 80{
80 u32 size, max_size = args->return_size; 81 u32 size, max_size = args->return_size;
81 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 82 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
@@ -84,13 +85,14 @@ static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args)
84 if ((!max_size) || (!out)) 85 if ((!max_size) || (!out))
85 return -EINVAL; 86 return -EINVAL;
86 87
87 arr = hdev->asic_funcs->get_events_stat(hdev, &size); 88 arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
88 89
89 return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0; 90 return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
90} 91}
91 92
92static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args) 93static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
93{ 94{
95 struct hl_device *hdev = hpriv->hdev;
94 struct hl_info_dram_usage dram_usage = {0}; 96 struct hl_info_dram_usage dram_usage = {0};
95 u32 max_size = args->return_size; 97 u32 max_size = args->return_size;
96 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 98 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
@@ -104,7 +106,9 @@ static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args)
104 prop->dram_base_address); 106 prop->dram_base_address);
105 dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) - 107 dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
106 atomic64_read(&hdev->dram_used_mem); 108 atomic64_read(&hdev->dram_used_mem);
107 dram_usage.ctx_dram_mem = atomic64_read(&hdev->user_ctx->dram_phys_mem); 109 if (hpriv->ctx)
110 dram_usage.ctx_dram_mem =
111 atomic64_read(&hpriv->ctx->dram_phys_mem);
108 112
109 return copy_to_user(out, &dram_usage, 113 return copy_to_user(out, &dram_usage,
110 min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0; 114 min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
@@ -141,13 +145,16 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
141 params->op = args->op; 145 params->op = args->op;
142 146
143 if (args->input_ptr && args->input_size) { 147 if (args->input_ptr && args->input_size) {
144 input = memdup_user(u64_to_user_ptr(args->input_ptr), 148 input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
145 args->input_size); 149 if (!input) {
146 if (IS_ERR(input)) { 150 rc = -ENOMEM;
147 rc = PTR_ERR(input); 151 goto out;
148 input = NULL; 152 }
149 dev_err(hdev->dev, 153
150 "error %d when copying input debug data\n", rc); 154 if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
155 args->input_size)) {
156 rc = -EFAULT;
157 dev_err(hdev->dev, "failed to copy input debug data\n");
151 goto out; 158 goto out;
152 } 159 }
153 160
@@ -191,42 +198,81 @@ out:
191 return rc; 198 return rc;
192} 199}
193 200
194static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data) 201static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
202{
203 struct hl_info_device_utilization device_util = {0};
204 u32 max_size = args->return_size;
205 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
206
207 if ((!max_size) || (!out))
208 return -EINVAL;
209
210 if ((args->period_ms < 100) || (args->period_ms > 1000) ||
211 (args->period_ms % 100)) {
212 dev_err(hdev->dev,
213 "period %u must be between 100 - 1000 and must be divisible by 100\n",
214 args->period_ms);
215 return -EINVAL;
216 }
217
218 device_util.utilization = hl_device_utilization(hdev, args->period_ms);
219
220 return copy_to_user(out, &device_util,
221 min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
222}
223
224static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
225 struct device *dev)
195{ 226{
196 struct hl_info_args *args = data; 227 struct hl_info_args *args = data;
197 struct hl_device *hdev = hpriv->hdev; 228 struct hl_device *hdev = hpriv->hdev;
198 int rc; 229 int rc;
199 230
200 /* We want to return device status even if it disabled or in reset */ 231 /*
201 if (args->op == HL_INFO_DEVICE_STATUS) 232 * Information is returned for the following opcodes even if the device
233 * is disabled or in reset.
234 */
235 switch (args->op) {
236 case HL_INFO_HW_IP_INFO:
237 return hw_ip_info(hdev, args);
238
239 case HL_INFO_DEVICE_STATUS:
202 return device_status_info(hdev, args); 240 return device_status_info(hdev, args);
203 241
242 default:
243 break;
244 }
245
204 if (hl_device_disabled_or_in_reset(hdev)) { 246 if (hl_device_disabled_or_in_reset(hdev)) {
205 dev_warn_ratelimited(hdev->dev, 247 dev_warn_ratelimited(dev,
206 "Device is %s. Can't execute INFO IOCTL\n", 248 "Device is %s. Can't execute INFO IOCTL\n",
207 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); 249 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
208 return -EBUSY; 250 return -EBUSY;
209 } 251 }
210 252
211 switch (args->op) { 253 switch (args->op) {
212 case HL_INFO_HW_IP_INFO:
213 rc = hw_ip_info(hdev, args);
214 break;
215
216 case HL_INFO_HW_EVENTS: 254 case HL_INFO_HW_EVENTS:
217 rc = hw_events_info(hdev, args); 255 rc = hw_events_info(hdev, false, args);
218 break; 256 break;
219 257
220 case HL_INFO_DRAM_USAGE: 258 case HL_INFO_DRAM_USAGE:
221 rc = dram_usage_info(hdev, args); 259 rc = dram_usage_info(hpriv, args);
222 break; 260 break;
223 261
224 case HL_INFO_HW_IDLE: 262 case HL_INFO_HW_IDLE:
225 rc = hw_idle(hdev, args); 263 rc = hw_idle(hdev, args);
226 break; 264 break;
227 265
266 case HL_INFO_DEVICE_UTILIZATION:
267 rc = device_utilization(hdev, args);
268 break;
269
270 case HL_INFO_HW_EVENTS_AGGREGATE:
271 rc = hw_events_info(hdev, true, args);
272 break;
273
228 default: 274 default:
229 dev_err(hdev->dev, "Invalid request %d\n", args->op); 275 dev_err(dev, "Invalid request %d\n", args->op);
230 rc = -ENOTTY; 276 rc = -ENOTTY;
231 break; 277 break;
232 } 278 }
@@ -234,6 +280,16 @@ static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
234 return rc; 280 return rc;
235} 281}
236 282
283static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
284{
285 return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
286}
287
288static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
289{
290 return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
291}
292
237static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data) 293static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
238{ 294{
239 struct hl_debug_args *args = data; 295 struct hl_debug_args *args = data;
@@ -288,52 +344,45 @@ static const struct hl_ioctl_desc hl_ioctls[] = {
288 HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl) 344 HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
289}; 345};
290 346
291#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls) 347static const struct hl_ioctl_desc hl_ioctls_control[] = {
348 HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
349};
292 350
293long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 351static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
352 const struct hl_ioctl_desc *ioctl, struct device *dev)
294{ 353{
295 struct hl_fpriv *hpriv = filep->private_data; 354 struct hl_fpriv *hpriv = filep->private_data;
296 struct hl_device *hdev = hpriv->hdev; 355 struct hl_device *hdev = hpriv->hdev;
297 hl_ioctl_t *func;
298 const struct hl_ioctl_desc *ioctl = NULL;
299 unsigned int nr = _IOC_NR(cmd); 356 unsigned int nr = _IOC_NR(cmd);
300 char stack_kdata[128] = {0}; 357 char stack_kdata[128] = {0};
301 char *kdata = NULL; 358 char *kdata = NULL;
302 unsigned int usize, asize; 359 unsigned int usize, asize;
360 hl_ioctl_t *func;
361 u32 hl_size;
303 int retcode; 362 int retcode;
304 363
305 if (hdev->hard_reset_pending) { 364 if (hdev->hard_reset_pending) {
306 dev_crit_ratelimited(hdev->dev, 365 dev_crit_ratelimited(hdev->dev_ctrl,
307 "Device HARD reset pending! Please close FD\n"); 366 "Device HARD reset pending! Please close FD\n");
308 return -ENODEV; 367 return -ENODEV;
309 } 368 }
310 369
311 if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
312 u32 hl_size;
313
314 ioctl = &hl_ioctls[nr];
315
316 hl_size = _IOC_SIZE(ioctl->cmd);
317 usize = asize = _IOC_SIZE(cmd);
318 if (hl_size > asize)
319 asize = hl_size;
320
321 cmd = ioctl->cmd;
322 } else {
323 dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
324 task_pid_nr(current), nr);
325 return -ENOTTY;
326 }
327
328 /* Do not trust userspace, use our own definition */ 370 /* Do not trust userspace, use our own definition */
329 func = ioctl->func; 371 func = ioctl->func;
330 372
331 if (unlikely(!func)) { 373 if (unlikely(!func)) {
332 dev_dbg(hdev->dev, "no function\n"); 374 dev_dbg(dev, "no function\n");
333 retcode = -ENOTTY; 375 retcode = -ENOTTY;
334 goto out_err; 376 goto out_err;
335 } 377 }
336 378
379 hl_size = _IOC_SIZE(ioctl->cmd);
380 usize = asize = _IOC_SIZE(cmd);
381 if (hl_size > asize)
382 asize = hl_size;
383
384 cmd = ioctl->cmd;
385
337 if (cmd & (IOC_IN | IOC_OUT)) { 386 if (cmd & (IOC_IN | IOC_OUT)) {
338 if (asize <= sizeof(stack_kdata)) { 387 if (asize <= sizeof(stack_kdata)) {
339 kdata = stack_kdata; 388 kdata = stack_kdata;
@@ -363,8 +412,7 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
363 412
364out_err: 413out_err:
365 if (retcode) 414 if (retcode)
366 dev_dbg(hdev->dev, 415 dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
367 "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
368 task_pid_nr(current), cmd, nr); 416 task_pid_nr(current), cmd, nr);
369 417
370 if (kdata != stack_kdata) 418 if (kdata != stack_kdata)
@@ -372,3 +420,39 @@ out_err:
372 420
373 return retcode; 421 return retcode;
374} 422}
423
424long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
425{
426 struct hl_fpriv *hpriv = filep->private_data;
427 struct hl_device *hdev = hpriv->hdev;
428 const struct hl_ioctl_desc *ioctl = NULL;
429 unsigned int nr = _IOC_NR(cmd);
430
431 if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
432 ioctl = &hl_ioctls[nr];
433 } else {
434 dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
435 task_pid_nr(current), nr);
436 return -ENOTTY;
437 }
438
439 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
440}
441
442long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
443{
444 struct hl_fpriv *hpriv = filep->private_data;
445 struct hl_device *hdev = hpriv->hdev;
446 const struct hl_ioctl_desc *ioctl = NULL;
447 unsigned int nr = _IOC_NR(cmd);
448
449 if (nr == _IOC_NR(HL_IOCTL_INFO)) {
450 ioctl = &hl_ioctls_control[nr];
451 } else {
452 dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
453 task_pid_nr(current), nr);
454 return -ENOTTY;
455 }
456
457 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
458}
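
With the control path in place, utilization can be queried without ever opening the compute device. A hypothetical userspace example (the /dev node name and the installed uapi header path are assumptions; the opcode, period_ms bounds, and struct fields follow include/uapi/misc/habanalabs.h as extended by this series):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <misc/habanalabs.h>	/* installed uapi header */

	int main(void)
	{
		struct hl_info_device_utilization util;
		struct hl_info_args args;
		int fd;

		fd = open("/dev/hl_controlD0", O_RDWR); /* control node, device 0 */
		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&args, 0, sizeof(args));
		memset(&util, 0, sizeof(util));
		args.op = HL_INFO_DEVICE_UTILIZATION;
		args.period_ms = 500;	/* 100-1000, multiple of 100 (see check above) */
		args.return_pointer = (uint64_t) (uintptr_t) &util;
		args.return_size = sizeof(util);

		if (ioctl(fd, HL_IOCTL_INFO, &args) == 0)
			printf("utilization: %u\n", util.utilization);
		else
			perror("HL_IOCTL_INFO");

		close(fd);
		return 0;
	}
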
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 5f5673b74985..55b383b2a116 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -80,9 +80,9 @@ static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
80 80
81 bd = (struct hl_bd *) (uintptr_t) q->kernel_address; 81 bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
82 bd += hl_pi_2_offset(q->pi); 82 bd += hl_pi_2_offset(q->pi);
83 bd->ctl = __cpu_to_le32(ctl); 83 bd->ctl = cpu_to_le32(ctl);
84 bd->len = __cpu_to_le32(len); 84 bd->len = cpu_to_le32(len);
85 bd->ptr = __cpu_to_le64(ptr); 85 bd->ptr = cpu_to_le64(ptr);
86 86
87 q->pi = hl_queue_inc_ptr(q->pi); 87 q->pi = hl_queue_inc_ptr(q->pi);
88 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); 88 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
@@ -249,7 +249,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
249 len = job->job_cb_size; 249 len = job->job_cb_size;
250 ptr = cb->bus_address; 250 ptr = cb->bus_address;
251 251
252 cq_pkt.data = __cpu_to_le32( 252 cq_pkt.data = cpu_to_le32(
253 ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT) 253 ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
254 & CQ_ENTRY_SHADOW_INDEX_MASK) | 254 & CQ_ENTRY_SHADOW_INDEX_MASK) |
255 (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) | 255 (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
@@ -267,7 +267,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
267 267
268 hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len, 268 hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
269 cq_addr, 269 cq_addr,
270 __le32_to_cpu(cq_pkt.data), 270 le32_to_cpu(cq_pkt.data),
271 q->hw_queue_id); 271 q->hw_queue_id);
272 272
273 q->shadow_queue[hl_pi_2_offset(q->pi)] = job; 273 q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
@@ -364,7 +364,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
364 spin_unlock(&hdev->hw_queues_mirror_lock); 364 spin_unlock(&hdev->hw_queues_mirror_lock);
365 } 365 }
366 366
367 atomic_inc(&hdev->cs_active_cnt); 367 if (!hdev->cs_active_cnt++) {
368 struct hl_device_idle_busy_ts *ts;
369
370 ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
371 ts->busy_to_idle_ts = ktime_set(0, 0);
372 ts->idle_to_busy_ts = ktime_get();
373 }
368 374
369 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) 375 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
370 if (job->ext_queue) 376 if (job->ext_queue)
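
The hw_queue.c hunk above replaces the plain atomic counter with edge-triggered timestamping: only the 0 to 1 transition of cs_active_cnt records an idle-to-busy timestamp (the matching busy-to-idle stamp is written when the count drops back to zero elsewhere in the series). The idea in miniature, as ordinary C (a sketch of the bookkeeping, not the driver's locking):

	#include <stdio.h>
	#include <time.h>

	static int cs_active_cnt;
	static struct timespec idle_to_busy_ts;
	static double busy_seconds;	/* accumulated busy time for utilization */

	static void cs_schedule(void)
	{
		/* pre-increment test: only the first in-flight CS marks the edge */
		if (!cs_active_cnt++)
			clock_gettime(CLOCK_MONOTONIC, &idle_to_busy_ts);
	}

	static void cs_complete(void)
	{
		struct timespec now;

		/* only the last completing CS closes the busy interval */
		if (!--cs_active_cnt) {
			clock_gettime(CLOCK_MONOTONIC, &now);
			busy_seconds += (now.tv_sec - idle_to_busy_ts.tv_sec) +
					(now.tv_nsec - idle_to_busy_ts.tv_nsec) / 1e9;
		}
	}

	int main(void)
	{
		cs_schedule();
		cs_schedule();	/* nested submission: no new timestamp */
		cs_complete();
		cs_complete();
		printf("busy for %f seconds\n", busy_seconds);
		return 0;
	}
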
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
index 77facd25c4a2..7be4bace9b4f 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -26,7 +26,7 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
26 int rc, i, j; 26 int rc, i, j;
27 27
28 for (i = 0 ; i < ARMCP_MAX_SENSORS ; i++) { 28 for (i = 0 ; i < ARMCP_MAX_SENSORS ; i++) {
29 type = __le32_to_cpu(sensors_arr[i].type); 29 type = le32_to_cpu(sensors_arr[i].type);
30 30
31 if ((type == 0) && (sensors_arr[i].flags == 0)) 31 if ((type == 0) && (sensors_arr[i].flags == 0))
32 break; 32 break;
@@ -58,10 +58,10 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
58 } 58 }
59 59
60 for (i = 0 ; i < arr_size ; i++) { 60 for (i = 0 ; i < arr_size ; i++) {
61 type = __le32_to_cpu(sensors_arr[i].type); 61 type = le32_to_cpu(sensors_arr[i].type);
62 curr_arr = sensors_by_type[type]; 62 curr_arr = sensors_by_type[type];
63 curr_arr[sensors_by_type_next_index[type]++] = 63 curr_arr[sensors_by_type_next_index[type]++] =
64 __le32_to_cpu(sensors_arr[i].flags); 64 le32_to_cpu(sensors_arr[i].flags);
65 } 65 }
66 66
67 channels_info = kcalloc(num_active_sensor_types + 1, 67 channels_info = kcalloc(num_active_sensor_types + 1,
@@ -273,7 +273,7 @@ long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr)
273 273
274 memset(&pkt, 0, sizeof(pkt)); 274 memset(&pkt, 0, sizeof(pkt));
275 275
276 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET << 276 pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
277 ARMCP_PKT_CTL_OPCODE_SHIFT); 277 ARMCP_PKT_CTL_OPCODE_SHIFT);
278 pkt.sensor_index = __cpu_to_le16(sensor_index); 278 pkt.sensor_index = __cpu_to_le16(sensor_index);
279 pkt.type = __cpu_to_le16(attr); 279 pkt.type = __cpu_to_le16(attr);
@@ -299,7 +299,7 @@ long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr)
299 299
300 memset(&pkt, 0, sizeof(pkt)); 300 memset(&pkt, 0, sizeof(pkt));
301 301
302 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET << 302 pkt.ctl = cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
303 ARMCP_PKT_CTL_OPCODE_SHIFT); 303 ARMCP_PKT_CTL_OPCODE_SHIFT);
304 pkt.sensor_index = __cpu_to_le16(sensor_index); 304 pkt.sensor_index = __cpu_to_le16(sensor_index);
305 pkt.type = __cpu_to_le16(attr); 305 pkt.type = __cpu_to_le16(attr);
@@ -325,7 +325,7 @@ long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr)
325 325
326 memset(&pkt, 0, sizeof(pkt)); 326 memset(&pkt, 0, sizeof(pkt));
327 327
328 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_CURRENT_GET << 328 pkt.ctl = cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
329 ARMCP_PKT_CTL_OPCODE_SHIFT); 329 ARMCP_PKT_CTL_OPCODE_SHIFT);
330 pkt.sensor_index = __cpu_to_le16(sensor_index); 330 pkt.sensor_index = __cpu_to_le16(sensor_index);
331 pkt.type = __cpu_to_le16(attr); 331 pkt.type = __cpu_to_le16(attr);
@@ -351,7 +351,7 @@ long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr)
351 351
352 memset(&pkt, 0, sizeof(pkt)); 352 memset(&pkt, 0, sizeof(pkt));
353 353
354 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET << 354 pkt.ctl = cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
355 ARMCP_PKT_CTL_OPCODE_SHIFT); 355 ARMCP_PKT_CTL_OPCODE_SHIFT);
356 pkt.sensor_index = __cpu_to_le16(sensor_index); 356 pkt.sensor_index = __cpu_to_le16(sensor_index);
357 pkt.type = __cpu_to_le16(attr); 357 pkt.type = __cpu_to_le16(attr);
@@ -377,7 +377,7 @@ long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr)
377 377
378 memset(&pkt, 0, sizeof(pkt)); 378 memset(&pkt, 0, sizeof(pkt));
379 379
380 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_GET << 380 pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_GET <<
381 ARMCP_PKT_CTL_OPCODE_SHIFT); 381 ARMCP_PKT_CTL_OPCODE_SHIFT);
382 pkt.sensor_index = __cpu_to_le16(sensor_index); 382 pkt.sensor_index = __cpu_to_le16(sensor_index);
383 pkt.type = __cpu_to_le16(attr); 383 pkt.type = __cpu_to_le16(attr);
@@ -403,11 +403,11 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
403 403
404 memset(&pkt, 0, sizeof(pkt)); 404 memset(&pkt, 0, sizeof(pkt));
405 405
406 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_SET << 406 pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_SET <<
407 ARMCP_PKT_CTL_OPCODE_SHIFT); 407 ARMCP_PKT_CTL_OPCODE_SHIFT);
408 pkt.sensor_index = __cpu_to_le16(sensor_index); 408 pkt.sensor_index = __cpu_to_le16(sensor_index);
409 pkt.type = __cpu_to_le16(attr); 409 pkt.type = __cpu_to_le16(attr);
410 pkt.value = __cpu_to_le64(value); 410 pkt.value = cpu_to_le64(value);
411 411
412 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 412 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
413 SENSORS_PKT_TIMEOUT, NULL); 413 SENSORS_PKT_TIMEOUT, NULL);
@@ -421,6 +421,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
421int hl_hwmon_init(struct hl_device *hdev) 421int hl_hwmon_init(struct hl_device *hdev)
422{ 422{
423 struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev; 423 struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
424 struct asic_fixed_properties *prop = &hdev->asic_prop;
424 int rc; 425 int rc;
425 426
426 if ((hdev->hwmon_initialized) || !(hdev->fw_loading)) 427 if ((hdev->hwmon_initialized) || !(hdev->fw_loading))
@@ -430,7 +431,8 @@ int hl_hwmon_init(struct hl_device *hdev)
430 hdev->hl_chip_info->ops = &hl_hwmon_ops; 431 hdev->hl_chip_info->ops = &hl_hwmon_ops;
431 432
432 hdev->hwmon_dev = hwmon_device_register_with_info(dev, 433 hdev->hwmon_dev = hwmon_device_register_with_info(dev,
433 "habanalabs", hdev, hdev->hl_chip_info, NULL); 434 prop->armcp_info.card_name, hdev,
435 hdev->hl_chip_info, NULL);
434 if (IS_ERR(hdev->hwmon_dev)) { 436 if (IS_ERR(hdev->hwmon_dev)) {
435 rc = PTR_ERR(hdev->hwmon_dev); 437 rc = PTR_ERR(hdev->hwmon_dev);
436 dev_err(hdev->dev, 438 dev_err(hdev->dev,
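The hwmon_init hunk above registers the device under the firmware-reported card name rather than the fixed "habanalabs" string. One caveat worth noting, assuming armcp_info.card_name arrives NUL-terminated: the hwmon core rejects names containing '-', '*' or whitespace, so a defensive caller would sanitize the string first. A minimal sketch (the helper name is illustrative, not part of this patch; isspace() comes from linux/ctype.h):

	static void hl_sanitize_hwmon_name(char *name)
	{
		/* hwmon device names may not contain '-', '*' or
		 * whitespace; replace offenders so registration cannot
		 * fail on the name alone.
		 */
		for (; *name; name++)
			if (*name == '-' || *name == '*' || isspace(*name))
				*name = '_';
	}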
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
index 1f1e35e86d84..e4c6699a1868 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0
2 * 2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd. 3 * Copyright 2016-2019 HabanaLabs, Ltd.
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 */ 6 */
@@ -41,33 +41,34 @@ enum pq_init_status {
41/* 41/*
42 * ArmCP Primary Queue Packets 42 * ArmCP Primary Queue Packets
43 * 43 *
44 * During normal operation, KMD needs to send various messages to ArmCP, 44 * During normal operation, the host's kernel driver needs to send various
45 * usually either to SET some value into a H/W periphery or to GET the current 45 * messages to ArmCP, usually either to SET some value into a H/W periphery or
46 * value of some H/W periphery. For example, SET the frequency of MME/TPC and 46 * to GET the current value of some H/W periphery. For example, SET the
47 * GET the value of the thermal sensor. 47 * frequency of MME/TPC and GET the value of the thermal sensor.
48 * 48 *
49 * These messages can be initiated either by the User application or by KMD 49 * These messages can be initiated either by the User application or by the
50 * itself, e.g. power management code. In either case, the communication from 50 * host's driver itself, e.g. power management code. In either case, the
51 * KMD to ArmCP will *always* be in synchronous mode, meaning that KMD will 51 * communication from the host's driver to ArmCP will *always* be in
52 * send a single message and poll until the message was acknowledged and the 52 * synchronous mode, meaning that the host will send a single message and poll
53 * results are ready (if results are needed). 53 * until the message was acknowledged and the results are ready (if results are
54 * 54 * needed).
55 * This means that only a single message can be sent at a time and KMD must 55 *
56 * wait for its result before sending the next message. Having said that, 56 * This means that only a single message can be sent at a time and the host's
57 * because these are control messages which are sent in a relatively low 57 * driver must wait for its result before sending the next message. Having said
58 * that, because these are control messages which are sent in a relatively low
58 * frequency, this limitation seems acceptable. It's important to note that 59 * frequency, this limitation seems acceptable. It's important to note that
59 * in case of multiple devices, messages to different devices *can* be sent 60 * in case of multiple devices, messages to different devices *can* be sent
60 * at the same time. 61 * at the same time.
61 * 62 *
62 * The message, inputs/outputs (if relevant) and fence object will be located 63 * The message, inputs/outputs (if relevant) and fence object will be located
63 * on the device DDR at an address that will be determined by KMD. During 64 * on the device DDR at an address that will be determined by the host's driver.
64 * device initialization phase, KMD will pass to ArmCP that address. Most of 65 * During device initialization phase, the host will pass to ArmCP that address.
65 * the message types will contain inputs/outputs inside the message itself. 66 * Most of the message types will contain inputs/outputs inside the message
66 * The common part of each message will contain the opcode of the message (its 67 * itself. The common part of each message will contain the opcode of the
67 * type) and a field representing a fence object. 68 * message (its type) and a field representing a fence object.
68 * 69 *
69 * When KMD wishes to send a message to ArmCP, it will write the message 70 * When the host's driver wishes to send a message to ArmCP, it will write the
70 * contents to the device DDR, clear the fence object and then write the 71 * message contents to the device DDR, clear the fence object and then write the
71 * value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue 72 * value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue
72 * the 484 interrupt-id to the ARM core. 73 * the 484 interrupt-id to the ARM core.
73 * 74 *
@@ -78,12 +79,13 @@ enum pq_init_status {
78 * device DDR and then write to the fence object. If an error occurred, ArmCP 79 * device DDR and then write to the fence object. If an error occurred, ArmCP
79 * will fill the rc field with the right error code. 80 * will fill the rc field with the right error code.
80 * 81 *
81 * In the meantime, KMD will poll on the fence object. Once KMD sees that the 82 * In the meantime, the host's driver will poll on the fence object. Once the
82 * fence object is signaled, it will read the results from the device DDR 83 * host sees that the fence object is signaled, it will read the results from
83 * (if relevant) and resume the code execution in KMD. 84 * the device DDR (if relevant) and resume the code execution in the host's
85 * driver.
84 * 86 *
85 * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8 87 * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
86 * so the value being put by the KMD matches the value read by ArmCP 88 * so the value being put by the host's driver matches the value read by ArmCP
87 * 89 *
88 * Non-QMAN packets should be limited to values 1 through (2^8 - 1) 90 * Non-QMAN packets should be limited to values 1 through (2^8 - 1)
89 * 91 *
@@ -148,9 +150,9 @@ enum pq_init_status {
148 * 150 *
149 * ARMCP_PACKET_INFO_GET - 151 * ARMCP_PACKET_INFO_GET -
150 * Fetch information from the device as specified in the packet's 152 * Fetch information from the device as specified in the packet's
151 * structure. KMD passes the max size it allows the ArmCP to write to 153 * structure. The host's driver passes the max size it allows the ArmCP to
152 * the structure, to prevent data corruption in case of mismatched 154 * write to the structure, to prevent data corruption in case of
153 * KMD/FW versions. 155 * mismatched driver/FW versions.
154 * 156 *
155 * ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed 157 * ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
156 * 158 *
@@ -183,9 +185,9 @@ enum pq_init_status {
183 * ARMCP_PACKET_EEPROM_DATA_GET - 185 * ARMCP_PACKET_EEPROM_DATA_GET -
184 * Get EEPROM data from the ArmCP kernel. The buffer is specified in the 186 * Get EEPROM data from the ArmCP kernel. The buffer is specified in the
185 * addr field. The CPU will put the returned data size in the result 187 * addr field. The CPU will put the returned data size in the result
186 * field. In addition, KMD passes the max size it allows the ArmCP to 188 * field. In addition, the host's driver passes the max size it allows the
187 * write to the structure, to prevent data corruption in case of 189 * ArmCP to write to the structure, to prevent data corruption in case of
188 * mismatched KMD/FW versions. 190 * mismatched driver/FW versions.
189 * 191 *
190 */ 192 */
191 193
@@ -231,7 +233,7 @@ struct armcp_packet {
231 233
232 __le32 ctl; 234 __le32 ctl;
233 235
234 __le32 fence; /* Signal to KMD that message is completed */ 236 __le32 fence; /* Signal to host that message is completed */
235 237
236 union { 238 union {
237 struct {/* For temperature/current/voltage/fan/pwm get/set */ 239 struct {/* For temperature/current/voltage/fan/pwm get/set */
@@ -310,6 +312,7 @@ struct eq_generic_event {
310 * ArmCP info 312 * ArmCP info
311 */ 313 */
312 314
315#define CARD_NAME_MAX_LEN 16
313#define VERSION_MAX_LEN 128 316#define VERSION_MAX_LEN 128
314#define ARMCP_MAX_SENSORS 128 317#define ARMCP_MAX_SENSORS 128
315 318
@@ -318,6 +321,19 @@ struct armcp_sensor {
318 __le32 flags; 321 __le32 flags;
319}; 322};
320 323
324/**
325 * struct armcp_info - Info from ArmCP that is necessary to the host's driver
326 * @sensors: available sensors description.
327 * @kernel_version: ArmCP linux kernel version.
328 * @reserved: reserved field.
329 * @cpld_version: CPLD programmed F/W version.
330 * @infineon_version: Infineon main DC-DC version.
331 * @fuse_version: silicon production FUSE information.
332 * @thermal_version: thermald S/W version.
333 * @armcp_version: ArmCP S/W version.
334 * @dram_size: available DRAM size.
335 * @card_name: card name that will be displayed in HWMON subsystem on the host
336 */
321struct armcp_info { 337struct armcp_info {
322 struct armcp_sensor sensors[ARMCP_MAX_SENSORS]; 338 struct armcp_sensor sensors[ARMCP_MAX_SENSORS];
323 __u8 kernel_version[VERSION_MAX_LEN]; 339 __u8 kernel_version[VERSION_MAX_LEN];
@@ -328,6 +344,7 @@ struct armcp_info {
328 __u8 thermal_version[VERSION_MAX_LEN]; 344 __u8 thermal_version[VERSION_MAX_LEN];
329 __u8 armcp_version[VERSION_MAX_LEN]; 345 __u8 armcp_version[VERSION_MAX_LEN];
330 __le64 dram_size; 346 __le64 dram_size;
347 char card_name[CARD_NAME_MAX_LEN];
331}; 348};
332 349
333#endif /* ARMCP_IF_H */ 350#endif /* ARMCP_IF_H */
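The comment block rewritten in the hunks above describes the whole driver-to-ArmCP channel: a single outstanding message, written to device DDR, kicked by writing 484 to the GIC register, then completed by polling the fence field. Condensed into code, with write_msg_to_ddr(), ring_armcp_doorbell() and read_fence() as hypothetical stand-ins for the ASIC-specific plumbing:

	struct armcp_packet pkt;

	memset(&pkt, 0, sizeof(pkt));
	/* The opcode sits in the upper bits of ctl; QMAN opcodes are the
	 * same values shifted by 8 so firmware can tell the two
	 * namespaces apart.
	 */
	pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
				ARMCP_PKT_CTL_OPCODE_SHIFT);

	write_msg_to_ddr(&pkt);		/* message + cleared fence */
	ring_armcp_doorbell();		/* interrupt-id 484 to the ARM core */

	/* Synchronous by design: poll (process context) until ArmCP
	 * signals the fence, then read any results from device DDR.
	 */
	while (!read_fence())
		usleep_range(100, 200);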
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h
index 3f02a52ba4ce..43d241891e45 100644
--- a/drivers/misc/habanalabs/include/goya/goya.h
+++ b/drivers/misc/habanalabs/include/goya/goya.h
@@ -38,4 +38,6 @@
38 38
39#define TPC_MAX_NUM 8 39#define TPC_MAX_NUM 8
40 40
41#define MME_MAX_NUM 1
42
41#endif /* GOYA_H */ 43#endif /* GOYA_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
new file mode 100644
index 000000000000..cd89723c7f61
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -0,0 +1,34 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2019 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8#ifndef GOYA_REG_MAP_H_
9#define GOYA_REG_MAP_H_
10
11/*
12 * PSOC scratch-pad registers
13 */
14#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
15#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
16#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
17#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
18#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
19#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
20#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
21#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
22#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
23#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
24#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
25#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
26#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
27#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
28#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
29#define mmUBOOT_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
30#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
31
32#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
33
34#endif /* GOYA_REG_MAP_H_ */
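goya_reg_map.h exists so call sites can name the role of a scratch-pad register instead of its index. A before/after sketch (WREG32 is the driver's register-write macro; PQ_INIT_STATUS_READY_FOR_CP is from the pq_init_status enum; pairing the two here is illustrative):

	/* Before: the reader must know scratch-pad 7 holds PQ init status. */
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);

	/* After: the mapping header documents the intent at the call site. */
	WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);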
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
index 199791b57caf..fac65fbd70e8 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/irq.c
@@ -160,7 +160,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
160 160
161 while (1) { 161 while (1) {
162 bool entry_ready = 162 bool entry_ready =
163 ((__le32_to_cpu(eq_base[eq->ci].hdr.ctl) & 163 ((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
164 EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT); 164 EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);
165 165
166 if (!entry_ready) 166 if (!entry_ready)
@@ -194,7 +194,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
194skip_irq: 194skip_irq:
195 /* Clear EQ entry ready bit */ 195 /* Clear EQ entry ready bit */
196 eq_entry->hdr.ctl = 196 eq_entry->hdr.ctl =
197 __cpu_to_le32(__le32_to_cpu(eq_entry->hdr.ctl) & 197 cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
198 ~EQ_CTL_READY_MASK); 198 ~EQ_CTL_READY_MASK);
199 199
200 eq->ci = hl_eq_inc_ptr(eq->ci); 200 eq->ci = hl_eq_inc_ptr(eq->ci);
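The irq.c hunks swap the double-underscore endian helpers for the plain le32_to_cpu()/cpu_to_le32() forms — identical behavior, preferred spelling. The handshake they implement reads the __le32 header, tests the ready bit, and writes the cleared bit back in device byte order; reduced to its core (names taken from the diff above):

	u32 ctl = le32_to_cpu(eq_base[eq->ci].hdr.ctl);

	if (ctl & EQ_CTL_READY_MASK) {
		/* ... consume the entry, then hand it back to the device */
		eq_base[eq->ci].hdr.ctl = cpu_to_le32(ctl & ~EQ_CTL_READY_MASK);
	}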
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
index 25eb46d29d88..4cd622b017b9 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -21,12 +21,12 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
21 memset(&pkt, 0, sizeof(pkt)); 21 memset(&pkt, 0, sizeof(pkt));
22 22
23 if (curr) 23 if (curr)
24 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET << 24 pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
25 ARMCP_PKT_CTL_OPCODE_SHIFT); 25 ARMCP_PKT_CTL_OPCODE_SHIFT);
26 else 26 else
27 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET << 27 pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
28 ARMCP_PKT_CTL_OPCODE_SHIFT); 28 ARMCP_PKT_CTL_OPCODE_SHIFT);
29 pkt.pll_index = __cpu_to_le32(pll_index); 29 pkt.pll_index = cpu_to_le32(pll_index);
30 30
31 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 31 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
32 SET_CLK_PKT_TIMEOUT, &result); 32 SET_CLK_PKT_TIMEOUT, &result);
@@ -48,10 +48,10 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
48 48
49 memset(&pkt, 0, sizeof(pkt)); 49 memset(&pkt, 0, sizeof(pkt));
50 50
51 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET << 51 pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
52 ARMCP_PKT_CTL_OPCODE_SHIFT); 52 ARMCP_PKT_CTL_OPCODE_SHIFT);
53 pkt.pll_index = __cpu_to_le32(pll_index); 53 pkt.pll_index = cpu_to_le32(pll_index);
54 pkt.value = __cpu_to_le64(freq); 54 pkt.value = cpu_to_le64(freq);
55 55
56 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 56 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
57 SET_CLK_PKT_TIMEOUT, NULL); 57 SET_CLK_PKT_TIMEOUT, NULL);
@@ -70,7 +70,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
70 70
71 memset(&pkt, 0, sizeof(pkt)); 71 memset(&pkt, 0, sizeof(pkt));
72 72
73 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET << 73 pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
74 ARMCP_PKT_CTL_OPCODE_SHIFT); 74 ARMCP_PKT_CTL_OPCODE_SHIFT);
75 75
76 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 76 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -91,9 +91,9 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
91 91
92 memset(&pkt, 0, sizeof(pkt)); 92 memset(&pkt, 0, sizeof(pkt));
93 93
94 pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET << 94 pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
95 ARMCP_PKT_CTL_OPCODE_SHIFT); 95 ARMCP_PKT_CTL_OPCODE_SHIFT);
96 pkt.value = __cpu_to_le64(value); 96 pkt.value = cpu_to_le64(value);
97 97
98 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 98 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
99 SET_PWR_PKT_TIMEOUT, NULL); 99 SET_PWR_PKT_TIMEOUT, NULL);
@@ -102,100 +102,6 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
102 dev_err(hdev->dev, "Failed to set max power, error %d\n", rc); 102 dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
103} 103}
104 104
105static ssize_t pm_mng_profile_show(struct device *dev,
106 struct device_attribute *attr, char *buf)
107{
108 struct hl_device *hdev = dev_get_drvdata(dev);
109
110 if (hl_device_disabled_or_in_reset(hdev))
111 return -ENODEV;
112
113 return sprintf(buf, "%s\n",
114 (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
115 (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
116 "unknown");
117}
118
119static ssize_t pm_mng_profile_store(struct device *dev,
120 struct device_attribute *attr, const char *buf, size_t count)
121{
122 struct hl_device *hdev = dev_get_drvdata(dev);
123
124 if (hl_device_disabled_or_in_reset(hdev)) {
125 count = -ENODEV;
126 goto out;
127 }
128
129 mutex_lock(&hdev->fd_open_cnt_lock);
130
131 if (atomic_read(&hdev->fd_open_cnt) > 0) {
132 dev_err(hdev->dev,
133 "Can't change PM profile while user process is opened on the device\n");
134 count = -EPERM;
135 goto unlock_mutex;
136 }
137
138 if (strncmp("auto", buf, strlen("auto")) == 0) {
139 /* Make sure we are in LOW PLL when changing modes */
140 if (hdev->pm_mng_profile == PM_MANUAL) {
141 atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
142 hl_device_set_frequency(hdev, PLL_LOW);
143 hdev->pm_mng_profile = PM_AUTO;
144 }
145 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
146 /* Make sure we are in LOW PLL when changing modes */
147 if (hdev->pm_mng_profile == PM_AUTO) {
148 flush_delayed_work(&hdev->work_freq);
149 hdev->pm_mng_profile = PM_MANUAL;
150 }
151 } else {
152 dev_err(hdev->dev, "value should be auto or manual\n");
153 count = -EINVAL;
154 goto unlock_mutex;
155 }
156
157unlock_mutex:
158 mutex_unlock(&hdev->fd_open_cnt_lock);
159out:
160 return count;
161}
162
163static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
164 char *buf)
165{
166 struct hl_device *hdev = dev_get_drvdata(dev);
167
168 if (hl_device_disabled_or_in_reset(hdev))
169 return -ENODEV;
170
171 return sprintf(buf, "%u\n", hdev->high_pll);
172}
173
174static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
175 const char *buf, size_t count)
176{
177 struct hl_device *hdev = dev_get_drvdata(dev);
178 long value;
179 int rc;
180
181 if (hl_device_disabled_or_in_reset(hdev)) {
182 count = -ENODEV;
183 goto out;
184 }
185
186 rc = kstrtoul(buf, 0, &value);
187
188 if (rc) {
189 count = -EINVAL;
190 goto out;
191 }
192
193 hdev->high_pll = value;
194
195out:
196 return count;
197}
198
199static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr, 105static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr,
200 char *buf) 106 char *buf)
201{ 107{
@@ -351,14 +257,6 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
351 return sprintf(buf, "%s\n", str); 257 return sprintf(buf, "%s\n", str);
352} 258}
353 259
354static ssize_t write_open_cnt_show(struct device *dev,
355 struct device_attribute *attr, char *buf)
356{
357 struct hl_device *hdev = dev_get_drvdata(dev);
358
359 return sprintf(buf, "%d\n", hdev->user_ctx ? 1 : 0);
360}
361
362static ssize_t soft_reset_cnt_show(struct device *dev, 260static ssize_t soft_reset_cnt_show(struct device *dev,
363 struct device_attribute *attr, char *buf) 261 struct device_attribute *attr, char *buf)
364{ 262{
@@ -450,18 +348,15 @@ static DEVICE_ATTR_RO(device_type);
450static DEVICE_ATTR_RO(fuse_ver); 348static DEVICE_ATTR_RO(fuse_ver);
451static DEVICE_ATTR_WO(hard_reset); 349static DEVICE_ATTR_WO(hard_reset);
452static DEVICE_ATTR_RO(hard_reset_cnt); 350static DEVICE_ATTR_RO(hard_reset_cnt);
453static DEVICE_ATTR_RW(high_pll);
454static DEVICE_ATTR_RO(infineon_ver); 351static DEVICE_ATTR_RO(infineon_ver);
455static DEVICE_ATTR_RW(max_power); 352static DEVICE_ATTR_RW(max_power);
456static DEVICE_ATTR_RO(pci_addr); 353static DEVICE_ATTR_RO(pci_addr);
457static DEVICE_ATTR_RW(pm_mng_profile);
458static DEVICE_ATTR_RO(preboot_btl_ver); 354static DEVICE_ATTR_RO(preboot_btl_ver);
459static DEVICE_ATTR_WO(soft_reset); 355static DEVICE_ATTR_WO(soft_reset);
460static DEVICE_ATTR_RO(soft_reset_cnt); 356static DEVICE_ATTR_RO(soft_reset_cnt);
461static DEVICE_ATTR_RO(status); 357static DEVICE_ATTR_RO(status);
462static DEVICE_ATTR_RO(thermal_ver); 358static DEVICE_ATTR_RO(thermal_ver);
463static DEVICE_ATTR_RO(uboot_ver); 359static DEVICE_ATTR_RO(uboot_ver);
464static DEVICE_ATTR_RO(write_open_cnt);
465 360
466static struct bin_attribute bin_attr_eeprom = { 361static struct bin_attribute bin_attr_eeprom = {
467 .attr = {.name = "eeprom", .mode = (0444)}, 362 .attr = {.name = "eeprom", .mode = (0444)},
@@ -477,18 +372,15 @@ static struct attribute *hl_dev_attrs[] = {
477 &dev_attr_fuse_ver.attr, 372 &dev_attr_fuse_ver.attr,
478 &dev_attr_hard_reset.attr, 373 &dev_attr_hard_reset.attr,
479 &dev_attr_hard_reset_cnt.attr, 374 &dev_attr_hard_reset_cnt.attr,
480 &dev_attr_high_pll.attr,
481 &dev_attr_infineon_ver.attr, 375 &dev_attr_infineon_ver.attr,
482 &dev_attr_max_power.attr, 376 &dev_attr_max_power.attr,
483 &dev_attr_pci_addr.attr, 377 &dev_attr_pci_addr.attr,
484 &dev_attr_pm_mng_profile.attr,
485 &dev_attr_preboot_btl_ver.attr, 378 &dev_attr_preboot_btl_ver.attr,
486 &dev_attr_soft_reset.attr, 379 &dev_attr_soft_reset.attr,
487 &dev_attr_soft_reset_cnt.attr, 380 &dev_attr_soft_reset_cnt.attr,
488 &dev_attr_status.attr, 381 &dev_attr_status.attr,
489 &dev_attr_thermal_ver.attr, 382 &dev_attr_thermal_ver.attr,
490 &dev_attr_uboot_ver.attr, 383 &dev_attr_uboot_ver.attr,
491 &dev_attr_write_open_cnt.attr,
492 NULL, 384 NULL,
493}; 385};
494 386
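For readers tracking the sysfs.c removals: DEVICE_ATTR_RW(high_pll) expands to a struct device_attribute named dev_attr_high_pll, bound to high_pll_show()/high_pll_store(), which is why retiring one attribute takes three edits — the callbacks, the DEVICE_ATTR line, and the hl_dev_attrs[] slot. The standard driver-core pattern, sketched with an illustrative attribute name:

	static ssize_t example_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);	/* placeholder value */
	}
	static DEVICE_ATTR_RO(example);			/* -> dev_attr_example */

	static struct attribute *example_attrs[] = {
		&dev_attr_example.attr,
		NULL,
	};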
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index fb10eafe9bde..c70b3822013f 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -9,6 +9,7 @@ lkdtm-$(CONFIG_LKDTM) += refcount.o
9lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o 9lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
10lkdtm-$(CONFIG_LKDTM) += usercopy.o 10lkdtm-$(CONFIG_LKDTM) += usercopy.o
11lkdtm-$(CONFIG_LKDTM) += stackleak.o 11lkdtm-$(CONFIG_LKDTM) += stackleak.o
12lkdtm-$(CONFIG_LKDTM) += cfi.o
12 13
13KASAN_SANITIZE_stackleak.o := n 14KASAN_SANITIZE_stackleak.o := n
14KCOV_INSTRUMENT_rodata.o := n 15KCOV_INSTRUMENT_rodata.o := n
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 24245ccdba72..7284a22b1a09 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -75,7 +75,12 @@ static int warn_counter;
75 75
76void lkdtm_WARNING(void) 76void lkdtm_WARNING(void)
77{ 77{
78 WARN(1, "Warning message trigger count: %d\n", warn_counter++); 78 WARN_ON(++warn_counter);
79}
80
81void lkdtm_WARNING_MESSAGE(void)
82{
83 WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
79} 84}
80 85
81void lkdtm_EXCEPTION(void) 86void lkdtm_EXCEPTION(void)
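The bugs.c hunk above splits the old WARNING test because WARN_ON() and WARN() exercise different paths: WARN_ON(cond) logs only the file/line backtrace, while WARN(cond, fmt, ...) also formats a message, and the split lets each be triggered on its own (e.g. by writing WARNING_MESSAGE to /sys/kernel/debug/provoke-crash/DIRECT). Side by side:

	WARN_ON(++warn_counter);			/* backtrace only */
	WARN(1, "trigger count: %d\n", ++warn_counter);	/* backtrace + message */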
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
new file mode 100644
index 000000000000..e73ebdbfa806
--- /dev/null
+++ b/drivers/misc/lkdtm/cfi.c
@@ -0,0 +1,42 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * This is for all the tests relating directly to Control Flow Integrity.
4 */
5#include "lkdtm.h"
6
7static int called_count;
8
9/* Function taking one argument, without a return value. */
10static noinline void lkdtm_increment_void(int *counter)
11{
12 (*counter)++;
13}
14
15/* Function taking one argument, returning int. */
16static noinline int lkdtm_increment_int(int *counter)
17{
18 (*counter)++;
19
20 return *counter;
21}
22/*
23 * This tries to call an indirect function with a mismatched prototype.
24 */
25void lkdtm_CFI_FORWARD_PROTO(void)
26{
27 /*
28 * Matches lkdtm_increment_void()'s prototype, but not
29 * lkdtm_increment_int()'s prototype.
30 */
31 void (*func)(int *);
32
33 pr_info("Calling matched prototype ...\n");
34 func = lkdtm_increment_void;
35 func(&called_count);
36
37 pr_info("Calling mismatched prototype ...\n");
38 func = (void *)lkdtm_increment_int;
39 func(&called_count);
40
41 pr_info("Fail: survived mismatched prototype function call!\n");
42}
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 66ae6b2a6950..cbc4c9045a99 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -104,6 +104,7 @@ static const struct crashtype crashtypes[] = {
104 CRASHTYPE(PANIC), 104 CRASHTYPE(PANIC),
105 CRASHTYPE(BUG), 105 CRASHTYPE(BUG),
106 CRASHTYPE(WARNING), 106 CRASHTYPE(WARNING),
107 CRASHTYPE(WARNING_MESSAGE),
107 CRASHTYPE(EXCEPTION), 108 CRASHTYPE(EXCEPTION),
108 CRASHTYPE(LOOP), 109 CRASHTYPE(LOOP),
109 CRASHTYPE(EXHAUST_STACK), 110 CRASHTYPE(EXHAUST_STACK),
@@ -169,6 +170,7 @@ static const struct crashtype crashtypes[] = {
169 CRASHTYPE(USERCOPY_KERNEL), 170 CRASHTYPE(USERCOPY_KERNEL),
170 CRASHTYPE(USERCOPY_KERNEL_DS), 171 CRASHTYPE(USERCOPY_KERNEL_DS),
171 CRASHTYPE(STACKLEAK_ERASING), 172 CRASHTYPE(STACKLEAK_ERASING),
173 CRASHTYPE(CFI_FORWARD_PROTO),
172}; 174};
173 175
174 176
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 6a284a87a037..ab446e0bde97 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -11,6 +11,7 @@ void __init lkdtm_bugs_init(int *recur_param);
11void lkdtm_PANIC(void); 11void lkdtm_PANIC(void);
12void lkdtm_BUG(void); 12void lkdtm_BUG(void);
13void lkdtm_WARNING(void); 13void lkdtm_WARNING(void);
14void lkdtm_WARNING_MESSAGE(void);
14void lkdtm_EXCEPTION(void); 15void lkdtm_EXCEPTION(void);
15void lkdtm_LOOP(void); 16void lkdtm_LOOP(void);
16void lkdtm_EXHAUST_STACK(void); 17void lkdtm_EXHAUST_STACK(void);
@@ -95,4 +96,7 @@ void lkdtm_USERCOPY_KERNEL_DS(void);
95/* lkdtm_stackleak.c */ 96/* lkdtm_stackleak.c */
96void lkdtm_STACKLEAK_ERASING(void); 97void lkdtm_STACKLEAK_ERASING(void);
97 98
99/* cfi.c */
100void lkdtm_CFI_FORWARD_PROTO(void);
101
98#endif 102#endif
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 541538eff8b1..d5a92c6eadb3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -383,12 +383,11 @@ static int mei_me_pci_resume(struct device *device)
383#ifdef CONFIG_PM 383#ifdef CONFIG_PM
384static int mei_me_pm_runtime_idle(struct device *device) 384static int mei_me_pm_runtime_idle(struct device *device)
385{ 385{
386 struct pci_dev *pdev = to_pci_dev(device);
387 struct mei_device *dev; 386 struct mei_device *dev;
388 387
389 dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n"); 388 dev_dbg(device, "rpm: me: runtime_idle\n");
390 389
391 dev = pci_get_drvdata(pdev); 390 dev = dev_get_drvdata(device);
392 if (!dev) 391 if (!dev)
393 return -ENODEV; 392 return -ENODEV;
394 if (mei_write_is_idle(dev)) 393 if (mei_write_is_idle(dev))
@@ -399,13 +398,12 @@ static int mei_me_pm_runtime_idle(struct device *device)
399 398
400static int mei_me_pm_runtime_suspend(struct device *device) 399static int mei_me_pm_runtime_suspend(struct device *device)
401{ 400{
402 struct pci_dev *pdev = to_pci_dev(device);
403 struct mei_device *dev; 401 struct mei_device *dev;
404 int ret; 402 int ret;
405 403
406 dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n"); 404 dev_dbg(device, "rpm: me: runtime suspend\n");
407 405
408 dev = pci_get_drvdata(pdev); 406 dev = dev_get_drvdata(device);
409 if (!dev) 407 if (!dev)
410 return -ENODEV; 408 return -ENODEV;
411 409
@@ -418,7 +416,7 @@ static int mei_me_pm_runtime_suspend(struct device *device)
418 416
419 mutex_unlock(&dev->device_lock); 417 mutex_unlock(&dev->device_lock);
420 418
421 dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret); 419 dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret);
422 420
423 if (ret && ret != -EAGAIN) 421 if (ret && ret != -EAGAIN)
424 schedule_work(&dev->reset_work); 422 schedule_work(&dev->reset_work);
@@ -428,13 +426,12 @@ static int mei_me_pm_runtime_suspend(struct device *device)
428 426
429static int mei_me_pm_runtime_resume(struct device *device) 427static int mei_me_pm_runtime_resume(struct device *device)
430{ 428{
431 struct pci_dev *pdev = to_pci_dev(device);
432 struct mei_device *dev; 429 struct mei_device *dev;
433 int ret; 430 int ret;
434 431
435 dev_dbg(&pdev->dev, "rpm: me: runtime resume\n"); 432 dev_dbg(device, "rpm: me: runtime resume\n");
436 433
437 dev = pci_get_drvdata(pdev); 434 dev = dev_get_drvdata(device);
438 if (!dev) 435 if (!dev)
439 return -ENODEV; 436 return -ENODEV;
440 437
@@ -444,7 +441,7 @@ static int mei_me_pm_runtime_resume(struct device *device)
444 441
445 mutex_unlock(&dev->device_lock); 442 mutex_unlock(&dev->device_lock);
446 443
447 dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret); 444 dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret);
448 445
449 if (ret) 446 if (ret)
450 schedule_work(&dev->reset_work); 447 schedule_work(&dev->reset_work);
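The pci-me.c runtime-PM cleanups lean on two identities: dev_dbg() accepts any struct device, and pci_get_drvdata(pdev) is simply dev_get_drvdata(&pdev->dev), so the to_pci_dev() detour adds nothing. Spelled out as a (redundant) assertion inside one of these callbacks, where device is the struct device argument:

	struct pci_dev *pdev = to_pci_dev(device);

	/* Both lookups resolve to the same driver-data pointer. */
	WARN_ON(pci_get_drvdata(pdev) != dev_get_drvdata(device));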
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 2e37fc2e0fa8..f1c16a587495 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -276,12 +276,11 @@ static int mei_txe_pci_resume(struct device *device)
276#ifdef CONFIG_PM 276#ifdef CONFIG_PM
277static int mei_txe_pm_runtime_idle(struct device *device) 277static int mei_txe_pm_runtime_idle(struct device *device)
278{ 278{
279 struct pci_dev *pdev = to_pci_dev(device);
280 struct mei_device *dev; 279 struct mei_device *dev;
281 280
282 dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n"); 281 dev_dbg(device, "rpm: txe: runtime_idle\n");
283 282
284 dev = pci_get_drvdata(pdev); 283 dev = dev_get_drvdata(device);
285 if (!dev) 284 if (!dev)
286 return -ENODEV; 285 return -ENODEV;
287 if (mei_write_is_idle(dev)) 286 if (mei_write_is_idle(dev))
@@ -291,13 +290,12 @@ static int mei_txe_pm_runtime_idle(struct device *device)
291} 290}
292static int mei_txe_pm_runtime_suspend(struct device *device) 291static int mei_txe_pm_runtime_suspend(struct device *device)
293{ 292{
294 struct pci_dev *pdev = to_pci_dev(device);
295 struct mei_device *dev; 293 struct mei_device *dev;
296 int ret; 294 int ret;
297 295
298 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n"); 296 dev_dbg(device, "rpm: txe: runtime suspend\n");
299 297
300 dev = pci_get_drvdata(pdev); 298 dev = dev_get_drvdata(device);
301 if (!dev) 299 if (!dev)
302 return -ENODEV; 300 return -ENODEV;
303 301
@@ -310,7 +308,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
310 308
311 /* keep irq on we are staying in D0 */ 309 /* keep irq on we are staying in D0 */
312 310
313 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); 311 dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);
314 312
315 mutex_unlock(&dev->device_lock); 313 mutex_unlock(&dev->device_lock);
316 314
@@ -322,13 +320,12 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
322 320
323static int mei_txe_pm_runtime_resume(struct device *device) 321static int mei_txe_pm_runtime_resume(struct device *device)
324{ 322{
325 struct pci_dev *pdev = to_pci_dev(device);
326 struct mei_device *dev; 323 struct mei_device *dev;
327 int ret; 324 int ret;
328 325
329 dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n"); 326 dev_dbg(device, "rpm: txe: runtime resume\n");
330 327
331 dev = pci_get_drvdata(pdev); 328 dev = dev_get_drvdata(device);
332 if (!dev) 329 if (!dev)
333 return -ENODEV; 330 return -ENODEV;
334 331
@@ -340,7 +337,7 @@ static int mei_txe_pm_runtime_resume(struct device *device)
340 337
341 mutex_unlock(&dev->device_lock); 338 mutex_unlock(&dev->device_lock);
342 339
343 dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret); 340 dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);
344 341
345 if (ret) 342 if (ret)
346 schedule_work(&dev->reset_work); 343 schedule_work(&dev->reset_work);
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index 266ffb6f6c44..c8bff2916d3d 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -237,6 +237,9 @@ static int __init mic_probe(struct platform_device *pdev)
237 mdrv->dev = &pdev->dev; 237 mdrv->dev = &pdev->dev;
238 snprintf(mdrv->name, sizeof(mic_driver_name), mic_driver_name); 238 snprintf(mdrv->name, sizeof(mic_driver_name), mic_driver_name);
239 239
240 /* FIXME: use dma_set_mask_and_coherent() and check result */
241 dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
242
240 mdev->mmio.pa = MIC_X100_MMIO_BASE; 243 mdev->mmio.pa = MIC_X100_MMIO_BASE;
241 mdev->mmio.len = MIC_X100_MMIO_LEN; 244 mdev->mmio.len = MIC_X100_MMIO_LEN;
242 mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE, 245 mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
@@ -282,18 +285,6 @@ static void mic_platform_shutdown(struct platform_device *pdev)
282 mic_remove(pdev); 285 mic_remove(pdev);
283} 286}
284 287
285static u64 mic_dma_mask = DMA_BIT_MASK(64);
286
287static struct platform_device mic_platform_dev = {
288 .name = mic_driver_name,
289 .id = 0,
290 .num_resources = 0,
291 .dev = {
292 .dma_mask = &mic_dma_mask,
293 .coherent_dma_mask = DMA_BIT_MASK(64),
294 },
295};
296
297static struct platform_driver __refdata mic_platform_driver = { 288static struct platform_driver __refdata mic_platform_driver = {
298 .probe = mic_probe, 289 .probe = mic_probe,
299 .remove = mic_remove, 290 .remove = mic_remove,
@@ -303,6 +294,8 @@ static struct platform_driver __refdata mic_platform_driver = {
303 }, 294 },
304}; 295};
305 296
297static struct platform_device *mic_platform_dev;
298
306static int __init mic_init(void) 299static int __init mic_init(void)
307{ 300{
308 int ret; 301 int ret;
@@ -316,9 +309,12 @@ static int __init mic_init(void)
316 309
317 request_module("mic_x100_dma"); 310 request_module("mic_x100_dma");
318 mic_init_card_debugfs(); 311 mic_init_card_debugfs();
319 ret = platform_device_register(&mic_platform_dev); 312
313 mic_platform_dev = platform_device_register_simple(mic_driver_name,
314 0, NULL, 0);
315 ret = PTR_ERR_OR_ZERO(mic_platform_dev);
320 if (ret) { 316 if (ret) {
321 pr_err("platform_device_register ret %d\n", ret); 317 pr_err("platform_device_register_full ret %d\n", ret);
322 goto cleanup_debugfs; 318 goto cleanup_debugfs;
323 } 319 }
324 ret = platform_driver_register(&mic_platform_driver); 320 ret = platform_driver_register(&mic_platform_driver);
@@ -329,7 +325,7 @@ static int __init mic_init(void)
329 return ret; 325 return ret;
330 326
331device_unregister: 327device_unregister:
332 platform_device_unregister(&mic_platform_dev); 328 platform_device_unregister(mic_platform_dev);
333cleanup_debugfs: 329cleanup_debugfs:
334 mic_exit_card_debugfs(); 330 mic_exit_card_debugfs();
335done: 331done:
@@ -339,7 +335,7 @@ done:
339static void __exit mic_exit(void) 335static void __exit mic_exit(void)
340{ 336{
341 platform_driver_unregister(&mic_platform_driver); 337 platform_driver_unregister(&mic_platform_driver);
342 platform_device_unregister(&mic_platform_dev); 338 platform_device_unregister(mic_platform_dev);
343 mic_exit_card_debugfs(); 339 mic_exit_card_debugfs();
344} 340}
345 341
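The mic_x100.c rework drops a statically allocated platform_device — the device core warns when a device with no release() callback is unregistered — in favor of platform_device_register_simple(), which allocates the device and returns an ERR_PTR() on failure, hence the PTR_ERR_OR_ZERO() conversion above. The core of the pattern (device name illustrative):

	struct platform_device *pdev;

	pdev = platform_device_register_simple("example-dev", 0, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/* ... later, teardown also frees the allocated device */
	platform_device_unregister(pdev);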
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
index d3837f8a5ba0..0b9dfe1cc06c 100644
--- a/drivers/misc/mic/scif/scif_epd.h
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -156,9 +156,8 @@ static inline int scif_verify_epd(struct scif_endpt *ep)
156static inline int scif_anon_inode_getfile(scif_epd_t epd) 156static inline int scif_anon_inode_getfile(scif_epd_t epd)
157{ 157{
158 epd->anon = anon_inode_getfile("scif", &scif_anon_fops, NULL, 0); 158 epd->anon = anon_inode_getfile("scif", &scif_anon_fops, NULL, 0);
159 if (IS_ERR(epd->anon)) 159
160 return PTR_ERR(epd->anon); 160 return PTR_ERR_OR_ZERO(epd->anon);
161 return 0;
162} 161}
163 162
164static inline void scif_anon_inode_fput(scif_epd_t epd) 163static inline void scif_anon_inode_fput(scif_epd_t epd)
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 09e24659ef3d..98c60f11b76b 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -696,7 +696,7 @@ again:
696 if (gru_mq_desc == NULL) { 696 if (gru_mq_desc == NULL) {
697 gru_mq_desc = kmalloc(sizeof(struct 697 gru_mq_desc = kmalloc(sizeof(struct
698 gru_message_queue_desc), 698 gru_message_queue_desc),
699 GFP_KERNEL); 699 GFP_ATOMIC);
700 if (gru_mq_desc == NULL) { 700 if (gru_mq_desc == NULL) {
701 ret = xpNoMemory; 701 ret = xpNoMemory;
702 goto done; 702 goto done;
@@ -1680,7 +1680,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
1680 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); 1680 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
1681} 1681}
1682 1682
1683static struct xpc_arch_operations xpc_arch_ops_uv = { 1683static const struct xpc_arch_operations xpc_arch_ops_uv = {
1684 .setup_partitions = xpc_setup_partitions_uv, 1684 .setup_partitions = xpc_setup_partitions_uv,
1685 .teardown_partitions = xpc_teardown_partitions_uv, 1685 .teardown_partitions = xpc_teardown_partitions_uv,
1686 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv, 1686 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
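On the xpc_uv.c GFP change above: GFP_ATOMIC is for callers that may not sleep — under a spinlock or in interrupt context — while GFP_KERNEL may block to reclaim memory. The trade-off as a fragment (sizes and names illustrative; kmalloc is from linux/slab.h):

	/* May sleep to reclaim memory; process context only: */
	void *a = kmalloc(64, GFP_KERNEL);

	/* Never sleeps; safe under spinlocks and in IRQ context,
	 * but likelier to return NULL under memory pressure:
	 */
	void *b = kmalloc(64, GFP_ATOMIC);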
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
deleted file mode 100644
index ee120dcbb3e6..000000000000
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ /dev/null
@@ -1,797 +0,0 @@
1/*
2 * drivers/misc/spear13xx_pcie_gadget.c
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Pratyush Anand<pratyush.anand@gmail.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/device.h>
13#include <linux/clk.h>
14#include <linux/slab.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/irq.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/pci_regs.h>
23#include <linux/configfs.h>
24#include <mach/pcie.h>
25#include <mach/misc_regs.h>
26
27#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1)
 28/* In the current implementation, address translation is done using IN0
 29 * only, so the IN1 start address and the IN0 end address are kept the same.
 30 */
31#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1)
32#define IN_IO_SIZE (20 * 1024 * 1024 - 1)
33#define IN_CFG0_SIZE (12 * 1024 * 1024 - 1)
34#define IN_CFG1_SIZE (12 * 1024 * 1024 - 1)
35#define IN_MSG_SIZE (12 * 1024 * 1024 - 1)
 36/* Keep default BAR size as 4K */
 37/* AORAM would be mapped by default */
38#define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1)
39
40#define INT_TYPE_NO_INT 0
41#define INT_TYPE_INTX 1
42#define INT_TYPE_MSI 2
43struct spear_pcie_gadget_config {
44 void __iomem *base;
45 void __iomem *va_app_base;
46 void __iomem *va_dbi_base;
47 char int_type[10];
48 ulong requested_msi;
49 ulong configured_msi;
50 ulong bar0_size;
51 ulong bar0_rw_offset;
52 void __iomem *va_bar0_address;
53};
54
55struct pcie_gadget_target {
56 struct configfs_subsystem subsys;
57 struct spear_pcie_gadget_config config;
58};
59
60struct pcie_gadget_target_attr {
61 struct configfs_attribute attr;
62 ssize_t (*show)(struct spear_pcie_gadget_config *config,
63 char *buf);
64 ssize_t (*store)(struct spear_pcie_gadget_config *config,
65 const char *buf,
66 size_t count);
67};
68
69static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
70{
71 /* Enable DBI access */
72 writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
73 &app_reg->slv_armisc);
74 writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
75 &app_reg->slv_awmisc);
76
77}
78
79static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
80{
81 /* disable DBI access */
82 writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
83 &app_reg->slv_armisc);
84 writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
85 &app_reg->slv_awmisc);
86
87}
88
89static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
90 int where, int size, u32 *val)
91{
92 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
93 ulong va_address;
94
95 /* Enable DBI access */
96 enable_dbi_access(app_reg);
97
98 va_address = (ulong)config->va_dbi_base + (where & ~0x3);
99
100 *val = readl(va_address);
101
102 if (size == 1)
103 *val = (*val >> (8 * (where & 3))) & 0xff;
104 else if (size == 2)
105 *val = (*val >> (8 * (where & 3))) & 0xffff;
106
107 /* Disable DBI access */
108 disable_dbi_access(app_reg);
109}
110
111static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
112 int where, int size, u32 val)
113{
114 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
115 ulong va_address;
116
117 /* Enable DBI access */
118 enable_dbi_access(app_reg);
119
120 va_address = (ulong)config->va_dbi_base + (where & ~0x3);
121
122 if (size == 4)
123 writel(val, va_address);
124 else if (size == 2)
125 writew(val, va_address + (where & 2));
126 else if (size == 1)
127 writeb(val, va_address + (where & 3));
128
129 /* Disable DBI access */
130 disable_dbi_access(app_reg);
131}
132
133#define PCI_FIND_CAP_TTL 48
134
135static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
136 u32 pos, int cap, int *ttl)
137{
138 u32 id;
139
140 while ((*ttl)--) {
141 spear_dbi_read_reg(config, pos, 1, &pos);
142 if (pos < 0x40)
143 break;
144 pos &= ~3;
145 spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
146 if (id == 0xff)
147 break;
148 if (id == cap)
149 return pos;
150 pos += PCI_CAP_LIST_NEXT;
151 }
152 return 0;
153}
154
155static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
156 u32 pos, int cap)
157{
158 int ttl = PCI_FIND_CAP_TTL;
159
160 return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
161}
162
163static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
164 u8 hdr_type)
165{
166 u32 status;
167
168 spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
169 if (!(status & PCI_STATUS_CAP_LIST))
170 return 0;
171
172 switch (hdr_type) {
173 case PCI_HEADER_TYPE_NORMAL:
174 case PCI_HEADER_TYPE_BRIDGE:
175 return PCI_CAPABILITY_LIST;
176 case PCI_HEADER_TYPE_CARDBUS:
177 return PCI_CB_CAPABILITY_LIST;
178 default:
179 return 0;
180 }
181
182 return 0;
183}
184
185/*
186 * Tell if a device supports a given PCI capability.
187 * Returns the address of the requested capability structure within the
188 * device's PCI configuration space or 0 in case the device does not
189 * support it. Possible values for @cap:
190 *
191 * %PCI_CAP_ID_PM Power Management
192 * %PCI_CAP_ID_AGP Accelerated Graphics Port
193 * %PCI_CAP_ID_VPD Vital Product Data
194 * %PCI_CAP_ID_SLOTID Slot Identification
195 * %PCI_CAP_ID_MSI Message Signalled Interrupts
196 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
197 * %PCI_CAP_ID_PCIX PCI-X
198 * %PCI_CAP_ID_EXP PCI Express
199 */
200static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
201 int cap)
202{
203 u32 pos;
204 u32 hdr_type;
205
206 spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
207
208 pos = pci_find_own_cap_start(config, hdr_type);
209 if (pos)
210 pos = pci_find_own_next_cap(config, pos, cap);
211
212 return pos;
213}
214
215static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
216{
217 return 0;
218}
219
220/*
221 * configfs interfaces show/store functions
222 */
223
 224static struct spear_pcie_gadget_config *to_target(struct config_item *item)
 225{
 226	return item ?
 227		&container_of(to_configfs_subsystem(to_config_group(item)),
 228			struct pcie_gadget_target, subsys)->config : NULL;
 229}
230
231static ssize_t pcie_gadget_link_show(struct config_item *item, char *buf)
232{
233 struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
234
235 if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
236 return sprintf(buf, "UP");
237 else
238 return sprintf(buf, "DOWN");
239}
240
241static ssize_t pcie_gadget_link_store(struct config_item *item,
242 const char *buf, size_t count)
243{
244 struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
245
246 if (sysfs_streq(buf, "UP"))
247 writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
248 &app_reg->app_ctrl_0);
249 else if (sysfs_streq(buf, "DOWN"))
250 writel(readl(&app_reg->app_ctrl_0)
251 & ~(1 << APP_LTSSM_ENABLE_ID),
252 &app_reg->app_ctrl_0);
253 else
254 return -EINVAL;
255 return count;
256}
257
258static ssize_t pcie_gadget_int_type_show(struct config_item *item, char *buf)
259{
260 return sprintf(buf, "%s", to_target(item)->int_type);
261}
262
263static ssize_t pcie_gadget_int_type_store(struct config_item *item,
264 const char *buf, size_t count)
265{
 266	struct spear_pcie_gadget_config *config = to_target(item);
267 u32 cap, vec, flags;
268 ulong vector;
269
270 if (sysfs_streq(buf, "INTA"))
271 spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
272
273 else if (sysfs_streq(buf, "MSI")) {
274 vector = config->requested_msi;
275 vec = 0;
276 while (vector > 1) {
277 vector /= 2;
278 vec++;
279 }
280 spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
281 cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
282 spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
283 flags &= ~PCI_MSI_FLAGS_QMASK;
284 flags |= vec << 1;
285 spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
286 } else
287 return -EINVAL;
288
289 strcpy(config->int_type, buf);
290
291 return count;
292}
293
294static ssize_t pcie_gadget_no_of_msi_show(struct config_item *item, char *buf)
295{
 296	struct spear_pcie_gadget_config *config = to_target(item);
297 struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
298 u32 cap, vec, flags;
299 ulong vector;
300
301 if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
302 != (1 << CFG_MSI_EN_ID))
303 vector = 0;
304 else {
305 cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
306 spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
307 flags &= ~PCI_MSI_FLAGS_QSIZE;
308 vec = flags >> 4;
309 vector = 1;
310 while (vec--)
311 vector *= 2;
312 }
313 config->configured_msi = vector;
314
315 return sprintf(buf, "%lu", vector);
316}
317
318static ssize_t pcie_gadget_no_of_msi_store(struct config_item *item,
319 const char *buf, size_t count)
320{
321 int ret;
322
323 ret = kstrtoul(buf, 0, &to_target(item)->requested_msi);
324 if (ret)
325 return ret;
326
 327	if (to_target(item)->requested_msi > 32)
 328		to_target(item)->requested_msi = 32;
329
330 return count;
331}
332
333static ssize_t pcie_gadget_inta_store(struct config_item *item,
334 const char *buf, size_t count)
335{
336 struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
337 ulong en;
338 int ret;
339
340 ret = kstrtoul(buf, 0, &en);
341 if (ret)
342 return ret;
343
344 if (en)
345 writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
346 &app_reg->app_ctrl_0);
347 else
348 writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
349 &app_reg->app_ctrl_0);
350
351 return count;
352}
353
354static ssize_t pcie_gadget_send_msi_store(struct config_item *item,
355 const char *buf, size_t count)
356{
 357	struct spear_pcie_gadget_config *config = to_target(item);
358 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
359 ulong vector;
360 u32 ven_msi;
361 int ret;
362
363 ret = kstrtoul(buf, 0, &vector);
364 if (ret)
365 return ret;
366
367 if (!config->configured_msi)
368 return -EINVAL;
369
370 if (vector >= config->configured_msi)
371 return -EINVAL;
372
373 ven_msi = readl(&app_reg->ven_msi_1);
374 ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
375 ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
376 ven_msi &= ~VEN_MSI_TC_MASK;
377 ven_msi |= 0 << VEN_MSI_TC_ID;
378 ven_msi &= ~VEN_MSI_VECTOR_MASK;
379 ven_msi |= vector << VEN_MSI_VECTOR_ID;
380
381 /* generating interrupt for msi vector */
382 ven_msi |= VEN_MSI_REQ_EN;
383 writel(ven_msi, &app_reg->ven_msi_1);
384 udelay(1);
385 ven_msi &= ~VEN_MSI_REQ_EN;
386 writel(ven_msi, &app_reg->ven_msi_1);
387
388 return count;
389}
390
391static ssize_t pcie_gadget_vendor_id_show(struct config_item *item, char *buf)
392{
393 u32 id;
394
395 spear_dbi_read_reg(to_target(item), PCI_VENDOR_ID, 2, &id);
396
397 return sprintf(buf, "%x", id);
398}
399
400static ssize_t pcie_gadget_vendor_id_store(struct config_item *item,
401 const char *buf, size_t count)
402{
403 ulong id;
404 int ret;
405
406 ret = kstrtoul(buf, 0, &id);
407 if (ret)
408 return ret;
409
410 spear_dbi_write_reg(to_target(item), PCI_VENDOR_ID, 2, id);
411
412 return count;
413}
414
415static ssize_t pcie_gadget_device_id_show(struct config_item *item, char *buf)
416{
417 u32 id;
418
419 spear_dbi_read_reg(to_target(item), PCI_DEVICE_ID, 2, &id);
420
421 return sprintf(buf, "%x", id);
422}
423
424static ssize_t pcie_gadget_device_id_store(struct config_item *item,
425 const char *buf, size_t count)
426{
427 ulong id;
428 int ret;
429
430 ret = kstrtoul(buf, 0, &id);
431 if (ret)
432 return ret;
433
434 spear_dbi_write_reg(to_target(item), PCI_DEVICE_ID, 2, id);
435
436 return count;
437}
438
439static ssize_t pcie_gadget_bar0_size_show(struct config_item *item, char *buf)
440{
441 return sprintf(buf, "%lx", to_target(item)->bar0_size);
442}
443
444static ssize_t pcie_gadget_bar0_size_store(struct config_item *item,
445 const char *buf, size_t count)
446{
 447	struct spear_pcie_gadget_config *config = to_target(item);
448 ulong size;
449 u32 pos, pos1;
450 u32 no_of_bit = 0;
451 int ret;
452
453 ret = kstrtoul(buf, 0, &size);
454 if (ret)
455 return ret;
456
457 /* min bar size is 256 */
458 if (size <= 0x100)
459 size = 0x100;
460 /* max bar size is 1MB*/
461 else if (size >= 0x100000)
462 size = 0x100000;
463 else {
464 pos = 0;
465 pos1 = 0;
466 while (pos < 21) {
467 pos = find_next_bit((ulong *)&size, 21, pos);
468 if (pos != 21)
469 pos1 = pos + 1;
470 pos++;
471 no_of_bit++;
472 }
473 if (no_of_bit == 2)
474 pos1--;
475
476 size = 1 << pos1;
477 }
478 config->bar0_size = size;
479 spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
480
481 return count;
482}
483
484static ssize_t pcie_gadget_bar0_address_show(struct config_item *item,
485 char *buf)
486{
487 struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
488
489 u32 address = readl(&app_reg->pim0_mem_addr_start);
490
491 return sprintf(buf, "%x", address);
492}
493
494static ssize_t pcie_gadget_bar0_address_store(struct config_item *item,
495 const char *buf, size_t count)
496{
 497	struct spear_pcie_gadget_config *config = to_target(item);
498 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
499 ulong address;
500 int ret;
501
502 ret = kstrtoul(buf, 0, &address);
503 if (ret)
504 return ret;
505
506 address &= ~(config->bar0_size - 1);
507 if (config->va_bar0_address)
508 iounmap(config->va_bar0_address);
509 config->va_bar0_address = ioremap(address, config->bar0_size);
510 if (!config->va_bar0_address)
511 return -ENOMEM;
512
513 writel(address, &app_reg->pim0_mem_addr_start);
514
515 return count;
516}
517
518static ssize_t pcie_gadget_bar0_rw_offset_show(struct config_item *item,
519 char *buf)
520{
521 return sprintf(buf, "%lx", to_target(item)->bar0_rw_offset);
522}
523
524static ssize_t pcie_gadget_bar0_rw_offset_store(struct config_item *item,
525 const char *buf, size_t count)
526{
527 ulong offset;
528 int ret;
529
530 ret = kstrtoul(buf, 0, &offset);
531 if (ret)
532 return ret;
533
534 if (offset % 4)
535 return -EINVAL;
536
537 to_target(item)->bar0_rw_offset = offset;
538
539 return count;
540}
541
542static ssize_t pcie_gadget_bar0_data_show(struct config_item *item, char *buf)
543{
 544	struct spear_pcie_gadget_config *config = to_target(item);
545 ulong data;
546
547 if (!config->va_bar0_address)
548 return -ENOMEM;
549
550 data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
551
552 return sprintf(buf, "%lx", data);
553}
554
555static ssize_t pcie_gadget_bar0_data_store(struct config_item *item,
556 const char *buf, size_t count)
557{
 558	struct spear_pcie_gadget_config *config = to_target(item);
559 ulong data;
560 int ret;
561
562 ret = kstrtoul(buf, 0, &data);
563 if (ret)
564 return ret;
565
566 if (!config->va_bar0_address)
567 return -ENOMEM;
568
569 writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
570
571 return count;
572}
573
574CONFIGFS_ATTR(pcie_gadget_, link);
575CONFIGFS_ATTR(pcie_gadget_, int_type);
576CONFIGFS_ATTR(pcie_gadget_, no_of_msi);
577CONFIGFS_ATTR_WO(pcie_gadget_, inta);
578CONFIGFS_ATTR_WO(pcie_gadget_, send_msi);
579CONFIGFS_ATTR(pcie_gadget_, vendor_id);
580CONFIGFS_ATTR(pcie_gadget_, device_id);
581CONFIGFS_ATTR(pcie_gadget_, bar0_size);
582CONFIGFS_ATTR(pcie_gadget_, bar0_address);
583CONFIGFS_ATTR(pcie_gadget_, bar0_rw_offset);
584CONFIGFS_ATTR(pcie_gadget_, bar0_data);
585
586static struct configfs_attribute *pcie_gadget_target_attrs[] = {
587 &pcie_gadget_attr_link,
588 &pcie_gadget_attr_int_type,
589 &pcie_gadget_attr_no_of_msi,
590 &pcie_gadget_attr_inta,
591 &pcie_gadget_attr_send_msi,
592 &pcie_gadget_attr_vendor_id,
593 &pcie_gadget_attr_device_id,
594 &pcie_gadget_attr_bar0_size,
595 &pcie_gadget_attr_bar0_address,
596 &pcie_gadget_attr_bar0_rw_offset,
597 &pcie_gadget_attr_bar0_data,
598 NULL,
599};
600
601static struct config_item_type pcie_gadget_target_type = {
602 .ct_attrs = pcie_gadget_target_attrs,
603 .ct_owner = THIS_MODULE,
604};
605
606static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
607{
608 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
609
610 /*setup registers for outbound translation */
611
612 writel(config->base, &app_reg->in0_mem_addr_start);
613 writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
614 &app_reg->in0_mem_addr_limit);
615 writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
616 writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
617 &app_reg->in1_mem_addr_limit);
618 writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
619 writel(app_reg->in_io_addr_start + IN_IO_SIZE,
620 &app_reg->in_io_addr_limit);
621 writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
622 writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
623 &app_reg->in_cfg0_addr_limit);
624 writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
625 writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
626 &app_reg->in_cfg1_addr_limit);
627 writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
628 writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
629 &app_reg->in_msg_addr_limit);
630
631 writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
632 writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
633 writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);
634
635 /*setup registers for inbound translation */
636
637 /* Keep AORAM mapped at BAR0 as default */
638 config->bar0_size = INBOUND_ADDR_MASK + 1;
639 spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
640 spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
641 config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
642 config->bar0_size);
643
644 writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
645 writel(0, &app_reg->pim1_mem_addr_start);
646 writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);
647
648 writel(0x0, &app_reg->pim_io_addr_start);
649 writel(0x0, &app_reg->pim_io_addr_start);
650 writel(0x0, &app_reg->pim_rom_addr_start);
651
652 writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
653 | ((u32)1 << REG_TRANSLATION_ENABLE),
654 &app_reg->app_ctrl_0);
655 /* disable all rx interrupts */
656 writel(0, &app_reg->int_mask);
657
658 /* Select INTA as default*/
659 spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
660}
661
662static int spear_pcie_gadget_probe(struct platform_device *pdev)
663{
664 struct resource *res0, *res1;
665 unsigned int status = 0;
666 int irq;
667 struct clk *clk;
668 static struct pcie_gadget_target *target;
669 struct spear_pcie_gadget_config *config;
670 struct config_item *cg_item;
671 struct configfs_subsystem *subsys;
672
673 target = devm_kzalloc(&pdev->dev, sizeof(*target), GFP_KERNEL);
674 if (!target) {
675 dev_err(&pdev->dev, "out of memory\n");
676 return -ENOMEM;
677 }
678
679 cg_item = &target->subsys.su_group.cg_item;
680 sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
681 cg_item->ci_type = &pcie_gadget_target_type;
682 config = &target->config;
683
684 /* get resource for application registers*/
685 res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
686 config->va_app_base = devm_ioremap_resource(&pdev->dev, res0);
687 if (IS_ERR(config->va_app_base)) {
688 dev_err(&pdev->dev, "ioremap fail\n");
689 return PTR_ERR(config->va_app_base);
690 }
691
692 /* get resource for dbi registers*/
693 res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
694 config->base = (void __iomem *)res1->start;
695
696 config->va_dbi_base = devm_ioremap_resource(&pdev->dev, res1);
697 if (IS_ERR(config->va_dbi_base)) {
698 dev_err(&pdev->dev, "ioremap fail\n");
699 return PTR_ERR(config->va_dbi_base);
700 }
701
702 platform_set_drvdata(pdev, target);
703
704 irq = platform_get_irq(pdev, 0);
705 if (irq < 0) {
706 dev_err(&pdev->dev, "no update irq?\n");
707 return irq;
708 }
709
710 status = devm_request_irq(&pdev->dev, irq, spear_pcie_gadget_irq,
711 0, pdev->name, NULL);
712 if (status) {
713 dev_err(&pdev->dev,
714 "pcie gadget interrupt IRQ%d already claimed\n", irq);
715 return status;
716 }
717
718 /* Register configfs hooks */
719 subsys = &target->subsys;
720 config_group_init(&subsys->su_group);
721 mutex_init(&subsys->su_mutex);
722 status = configfs_register_subsystem(subsys);
723 if (status)
724 return status;
725
726 /*
727 * Init basic PCIe application registers.
728 * Do not enable the clock if it is PCIE0. Ideally, all controllers
729 * should be independent of each other with respect to clocks, but
730 * PCIE1 and PCIE2 depend on PCIE0, so its clock is enabled at board init.
731 */
732 if (pdev->id == 1) {
733 /*
734 * Ideally, the CFG clock should also be enabled here, but it is
735 * currently done during the board init routine.
736 */
737 clk = clk_get_sys("pcie1", NULL);
738 if (IS_ERR(clk)) {
739 pr_err("%s:couldn't get clk for pcie1\n", __func__);
740 return PTR_ERR(clk);
741 }
742 status = clk_enable(clk);
743 if (status) {
744 pr_err("%s:couldn't enable clk for pcie1\n", __func__);
745 return status;
746 }
747 } else if (pdev->id == 2) {
748 /*
749 * Ideally, the CFG clock should also be enabled here, but it is
750 * currently done during the board init routine.
751 */
752 clk = clk_get_sys("pcie2", NULL);
753 if (IS_ERR(clk)) {
754 pr_err("%s:couldn't get clk for pcie2\n", __func__);
755 return PTR_ERR(clk);
756 }
757 status = clk_enable(clk);
758 if (status) {
759 pr_err("%s:couldn't enable clk for pcie2\n", __func__);
760 return status;
761 }
762 }
763 spear13xx_pcie_device_init(config);
764
765 return 0;
766}
767
768static int spear_pcie_gadget_remove(struct platform_device *pdev)
769{
770 static struct pcie_gadget_target *target;
771
772 target = platform_get_drvdata(pdev);
773
774 configfs_unregister_subsystem(&target->subsys);
775
776 return 0;
777}
778
779static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
780{
781}
782
783static struct platform_driver spear_pcie_gadget_driver = {
784 .probe = spear_pcie_gadget_probe,
785 .remove = spear_pcie_gadget_remove,
786 .shutdown = spear_pcie_gadget_shutdown,
787 .driver = {
788 .name = "pcie-gadget-spear",
789 .bus = &platform_bus_type
790 },
791};
792
793module_platform_driver(spear_pcie_gadget_driver);
794
795MODULE_ALIAS("platform:pcie-gadget-spear");
796MODULE_AUTHOR("Pratyush Anand");
797MODULE_LICENSE("GPL");
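
Editor's note: the probe() above duplicates the same clk_get_sys()/clk_enable() sequence for PCIE1 and PCIE2. A minimal sketch of how that lookup could be factored into one helper, assuming only the clock names already used in the code above (hypothetical helper, not part of the driver):

static int spear_pcie_gadget_enable_clk(struct platform_device *pdev)
{
	char name[8];
	struct clk *clk;
	int ret;

	/* PCIE0's clock is enabled during board init, nothing to do */
	if (pdev->id != 1 && pdev->id != 2)
		return 0;

	snprintf(name, sizeof(name), "pcie%d", pdev->id);
	clk = clk_get_sys(name, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "couldn't get clk for %s\n", name);
		return PTR_ERR(clk);
	}

	ret = clk_enable(clk);
	if (ret)
		dev_err(&pdev->dev, "couldn't enable clk for %s\n", name);
	return ret;
}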
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index f257d3812110..11835969e982 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -19,11 +19,150 @@
19#include <linux/poll.h> 19#include <linux/poll.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/clk.h> 21#include <linux/clk.h>
22#include <linux/compat.h>
23#include <linux/highmem.h>
24
25#include <uapi/misc/xilinx_sdfec.h>
22 26
23#define DEV_NAME_LEN 12 27#define DEV_NAME_LEN 12
24 28
25static struct idr dev_idr; 29static DEFINE_IDA(dev_nrs);
26static struct mutex dev_idr_lock; 30
31/* Xilinx SDFEC Register Map */
32/* CODE_WRI_PROTECT Register */
33#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)
34
35/* ACTIVE Register */
36#define XSDFEC_ACTIVE_ADDR (0x8)
37#define XSDFEC_IS_ACTIVITY_SET (0x1)
38
39/* AXIS_WIDTH Register */
40#define XSDFEC_AXIS_WIDTH_ADDR (0xC)
41#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
42#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
43#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
44#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
45
46/* AXIS_ENABLE Register */
47#define XSDFEC_AXIS_ENABLE_ADDR (0x10)
48#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
49#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
50#define XSDFEC_AXIS_ENABLE_MASK \
51 (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)
52
53/* FEC_CODE Register */
54#define XSDFEC_FEC_CODE_ADDR (0x14)
55
56/* ORDER Register Map */
57#define XSDFEC_ORDER_ADDR (0x18)
58
59/* Interrupt Status Register */
60#define XSDFEC_ISR_ADDR (0x1C)
61/* Interrupt Status Register Bit Mask */
62#define XSDFEC_ISR_MASK (0x3F)
63
64/* Write Only - Interrupt Enable Register */
65#define XSDFEC_IER_ADDR (0x20)
66/* Write Only - Interrupt Disable Register */
67#define XSDFEC_IDR_ADDR (0x24)
68/* Read Only - Interrupt Mask Register */
69#define XSDFEC_IMR_ADDR (0x28)
70
71/* ECC Interrupt Status Register */
72#define XSDFEC_ECC_ISR_ADDR (0x2C)
73/* Single Bit Errors */
74#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
75/* PL Initialize Single Bit Errors */
76#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
77/* Multi Bit Errors */
78#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
79/* PL Initialize Multi Bit Errors */
80#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
81/* Multi Bit Error to Event Shift */
82#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
83/* PL Initialize Multi Bit Error to Event Shift */
84#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
85/* ECC Interrupt Status Bit Mask */
86#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
87/* ECC Interrupt Status PL Initialize Bit Mask */
88#define XSDFEC_PL_INIT_ECC_ISR_MASK \
89 (XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
90/* ECC Interrupt Status All Bit Mask */
91#define XSDFEC_ALL_ECC_ISR_MASK \
92 (XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
93/* ECC Interrupt Status Single Bit Errors Mask */
94#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
95 (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
96/* ECC Interrupt Status Multi Bit Errors Mask */
97#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
98 (XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
99
100/* Write Only - ECC Interrupt Enable Register */
101#define XSDFEC_ECC_IER_ADDR (0x30)
102/* Write Only - ECC Interrupt Disable Register */
103#define XSDFEC_ECC_IDR_ADDR (0x34)
104/* Read Only - ECC Interrupt Mask Register */
105#define XSDFEC_ECC_IMR_ADDR (0x38)
106
107/* BYPASS Register */
108#define XSDFEC_BYPASS_ADDR (0x3C)
109
110/* Turbo Code Register */
111#define XSDFEC_TURBO_ADDR (0x100)
112#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
113#define XSDFEC_TURBO_SCALE_BIT_POS (8)
114#define XSDFEC_TURBO_SCALE_MAX (15)
115
116/* REG0 Register */
117#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
118#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
119#define XSDFEC_REG0_N_MIN (4)
120#define XSDFEC_REG0_N_MAX (32768)
121#define XSDFEC_REG0_N_MUL_P (256)
122#define XSDFEC_REG0_N_LSB (0)
123#define XSDFEC_REG0_K_MIN (2)
124#define XSDFEC_REG0_K_MAX (32766)
125#define XSDFEC_REG0_K_MUL_P (256)
126#define XSDFEC_REG0_K_LSB (16)
127
128/* REG1 Register */
129#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
130#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
131#define XSDFEC_REG1_PSIZE_MIN (2)
132#define XSDFEC_REG1_PSIZE_MAX (512)
133#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
134#define XSDFEC_REG1_NO_PACKING_LSB (10)
135#define XSDFEC_REG1_NM_MASK (0xFF800)
136#define XSDFEC_REG1_NM_LSB (11)
137#define XSDFEC_REG1_BYPASS_MASK (0x100000)
138
139/* REG2 Register */
140#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
141#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
142#define XSDFEC_REG2_NLAYERS_MIN (1)
143#define XSDFEC_REG2_NLAYERS_MAX (256)
144#define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
145#define XSDFEC_REG2_NMQC_LSB (9)
146#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
147#define XSDFEC_REG2_NORM_TYPE_LSB (20)
148#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
149#define XSDFEC_REG2_SPECIAL_QC_LSB (21)
150#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
151#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
152#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
153#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
154
155/* REG3 Register */
156#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
157#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
158#define XSDFEC_REG3_LA_OFF_LSB (8)
159#define XSDFEC_REG3_QC_OFF_LSB (16)
160
161#define XSDFEC_LDPC_REG_JUMP (0x10)
162#define XSDFEC_REG_WIDTH_JUMP (4)
163
164/* The maximum number of pinned pages */
165#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
27 166
28/** 167/**
29 * struct xsdfec_clks - For managing SD-FEC clocks 168 * struct xsdfec_clks - For managing SD-FEC clocks
@@ -49,31 +188,1043 @@ struct xsdfec_clks {
49 188
50/** 189/**
51 * struct xsdfec_dev - Driver data for SDFEC 190 * struct xsdfec_dev - Driver data for SDFEC
52 * @regs: device physical base address
53 * @dev: pointer to device struct
54 * @miscdev: Misc device handle 191 * @miscdev: Misc device handle
55 * @error_data_lock: Error counter and states spinlock
56 * @clks: Clocks managed by the SDFEC driver 192 * @clks: Clocks managed by the SDFEC driver
193 * @waitq: Driver wait queue
194 * @config: Configuration of the SDFEC device
57 * @dev_name: Device name 195 * @dev_name: Device name
196 * @flags: spinlock flags
197 * @regs: device physical base address
198 * @dev: pointer to device struct
199 * @state: State of the SDFEC device
200 * @error_data_lock: Error counter and states spinlock
58 * @dev_id: Device ID 201 * @dev_id: Device ID
202 * @isr_err_count: Count of ISR errors
203 * @cecc_count: Count of Correctable ECC errors (SBE)
204 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
205 * @irq: IRQ number
206 * @state_updated: indicates State updated by interrupt handler
207 * @stats_updated: indicates Stats updated by interrupt handler
208 * @intr_enabled: indicates IRQ enabled
59 * 209 *
60 * This structure contains necessary state for SDFEC driver to operate 210 * This structure contains necessary state for SDFEC driver to operate
61 */ 211 */
62struct xsdfec_dev { 212struct xsdfec_dev {
213 struct miscdevice miscdev;
214 struct xsdfec_clks clks;
215 wait_queue_head_t waitq;
216 struct xsdfec_config config;
217 char dev_name[DEV_NAME_LEN];
218 unsigned long flags;
63 void __iomem *regs; 219 void __iomem *regs;
64 struct device *dev; 220 struct device *dev;
65 struct miscdevice miscdev; 221 enum xsdfec_state state;
66 /* Spinlock to protect state_updated and stats_updated */ 222 /* Spinlock to protect state_updated and stats_updated */
67 spinlock_t error_data_lock; 223 spinlock_t error_data_lock;
68 struct xsdfec_clks clks;
69 char dev_name[DEV_NAME_LEN];
70 int dev_id; 224 int dev_id;
225 u32 isr_err_count;
226 u32 cecc_count;
227 u32 uecc_count;
228 int irq;
229 bool state_updated;
230 bool stats_updated;
231 bool intr_enabled;
71}; 232};
72 233
234static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
235 u32 value)
236{
237 dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
238 iowrite32(value, xsdfec->regs + addr);
239}
240
241static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
242{
243 u32 rval;
244
245 rval = ioread32(xsdfec->regs + addr);
246 dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
247 return rval;
248}
249
250static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
251 u32 reg_offset, u32 bit_num,
252 char *config_value)
253{
254 u32 reg_val;
255 u32 bit_mask = 1 << bit_num;
256
257 reg_val = xsdfec_regread(xsdfec, reg_offset);
258 *config_value = (reg_val & bit_mask) > 0;
259}
260
261static void update_config_from_hw(struct xsdfec_dev *xsdfec)
262{
263 u32 reg_value;
264 bool sdfec_started;
265
266 /* Update the Order */
267 reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
268 xsdfec->config.order = reg_value;
269
270 update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
271 0, /* Bit Number, maybe change to mask */
272 &xsdfec->config.bypass);
273
274 update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
275 0, /* Bit Number */
276 &xsdfec->config.code_wr_protect);
277
278 reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
279 xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;
280
281 reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
282 xsdfec->config.irq.enable_ecc_isr =
283 (reg_value & XSDFEC_ECC_ISR_MASK) > 0;
284
285 reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
286 sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
287 if (sdfec_started)
288 xsdfec->state = XSDFEC_STARTED;
289 else
290 xsdfec->state = XSDFEC_STOPPED;
291}
292
293static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
294{
295 struct xsdfec_status status;
296 int err;
297
298 memset(&status, 0, sizeof(status));
299 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
300 status.state = xsdfec->state;
301 xsdfec->state_updated = false;
302 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
303 status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
304 XSDFEC_IS_ACTIVITY_SET);
305
306 err = copy_to_user(arg, &status, sizeof(status));
307 if (err)
308 err = -EFAULT;
309
310 return err;
311}
312
313static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
314{
315 int err;
316
317 err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
318 if (err)
319 err = -EFAULT;
320
321 return err;
322}
323
324static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
325{
326 u32 mask_read;
327
328 if (enable) {
329 /* Enable */
330 xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
331 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
332 if (mask_read & XSDFEC_ISR_MASK) {
333 dev_dbg(xsdfec->dev,
334 "SDFEC enabling irq with IER failed");
335 return -EIO;
336 }
337 } else {
338 /* Disable */
339 xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
340 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
341 if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
342 dev_dbg(xsdfec->dev,
343 "SDFEC disabling irq with IDR failed");
344 return -EIO;
345 }
346 }
347 return 0;
348}
349
350static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
351{
352 u32 mask_read;
353
354 if (enable) {
355 /* Enable */
356 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
357 XSDFEC_ALL_ECC_ISR_MASK);
358 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
359 if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
360 dev_dbg(xsdfec->dev,
361 "SDFEC enabling ECC irq with ECC IER failed");
362 return -EIO;
363 }
364 } else {
365 /* Disable */
366 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
367 XSDFEC_ALL_ECC_ISR_MASK);
368 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
369 if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
370 XSDFEC_ECC_ISR_MASK) ||
371 ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
372 XSDFEC_PL_INIT_ECC_ISR_MASK))) {
373 dev_dbg(xsdfec->dev,
374 "SDFEC disable ECC irq with ECC IDR failed");
375 return -EIO;
376 }
377 }
378 return 0;
379}
380
381static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
382{
383 struct xsdfec_irq irq;
384 int err;
385 int isr_err;
386 int ecc_err;
387
388 err = copy_from_user(&irq, arg, sizeof(irq));
389 if (err)
390 return -EFAULT;
391
392 /* Setup tlast related IRQ */
393 isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
394 if (!isr_err)
395 xsdfec->config.irq.enable_isr = irq.enable_isr;
396
397 /* Setup ECC related IRQ */
398 ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
399 if (!ecc_err)
400 xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;
401
402 if (isr_err < 0 || ecc_err < 0)
403 err = -EIO;
404
405 return err;
406}
407
408static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
409{
410 struct xsdfec_turbo turbo;
411 int err;
412 u32 turbo_write;
413
414 err = copy_from_user(&turbo, arg, sizeof(turbo));
415 if (err)
416 return -EFAULT;
417
418 if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
419 return -EINVAL;
420
421 if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
422 return -EINVAL;
423
424 /* Check to see what device tree says about the FEC codes */
425 if (xsdfec->config.code == XSDFEC_LDPC_CODE)
426 return -EIO;
427
428 turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
429 << XSDFEC_TURBO_SCALE_BIT_POS) |
430 turbo.alg;
431 xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
432 return err;
433}
434
435static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
436{
437 u32 reg_value;
438 struct xsdfec_turbo turbo_params;
439 int err;
440
441 if (xsdfec->config.code == XSDFEC_LDPC_CODE)
442 return -EIO;
443
444 memset(&turbo_params, 0, sizeof(turbo_params));
445 reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
446
447 turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
448 XSDFEC_TURBO_SCALE_BIT_POS;
449 turbo_params.alg = reg_value & 0x1;
450
451 err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
452 if (err)
453 err = -EFAULT;
454
455 return err;
456}
457
458static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
459 u32 offset)
460{
461 u32 wdata;
462
463 if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
464 (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
465 dev_dbg(xsdfec->dev, "N value is not in range");
466 return -EINVAL;
467 }
468 n <<= XSDFEC_REG0_N_LSB;
469
470 if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
471 (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
472 dev_dbg(xsdfec->dev, "K value is not in range");
473 return -EINVAL;
474 }
475 k = k << XSDFEC_REG0_K_LSB;
476 wdata = k | n;
477
478 if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
479 XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
480 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
481 XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
482 (offset * XSDFEC_LDPC_REG_JUMP));
483 return -EINVAL;
484 }
485 xsdfec_regwrite(xsdfec,
486 XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
487 (offset * XSDFEC_LDPC_REG_JUMP),
488 wdata);
489 return 0;
490}
491
492static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
493 u32 no_packing, u32 nm, u32 offset)
494{
495 u32 wdata;
496
497 if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
498 dev_dbg(xsdfec->dev, "Psize is not in range");
499 return -EINVAL;
500 }
501
502 if (no_packing != 0 && no_packing != 1)
503 dev_dbg(xsdfec->dev, "No-packing bit register invalid");
504 no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
505 XSDFEC_REG1_NO_PACKING_MASK);
506
507 if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
508 dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
509 nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
510
511 wdata = nm | no_packing | psize;
512 if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
513 XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
514 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
515 XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
516 (offset * XSDFEC_LDPC_REG_JUMP));
517 return -EINVAL;
518 }
519 xsdfec_regwrite(xsdfec,
520 XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
521 (offset * XSDFEC_LDPC_REG_JUMP),
522 wdata);
523 return 0;
524}
525
526static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
527 u32 norm_type, u32 special_qc, u32 no_final_parity,
528 u32 max_schedule, u32 offset)
529{
530 u32 wdata;
531
532 if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
533 nlayers > XSDFEC_REG2_NLAYERS_MAX) {
534 dev_dbg(xsdfec->dev, "Nlayers is not in range");
535 return -EINVAL;
536 }
537
538 if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
539 dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
540 nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
541
542 if (norm_type > 1)
543 dev_dbg(xsdfec->dev, "Norm type is invalid");
544 norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
545 XSDFEC_REG2_NORM_TYPE_MASK);
546 if (special_qc > 1)
547 dev_dbg(xsdfec->dev, "Special QC in invalid");
548 special_qc = ((special_qc << XSDFEC_REG2_SPECIAL_QC_LSB) &
549 XSDFEC_REG2_SPECIAL_QC_MASK);
550
551 if (no_final_parity > 1)
552 dev_dbg(xsdfec->dev, "No final parity check invalid");
553 no_final_parity =
554 ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
555 XSDFEC_REG2_NO_FINAL_PARITY_MASK);
556 if (max_schedule &
557 ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
558 dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
559 max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
560 XSDFEC_REG2_MAX_SCHEDULE_MASK);
561
562 wdata = (max_schedule | no_final_parity | special_qc | norm_type |
563 nmqc | nlayers);
564
565 if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
566 XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
567 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
568 XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
569 (offset * XSDFEC_LDPC_REG_JUMP));
570 return -EINVAL;
571 }
572 xsdfec_regwrite(xsdfec,
573 XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
574 (offset * XSDFEC_LDPC_REG_JUMP),
575 wdata);
576 return 0;
577}
578
579static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
580 u16 qc_off, u32 offset)
581{
582 u32 wdata;
583
584 wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
585 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
586 if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
587 XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
588 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
589 XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
590 (offset * XSDFEC_LDPC_REG_JUMP));
591 return -EINVAL;
592 }
593 xsdfec_regwrite(xsdfec,
594 XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
595 (offset * XSDFEC_LDPC_REG_JUMP),
596 wdata);
597 return 0;
598}
599
600static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
601 u32 *src_ptr, u32 len, const u32 base_addr,
602 const u32 depth)
603{
604 u32 reg = 0;
605 u32 res;
606 u32 n, i;
607 u32 *addr = NULL;
608 struct page *page[MAX_NUM_PAGES];
609
610 /*
611 * Writes that go beyond the length of
612 * the Shared Scale (SC) table should fail
613 */
614 if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
615 len > depth / XSDFEC_REG_WIDTH_JUMP ||
616 offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
617 dev_dbg(xsdfec->dev, "Write exceeds SC table length");
618 return -EINVAL;
619 }
620
621 n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
622 if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
623 n += 1;
624
625 res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page);
626 if (res < n) {
627 for (i = 0; i < res; i++)
628 put_page(page[i]);
629 return -EINVAL;
630 }
631
632 for (i = 0; i < n; i++) {
633 addr = kmap(page[i]);
634 do {
635 xsdfec_regwrite(xsdfec,
636 base_addr + ((offset + reg) *
637 XSDFEC_REG_WIDTH_JUMP),
638 addr[reg]);
639 reg++;
640 } while ((reg < len) &&
641 ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
642 put_page(page[i]);
643 }
644 return reg;
645}
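
A worked example of the page math above, assuming 4 KiB pages: len = 1028 32-bit words is 4112 bytes, so the divide gives 1 with a 16-byte remainder and n = 2 pages are pinned. The open-coded divide-plus-remainder is equivalent to the kernel's rounding helper:

	n = DIV_ROUND_UP(len * XSDFEC_REG_WIDTH_JUMP, PAGE_SIZE);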
646
647static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
648{
649 struct xsdfec_ldpc_params *ldpc;
650 int ret, n;
651
652 ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
653 if (!ldpc)
654 return -ENOMEM;
655
656 if (copy_from_user(ldpc, arg, sizeof(*ldpc))) {
657 ret = -EFAULT;
658 goto err_out;
659 }
660
661 if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
662 ret = -EIO;
663 goto err_out;
664 }
665
666 /* Verify Device has not started */
667 if (xsdfec->state == XSDFEC_STARTED) {
668 ret = -EIO;
669 goto err_out;
670 }
671
672 if (xsdfec->config.code_wr_protect) {
673 ret = -EIO;
674 goto err_out;
675 }
676
677 /* Write Reg 0 */
678 ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
679 ldpc->code_id);
680 if (ret)
681 goto err_out;
682
683 /* Write Reg 1 */
684 ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
685 ldpc->code_id);
686 if (ret)
687 goto err_out;
688
689 /* Write Reg 2 */
690 ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
691 ldpc->norm_type, ldpc->special_qc,
692 ldpc->no_final_parity, ldpc->max_schedule,
693 ldpc->code_id);
694 if (ret)
695 goto err_out;
696
697 /* Write Reg 3 */
698 ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
699 ldpc->qc_off, ldpc->code_id);
700 if (ret)
701 goto err_out;
702
703 /* Write Shared Codes */
704 n = ldpc->nlayers / 4;
705 if (ldpc->nlayers % 4)
706 n++;
707
708 ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
709 XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
710 XSDFEC_SC_TABLE_DEPTH);
711 if (ret < 0)
712 goto err_out;
713
714 ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
715 ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
716 XSDFEC_LA_TABLE_DEPTH);
717 if (ret < 0)
718 goto err_out;
719
720 ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
721 ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
722 XSDFEC_QC_TABLE_DEPTH);
723 if (ret > 0)
724 ret = 0;
725err_out:
726 kfree(ldpc);
727 return ret;
728}
729
730static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
731{
732 bool order_invalid;
733 enum xsdfec_order order;
734 int err;
735
736 err = get_user(order, (enum xsdfec_order *)arg);
737 if (err)
738 return -EFAULT;
739
740 order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
741 (order != XSDFEC_OUT_OF_ORDER);
742 if (order_invalid)
743 return -EINVAL;
744
745 /* Verify Device has not started */
746 if (xsdfec->state == XSDFEC_STARTED)
747 return -EIO;
748
749 xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
750
751 xsdfec->config.order = order;
752
753 return 0;
754}
755
756static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
757{
758 bool bypass;
759 int err;
760
761 err = get_user(bypass, arg);
762 if (err)
763 return -EFAULT;
764
765 /* Verify Device has not started */
766 if (xsdfec->state == XSDFEC_STARTED)
767 return -EIO;
768
769 if (bypass)
770 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
771 else
772 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
773
774 xsdfec->config.bypass = bypass;
775
776 return 0;
777}
778
779static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
780{
781 u32 reg_value;
782 bool is_active;
783 int err;
784
785 reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
786 /* using a double ! operator instead of casting */
787 is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
788 err = put_user(is_active, arg);
789 if (err)
790 return -EFAULT;
791
792 return err;
793}
794
795static u32
796xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
797{
798 u32 axis_width_field = 0;
799
800 switch (axis_width_cfg) {
801 case XSDFEC_1x128b:
802 axis_width_field = 0;
803 break;
804 case XSDFEC_2x128b:
805 axis_width_field = 1;
806 break;
807 case XSDFEC_4x128b:
808 axis_width_field = 2;
809 break;
810 }
811
812 return axis_width_field;
813}
814
815static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
816 axis_word_inc_cfg)
817{
818 u32 axis_words_field = 0;
819
820 if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
821 axis_word_inc_cfg == XSDFEC_IN_BLOCK)
822 axis_words_field = 0;
823 else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
824 axis_words_field = 1;
825
826 return axis_words_field;
827}
828
829static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
830{
831 u32 reg_value;
832 u32 dout_words_field;
833 u32 dout_width_field;
834 u32 din_words_field;
835 u32 din_width_field;
836 struct xsdfec_config *config = &xsdfec->config;
837
838 /* translate config info to register values */
839 dout_words_field =
840 xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
841 dout_width_field =
842 xsdfec_translate_axis_width_cfg_val(config->dout_width);
843 din_words_field =
844 xsdfec_translate_axis_words_cfg_val(config->din_word_include);
845 din_width_field =
846 xsdfec_translate_axis_width_cfg_val(config->din_width);
847
848 reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
849 reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
850 reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
851 reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
852
853 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
854
855 return 0;
856}
857
858static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
859{
860 return 0;
861}
862
863static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
864{
865 return 0;
866}
867
868static int xsdfec_start(struct xsdfec_dev *xsdfec)
869{
870 u32 regread;
871
872 regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
873 regread &= 0x1;
874 if (regread != xsdfec->config.code) {
875 dev_dbg(xsdfec->dev,
876 "%s SDFEC HW code does not match driver code, reg %d, code %d",
877 __func__, regread, xsdfec->config.code);
878 return -EINVAL;
879 }
880
881 /* Set AXIS enable */
882 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
883 XSDFEC_AXIS_ENABLE_MASK);
884 /* Done */
885 xsdfec->state = XSDFEC_STARTED;
886 return 0;
887}
888
889static int xsdfec_stop(struct xsdfec_dev *xsdfec)
890{
891 u32 regread;
892
893 if (xsdfec->state != XSDFEC_STARTED)
894 dev_dbg(xsdfec->dev, "Device not started correctly");
895 /* Disable AXIS_ENABLE Input interfaces only */
896 regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
897 regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
898 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
899 /* Stop */
900 xsdfec->state = XSDFEC_STOPPED;
901 return 0;
902}
903
904static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
905{
906 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
907 xsdfec->isr_err_count = 0;
908 xsdfec->uecc_count = 0;
909 xsdfec->cecc_count = 0;
910 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
911
912 return 0;
913}
914
915static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
916{
917 int err;
918 struct xsdfec_stats user_stats;
919
920 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
921 user_stats.isr_err_count = xsdfec->isr_err_count;
922 user_stats.cecc_count = xsdfec->cecc_count;
923 user_stats.uecc_count = xsdfec->uecc_count;
924 xsdfec->stats_updated = false;
925 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
926
927 err = copy_to_user(arg, &user_stats, sizeof(user_stats));
928 if (err)
929 err = -EFAULT;
930
931 return err;
932}
933
934static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
935{
936 /* Ensure registers are aligned with core configuration */
937 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
938 xsdfec_cfg_axi_streams(xsdfec);
939 update_config_from_hw(xsdfec);
940
941 return 0;
942}
943
944static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
945 unsigned long data)
946{
947 struct xsdfec_dev *xsdfec;
948 void __user *arg = NULL;
949 int rval = -EINVAL;
950
951 xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);
952
953 /* In failed state allow only reset and get status IOCTLs */
954 if (xsdfec->state == XSDFEC_NEEDS_RESET &&
955 (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
956 cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
957 return -EPERM;
958 }
959
960 if (_IOC_TYPE(cmd) != XSDFEC_MAGIC)
961 return -ENOTTY;
962
963 /* check if ioctl argument is present and valid */
964 if (_IOC_DIR(cmd) != _IOC_NONE) {
965 arg = (void __user *)data;
966 if (!arg)
967 return rval;
968 }
969
970 switch (cmd) {
971 case XSDFEC_START_DEV:
972 rval = xsdfec_start(xsdfec);
973 break;
974 case XSDFEC_STOP_DEV:
975 rval = xsdfec_stop(xsdfec);
976 break;
977 case XSDFEC_CLEAR_STATS:
978 rval = xsdfec_clear_stats(xsdfec);
979 break;
980 case XSDFEC_GET_STATS:
981 rval = xsdfec_get_stats(xsdfec, arg);
982 break;
983 case XSDFEC_GET_STATUS:
984 rval = xsdfec_get_status(xsdfec, arg);
985 break;
986 case XSDFEC_GET_CONFIG:
987 rval = xsdfec_get_config(xsdfec, arg);
988 break;
989 case XSDFEC_SET_DEFAULT_CONFIG:
990 rval = xsdfec_set_default_config(xsdfec);
991 break;
992 case XSDFEC_SET_IRQ:
993 rval = xsdfec_set_irq(xsdfec, arg);
994 break;
995 case XSDFEC_SET_TURBO:
996 rval = xsdfec_set_turbo(xsdfec, arg);
997 break;
998 case XSDFEC_GET_TURBO:
999 rval = xsdfec_get_turbo(xsdfec, arg);
1000 break;
1001 case XSDFEC_ADD_LDPC_CODE_PARAMS:
1002 rval = xsdfec_add_ldpc(xsdfec, arg);
1003 break;
1004 case XSDFEC_SET_ORDER:
1005 rval = xsdfec_set_order(xsdfec, arg);
1006 break;
1007 case XSDFEC_SET_BYPASS:
1008 rval = xsdfec_set_bypass(xsdfec, arg);
1009 break;
1010 case XSDFEC_IS_ACTIVE:
1011 rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
1012 break;
1013 default:
1014 /* Should not get here */
1015 break;
1016 }
1017 return rval;
1018}
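
A minimal userspace sketch of driving this ioctl interface (the device node name and header install path are assumptions, not taken from this diff; error handling elided):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/xilinx_sdfec.h>	/* assumed install path of the uapi header */

int main(void)
{
	struct xsdfec_status status;
	int fd = open("/dev/xsdfec0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, XSDFEC_GET_STATUS, &status) == 0)
		printf("state=%u activity=%d\n", status.state, status.activity);
	ioctl(fd, XSDFEC_START_DEV);	/* _IOC_NONE command, no argument */
	close(fd);
	return 0;
}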
1019
1020#ifdef CONFIG_COMPAT
1021static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
1022 unsigned long data)
1023{
1024 return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data));
1025}
1026#endif
1027
1028static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
1029{
1030 unsigned int mask = 0;
1031 struct xsdfec_dev *xsdfec;
1032
1033 xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
1034
1035 if (!xsdfec)
1036 return POLLNVAL | POLLHUP;
1037
1038 poll_wait(file, &xsdfec->waitq, wait);
1039
1040 /* XSDFEC ISR detected an error */
1041 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
1042 if (xsdfec->state_updated)
1043 mask |= POLLIN | POLLPRI;
1044
1045 if (xsdfec->stats_updated)
1046 mask |= POLLIN | POLLRDNORM;
1047 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
1048
1049 return mask;
1050}
1051
73static const struct file_operations xsdfec_fops = { 1052static const struct file_operations xsdfec_fops = {
74 .owner = THIS_MODULE, 1053 .owner = THIS_MODULE,
1054 .open = xsdfec_dev_open,
1055 .release = xsdfec_dev_release,
1056 .unlocked_ioctl = xsdfec_dev_ioctl,
1057 .poll = xsdfec_poll,
1058#ifdef CONFIG_COMPAT
1059 .compat_ioctl = xsdfec_dev_compat_ioctl,
1060#endif
75}; 1061};
76 1062
1063static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
1064{
1065 struct device *dev = xsdfec->dev;
1066 struct device_node *node = dev->of_node;
1067 int rval;
1068 const char *fec_code;
1069 u32 din_width;
1070 u32 din_word_include;
1071 u32 dout_width;
1072 u32 dout_word_include;
1073
1074 rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
1075 if (rval < 0)
1076 return rval;
1077
1078 if (!strcasecmp(fec_code, "ldpc"))
1079 xsdfec->config.code = XSDFEC_LDPC_CODE;
1080 else if (!strcasecmp(fec_code, "turbo"))
1081 xsdfec->config.code = XSDFEC_TURBO_CODE;
1082 else
1083 return -EINVAL;
1084
1085 rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
1086 &din_word_include);
1087 if (rval < 0)
1088 return rval;
1089
1090 if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
1091 xsdfec->config.din_word_include = din_word_include;
1092 else
1093 return -EINVAL;
1094
1095 rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
1096 if (rval < 0)
1097 return rval;
1098
1099 switch (din_width) {
1100 /* Fall through and set for valid values */
1101 case XSDFEC_1x128b:
1102 case XSDFEC_2x128b:
1103 case XSDFEC_4x128b:
1104 xsdfec->config.din_width = din_width;
1105 break;
1106 default:
1107 return -EINVAL;
1108 }
1109
1110 rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
1111 &dout_word_include);
1112 if (rval < 0)
1113 return rval;
1114
1115 if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
1116 xsdfec->config.dout_word_include = dout_word_include;
1117 else
1118 return -EINVAL;
1119
1120 rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
1121 if (rval < 0)
1122 return rval;
1123
1124 switch (dout_width) {
1125 /* Fall through and set for valid values */
1126 case XSDFEC_1x128b:
1127 case XSDFEC_2x128b:
1128 case XSDFEC_4x128b:
1129 xsdfec->config.dout_width = dout_width;
1130 break;
1131 default:
1132 return -EINVAL;
1133 }
1134
1135 /* Write LDPC to CODE Register */
1136 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
1137
1138 xsdfec_cfg_axi_streams(xsdfec);
1139
1140 return 0;
1141}
1142
1143static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
1144{
1145 struct xsdfec_dev *xsdfec = dev_id;
1146 irqreturn_t ret = IRQ_HANDLED;
1147 u32 ecc_err;
1148 u32 isr_err;
1149 u32 uecc_count;
1150 u32 cecc_count;
1151 u32 isr_err_count;
1152 u32 aecc_count;
1153 u32 tmp;
1154
1155 WARN_ON(xsdfec->irq != irq);
1156
1157 /* Mask Interrupts */
1158 xsdfec_isr_enable(xsdfec, false);
1159 xsdfec_ecc_isr_enable(xsdfec, false);
1160 /* Read ISR */
1161 ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
1162 isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
1163 /* Clear the interrupts */
1164 xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
1165 xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);
1166
1167 tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
1168 /* Count uncorrectable 2-bit errors */
1169 uecc_count = hweight32(tmp);
1170 /* Count all ECC errors */
1171 aecc_count = hweight32(ecc_err);
1172 /* Number of correctable 1-bit ECC error */
1173 cecc_count = aecc_count - 2 * uecc_count;
1174 /* Count ISR errors */
1175 isr_err_count = hweight32(isr_err);
1176 dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
1177 uecc_count, aecc_count, cecc_count, isr_err_count);
1178 dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
1179 xsdfec->cecc_count, xsdfec->isr_err_count);
1180
1181 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
1182 /* Accumulate uncorrectable (2-bit) ECC errors */
1183 if (uecc_count)
1184 xsdfec->uecc_count += uecc_count;
1185 /* Accumulate correctable (1-bit) ECC errors */
1186 if (cecc_count)
1187 xsdfec->cecc_count += cecc_count;
1188 /* Accumulate ISR errors */
1189 if (isr_err_count)
1190 xsdfec->isr_err_count += isr_err_count;
1191
1192 /* Update state/stats flag */
1193 if (uecc_count) {
1194 if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
1195 xsdfec->state = XSDFEC_NEEDS_RESET;
1196 else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
1197 xsdfec->state = XSDFEC_PL_RECONFIGURE;
1198 xsdfec->stats_updated = true;
1199 xsdfec->state_updated = true;
1200 }
1201
1202 if (cecc_count)
1203 xsdfec->stats_updated = true;
1204
1205 if (isr_err_count) {
1206 xsdfec->state = XSDFEC_NEEDS_RESET;
1207 xsdfec->stats_updated = true;
1208 xsdfec->state_updated = true;
1209 }
1210
1211 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
1212 dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
1213 xsdfec->stats_updated);
1214
1215 /* Wake pollers only when there is something new to report */
1216 if (xsdfec->state_updated || xsdfec->stats_updated)
1217 wake_up_interruptible(&xsdfec->waitq);
1218 else
1219 ret = IRQ_NONE;
1220
1221 /* Unmask Interrupts */
1222 xsdfec_isr_enable(xsdfec, true);
1223 xsdfec_ecc_isr_enable(xsdfec, true);
1224
1225 return ret;
1226}
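
A worked example of the counter arithmetic in the handler above, which assumes (as the subtraction implies) that every multi-bit error latches both its MBE bit and the corresponding SBE bit: an ecc_err value with one MBE bit plus two SBE bits set gives aecc_count = hweight32(ecc_err) = 3 and uecc_count = 1, so cecc_count = 3 - 2 * 1 = 1 genuinely correctable error.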
1227
77static int xsdfec_clk_init(struct platform_device *pdev, 1228static int xsdfec_clk_init(struct platform_device *pdev,
78 struct xsdfec_clks *clks) 1229 struct xsdfec_clks *clks)
79{ 1230{
@@ -227,19 +1378,13 @@ static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
227 clk_disable_unprepare(clks->axi_clk); 1378 clk_disable_unprepare(clks->axi_clk);
228} 1379}
229 1380
230static void xsdfec_idr_remove(struct xsdfec_dev *xsdfec)
231{
232 mutex_lock(&dev_idr_lock);
233 idr_remove(&dev_idr, xsdfec->dev_id);
234 mutex_unlock(&dev_idr_lock);
235}
236
237static int xsdfec_probe(struct platform_device *pdev) 1381static int xsdfec_probe(struct platform_device *pdev)
238{ 1382{
239 struct xsdfec_dev *xsdfec; 1383 struct xsdfec_dev *xsdfec;
240 struct device *dev; 1384 struct device *dev;
241 struct resource *res; 1385 struct resource *res;
242 int err; 1386 int err;
1387 bool irq_enabled = true;
243 1388
244 xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL); 1389 xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
245 if (!xsdfec) 1390 if (!xsdfec)
@@ -260,12 +1405,34 @@ static int xsdfec_probe(struct platform_device *pdev)
260 goto err_xsdfec_dev; 1405 goto err_xsdfec_dev;
261 } 1406 }
262 1407
1408 xsdfec->irq = platform_get_irq(pdev, 0);
1409 if (xsdfec->irq < 0) {
1410 dev_dbg(dev, "platform_get_irq failed");
1411 irq_enabled = false;
1412 }
1413
1414 err = xsdfec_parse_of(xsdfec);
1415 if (err < 0)
1416 goto err_xsdfec_dev;
1417
1418 update_config_from_hw(xsdfec);
1419
263 /* Save driver private data */ 1420 /* Save driver private data */
264 platform_set_drvdata(pdev, xsdfec); 1421 platform_set_drvdata(pdev, xsdfec);
265 1422
266 mutex_lock(&dev_idr_lock); 1423 if (irq_enabled) {
267 err = idr_alloc(&dev_idr, xsdfec->dev_name, 0, 0, GFP_KERNEL); 1424 init_waitqueue_head(&xsdfec->waitq);
268 mutex_unlock(&dev_idr_lock); 1425 /* Register IRQ thread */
1426 err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
1427 xsdfec_irq_thread, IRQF_ONESHOT,
1428 "xilinx-sdfec16", xsdfec);
1429 if (err < 0) {
1430 dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
1431 goto err_xsdfec_dev;
1432 }
1433 }
1434
1435 err = ida_alloc(&dev_nrs, GFP_KERNEL);
269 if (err < 0) 1436 if (err < 0)
270 goto err_xsdfec_dev; 1437 goto err_xsdfec_dev;
271 xsdfec->dev_id = err; 1438 xsdfec->dev_id = err;
@@ -278,12 +1445,12 @@ static int xsdfec_probe(struct platform_device *pdev)
278 err = misc_register(&xsdfec->miscdev); 1445 err = misc_register(&xsdfec->miscdev);
279 if (err) { 1446 if (err) {
280 dev_err(dev, "error:%d. Unable to register device", err); 1447 dev_err(dev, "error:%d. Unable to register device", err);
281 goto err_xsdfec_idr; 1448 goto err_xsdfec_ida;
282 } 1449 }
283 return 0; 1450 return 0;
284 1451
285err_xsdfec_idr: 1452err_xsdfec_ida:
286 xsdfec_idr_remove(xsdfec); 1453 ida_free(&dev_nrs, xsdfec->dev_id);
287err_xsdfec_dev: 1454err_xsdfec_dev:
288 xsdfec_disable_all_clks(&xsdfec->clks); 1455 xsdfec_disable_all_clks(&xsdfec->clks);
289 return err; 1456 return err;
@@ -295,7 +1462,7 @@ static int xsdfec_remove(struct platform_device *pdev)
295 1462
296 xsdfec = platform_get_drvdata(pdev); 1463 xsdfec = platform_get_drvdata(pdev);
297 misc_deregister(&xsdfec->miscdev); 1464 misc_deregister(&xsdfec->miscdev);
298 xsdfec_idr_remove(xsdfec); 1465 ida_free(&dev_nrs, xsdfec->dev_id);
299 xsdfec_disable_all_clks(&xsdfec->clks); 1466 xsdfec_disable_all_clks(&xsdfec->clks);
300 return 0; 1467 return 0;
301} 1468}
@@ -321,8 +1488,6 @@ static int __init xsdfec_init(void)
321{ 1488{
322 int err; 1489 int err;
323 1490
324 mutex_init(&dev_idr_lock);
325 idr_init(&dev_idr);
326 err = platform_driver_register(&xsdfec_driver); 1491 err = platform_driver_register(&xsdfec_driver);
327 if (err < 0) { 1492 if (err < 0) {
328 pr_err("%s Unabled to register SDFEC driver", __func__); 1493 pr_err("%s Unabled to register SDFEC driver", __func__);
@@ -334,7 +1499,6 @@ static int __init xsdfec_init(void)
334static void __exit xsdfec_exit(void) 1499static void __exit xsdfec_exit(void)
335{ 1500{
336 platform_driver_unregister(&xsdfec_driver); 1501 platform_driver_unregister(&xsdfec_driver);
337 idr_destroy(&dev_idr);
338} 1502}
339 1503
340module_init(xsdfec_init); 1504module_init(xsdfec_init);
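
The diff above also converts device-ID allocation from a mutex-protected IDR to the simpler IDA, which handles its own locking. A minimal sketch of that pattern in isolation (names hypothetical):

static DEFINE_IDA(example_ida);

int example_get_id(void)
{
	/* returns the smallest free non-negative ID, or a negative errno */
	return ida_alloc(&example_ida, GFP_KERNEL);
}

void example_put_id(int id)
{
	ida_free(&example_ida, id);
}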
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index d9dc482ecb2f..61a17f943f47 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -16,6 +16,7 @@
16 16
17enum ocotp_devtype { 17enum ocotp_devtype {
18 IMX8QXP, 18 IMX8QXP,
19 IMX8QM,
19}; 20};
20 21
21struct ocotp_devtype_data { 22struct ocotp_devtype_data {
@@ -39,6 +40,11 @@ static struct ocotp_devtype_data imx8qxp_data = {
39 .nregs = 800, 40 .nregs = 800,
40}; 41};
41 42
43static struct ocotp_devtype_data imx8qm_data = {
44 .devtype = IMX8QM,
45 .nregs = 800,
46};
47
42static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word, 48static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
43 u32 *val) 49 u32 *val)
44{ 50{
@@ -118,6 +124,7 @@ static struct nvmem_config imx_scu_ocotp_nvmem_config = {
118 124
119static const struct of_device_id imx_scu_ocotp_dt_ids[] = { 125static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
120 { .compatible = "fsl,imx8qxp-scu-ocotp", (void *)&imx8qxp_data }, 126 { .compatible = "fsl,imx8qxp-scu-ocotp", (void *)&imx8qxp_data },
127 { .compatible = "fsl,imx8qm-scu-ocotp", (void *)&imx8qm_data },
121 { }, 128 { },
122}; 129};
123MODULE_DEVICE_TABLE(of, imx_scu_ocotp_dt_ids); 130MODULE_DEVICE_TABLE(of, imx_scu_ocotp_dt_ids);
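
New compatible strings like the imx8qm entry above are normally resolved back to their per-SoC data at probe time; a hedged sketch, assuming the ocotp_devtype_data layout shown earlier in this diff (the driver's actual probe is not part of this hunk):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_ocotp_probe(struct platform_device *pdev)
{
	const struct ocotp_devtype_data *data;

	/* returns the .data pointer of the matched of_device_id entry */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	dev_info(&pdev->dev, "%u fuse words\n", data->nregs);
	return 0;
}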
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 42d4451e7d67..dff2f3c357f5 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -479,6 +479,12 @@ static const struct ocotp_params imx8mm_params = {
479 .set_timing = imx_ocotp_set_imx6_timing, 479 .set_timing = imx_ocotp_set_imx6_timing,
480}; 480};
481 481
482static const struct ocotp_params imx8mn_params = {
483 .nregs = 256,
484 .bank_address_words = 0,
485 .set_timing = imx_ocotp_set_imx6_timing,
486};
487
482static const struct of_device_id imx_ocotp_dt_ids[] = { 488static const struct of_device_id imx_ocotp_dt_ids[] = {
483 { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params }, 489 { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
484 { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params }, 490 { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
@@ -490,6 +496,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
490 { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params }, 496 { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
491 { .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params }, 497 { .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
492 { .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params }, 498 { .compatible = "fsl,imx8mm-ocotp", .data = &imx8mm_params },
499 { .compatible = "fsl,imx8mn-ocotp", .data = &imx8mn_params },
493 { }, 500 { },
494}; 501};
495MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids); 502MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index b9f9ce089de9..07c9f38c1c60 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -155,7 +155,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset,
155 if (err) 155 if (err)
156 break; 156 break;
157 157
158 memcpy(buf + i, &tmp, efuse->config.word_size); 158 memcpy(buf + i, &tmp,
159 min_t(size_t, bytes - i, efuse->config.word_size));
159 } 160 }
160 161
161 meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1, 162 meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
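
The min_t() clamp added above keeps the last partial word from overrunning the caller's buffer when bytes is not a multiple of word_size. Worked example with word_size = 4 and bytes = 6:

/*
 * i = 0: min_t(size_t, 6 - 0, 4) = 4 -> copy 4 bytes
 * i = 4: min_t(size_t, 6 - 4, 4) = 2 -> copy 2 bytes
 * (the old code copied word_size = 4 here, writing 2 bytes past "buf")
 */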
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index c34d9fecfb10..8e4898dec002 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -200,6 +200,6 @@ static struct platform_driver mxs_ocotp_driver = {
200}; 200};
201 201
202module_platform_driver(mxs_ocotp_driver); 202module_platform_driver(mxs_ocotp_driver);
203MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); 203MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net");
204MODULE_DESCRIPTION("driver for OCOTP in i.MX23/i.MX28"); 204MODULE_DESCRIPTION("driver for OCOTP in i.MX23/i.MX28");
205MODULE_LICENSE("GPL v2"); 205MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index a079a80ddf2c..e26ef1bbf198 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -186,6 +186,7 @@ static const struct sunxi_sid_cfg sun8i_h3_cfg = {
186static const struct sunxi_sid_cfg sun50i_a64_cfg = { 186static const struct sunxi_sid_cfg sun50i_a64_cfg = {
187 .value_offset = 0x200, 187 .value_offset = 0x200,
188 .size = 0x100, 188 .size = 0x100,
189 .need_register_readout = true,
189}; 190};
190 191
191static const struct sunxi_sid_cfg sun50i_h6_cfg = { 192static const struct sunxi_sid_cfg sun50i_h6_cfg = {
diff --git a/drivers/parport/Makefile b/drivers/parport/Makefile
index 6fa41f8173b6..022c566c0f32 100644
--- a/drivers/parport/Makefile
+++ b/drivers/parport/Makefile
@@ -19,4 +19,4 @@ obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o
19obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o 19obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o
20obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o 20obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o
21obj-$(CONFIG_PARPORT_AX88796) += parport_ax88796.o 21obj-$(CONFIG_PARPORT_AX88796) += parport_ax88796.o
22obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o \ No newline at end of file 22obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 60d5d985113c..96b888bb49c6 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -680,8 +680,7 @@ static void parport_serial_pci_remove(struct pci_dev *dev)
680 680
681static int __maybe_unused parport_serial_pci_suspend(struct device *dev) 681static int __maybe_unused parport_serial_pci_suspend(struct device *dev)
682{ 682{
683 struct pci_dev *pdev = to_pci_dev(dev); 683 struct parport_serial_private *priv = dev_get_drvdata(dev);
684 struct parport_serial_private *priv = pci_get_drvdata(pdev);
685 684
686 if (priv->serial) 685 if (priv->serial)
687 pciserial_suspend_ports(priv->serial); 686 pciserial_suspend_ports(priv->serial);
@@ -692,8 +691,7 @@ static int __maybe_unused parport_serial_pci_suspend(struct device *dev)
692 691
693static int __maybe_unused parport_serial_pci_resume(struct device *dev) 692static int __maybe_unused parport_serial_pci_resume(struct device *dev)
694{ 693{
695 struct pci_dev *pdev = to_pci_dev(dev); 694 struct parport_serial_private *priv = dev_get_drvdata(dev);
696 struct parport_serial_private *priv = pci_get_drvdata(pdev);
697 695
698 if (priv->serial) 696 if (priv->serial)
699 pciserial_resume_ports(priv->serial); 697 pciserial_resume_ports(priv->serial);
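
The suspend/resume simplification above works because pci_get_drvdata() is a thin wrapper over the generic struct device field, so PM callbacks that only receive a struct device can fetch the private data directly:

/* from <linux/pci.h>: */
static inline void *pci_get_drvdata(struct pci_dev *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}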
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index ec54a2aa5cb8..245d60189375 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -117,9 +117,9 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
117 117
118 if (card_present(i)) { 118 if (card_present(i)) {
119 sockets[i].card_state = 3; 119 sockets[i].card_state = 3;
120 dprintk(KERN_DEBUG "i82092aa: slot %i is occupied\n",i); 120 dev_dbg(&dev->dev, "i82092aa: slot %i is occupied\n", i);
121 } else { 121 } else {
122 dprintk(KERN_DEBUG "i82092aa: slot %i is vacant\n",i); 122 dev_dbg(&dev->dev, "i82092aa: slot %i is vacant\n", i);
123 } 123 }
124 } 124 }
125 125
@@ -128,7 +128,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i
128 pci_write_config_byte(dev, 0x50, configbyte); /* PCI Interrupt Routing Register */ 128 pci_write_config_byte(dev, 0x50, configbyte); /* PCI Interrupt Routing Register */
129 129
130 /* Register the interrupt handler */ 130 /* Register the interrupt handler */
131 dprintk(KERN_DEBUG "Requesting interrupt %i \n",dev->irq); 131 dev_dbg(&dev->dev, "Requesting interrupt %i\n", dev->irq);
132 if ((ret = request_irq(dev->irq, i82092aa_interrupt, IRQF_SHARED, "i82092aa", i82092aa_interrupt))) { 132 if ((ret = request_irq(dev->irq, i82092aa_interrupt, IRQF_SHARED, "i82092aa", i82092aa_interrupt))) {
133 printk(KERN_ERR "i82092aa: Failed to register IRQ %d, aborting\n", dev->irq); 133 printk(KERN_ERR "i82092aa: Failed to register IRQ %d, aborting\n", dev->irq);
134 goto err_out_free_res; 134 goto err_out_free_res;
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 0d9fddc498a6..c96a1afc95bd 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
10obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o 10obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
11obj-$(CONFIG_ARCH_SUNXI) += allwinner/ 11obj-$(CONFIG_ARCH_SUNXI) += allwinner/
12obj-$(CONFIG_ARCH_MESON) += amlogic/ 12obj-$(CONFIG_ARCH_MESON) += amlogic/
13obj-$(CONFIG_LANTIQ) += lantiq/
14obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ 13obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
15obj-$(CONFIG_ARCH_RENESAS) += renesas/ 14obj-$(CONFIG_ARCH_RENESAS) += renesas/
16obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ 15obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
@@ -19,6 +18,7 @@ obj-y += broadcom/ \
19 cadence/ \ 18 cadence/ \
20 freescale/ \ 19 freescale/ \
21 hisilicon/ \ 20 hisilicon/ \
21 lantiq/ \
22 marvell/ \ 22 marvell/ \
23 motorola/ \ 23 motorola/ \
24 mscc/ \ 24 mscc/ \
diff --git a/drivers/phy/lantiq/Kconfig b/drivers/phy/lantiq/Kconfig
index eb66c857ce25..c4df9709d53f 100644
--- a/drivers/phy/lantiq/Kconfig
+++ b/drivers/phy/lantiq/Kconfig
@@ -2,6 +2,17 @@
2# 2#
3# Phy drivers for Lantiq / Intel platforms 3# Phy drivers for Lantiq / Intel platforms
4# 4#
5config PHY_LANTIQ_VRX200_PCIE
6 tristate "Lantiq VRX200/ARX300 PCIe PHY"
7 depends on SOC_TYPE_XWAY || COMPILE_TEST
8 depends on OF && HAS_IOMEM
9 select GENERIC_PHY
10 select REGMAP_MMIO
11 help
12 Support for the PCIe PHY(s) on the Lantiq / Intel VRX200 and ARX300
13 family SoCs.
14 If unsure, say N.
15
5config PHY_LANTIQ_RCU_USB2 16config PHY_LANTIQ_RCU_USB2
6 tristate "Lantiq XWAY SoC RCU based USB PHY" 17 tristate "Lantiq XWAY SoC RCU based USB PHY"
7 depends on OF && (SOC_TYPE_XWAY || COMPILE_TEST) 18 depends on OF && (SOC_TYPE_XWAY || COMPILE_TEST)
diff --git a/drivers/phy/lantiq/Makefile b/drivers/phy/lantiq/Makefile
index 540049039092..7c14eb24ab73 100644
--- a/drivers/phy/lantiq/Makefile
+++ b/drivers/phy/lantiq/Makefile
@@ -1,2 +1,3 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2obj-$(CONFIG_PHY_LANTIQ_RCU_USB2) += phy-lantiq-rcu-usb2.o 2obj-$(CONFIG_PHY_LANTIQ_RCU_USB2) += phy-lantiq-rcu-usb2.o
3obj-$(CONFIG_PHY_LANTIQ_VRX200_PCIE) += phy-lantiq-vrx200-pcie.o
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
new file mode 100644
index 000000000000..544d64a84cc0
--- /dev/null
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -0,0 +1,494 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * PCIe PHY driver for Lantiq VRX200 and ARX300 SoCs.
4 *
5 * Copyright (C) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
6 *
7 * Based on the BSP (called "UGW") driver:
8 * Copyright (C) 2009-2015 Lei Chuanhua <chuanhua.lei@lantiq.com>
9 * Copyright (C) 2016 Intel Corporation
10 *
11 * TODO: PHY modes other than 36MHz (without "SSC")
12 */
13
14#include <linux/bitfield.h>
15#include <linux/bits.h>
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/mfd/syscon.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/phy/phy.h>
22#include <linux/platform_device.h>
23#include <linux/property.h>
24#include <linux/regmap.h>
25#include <linux/reset.h>
26
27#include <dt-bindings/phy/phy-lantiq-vrx200-pcie.h>
28
29#define PCIE_PHY_PLL_CTRL1 0x44
30
31#define PCIE_PHY_PLL_CTRL2 0x46
32#define PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK GENMASK(7, 0)
33#define PCIE_PHY_PLL_CTRL2_CONST_SDM_EN BIT(8)
34#define PCIE_PHY_PLL_CTRL2_PLL_SDM_EN BIT(9)
35
36#define PCIE_PHY_PLL_CTRL3 0x48
37#define PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_EN BIT(1)
38#define PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_MASK GENMASK(6, 4)
39
40#define PCIE_PHY_PLL_CTRL4 0x4a
41#define PCIE_PHY_PLL_CTRL5 0x4c
42#define PCIE_PHY_PLL_CTRL6 0x4e
43#define PCIE_PHY_PLL_CTRL7 0x50
44#define PCIE_PHY_PLL_A_CTRL1 0x52
45
46#define PCIE_PHY_PLL_A_CTRL2 0x54
47#define PCIE_PHY_PLL_A_CTRL2_LF_MODE_EN BIT(14)
48
49#define PCIE_PHY_PLL_A_CTRL3 0x56
50#define PCIE_PHY_PLL_A_CTRL3_MMD_MASK GENMASK(15, 13)
51
52#define PCIE_PHY_PLL_STATUS 0x58
53
54#define PCIE_PHY_TX1_CTRL1 0x60
55#define PCIE_PHY_TX1_CTRL1_FORCE_EN BIT(3)
56#define PCIE_PHY_TX1_CTRL1_LOAD_EN BIT(4)
57
58#define PCIE_PHY_TX1_CTRL2 0x62
59#define PCIE_PHY_TX1_CTRL3 0x64
60#define PCIE_PHY_TX1_A_CTRL1 0x66
61#define PCIE_PHY_TX1_A_CTRL2 0x68
62#define PCIE_PHY_TX1_MOD1 0x6a
63#define PCIE_PHY_TX1_MOD2 0x6c
64#define PCIE_PHY_TX1_MOD3 0x6e
65
66#define PCIE_PHY_TX2_CTRL1 0x70
67#define PCIE_PHY_TX2_CTRL1_LOAD_EN BIT(4)
68
69#define PCIE_PHY_TX2_CTRL2 0x72
70#define PCIE_PHY_TX2_A_CTRL1 0x76
71#define PCIE_PHY_TX2_A_CTRL2 0x78
72#define PCIE_PHY_TX2_MOD1 0x7a
73#define PCIE_PHY_TX2_MOD2 0x7c
74#define PCIE_PHY_TX2_MOD3 0x7e
75
76#define PCIE_PHY_RX1_CTRL1 0xa0
77#define PCIE_PHY_RX1_CTRL1_LOAD_EN BIT(1)
78
79#define PCIE_PHY_RX1_CTRL2 0xa2
80#define PCIE_PHY_RX1_CDR 0xa4
81#define PCIE_PHY_RX1_EI 0xa6
82#define PCIE_PHY_RX1_A_CTRL 0xaa
83
84struct ltq_vrx200_pcie_phy_priv {
85 struct phy *phy;
86 unsigned int mode;
87 struct device *dev;
88 struct regmap *phy_regmap;
89 struct regmap *rcu_regmap;
90 struct clk *pdi_clk;
91 struct clk *phy_clk;
92 struct reset_control *phy_reset;
93 struct reset_control *pcie_reset;
94 u32 rcu_ahb_endian_offset;
95 u32 rcu_ahb_endian_big_endian_mask;
96};
97
98static void ltq_vrx200_pcie_phy_common_setup(struct phy *phy)
99{
100 struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
101
102 /* PLL Setting */
103 regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL1, 0x120e);
104
105 /* increase the bias reference voltage */
106 regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL2, 0x39d7);
107 regmap_write(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL3, 0x0900);
108
109 /* Endcnt */
110 regmap_write(priv->phy_regmap, PCIE_PHY_RX1_EI, 0x0004);
111 regmap_write(priv->phy_regmap, PCIE_PHY_RX1_A_CTRL, 0x6803);
112
113 regmap_update_bits(priv->phy_regmap, PCIE_PHY_TX1_CTRL1,
114 PCIE_PHY_TX1_CTRL1_FORCE_EN,
115 PCIE_PHY_TX1_CTRL1_FORCE_EN);
116
117 /* predrv_ser_en */
118 regmap_write(priv->phy_regmap, PCIE_PHY_TX1_A_CTRL2, 0x0706);
119
120 /* ctrl_lim */
121 regmap_write(priv->phy_regmap, PCIE_PHY_TX1_CTRL3, 0x1fff);
122
123 /* ctrl */
124 regmap_write(priv->phy_regmap, PCIE_PHY_TX1_A_CTRL1, 0x0810);
125
126 /* predrv_ser_en */
127 regmap_update_bits(priv->phy_regmap, PCIE_PHY_TX2_A_CTRL2, 0x7f00,
128 0x4700);
129
130 /* RTERM */
131 regmap_write(priv->phy_regmap, PCIE_PHY_TX1_CTRL2, 0x2e00);
132
133 /* Improved 100MHz clock output */
134 regmap_write(priv->phy_regmap, PCIE_PHY_TX2_CTRL2, 0x3096);
135 regmap_write(priv->phy_regmap, PCIE_PHY_TX2_A_CTRL2, 0x4707);
136
137 /* Reduced CDR BW to avoid glitches */
138 regmap_write(priv->phy_regmap, PCIE_PHY_RX1_CDR, 0x0235);
139}
140
141static void pcie_phy_36mhz_mode_setup(struct phy *phy)
142{
143 struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
144
145 regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL3,
146 PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_EN, 0x0000);
147
148 regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL3,
149 PCIE_PHY_PLL_CTRL3_EXT_MMD_DIV_RATIO_MASK, 0x0000);
150
151 regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
152 PCIE_PHY_PLL_CTRL2_PLL_SDM_EN,
153 PCIE_PHY_PLL_CTRL2_PLL_SDM_EN);
154
155 regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
156 PCIE_PHY_PLL_CTRL2_CONST_SDM_EN,
157 PCIE_PHY_PLL_CTRL2_CONST_SDM_EN);
158
159 regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL3,
160 PCIE_PHY_PLL_A_CTRL3_MMD_MASK,
161 FIELD_PREP(PCIE_PHY_PLL_A_CTRL3_MMD_MASK, 0x1));
162
163 regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_A_CTRL2,
164 PCIE_PHY_PLL_A_CTRL2_LF_MODE_EN, 0x0000);
165
166 /* const_sdm */
167 regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL1, 0x38e4);
168
169 regmap_update_bits(priv->phy_regmap, PCIE_PHY_PLL_CTRL2,
170 PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK,
171 FIELD_PREP(PCIE_PHY_PLL_CTRL2_CONST_SDM_MASK,
172 0xee));
173
174 /* pllmod */
175 regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL7, 0x0002);
176 regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL6, 0x3a04);
177 regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL5, 0xfae3);
178 regmap_write(priv->phy_regmap, PCIE_PHY_PLL_CTRL4, 0x1b72);
179}
180
181static int ltq_vrx200_pcie_phy_wait_for_pll(struct phy *phy)
182{
183 struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
184 unsigned int tmp;
185 int ret;
186
187 ret = regmap_read_poll_timeout(priv->phy_regmap, PCIE_PHY_PLL_STATUS,
188 tmp, ((tmp & 0x0070) == 0x0070), 10,
189 10000);
190 if (ret) {
191 dev_err(priv->dev, "PLL Link timeout, PLL status = 0x%04x\n",
192 tmp);
193 return ret;
194 }
195
196 return 0;
197}
198
199static void ltq_vrx200_pcie_phy_apply_workarounds(struct phy *phy)
200{
201 struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
202 static const struct reg_default slices[] = {
203 {
204 .reg = PCIE_PHY_TX1_CTRL1,
205 .def = PCIE_PHY_TX1_CTRL1_LOAD_EN,
206 },
207 {
208 .reg = PCIE_PHY_TX2_CTRL1,
209 .def = PCIE_PHY_TX2_CTRL1_LOAD_EN,
210 },
211 {
212 .reg = PCIE_PHY_RX1_CTRL1,
213 .def = PCIE_PHY_RX1_CTRL1_LOAD_EN,
214 }
215 };
216 int i;
217
218 for (i = 0; i < ARRAY_SIZE(slices); i++) {
219 /* enable load_en */
220 regmap_update_bits(priv->phy_regmap, slices[i].reg,
221 slices[i].def, slices[i].def);
222
223 udelay(1);
224
225 /* disable load_en */
226 regmap_update_bits(priv->phy_regmap, slices[i].reg,
227 slices[i].def, 0x0);
228 }
229
230 for (i = 0; i < 5; i++) {
231 /* TX2 modulation */
232 regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD1, 0x1ffe);
233 regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD2, 0xfffe);
234 regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD3, 0x0601);
235 usleep_range(1000, 2000);
236 regmap_write(priv->phy_regmap, PCIE_PHY_TX2_MOD3, 0x0001);
237
238 /* TX1 modulation */
239 regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD1, 0x1ffe);
240 regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD2, 0xfffe);
241 regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD3, 0x0601);
242 usleep_range(1000, 2000);
243 regmap_write(priv->phy_regmap, PCIE_PHY_TX1_MOD3, 0x0001);
244 }
245}
246
247static int ltq_vrx200_pcie_phy_init(struct phy *phy)
248{
249 struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
250 int ret;
251
252 if (of_device_is_big_endian(priv->dev->of_node))
253 regmap_update_bits(priv->rcu_regmap,
254 priv->rcu_ahb_endian_offset,
255 priv->rcu_ahb_endian_big_endian_mask,
256 priv->rcu_ahb_endian_big_endian_mask);
257 else
258 regmap_update_bits(priv->rcu_regmap,
259 priv->rcu_ahb_endian_offset,
260 priv->rcu_ahb_endian_big_endian_mask, 0x0);
261
262 ret = reset_control_assert(priv->phy_reset);
263 if (ret)
264 goto err;
265
266 udelay(1);
267
268 ret = reset_control_deassert(priv->phy_reset);
269 if (ret)
270 goto err;
271
272 udelay(1);
273
274 ret = reset_control_deassert(priv->pcie_reset);
275 if (ret)
276 goto err_assert_phy_reset;
277
278 /* Make sure PHY PLL is stable */
279 usleep_range(20, 40);
280
281 return 0;
282
283err_assert_phy_reset:
284 reset_control_assert(priv->phy_reset);
285err:
286 return ret;
287}
288
289static int ltq_vrx200_pcie_phy_exit(struct phy *phy)
290{
291 struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
292 int ret;
293
294 ret = reset_control_assert(priv->pcie_reset);
295 if (ret)
296 return ret;
297
298 ret = reset_control_assert(priv->phy_reset);
299 if (ret)
300 return ret;
301
302 return 0;
303}
304
305static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
306{
307 struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
308 int ret;
309
310 /* Enable PDI to access PCIe PHY register */
311 ret = clk_prepare_enable(priv->pdi_clk);
312 if (ret)
313 goto err;
314
315 /* Configure PLL and PHY clock */
316 ltq_vrx200_pcie_phy_common_setup(phy);
317
318 pcie_phy_36mhz_mode_setup(phy);
319
320 /* Enable the PCIe PHY and make PLL setting take effect */
321 ret = clk_prepare_enable(priv->phy_clk);
322 if (ret)
323 goto err_disable_pdi_clk;
324
325 /* Check if we are in "startup ready" status */
326 if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0)
327 goto err_disable_phy_clk;
328
329 ltq_vrx200_pcie_phy_apply_workarounds(phy);
330
331 return 0;
332
333err_disable_phy_clk:
334 clk_disable_unprepare(priv->phy_clk);
335err_disable_pdi_clk:
336 clk_disable_unprepare(priv->pdi_clk);
337err:
338 return ret;
339}
340
341static int ltq_vrx200_pcie_phy_power_off(struct phy *phy)
342{
343 struct ltq_vrx200_pcie_phy_priv *priv = phy_get_drvdata(phy);
344
345 clk_disable_unprepare(priv->phy_clk);
346 clk_disable_unprepare(priv->pdi_clk);
347
348 return 0;
349}
350
351static struct phy_ops ltq_vrx200_pcie_phy_ops = {
352 .init = ltq_vrx200_pcie_phy_init,
353 .exit = ltq_vrx200_pcie_phy_exit,
354 .power_on = ltq_vrx200_pcie_phy_power_on,
355 .power_off = ltq_vrx200_pcie_phy_power_off,
356 .owner = THIS_MODULE,
357};
358
359static struct phy *ltq_vrx200_pcie_phy_xlate(struct device *dev,
360 struct of_phandle_args *args)
361{
362 struct ltq_vrx200_pcie_phy_priv *priv = dev_get_drvdata(dev);
363 unsigned int mode;
364
365 if (args->args_count != 1) {
366 dev_err(dev, "invalid number of arguments\n");
367 return ERR_PTR(-EINVAL);
368 }
369
370 mode = args->args[0];
371
372 switch (mode) {
373 case LANTIQ_PCIE_PHY_MODE_36MHZ:
374 priv->mode = mode;
375 break;
376
377 case LANTIQ_PCIE_PHY_MODE_25MHZ:
378 case LANTIQ_PCIE_PHY_MODE_25MHZ_SSC:
379 case LANTIQ_PCIE_PHY_MODE_36MHZ_SSC:
380 case LANTIQ_PCIE_PHY_MODE_100MHZ:
381 case LANTIQ_PCIE_PHY_MODE_100MHZ_SSC:
382 dev_err(dev, "PHY mode not implemented yet: %u\n", mode);
383 return ERR_PTR(-EINVAL);
384
385 default:
386 dev_err(dev, "invalid PHY mode %u\n", mode);
387 return ERR_PTR(-EINVAL);
388 }
389
390 return priv->phy;
391}
392
393static int ltq_vrx200_pcie_phy_probe(struct platform_device *pdev)
394{
395 static const struct regmap_config regmap_config = {
396 .reg_bits = 8,
397 .val_bits = 16,
398 .reg_stride = 2,
399 .max_register = PCIE_PHY_RX1_A_CTRL,
400 };
401 struct ltq_vrx200_pcie_phy_priv *priv;
402 struct device *dev = &pdev->dev;
403 struct phy_provider *provider;
404 struct resource *res;
405 void __iomem *base;
406 int ret;
407
408 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
409 if (!priv)
410 return -ENOMEM;
411
412 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
413 base = devm_ioremap_resource(dev, res);
414 if (IS_ERR(base))
415 return PTR_ERR(base);
416
417 priv->phy_regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
418 if (IS_ERR(priv->phy_regmap))
419 return PTR_ERR(priv->phy_regmap);
420
421 priv->rcu_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
422 "lantiq,rcu");
423 if (IS_ERR(priv->rcu_regmap))
424 return PTR_ERR(priv->rcu_regmap);
425
426 ret = device_property_read_u32(dev, "lantiq,rcu-endian-offset",
427 &priv->rcu_ahb_endian_offset);
428 if (ret) {
429 dev_err(dev,
430 "failed to parse the 'lantiq,rcu-endian-offset' property\n");
431 return ret;
432 }
433
434 ret = device_property_read_u32(dev, "lantiq,rcu-big-endian-mask",
435 &priv->rcu_ahb_endian_big_endian_mask);
436 if (ret) {
437 dev_err(dev,
438 "failed to parse the 'lantiq,rcu-big-endian-mask' property\n");
439 return ret;
440 }
441
442 priv->pdi_clk = devm_clk_get(dev, "pdi");
443 if (IS_ERR(priv->pdi_clk))
444 return PTR_ERR(priv->pdi_clk);
445
446 priv->phy_clk = devm_clk_get(dev, "phy");
447 if (IS_ERR(priv->phy_clk))
448 return PTR_ERR(priv->phy_clk);
449
450 priv->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
451 if (IS_ERR(priv->phy_reset))
452 return PTR_ERR(priv->phy_reset);
453
454 priv->pcie_reset = devm_reset_control_get_shared(dev, "pcie");
455 if (IS_ERR(priv->pcie_reset))
456 return PTR_ERR(priv->pcie_reset);
457
458 priv->dev = dev;
459
460 priv->phy = devm_phy_create(dev, dev->of_node,
461 &ltq_vrx200_pcie_phy_ops);
462 if (IS_ERR(priv->phy)) {
463 dev_err(dev, "failed to create PHY\n");
464 return PTR_ERR(priv->phy);
465 }
466
467 phy_set_drvdata(priv->phy, priv);
468 dev_set_drvdata(dev, priv);
469
470 provider = devm_of_phy_provider_register(dev,
471 ltq_vrx200_pcie_phy_xlate);
472
473 return PTR_ERR_OR_ZERO(provider);
474}
475
476static const struct of_device_id ltq_vrx200_pcie_phy_of_match[] = {
477 { .compatible = "lantiq,vrx200-pcie-phy", },
478 { .compatible = "lantiq,arx300-pcie-phy", },
479 { /* sentinel */ },
480};
481MODULE_DEVICE_TABLE(of, ltq_vrx200_pcie_phy_of_match);
482
483static struct platform_driver ltq_vrx200_pcie_phy_driver = {
484 .probe = ltq_vrx200_pcie_phy_probe,
485 .driver = {
486 .name = "ltq-vrx200-pcie-phy",
487 .of_match_table = ltq_vrx200_pcie_phy_of_match,
488 }
489};
490module_platform_driver(ltq_vrx200_pcie_phy_driver);
491
492MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
493MODULE_DESCRIPTION("Lantiq VRX200 and ARX300 PCIe PHY driver");
494MODULE_LICENSE("GPL v2");
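
The driver above implements only the provider side of the generic PHY framework. For context, a consumer such as a PCIe controller driver would bring the PHY up through the phy_init()/phy_power_on() pair, which map onto the init and power_on callbacks defined above. A minimal sketch, where the consumer function and the "pcie-phy" lookup name are illustrative assumptions, not part of this patch:

#include <linux/phy/phy.h>

/* Hypothetical consumer-side bring-up of the VRX200 PCIe PHY. */
static int example_pcie_phy_bringup(struct device *dev)
{
	struct phy *phy;
	int ret;

	/* "pcie-phy" is an assumed phy-names entry, not from this patch */
	phy = devm_phy_get(dev, "pcie-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* toggles the PHY resets above */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* PLL setup plus TX/RX workarounds */
	if (ret)
		phy_exit(phy);

	return ret;
}
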
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 0e1642419c0b..4053ba6cd0fb 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -57,6 +57,7 @@ config PHY_MVEBU_CP110_COMPHY
 	tristate "Marvell CP110 comphy driver"
 	depends on ARCH_MVEBU || COMPILE_TEST
 	depends on OF
+	depends on HAVE_ARM_SMCCC
 	select GENERIC_PHY
 	help
 	  This driver allows to control the comphy, an hardware block providing
diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
index 3e00bc679d4e..6960dfd8ad8c 100644
--- a/drivers/phy/marvell/phy-armada38x-comphy.c
+++ b/drivers/phy/marvell/phy-armada38x-comphy.c
@@ -200,8 +200,10 @@ static int a38x_comphy_probe(struct platform_device *pdev)
 	}
 
 	phy = devm_phy_create(&pdev->dev, child, &a38x_comphy_ops);
-	if (IS_ERR(phy))
+	if (IS_ERR(phy)) {
+		of_node_put(child);
 		return PTR_ERR(phy);
+	}
 
 	priv->lane[val].base = base + 0x28 * val;
 	priv->lane[val].priv = priv;
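
The two added lines above fix a device-node reference leak: for_each_available_child_of_node() takes a reference on each child it yields and only drops it on the next iteration, so any early return from the loop body must drop it by hand. A minimal sketch of the rule, with the setup helper being a hypothetical stand-in:

#include <linux/of.h>

static int example_register_children(struct device_node *parent)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(parent, child) {
		ret = example_setup_one(child);	/* hypothetical helper */
		if (ret) {
			of_node_put(child);	/* drop the iterator's ref */
			return ret;
		}
	}

	return 0;
}
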
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 8812a104c233..1a138be8bd6a 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -26,6 +26,7 @@
 #define COMPHY_SIP_POWER_ON		0x82000001
 #define COMPHY_SIP_POWER_OFF		0x82000002
 #define COMPHY_SIP_PLL_LOCK		0x82000003
+#define COMPHY_FW_NOT_SUPPORTED	(-1)
 
 #define COMPHY_FW_MODE_SATA		0x1
 #define COMPHY_FW_MODE_SGMII		0x2
@@ -169,6 +170,7 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
 	struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
 	u32 fw_param;
 	int fw_mode;
+	int ret;
 
 	fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port,
 						 lane->mode, lane->submode);
@@ -217,7 +219,12 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
 		return -ENOTSUPP;
 	}
 
-	return mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param);
+	ret = mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param);
+	if (ret == COMPHY_FW_NOT_SUPPORTED)
+		dev_err(lane->dev,
+			"unsupported SMC call, try updating your firmware\n");
+
+	return ret;
 }
 
 static int mvebu_a3700_comphy_power_off(struct phy *phy)
@@ -277,13 +284,17 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
 	}
 
 	lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL);
-	if (!lane)
+	if (!lane) {
+		of_node_put(child);
 		return -ENOMEM;
+	}
 
 	phy = devm_phy_create(&pdev->dev, child,
 			      &mvebu_a3700_comphy_ops);
-	if (IS_ERR(phy))
+	if (IS_ERR(phy)) {
+		of_node_put(child);
 		return PTR_ERR(phy);
+	}
 
 	lane->dev = &pdev->dev;
 	lane->mode = PHY_MODE_INVALID;
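
mvebu_a3700_comphy_smc() itself is not visible in this hunk. Assuming it mirrors the CP110 helper added later in this same pull, a wrapper around the SMCCC firmware interface is only a few lines; the sketch below is illustrative, not the A3700 driver's exact body:

#include <linux/arm-smccc.h>

static int example_comphy_smc(unsigned long function, unsigned long lane,
			      unsigned long mode)
{
	struct arm_smccc_res res;

	/* a0 carries the SiP function ID; the result comes back in res.a0 */
	arm_smccc_smc(function, lane, mode, 0, 0, 0, 0, 0, &res);

	return res.a0;
}
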
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index d98e0451f6a1..e3b87c94aaf6 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -5,6 +5,8 @@
  * Antoine Tenart <antoine.tenart@free-electrons.com>
  */
 
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/mfd/syscon.h>
@@ -22,6 +24,7 @@
 #define MVEBU_COMPHY_SERDES_CFG0_PU_RX		BIT(11)
 #define MVEBU_COMPHY_SERDES_CFG0_PU_TX		BIT(12)
 #define MVEBU_COMPHY_SERDES_CFG0_HALF_BUS	BIT(14)
+#define MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE	BIT(15)
 #define MVEBU_COMPHY_SERDES_CFG1(n)		(0x4 + (n) * 0x1000)
 #define MVEBU_COMPHY_SERDES_CFG1_RESET		BIT(3)
 #define MVEBU_COMPHY_SERDES_CFG1_RX_INIT	BIT(4)
@@ -77,8 +80,8 @@
 #define MVEBU_COMPHY_TX_SLEW_RATE(n)		(0x974 + (n) * 0x1000)
 #define MVEBU_COMPHY_TX_SLEW_RATE_EMPH(n)	((n) << 5)
 #define MVEBU_COMPHY_TX_SLEW_RATE_SLC(n)	((n) << 10)
-#define MVEBU_COMPHY_DLT_CTRL(n)		(0x984 + (n) * 0x1000)
-#define MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN	BIT(2)
+#define MVEBU_COMPHY_DTL_CTRL(n)		(0x984 + (n) * 0x1000)
+#define MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN	BIT(2)
 #define MVEBU_COMPHY_FRAME_DETECT0(n)		(0xa14 + (n) * 0x1000)
 #define MVEBU_COMPHY_FRAME_DETECT0_PATN(n)	((n) << 7)
 #define MVEBU_COMPHY_FRAME_DETECT3(n)		(0xa20 + (n) * 0x1000)
@@ -111,55 +114,151 @@
 #define MVEBU_COMPHY_SELECTOR_PHY(n)		((n) * 0x4)
 #define MVEBU_COMPHY_PIPE_SELECTOR		0x1144
 #define MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n)	((n) * 0x4)
+#define MVEBU_COMPHY_SD1_CTRL1			0x1148
+#define MVEBU_COMPHY_SD1_CTRL1_RXAUI1_EN	BIT(26)
+#define MVEBU_COMPHY_SD1_CTRL1_RXAUI0_EN	BIT(27)
 
 #define MVEBU_COMPHY_LANES	6
 #define MVEBU_COMPHY_PORTS	3
 
+#define COMPHY_SIP_POWER_ON			0x82000001
+#define COMPHY_SIP_POWER_OFF			0x82000002
+#define COMPHY_FW_NOT_SUPPORTED			(-1)
+
+/*
+ * A lane is described by the following bitfields:
+ * [ 1- 0]: COMPHY polarity inversion
+ * [ 2- 7]: COMPHY speed
+ * [ 8-11]: COMPHY port index
+ * [12-16]: COMPHY mode
+ * [17]: Clock source
+ * [18-20]: PCIe width (x1, x2, x4)
+ */
+#define COMPHY_FW_POL_OFFSET	0
+#define COMPHY_FW_POL_MASK	GENMASK(1, 0)
+#define COMPHY_FW_SPEED_OFFSET	2
+#define COMPHY_FW_SPEED_MASK	GENMASK(7, 2)
+#define COMPHY_FW_SPEED_MAX	COMPHY_FW_SPEED_MASK
+#define COMPHY_FW_SPEED_1250	0
+#define COMPHY_FW_SPEED_3125	2
+#define COMPHY_FW_SPEED_5000	3
+#define COMPHY_FW_SPEED_103125	6
+#define COMPHY_FW_PORT_OFFSET	8
+#define COMPHY_FW_PORT_MASK	GENMASK(11, 8)
+#define COMPHY_FW_MODE_OFFSET	12
+#define COMPHY_FW_MODE_MASK	GENMASK(16, 12)
+#define COMPHY_FW_WIDTH_OFFSET	18
+#define COMPHY_FW_WIDTH_MASK	GENMASK(20, 18)
+
+#define COMPHY_FW_PARAM_FULL(mode, port, speed, pol, width)		\
+	((((pol) << COMPHY_FW_POL_OFFSET) & COMPHY_FW_POL_MASK) |	\
+	 (((mode) << COMPHY_FW_MODE_OFFSET) & COMPHY_FW_MODE_MASK) |	\
+	 (((port) << COMPHY_FW_PORT_OFFSET) & COMPHY_FW_PORT_MASK) |	\
+	 (((speed) << COMPHY_FW_SPEED_OFFSET) & COMPHY_FW_SPEED_MASK) |	\
+	 (((width) << COMPHY_FW_WIDTH_OFFSET) & COMPHY_FW_WIDTH_MASK))
+
+#define COMPHY_FW_PARAM(mode, port)					\
+	COMPHY_FW_PARAM_FULL(mode, port, COMPHY_FW_SPEED_MAX, 0, 0)
+
+#define COMPHY_FW_PARAM_ETH(mode, port, speed)				\
+	COMPHY_FW_PARAM_FULL(mode, port, speed, 0, 0)
+
+#define COMPHY_FW_PARAM_PCIE(mode, port, width)				\
+	COMPHY_FW_PARAM_FULL(mode, port, COMPHY_FW_SPEED_5000, 0, width)
+
+#define COMPHY_FW_MODE_SATA		0x1
+#define COMPHY_FW_MODE_SGMII		0x2 /* SGMII 1G */
+#define COMPHY_FW_MODE_HS_SGMII		0x3 /* SGMII 2.5G */
+#define COMPHY_FW_MODE_USB3H		0x4
+#define COMPHY_FW_MODE_USB3D		0x5
+#define COMPHY_FW_MODE_PCIE		0x6
+#define COMPHY_FW_MODE_RXAUI		0x7
+#define COMPHY_FW_MODE_XFI		0x8 /* SFI: 0x9 (is treated like XFI) */
+
 struct mvebu_comphy_conf {
 	enum phy_mode mode;
 	int submode;
 	unsigned lane;
 	unsigned port;
 	u32 mux;
+	u32 fw_mode;
 };
 
-#define MVEBU_COMPHY_CONF(_lane, _port, _submode, _mux)	\
+#define ETH_CONF(_lane, _port, _submode, _mux, _fw)	\
 	{						\
 		.lane = _lane,				\
 		.port = _port,				\
 		.mode = PHY_MODE_ETHERNET,		\
 		.submode = _submode,			\
 		.mux = _mux,				\
+		.fw_mode = _fw,				\
+	}
+
+#define GEN_CONF(_lane, _port, _mode, _fw)		\
+	{						\
+		.lane = _lane,				\
+		.port = _port,				\
+		.mode = _mode,				\
+		.submode = PHY_INTERFACE_MODE_NA,	\
+		.mux = -1,				\
+		.fw_mode = _fw,				\
 	}
 
 static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
 	/* lane 0 */
-	MVEBU_COMPHY_CONF(0, 1, PHY_INTERFACE_MODE_SGMII, 0x1),
-	MVEBU_COMPHY_CONF(0, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1),
+	GEN_CONF(0, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+	ETH_CONF(0, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+	ETH_CONF(0, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+	GEN_CONF(0, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
 	/* lane 1 */
-	MVEBU_COMPHY_CONF(1, 2, PHY_INTERFACE_MODE_SGMII, 0x1),
-	MVEBU_COMPHY_CONF(1, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1),
+	GEN_CONF(1, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+	GEN_CONF(1, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
+	GEN_CONF(1, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
+	GEN_CONF(1, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+	ETH_CONF(1, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+	ETH_CONF(1, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
 	/* lane 2 */
-	MVEBU_COMPHY_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1),
-	MVEBU_COMPHY_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1),
-	MVEBU_COMPHY_CONF(2, 0, PHY_INTERFACE_MODE_10GKR, 0x1),
+	ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+	ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+	ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
+	ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GKR, 0x1, COMPHY_FW_MODE_XFI),
+	GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+	GEN_CONF(2, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
+	GEN_CONF(2, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
 	/* lane 3 */
-	MVEBU_COMPHY_CONF(3, 1, PHY_INTERFACE_MODE_SGMII, 0x2),
-	MVEBU_COMPHY_CONF(3, 1, PHY_INTERFACE_MODE_2500BASEX, 0x2),
+	GEN_CONF(3, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+	ETH_CONF(3, 1, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
+	ETH_CONF(3, 1, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
+	ETH_CONF(3, 1, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
+	GEN_CONF(3, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+	GEN_CONF(3, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
 	/* lane 4 */
-	MVEBU_COMPHY_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2),
-	MVEBU_COMPHY_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2),
-	MVEBU_COMPHY_CONF(4, 0, PHY_INTERFACE_MODE_10GKR, 0x2),
-	MVEBU_COMPHY_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1),
+	ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
+	ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII),
+	ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GKR, 0x2, COMPHY_FW_MODE_XFI),
+	ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
+	GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
+	GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
+	GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
+	ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+	ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_HS_SGMII),
+	ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GKR, -1, COMPHY_FW_MODE_XFI),
 	/* lane 5 */
-	MVEBU_COMPHY_CONF(5, 2, PHY_INTERFACE_MODE_SGMII, 0x1),
-	MVEBU_COMPHY_CONF(5, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1),
+	ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
+	GEN_CONF(5, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
+	ETH_CONF(5, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
+	ETH_CONF(5, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII),
+	GEN_CONF(5, 2, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
 };
 
 struct mvebu_comphy_priv {
 	void __iomem *base;
 	struct regmap *regmap;
 	struct device *dev;
+	struct clk *mg_domain_clk;
+	struct clk *mg_core_clk;
+	struct clk *axi_clk;
+	unsigned long cp_phys;
 };
 
 struct mvebu_comphy_lane {
@@ -170,30 +269,59 @@ struct mvebu_comphy_lane {
 	int port;
 };
 
-static int mvebu_comphy_get_mux(int lane, int port,
-				enum phy_mode mode, int submode)
+static int mvebu_comphy_smc(unsigned long function, unsigned long phys,
+			    unsigned long lane, unsigned long mode)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_smc(function, phys, lane, mode, 0, 0, 0, 0, &res);
+
+	return res.a0;
+}
+
+static int mvebu_comphy_get_mode(bool fw_mode, int lane, int port,
+				 enum phy_mode mode, int submode)
 {
 	int i, n = ARRAY_SIZE(mvebu_comphy_cp110_modes);
+	/* Ignore PCIe submode: it represents the width */
+	bool ignore_submode = (mode == PHY_MODE_PCIE);
+	const struct mvebu_comphy_conf *conf;
 
 	/* Unused PHY mux value is 0x0 */
 	if (mode == PHY_MODE_INVALID)
 		return 0;
 
 	for (i = 0; i < n; i++) {
-		if (mvebu_comphy_cp110_modes[i].lane == lane &&
-		    mvebu_comphy_cp110_modes[i].port == port &&
-		    mvebu_comphy_cp110_modes[i].mode == mode &&
-		    mvebu_comphy_cp110_modes[i].submode == submode)
+		conf = &mvebu_comphy_cp110_modes[i];
+		if (conf->lane == lane &&
+		    conf->port == port &&
+		    conf->mode == mode &&
+		    (conf->submode == submode || ignore_submode))
 			break;
 	}
 
 	if (i == n)
 		return -EINVAL;
 
-	return mvebu_comphy_cp110_modes[i].mux;
+	if (fw_mode)
+		return conf->fw_mode;
+	else
+		return conf->mux;
 }
 
-static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
+static inline int mvebu_comphy_get_mux(int lane, int port,
+				       enum phy_mode mode, int submode)
+{
+	return mvebu_comphy_get_mode(false, lane, port, mode, submode);
+}
+
+static inline int mvebu_comphy_get_fw_mode(int lane, int port,
+					   enum phy_mode mode, int submode)
+{
+	return mvebu_comphy_get_mode(true, lane, port, mode, submode);
+}
+
+static int mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
 {
 	struct mvebu_comphy_priv *priv = lane->priv;
 	u32 val;
@@ -210,20 +338,61 @@ static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
 		 MVEBU_COMPHY_SERDES_CFG0_PU_TX |
 		 MVEBU_COMPHY_SERDES_CFG0_HALF_BUS |
 		 MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xf) |
-		 MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xf));
-	if (lane->submode == PHY_INTERFACE_MODE_10GKR)
+		 MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xf) |
+		 MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE);
+
+	switch (lane->submode) {
+	case PHY_INTERFACE_MODE_10GKR:
 		val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xe) |
 		       MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xe);
-	else if (lane->submode == PHY_INTERFACE_MODE_2500BASEX)
+		break;
+	case PHY_INTERFACE_MODE_RXAUI:
+		val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xb) |
+		       MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xb) |
+		       MVEBU_COMPHY_SERDES_CFG0_RXAUI_MODE;
+		break;
+	case PHY_INTERFACE_MODE_2500BASEX:
 		val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x8) |
 		       MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x8) |
 		       MVEBU_COMPHY_SERDES_CFG0_HALF_BUS;
-	else if (lane->submode == PHY_INTERFACE_MODE_SGMII)
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
 		val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x6) |
 		       MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x6) |
 		       MVEBU_COMPHY_SERDES_CFG0_HALF_BUS;
+		break;
+	default:
+		dev_err(priv->dev,
+			"unsupported comphy submode (%d) on lane %d\n",
+			lane->submode,
+			lane->id);
+		return -ENOTSUPP;
+	}
+
 	writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG0(lane->id));
 
+	if (lane->submode == PHY_INTERFACE_MODE_RXAUI) {
+		regmap_read(priv->regmap, MVEBU_COMPHY_SD1_CTRL1, &val);
+
+		switch (lane->id) {
+		case 2:
+		case 3:
+			val |= MVEBU_COMPHY_SD1_CTRL1_RXAUI0_EN;
+			break;
+		case 4:
+		case 5:
+			val |= MVEBU_COMPHY_SD1_CTRL1_RXAUI1_EN;
+			break;
+		default:
+			dev_err(priv->dev,
+				"RXAUI is not supported on comphy lane %d\n",
+				lane->id);
+			return -EINVAL;
+		}
+
+		regmap_write(priv->regmap, MVEBU_COMPHY_SD1_CTRL1, val);
+	}
+
 	/* reset */
 	val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG1(lane->id));
 	val &= ~(MVEBU_COMPHY_SERDES_CFG1_RESET |
@@ -264,6 +433,8 @@ static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane)
 	val &= ~MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(0x7);
 	val |= MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(0x1);
 	writel(val, priv->base + MVEBU_COMPHY_LOOPBACK(lane->id));
+
+	return 0;
 }
 
 static int mvebu_comphy_init_plls(struct mvebu_comphy_lane *lane)
@@ -312,17 +483,20 @@ static int mvebu_comphy_set_mode_sgmii(struct phy *phy)
 	struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
 	struct mvebu_comphy_priv *priv = lane->priv;
 	u32 val;
+	int err;
 
-	mvebu_comphy_ethernet_init_reset(lane);
+	err = mvebu_comphy_ethernet_init_reset(lane);
+	if (err)
+		return err;
 
 	val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
 	val &= ~MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
 	val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL;
 	writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
 
-	val = readl(priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
-	val &= ~MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN;
-	writel(val, priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
+	val = readl(priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+	val &= ~MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN;
+	writel(val, priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
 
 	regmap_read(priv->regmap, MVEBU_COMPHY_CONF1(lane->id), &val);
 	val &= ~MVEBU_COMPHY_CONF1_USB_PCIE;
@@ -337,22 +511,78 @@ static int mvebu_comphy_set_mode_sgmii(struct phy *phy)
 	return mvebu_comphy_init_plls(lane);
 }
 
+static int mvebu_comphy_set_mode_rxaui(struct phy *phy)
+{
+	struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
+	struct mvebu_comphy_priv *priv = lane->priv;
+	u32 val;
+	int err;
+
+	err = mvebu_comphy_ethernet_init_reset(lane);
+	if (err)
+		return err;
+
+	val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
+	val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL |
+	       MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
+	writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
+
+	val = readl(priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+	val |= MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN;
+	writel(val, priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+
+	val = readl(priv->base + MVEBU_COMPHY_SERDES_CFG2(lane->id));
+	val |= MVEBU_COMPHY_SERDES_CFG2_DFE_EN;
+	writel(val, priv->base + MVEBU_COMPHY_SERDES_CFG2(lane->id));
+
+	val = readl(priv->base + MVEBU_COMPHY_DFE_RES(lane->id));
+	val |= MVEBU_COMPHY_DFE_RES_FORCE_GEN_TBL;
+	writel(val, priv->base + MVEBU_COMPHY_DFE_RES(lane->id));
+
+	val = readl(priv->base + MVEBU_COMPHY_GEN1_S0(lane->id));
+	val &= ~MVEBU_COMPHY_GEN1_S0_TX_EMPH(0xf);
+	val |= MVEBU_COMPHY_GEN1_S0_TX_EMPH(0xd);
+	writel(val, priv->base + MVEBU_COMPHY_GEN1_S0(lane->id));
+
+	val = readl(priv->base + MVEBU_COMPHY_GEN1_S1(lane->id));
+	val &= ~(MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(0x7) |
+		 MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(0x7));
+	val |= MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(0x1) |
+	       MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(0x1) |
+	       MVEBU_COMPHY_GEN1_S1_RX_DFE_EN;
+	writel(val, priv->base + MVEBU_COMPHY_GEN1_S1(lane->id));
+
+	val = readl(priv->base + MVEBU_COMPHY_COEF(lane->id));
+	val &= ~(MVEBU_COMPHY_COEF_DFE_EN | MVEBU_COMPHY_COEF_DFE_CTRL);
+	writel(val, priv->base + MVEBU_COMPHY_COEF(lane->id));
+
+	val = readl(priv->base + MVEBU_COMPHY_GEN1_S4(lane->id));
+	val &= ~MVEBU_COMPHY_GEN1_S4_DFE_RES(0x3);
+	val |= MVEBU_COMPHY_GEN1_S4_DFE_RES(0x1);
+	writel(val, priv->base + MVEBU_COMPHY_GEN1_S4(lane->id));
+
+	return mvebu_comphy_init_plls(lane);
+}
+
 static int mvebu_comphy_set_mode_10gkr(struct phy *phy)
 {
 	struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
 	struct mvebu_comphy_priv *priv = lane->priv;
 	u32 val;
+	int err;
 
-	mvebu_comphy_ethernet_init_reset(lane);
+	err = mvebu_comphy_ethernet_init_reset(lane);
+	if (err)
+		return err;
 
 	val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
 	val |= MVEBU_COMPHY_RX_CTRL1_RXCLK2X_SEL |
 	       MVEBU_COMPHY_RX_CTRL1_CLK8T_EN;
 	writel(val, priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id));
 
-	val = readl(priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
-	val |= MVEBU_COMPHY_DLT_CTRL_DTL_FLOOP_EN;
-	writel(val, priv->base + MVEBU_COMPHY_DLT_CTRL(lane->id));
+	val = readl(priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
+	val |= MVEBU_COMPHY_DTL_CTRL_DTL_FLOOP_EN;
+	writel(val, priv->base + MVEBU_COMPHY_DTL_CTRL(lane->id));
 
 	/* Speed divider */
 	val = readl(priv->base + MVEBU_COMPHY_SPEED_DIV(lane->id));
@@ -476,7 +706,7 @@ static int mvebu_comphy_set_mode_10gkr(struct phy *phy)
 	return mvebu_comphy_init_plls(lane);
 }
 
-static int mvebu_comphy_power_on(struct phy *phy)
+static int mvebu_comphy_power_on_legacy(struct phy *phy)
 {
 	struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
 	struct mvebu_comphy_priv *priv = lane->priv;
@@ -502,6 +732,9 @@ static int mvebu_comphy_power_on(struct phy *phy)
 	case PHY_INTERFACE_MODE_2500BASEX:
 		ret = mvebu_comphy_set_mode_sgmii(phy);
 		break;
+	case PHY_INTERFACE_MODE_RXAUI:
+		ret = mvebu_comphy_set_mode_rxaui(phy);
+		break;
 	case PHY_INTERFACE_MODE_10GKR:
 		ret = mvebu_comphy_set_mode_10gkr(phy);
 		break;
@@ -517,26 +750,110 @@ static int mvebu_comphy_power_on(struct phy *phy)
 	return ret;
 }
 
+static int mvebu_comphy_power_on(struct phy *phy)
+{
+	struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
+	struct mvebu_comphy_priv *priv = lane->priv;
+	int fw_mode, fw_speed;
+	u32 fw_param = 0;
+	int ret;
+
+	fw_mode = mvebu_comphy_get_fw_mode(lane->id, lane->port,
+					   lane->mode, lane->submode);
+	if (fw_mode < 0)
+		goto try_legacy;
+
+	/* Try SMC flow first */
+	switch (lane->mode) {
+	case PHY_MODE_ETHERNET:
+		switch (lane->submode) {
+		case PHY_INTERFACE_MODE_RXAUI:
+			dev_dbg(priv->dev, "set lane %d to RXAUI mode\n",
+				lane->id);
+			fw_speed = 0;
+			break;
+		case PHY_INTERFACE_MODE_SGMII:
+			dev_dbg(priv->dev, "set lane %d to 1000BASE-X mode\n",
+				lane->id);
+			fw_speed = COMPHY_FW_SPEED_1250;
+			break;
+		case PHY_INTERFACE_MODE_2500BASEX:
+			dev_dbg(priv->dev, "set lane %d to 2500BASE-X mode\n",
+				lane->id);
+			fw_speed = COMPHY_FW_SPEED_3125;
+			break;
+		case PHY_INTERFACE_MODE_10GKR:
+			dev_dbg(priv->dev, "set lane %d to 10G-KR mode\n",
+				lane->id);
+			fw_speed = COMPHY_FW_SPEED_103125;
+			break;
+		default:
+			dev_err(priv->dev, "unsupported Ethernet mode (%d)\n",
+				lane->submode);
+			return -ENOTSUPP;
+		}
+		fw_param = COMPHY_FW_PARAM_ETH(fw_mode, lane->port, fw_speed);
+		break;
+	case PHY_MODE_USB_HOST_SS:
+	case PHY_MODE_USB_DEVICE_SS:
+		dev_dbg(priv->dev, "set lane %d to USB3 mode\n", lane->id);
+		fw_param = COMPHY_FW_PARAM(fw_mode, lane->port);
+		break;
+	case PHY_MODE_SATA:
+		dev_dbg(priv->dev, "set lane %d to SATA mode\n", lane->id);
+		fw_param = COMPHY_FW_PARAM(fw_mode, lane->port);
+		break;
+	case PHY_MODE_PCIE:
+		dev_dbg(priv->dev, "set lane %d to PCIe mode (x%d)\n", lane->id,
+			lane->submode);
+		fw_param = COMPHY_FW_PARAM_PCIE(fw_mode, lane->port,
+						lane->submode);
+		break;
+	default:
+		dev_err(priv->dev, "unsupported PHY mode (%d)\n", lane->mode);
+		return -ENOTSUPP;
+	}
+
+	ret = mvebu_comphy_smc(COMPHY_SIP_POWER_ON, priv->cp_phys, lane->id,
+			       fw_param);
+	if (!ret)
+		return ret;
+
+	if (ret == COMPHY_FW_NOT_SUPPORTED)
+		dev_err(priv->dev,
+			"unsupported SMC call, try updating your firmware\n");
+
+	dev_warn(priv->dev,
+		 "Firmware could not configure PHY %d with mode %d (ret: %d), trying legacy method\n",
+		 lane->id, lane->mode, ret);
+
+try_legacy:
+	/* Fallback to Linux's implementation */
+	return mvebu_comphy_power_on_legacy(phy);
+}
+
 static int mvebu_comphy_set_mode(struct phy *phy,
 				 enum phy_mode mode, int submode)
 {
 	struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
 
-	if (mode != PHY_MODE_ETHERNET)
-		return -EINVAL;
-
 	if (submode == PHY_INTERFACE_MODE_1000BASEX)
 		submode = PHY_INTERFACE_MODE_SGMII;
 
-	if (mvebu_comphy_get_mux(lane->id, lane->port, mode, submode) < 0)
+	if (mvebu_comphy_get_fw_mode(lane->id, lane->port, mode, submode) < 0)
 		return -EINVAL;
 
 	lane->mode = mode;
 	lane->submode = submode;
 
+	/* PCIe submode represents the width */
+	if (mode == PHY_MODE_PCIE && !lane->submode)
+		lane->submode = 1;
+
 	return 0;
 }
 
-static int mvebu_comphy_power_off(struct phy *phy)
+static int mvebu_comphy_power_off_legacy(struct phy *phy)
 {
 	struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
 	struct mvebu_comphy_priv *priv = lane->priv;
@@ -559,6 +876,21 @@ static int mvebu_comphy_power_off(struct phy *phy)
 	return 0;
 }
 
+static int mvebu_comphy_power_off(struct phy *phy)
+{
+	struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
+	struct mvebu_comphy_priv *priv = lane->priv;
+	int ret;
+
+	ret = mvebu_comphy_smc(COMPHY_SIP_POWER_OFF, priv->cp_phys,
+			       lane->id, 0);
+	if (!ret)
+		return ret;
+
+	/* Fallback to Linux's implementation */
+	return mvebu_comphy_power_off_legacy(phy);
+}
+
 static const struct phy_ops mvebu_comphy_ops = {
 	.power_on	= mvebu_comphy_power_on,
 	.power_off	= mvebu_comphy_power_off,
@@ -585,12 +917,72 @@ static struct phy *mvebu_comphy_xlate(struct device *dev,
 	return phy;
 }
 
+static int mvebu_comphy_init_clks(struct mvebu_comphy_priv *priv)
+{
+	int ret;
+
+	priv->mg_domain_clk = devm_clk_get(priv->dev, "mg_clk");
+	if (IS_ERR(priv->mg_domain_clk))
+		return PTR_ERR(priv->mg_domain_clk);
+
+	ret = clk_prepare_enable(priv->mg_domain_clk);
+	if (ret < 0)
+		return ret;
+
+	priv->mg_core_clk = devm_clk_get(priv->dev, "mg_core_clk");
+	if (IS_ERR(priv->mg_core_clk)) {
+		ret = PTR_ERR(priv->mg_core_clk);
+		goto dis_mg_domain_clk;
+	}
+
+	ret = clk_prepare_enable(priv->mg_core_clk);
+	if (ret < 0)
+		goto dis_mg_domain_clk;
+
+	priv->axi_clk = devm_clk_get(priv->dev, "axi_clk");
+	if (IS_ERR(priv->axi_clk)) {
+		ret = PTR_ERR(priv->axi_clk);
+		goto dis_mg_core_clk;
+	}
+
+	ret = clk_prepare_enable(priv->axi_clk);
+	if (ret < 0)
+		goto dis_mg_core_clk;
+
+	return 0;
+
+dis_mg_core_clk:
+	clk_disable_unprepare(priv->mg_core_clk);
+
+dis_mg_domain_clk:
+	clk_disable_unprepare(priv->mg_domain_clk);
+
+	priv->mg_domain_clk = NULL;
+	priv->mg_core_clk = NULL;
+	priv->axi_clk = NULL;
+
+	return ret;
+}
+
+static void mvebu_comphy_disable_unprepare_clks(struct mvebu_comphy_priv *priv)
+{
+	if (priv->axi_clk)
+		clk_disable_unprepare(priv->axi_clk);
+
+	if (priv->mg_core_clk)
+		clk_disable_unprepare(priv->mg_core_clk);
+
+	if (priv->mg_domain_clk)
+		clk_disable_unprepare(priv->mg_domain_clk);
+}
+
 static int mvebu_comphy_probe(struct platform_device *pdev)
 {
 	struct mvebu_comphy_priv *priv;
 	struct phy_provider *provider;
 	struct device_node *child;
 	struct resource *res;
+	int ret;
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -607,10 +999,26 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
+	/*
+	 * Ignore error if clocks have not been initialized properly for DT
+	 * compatibility reasons.
+	 */
+	ret = mvebu_comphy_init_clks(priv);
+	if (ret) {
+		if (ret == -EPROBE_DEFER)
+			return ret;
+		dev_warn(&pdev->dev, "cannot initialize clocks\n");
+	}
+
+	/*
+	 * Hack to retrieve a physical offset relative to this CP that will be
+	 * given to the firmware
+	 */
+	priv->cp_phys = res->start;
+
 	for_each_available_child_of_node(pdev->dev.of_node, child) {
 		struct mvebu_comphy_lane *lane;
 		struct phy *phy;
-		int ret;
 		u32 val;
 
 		ret = of_property_read_u32(child, "reg", &val);
@@ -626,30 +1034,45 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
 		}
 
 		lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL);
-		if (!lane)
-			return -ENOMEM;
+		if (!lane) {
+			of_node_put(child);
+			ret = -ENOMEM;
+			goto disable_clks;
+		}
 
 		phy = devm_phy_create(&pdev->dev, child, &mvebu_comphy_ops);
-		if (IS_ERR(phy))
-			return PTR_ERR(phy);
+		if (IS_ERR(phy)) {
+			of_node_put(child);
+			ret = PTR_ERR(phy);
+			goto disable_clks;
+		}
 
 		lane->priv = priv;
 		lane->mode = PHY_MODE_INVALID;
+		lane->submode = PHY_INTERFACE_MODE_NA;
 		lane->id = val;
 		lane->port = -1;
 		phy_set_drvdata(phy, lane);
 
 		/*
-		 * Once all modes are supported in this driver we should call
+		 * All modes are supported in this driver so we could call
 		 * mvebu_comphy_power_off(phy) here to avoid relying on the
-		 * bootloader/firmware configuration.
+		 * bootloader/firmware configuration, but for compatibility
+		 * reasons we cannot de-configure the COMPHY without being sure
+		 * that the firmware is up-to-date and fully-featured.
 		 */
 	}
 
 	dev_set_drvdata(&pdev->dev, priv);
 	provider = devm_of_phy_provider_register(&pdev->dev,
 						 mvebu_comphy_xlate);
+
 	return PTR_ERR_OR_ZERO(provider);
+
+disable_clks:
+	mvebu_comphy_disable_unprepare_clks(priv);
+
+	return ret;
 }
 
 static const struct of_device_id mvebu_comphy_of_match_table[] = {
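
To make the firmware parameter encoding concrete, here is a worked expansion of the lane descriptor for 10GBASE-KR on port 0, using only the masks defined in this patch; the computed constant simply follows from the shifts and is shown for illustration:

/* COMPHY_FW_PARAM_ETH(COMPHY_FW_MODE_XFI, 0, COMPHY_FW_SPEED_103125)
 *   mode  = 0x8 << 12 -> 0x8000   (COMPHY_FW_MODE_MASK,  bits 16-12)
 *   port  = 0x0 <<  8 -> 0x0000   (COMPHY_FW_PORT_MASK,  bits 11-8)
 *   speed = 0x6 <<  2 -> 0x0018   (COMPHY_FW_SPEED_MASK, bits 7-2)
 * fw_param = 0x8018, passed as the last argument of the
 * COMPHY_SIP_POWER_ON SMC call in mvebu_comphy_power_on().
 */
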
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index e3880c4a15f2..b04f4fe85ac2 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -394,6 +394,16 @@ int phy_reset(struct phy *phy)
 }
 EXPORT_SYMBOL_GPL(phy_reset);
 
+/**
+ * phy_calibrate() - Tunes the phy hw parameters for current configuration
+ * @phy: the phy returned by phy_get()
+ *
+ * Used to calibrate phy hardware, typically by adjusting some parameters in
+ * runtime, which are otherwise lost after host controller reset and cannot
+ * be applied in phy_init() or phy_power_on().
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
 int phy_calibrate(struct phy *phy)
 {
 	int ret;
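
A typical phy_calibrate() caller is a host controller that loses PHY tuning across its own reset, exactly as the new kernel-doc describes. A hedged sketch of such a call site, with the surrounding driver structure invented for illustration:

#include <linux/phy/phy.h>

struct example_host {
	struct phy *phy;	/* obtained earlier via devm_phy_get() */
};

static int example_host_reset_and_retune(struct example_host *host)
{
	/* the controller reset has just wiped the PHY's runtime tuning, */
	/* which phy_init()/phy_power_on() alone cannot restore */
	return phy_calibrate(host->phy);
}
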
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 34ff6434da8f..39e8deb8001e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -35,7 +35,7 @@
 #define PLL_READY_GATE_EN			BIT(3)
 /* QPHY_PCS_STATUS bit */
 #define PHYSTATUS				BIT(6)
-/* QPHY_COM_PCS_READY_STATUS bit */
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
 #define PCS_READY				BIT(0)
 
 /* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
@@ -115,6 +115,7 @@ enum qphy_reg_layout {
 	QPHY_SW_RESET,
 	QPHY_START_CTRL,
 	QPHY_PCS_READY_STATUS,
+	QPHY_PCS_STATUS,
 	QPHY_PCS_AUTONOMOUS_MODE_CTRL,
 	QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
 	QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
@@ -133,7 +134,7 @@ static const unsigned int pciephy_regs_layout[] = {
 	[QPHY_FLL_MAN_CODE]		= 0xd4,
 	[QPHY_SW_RESET]			= 0x00,
 	[QPHY_START_CTRL]		= 0x08,
-	[QPHY_PCS_READY_STATUS]		= 0x174,
+	[QPHY_PCS_STATUS]		= 0x174,
 };
 
 static const unsigned int usb3phy_regs_layout[] = {
@@ -144,7 +145,7 @@ static const unsigned int usb3phy_regs_layout[] = {
 	[QPHY_FLL_MAN_CODE]		= 0xd0,
 	[QPHY_SW_RESET]			= 0x00,
 	[QPHY_START_CTRL]		= 0x08,
-	[QPHY_PCS_READY_STATUS]		= 0x17c,
+	[QPHY_PCS_STATUS]		= 0x17c,
 	[QPHY_PCS_AUTONOMOUS_MODE_CTRL]	= 0x0d4,
 	[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR]	= 0x0d8,
 	[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS]	= 0x178,
@@ -153,7 +154,7 @@ static const unsigned int usb3phy_regs_layout[] = {
 static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
 	[QPHY_SW_RESET]			= 0x00,
 	[QPHY_START_CTRL]		= 0x08,
-	[QPHY_PCS_READY_STATUS]		= 0x174,
+	[QPHY_PCS_STATUS]		= 0x174,
 	[QPHY_PCS_AUTONOMOUS_MODE_CTRL]	= 0x0d8,
 	[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR]	= 0x0dc,
 	[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS]	= 0x170,
@@ -911,7 +912,6 @@ struct qmp_phy_cfg {
 
 	unsigned int start_ctrl;
 	unsigned int pwrdn_ctrl;
-	unsigned int mask_pcs_ready;
 	unsigned int mask_com_pcs_ready;
 
 	/* true, if PHY has a separate PHY_COM control block */
@@ -1074,7 +1074,6 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
 
 	.start_ctrl		= PCS_START | PLL_READY_GATE_EN,
 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
-	.mask_pcs_ready		= PHYSTATUS,
 	.mask_com_pcs_ready	= PCS_READY,
 
 	.has_phy_com_ctrl	= true,
@@ -1106,7 +1105,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PHYSTATUS,
 };
 
 /* list of resets */
@@ -1136,7 +1134,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
-	.mask_pcs_ready		= PHYSTATUS,
 
 	.has_phy_com_ctrl	= false,
 	.has_lane_rst		= false,
@@ -1167,7 +1164,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PHYSTATUS,
 
 	.has_pwrdn_delay	= true,
 	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
@@ -1199,7 +1195,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PHYSTATUS,
 
 	.has_pwrdn_delay	= true,
 	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
@@ -1226,7 +1221,6 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
 
 	.start_ctrl		= SERDES_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PCS_READY,
 
 	.is_dual_lane_phy	= true,
 	.no_pcs_sw_reset	= true,
@@ -1254,7 +1248,6 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
-	.mask_pcs_ready		= PHYSTATUS,
 };
 
 static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
@@ -1279,7 +1272,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PHYSTATUS,
 
 	.is_dual_lane_phy	= true,
 };
@@ -1457,7 +1449,7 @@ static int qcom_qmp_phy_enable(struct phy *phy)
 	void __iomem *pcs = qphy->pcs;
 	void __iomem *dp_com = qmp->dp_com;
 	void __iomem *status;
-	unsigned int mask, val;
+	unsigned int mask, val, ready;
 	int ret;
 
 	dev_vdbg(qmp->dev, "Initializing QMP phy\n");
@@ -1545,10 +1537,17 @@ static int qcom_qmp_phy_enable(struct phy *phy)
 	/* start SerDes and Phy-Coding-Sublayer */
 	qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
 
-	status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
-	mask = cfg->mask_pcs_ready;
+	if (cfg->type == PHY_TYPE_UFS) {
+		status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+		mask = PCS_READY;
+		ready = PCS_READY;
+	} else {
+		status = pcs + cfg->regs[QPHY_PCS_STATUS];
+		mask = PHYSTATUS;
+		ready = 0;
+	}
 
-	ret = readl_poll_timeout(status, val, val & mask, 10,
+	ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
 				 PHY_INIT_COMPLETE_TIMEOUT);
 	if (ret) {
 		dev_err(qmp->dev, "phy initialization timed-out\n");
@@ -2093,8 +2092,7 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
 		if (ret) {
 			dev_err(dev, "failed to create lane%d phy, %d\n",
 				id, ret);
-			pm_runtime_disable(dev);
-			return ret;
+			goto err_node_put;
 		}
 
 		/*
@@ -2105,8 +2103,7 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
 		if (ret) {
 			dev_err(qmp->dev,
 				"failed to register pipe clock source\n");
-			pm_runtime_disable(dev);
-			return ret;
+			goto err_node_put;
 		}
 		id++;
 	}
@@ -2118,6 +2115,11 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
2118 pm_runtime_disable(dev); 2115 pm_runtime_disable(dev);
2119 2116
2120 return PTR_ERR_OR_ZERO(phy_provider); 2117 return PTR_ERR_OR_ZERO(phy_provider);
2118
2119err_node_put:
2120 pm_runtime_disable(dev);
2121 of_node_put(child);
2122 return ret;
2121} 2123}
2122 2124
2123static struct platform_driver qcom_qmp_phy_driver = { 2125static struct platform_driver qcom_qmp_phy_driver = {
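
The qcom-qmp hunks above drop the per-configuration mask_pcs_ready field and key the ready poll off the PHY type instead: UFS PHYs wait for PCS_READY to become set in QPHY_PCS_READY_STATUS, while every other type waits for PHYSTATUS to clear in QPHY_PCS_STATUS. The probe hunks also funnel the two early error returns through a shared err_node_put label that disables runtime PM and drops the child node reference. A minimal sketch of the type-dependent poll, reusing the register and bit names from the diff (the helper itself is hypothetical):

#include <linux/iopoll.h>

static int qmp_wait_phy_ready(void __iomem *status, bool is_ufs)
{
	unsigned int mask, ready, val;

	if (is_ufs) {
		mask = PCS_READY;
		ready = PCS_READY;	/* UFS: wait for the bit to be set */
	} else {
		mask = PHYSTATUS;
		ready = 0;		/* others: wait for the bit to clear */
	}

	/* poll every 10 us until (val & mask) == ready or the timeout hits */
	return readl_poll_timeout(status, val, (val & mask) == ready,
				  10, PHY_INIT_COMPLETE_TIMEOUT);
}
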
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 8ffba67568ec..b7f6b1324395 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -61,6 +61,7 @@
61 USB2_OBINT_IDDIGCHG) 61 USB2_OBINT_IDDIGCHG)
62 62
63/* VBCTRL */ 63/* VBCTRL */
64#define USB2_VBCTRL_OCCLREN BIT(16)
64#define USB2_VBCTRL_DRVVBUSSEL BIT(8) 65#define USB2_VBCTRL_DRVVBUSSEL BIT(8)
65 66
66/* LINECTRL1 */ 67/* LINECTRL1 */
@@ -374,6 +375,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
374 writel(val, usb2_base + USB2_LINECTRL1); 375 writel(val, usb2_base + USB2_LINECTRL1);
375 376
376 val = readl(usb2_base + USB2_VBCTRL); 377 val = readl(usb2_base + USB2_VBCTRL);
378 val &= ~USB2_VBCTRL_OCCLREN;
377 writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL); 379 writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
378 val = readl(usb2_base + USB2_ADPCTRL); 380 val = readl(usb2_base + USB2_ADPCTRL);
379 writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); 381 writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
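
The rcar-gen3 change is a plain read-modify-write: OCCLREN is cleared in the same VBCTRL update that sets DRVVBUSSEL, which, going by the bit name, stops the hardware from clearing VBUS on over-current by itself (the exact semantics are assumed here). Condensed into a hypothetical helper:

#include <linux/io.h>

static void rcar_gen3_vbctrl_setup(void __iomem *usb2_base)
{
	u32 val = readl(usb2_base + USB2_VBCTRL);

	val &= ~USB2_VBCTRL_OCCLREN;	/* newly cleared by the fix above */
	val |= USB2_VBCTRL_DRVVBUSSEL;
	writel(val, usb2_base + USB2_VBCTRL);
}
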
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index b10a84cab4a7..2b97fb1185a0 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -198,7 +198,7 @@
198#define RK3328_BYPASS_TERM_RESISTOR_CALIB BIT(7) 198#define RK3328_BYPASS_TERM_RESISTOR_CALIB BIT(7)
199#define RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(x) UPDATE((x) >> 8, 6, 0) 199#define RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(x) UPDATE((x) >> 8, 6, 0)
200/* REG:0xc6 */ 200/* REG:0xc6 */
201#define RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(x) UPDATE(x, 7, 9) 201#define RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(x) UPDATE(x, 7, 0)
202/* REG:0xc7 */ 202/* REG:0xc7 */
203#define RK3328_TERM_RESISTOR_50 UPDATE(0, 2, 1) 203#define RK3328_TERM_RESISTOR_50 UPDATE(0, 2, 1)
204#define RK3328_TERM_RESISTOR_62_5 UPDATE(1, 2, 1) 204#define RK3328_TERM_RESISTOR_62_5 UPDATE(1, 2, 1)
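
The rk3328 fix corrects a transposed (msb, lsb) pair. With the conventional Rockchip field macro sketched below (assumed, not shown in this hunk), UPDATE(x, 7, 9) yields an empty mask because GENMASK(7, 9) has its high bit below its low bit, so the low byte of the termination-resistor calibration speed was never written; UPDATE(x, 7, 0) places the value into bits [7:0] as the REG:0xc6 comment intends.

#include <linux/bits.h>

/* Conventional field-update macro: place x into bits [h:l]. */
#define UPDATE(x, h, l)		(((x) << (l)) & GENMASK((h), (l)))
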
diff --git a/drivers/phy/samsung/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c
index aebd216dcf2f..6c607df1dc9a 100644
--- a/drivers/phy/samsung/phy-exynos-dp-video.c
+++ b/drivers/phy/samsung/phy-exynos-dp-video.c
@@ -109,6 +109,7 @@ static struct platform_driver exynos_dp_video_phy_driver = {
109 .driver = { 109 .driver = {
110 .name = "exynos-dp-video-phy", 110 .name = "exynos-dp-video-phy",
111 .of_match_table = exynos_dp_video_phy_of_match, 111 .of_match_table = exynos_dp_video_phy_of_match,
112 .suppress_bind_attrs = true,
112 } 113 }
113}; 114};
114module_platform_driver(exynos_dp_video_phy_driver); 115module_platform_driver(exynos_dp_video_phy_driver);
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index 3784bf100b95..bb51195f189f 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -359,6 +359,7 @@ static struct platform_driver exynos_mipi_video_phy_driver = {
359 .driver = { 359 .driver = {
360 .of_match_table = exynos_mipi_video_phy_of_match, 360 .of_match_table = exynos_mipi_video_phy_of_match,
361 .name = "exynos-mipi-video-phy", 361 .name = "exynos-mipi-video-phy",
362 .suppress_bind_attrs = true,
362 } 363 }
363}; 364};
364module_platform_driver(exynos_mipi_video_phy_driver); 365module_platform_driver(exynos_mipi_video_phy_driver);
diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
index 1b4ba8bdb43c..659e7ae0a6cf 100644
--- a/drivers/phy/samsung/phy-exynos-pcie.c
+++ b/drivers/phy/samsung/phy-exynos-pcie.c
@@ -272,6 +272,7 @@ static struct platform_driver exynos_pcie_phy_driver = {
272 .driver = { 272 .driver = {
273 .of_match_table = exynos_pcie_phy_match, 273 .of_match_table = exynos_pcie_phy_match,
274 .name = "exynos_pcie_phy", 274 .name = "exynos_pcie_phy",
275 .suppress_bind_attrs = true,
275 } 276 }
276}; 277};
277 278
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 646259bee909..e510732afb8b 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -953,6 +953,7 @@ static struct platform_driver exynos5_usb3drd_phy = {
953 .driver = { 953 .driver = {
954 .of_match_table = exynos5_usbdrd_phy_of_match, 954 .of_match_table = exynos5_usbdrd_phy_of_match,
955 .name = "exynos5_usb3drd_phy", 955 .name = "exynos5_usb3drd_phy",
956 .suppress_bind_attrs = true,
956 } 957 }
957}; 958};
958 959
diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c
index 9e5fc126032c..4dd7324d91b2 100644
--- a/drivers/phy/samsung/phy-exynos5250-sata.c
+++ b/drivers/phy/samsung/phy-exynos5250-sata.c
@@ -237,6 +237,7 @@ static struct platform_driver exynos_sata_phy_driver = {
237 .driver = { 237 .driver = {
238 .of_match_table = exynos_sata_phy_of_match, 238 .of_match_table = exynos_sata_phy_of_match,
239 .name = "samsung,sata-phy", 239 .name = "samsung,sata-phy",
240 .suppress_bind_attrs = true,
240 } 241 }
241}; 242};
242module_platform_driver(exynos_sata_phy_driver); 243module_platform_driver(exynos_sata_phy_driver);
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index 6c82f4fbe8a2..090aa02e02de 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -250,6 +250,7 @@ static struct platform_driver samsung_usb2_phy_driver = {
250 .driver = { 250 .driver = {
251 .of_match_table = samsung_usb2_phy_of_match, 251 .of_match_table = samsung_usb2_phy_of_match,
252 .name = "samsung-usb2-phy", 252 .name = "samsung-usb2-phy",
253 .suppress_bind_attrs = true,
253 } 254 }
254}; 255};
255 256
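
Each of the Samsung PHY drivers in this run gains .suppress_bind_attrs = true, which removes the bind/unbind files from the driver's sysfs directory so the PHY provider cannot be unbound from user space while consumers may still hold references to its PHYs. A minimal sketch with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_phy_probe(struct platform_device *pdev)
{
	return 0;	/* placeholder probe */
}

static struct platform_driver example_phy_driver = {
	.probe	= example_phy_probe,
	.driver	= {
		.name			= "example-phy",
		/* no sysfs bind/unbind: unbinding a PHY provider at
		 * runtime would leave its consumers dangling */
		.suppress_bind_attrs	= true,
	},
};
module_platform_driver(example_phy_driver);
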
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index f8edd0840fa2..f14f1f053a75 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -405,6 +405,7 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
405 const __be32 *addr; 405 const __be32 *addr;
406 unsigned int reg; 406 unsigned int reg;
407 struct clk *clk; 407 struct clk *clk;
408 int ret = 0;
408 409
409 mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); 410 mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
410 if (!mux) 411 if (!mux)
@@ -413,34 +414,40 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
413 init = &mux->clk_data; 414 init = &mux->clk_data;
414 415
415 regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0); 416 regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0);
416 of_node_put(regmap_node);
417 if (!regmap_node) { 417 if (!regmap_node) {
418 dev_err(dev, "Fail to get serdes-clk node\n"); 418 dev_err(dev, "Fail to get serdes-clk node\n");
419 return -ENODEV; 419 ret = -ENODEV;
420 goto out_put_node;
420 } 421 }
421 422
422 regmap = syscon_node_to_regmap(regmap_node->parent); 423 regmap = syscon_node_to_regmap(regmap_node->parent);
423 if (IS_ERR(regmap)) { 424 if (IS_ERR(regmap)) {
424 dev_err(dev, "Fail to get Syscon regmap\n"); 425 dev_err(dev, "Fail to get Syscon regmap\n");
425 return PTR_ERR(regmap); 426 ret = PTR_ERR(regmap);
427 goto out_put_node;
426 } 428 }
427 429
428 num_parents = of_clk_get_parent_count(node); 430 num_parents = of_clk_get_parent_count(node);
429 if (num_parents < 2) { 431 if (num_parents < 2) {
430 dev_err(dev, "SERDES clock must have parents\n"); 432 dev_err(dev, "SERDES clock must have parents\n");
431 return -EINVAL; 433 ret = -EINVAL;
434 goto out_put_node;
432 } 435 }
433 436
434 parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents), 437 parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents),
435 GFP_KERNEL); 438 GFP_KERNEL);
436 if (!parent_names) 439 if (!parent_names) {
437 return -ENOMEM; 440 ret = -ENOMEM;
441 goto out_put_node;
442 }
438 443
439 of_clk_parent_fill(node, parent_names, num_parents); 444 of_clk_parent_fill(node, parent_names, num_parents);
440 445
441 addr = of_get_address(regmap_node, 0, NULL, NULL); 446 addr = of_get_address(regmap_node, 0, NULL, NULL);
442 if (!addr) 447 if (!addr) {
443 return -EINVAL; 448 ret = -EINVAL;
449 goto out_put_node;
450 }
444 451
445 reg = be32_to_cpu(*addr); 452 reg = be32_to_cpu(*addr);
446 453
@@ -456,12 +463,16 @@ static int serdes_am654_clk_register(struct serdes_am654 *am654_phy,
456 mux->hw.init = init; 463 mux->hw.init = init;
457 464
458 clk = devm_clk_register(dev, &mux->hw); 465 clk = devm_clk_register(dev, &mux->hw);
459 if (IS_ERR(clk)) 466 if (IS_ERR(clk)) {
460 return PTR_ERR(clk); 467 ret = PTR_ERR(clk);
468 goto out_put_node;
469 }
461 470
462 am654_phy->clks[clock_num] = clk; 471 am654_phy->clks[clock_num] = clk;
463 472
464 return 0; 473out_put_node:
474 of_node_put(regmap_node);
475 return ret;
465} 476}
466 477
467static const struct of_device_id serdes_am654_id_table[] = { 478static const struct of_device_id serdes_am654_id_table[] = {
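
Two problems fall out of the am654-serdes hunks: the old code called of_node_put() immediately after of_parse_phandle() and then kept dereferencing regmap_node (a use after the reference was dropped), and the many early returns made the reference easy to leak once the put moved. The rewrite holds the reference for the whole function and routes every failure through a single out_put_node label. The shape of the pattern, with hypothetical names:

#include <linux/of.h>
#include <linux/of_address.h>

static int example_parse(struct device_node *node)
{
	struct device_node *target;
	const __be32 *addr;
	int ret = 0;

	target = of_parse_phandle(node, "example,phandle", 0); /* assumed property */
	if (!target)
		return -ENODEV;

	addr = of_get_address(target, 0, NULL, NULL);
	if (!addr) {
		ret = -EINVAL;
		goto out_put_node;	/* every later failure jumps here too */
	}
	/* ... further steps using target ... */

out_put_node:
	of_node_put(target);		/* balances of_parse_phandle() */
	return ret;
}
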
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index f3585777324c..29fbab55c3b3 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1338,12 +1338,15 @@ static int of_qcom_slim_ngd_register(struct device *parent,
1338 continue; 1338 continue;
1339 1339
1340 ngd = kzalloc(sizeof(*ngd), GFP_KERNEL); 1340 ngd = kzalloc(sizeof(*ngd), GFP_KERNEL);
1341 if (!ngd) 1341 if (!ngd) {
1342 of_node_put(node);
1342 return -ENOMEM; 1343 return -ENOMEM;
1344 }
1343 1345
1344 ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id); 1346 ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
1345 if (!ngd->pdev) { 1347 if (!ngd->pdev) {
1346 kfree(ngd); 1348 kfree(ngd);
1349 of_node_put(node);
1347 return -ENOMEM; 1350 return -ENOMEM;
1348 } 1351 }
1349 ngd->id = id; 1352 ngd->id = id;
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index 9be41089edde..b2f013bfe42e 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -439,7 +439,7 @@ static inline bool slim_tid_txn(u8 mt, u8 mc)
439 (mc == SLIM_MSG_MC_REQUEST_INFORMATION || 439 (mc == SLIM_MSG_MC_REQUEST_INFORMATION ||
440 mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION || 440 mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION ||
441 mc == SLIM_MSG_MC_REQUEST_VALUE || 441 mc == SLIM_MSG_MC_REQUEST_VALUE ||
442 mc == SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION)); 442 mc == SLIM_MSG_MC_REQUEST_CHANGE_VALUE));
443} 443}
444 444
445static inline bool slim_ec_txn(u8 mt, u8 mc) 445static inline bool slim_ec_txn(u8 mt, u8 mc)
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 3f55cb3c81b2..001187c577bf 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2obj-${CONFIG_THUNDERBOLT} := thunderbolt.o 2obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
3thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o 3thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
4thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o 4thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 2427d73be731..2ec1af8f7968 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -930,6 +930,23 @@ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
930 return res; 930 return res;
931} 931}
932 932
933static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
934 const struct tb_cfg_result *res)
935{
936 /*
937 * For unimplemented ports, access to the port config space may
938 * return TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
939 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
940 * that the caller can mark the port as disabled.
941 */
942 if (space == TB_CFG_PORT &&
943 res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
944 return -ENODEV;
945
946 tb_cfg_print_error(ctl, res);
947 return -EIO;
948}
949
933int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, 950int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
934 enum tb_cfg_space space, u32 offset, u32 length) 951 enum tb_cfg_space space, u32 offset, u32 length)
935{ 952{
@@ -942,8 +959,7 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
942 959
943 case 1: 960 case 1:
944 /* Thunderbolt error, tb_error holds the actual number */ 961 /* Thunderbolt error, tb_error holds the actual number */
945 tb_cfg_print_error(ctl, &res); 962 return tb_cfg_get_error(ctl, space, &res);
946 return -EIO;
947 963
948 case -ETIMEDOUT: 964 case -ETIMEDOUT:
949 tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n", 965 tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
@@ -969,8 +985,7 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
969 985
970 case 1: 986 case 1:
971 /* Thunderbolt error, tb_error holds the actual number */ 987 /* Thunderbolt error, tb_error holds the actual number */
972 tb_cfg_print_error(ctl, &res); 988 return tb_cfg_get_error(ctl, space, &res);
973 return -EIO;
974 989
975 case -ETIMEDOUT: 990 case -ETIMEDOUT:
976 tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n", 991 tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
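
tb_cfg_get_error() centralizes the Thunderbolt-error-to-errno mapping for both the read and write paths: an INVALID_CONFIG_SPACE error on port config space becomes -ENODEV rather than a logged -EIO, so callers can treat the port as unimplemented. The switch.c hunk later in this diff consumes it roughly like this (condensed from that hunk):

res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
if (res == -ENODEV) {
	/* unimplemented port: skip it rather than failing the switch */
	return 0;
}
if (res)
	return res;
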
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 81e8ac4c5805..ee5196479854 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -414,7 +414,7 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
414 struct device *dev = &sw->tb->nhi->pdev->dev; 414 struct device *dev = &sw->tb->nhi->pdev->dev;
415 int len, res; 415 int len, res;
416 416
417 len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0); 417 len = device_property_count_u8(dev, "ThunderboltDROM");
418 if (len < 0 || len < sizeof(struct tb_drom_header)) 418 if (len < 0 || len < sizeof(struct tb_drom_header))
419 return -EINVAL; 419 return -EINVAL;
420 420
@@ -525,10 +525,6 @@ int tb_drom_read(struct tb_switch *sw)
525 sw->ports[3].dual_link_port = &sw->ports[4]; 525 sw->ports[3].dual_link_port = &sw->ports[4];
526 sw->ports[4].dual_link_port = &sw->ports[3]; 526 sw->ports[4].dual_link_port = &sw->ports[3];
527 527
528 /* Port 5 is inaccessible on this gen 1 controller */
529 if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
530 sw->ports[5].disabled = true;
531
532 return 0; 528 return 0;
533 } 529 }
534 530
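
device_property_count_u8() is effectively the old call spelled out: it invokes device_property_read_u8_array() with a NULL buffer to obtain the element count. Note the seemingly redundant len < 0 test that survives the conversion: comparing a negative int directly against sizeof() (a size_t) would promote it to unsigned and let the error slip through. A usage sketch around a hypothetical helper (struct tb_drom_header comes from the driver above):

#include <linux/property.h>

static int example_read_drom(struct device *dev, u8 *buf, size_t buf_len)
{
	int len = device_property_count_u8(dev, "ThunderboltDROM");

	/* keep len < 0 explicit: a negative int compared against a
	 * size_t would be promoted to unsigned and pass the check */
	if (len < 0 || len < sizeof(struct tb_drom_header))
		return -EINVAL;
	if (len > buf_len)
		return -E2BIG;
	return device_property_read_u8_array(dev, "ThunderboltDROM", buf, len);
}
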
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index fbdcef56a676..245588f691e7 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -55,16 +55,20 @@
55 * @safe_mode: ICM is in safe mode 55 * @safe_mode: ICM is in safe mode
56 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) 56 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
57 * @rpm: Does the controller support runtime PM (RTD3) 57 * @rpm: Does the controller support runtime PM (RTD3)
58 * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
59 * @veto: Is RTD3 veto in effect
58 * @is_supported: Checks if we can support ICM on this controller 60 * @is_supported: Checks if we can support ICM on this controller
59 * @cio_reset: Trigger CIO reset 61 * @cio_reset: Trigger CIO reset
60 * @get_mode: Read and return the ICM firmware mode (optional) 62 * @get_mode: Read and return the ICM firmware mode (optional)
61 * @get_route: Find a route string for given switch 63 * @get_route: Find a route string for given switch
62 * @save_devices: Ask ICM to save devices to ACL when suspending (optional) 64 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
63 * @driver_ready: Send driver ready message to ICM 65 * @driver_ready: Send driver ready message to ICM
66 * @set_uuid: Set UUID for the root switch (optional)
64 * @device_connected: Handle device connected ICM message 67 * @device_connected: Handle device connected ICM message
65 * @device_disconnected: Handle device disconnected ICM message 68 * @device_disconnected: Handle device disconnected ICM message
66 * @xdomain_connected - Handle XDomain connected ICM message 69 * @xdomain_connected - Handle XDomain connected ICM message
67 * @xdomain_disconnected - Handle XDomain disconnected ICM message 70 * @xdomain_disconnected - Handle XDomain disconnected ICM message
71 * @rtd3_veto: Handle RTD3 veto notification ICM message
68 */ 72 */
69struct icm { 73struct icm {
70 struct mutex request_lock; 74 struct mutex request_lock;
@@ -74,6 +78,8 @@ struct icm {
74 int vnd_cap; 78 int vnd_cap;
75 bool safe_mode; 79 bool safe_mode;
76 bool rpm; 80 bool rpm;
81 bool can_upgrade_nvm;
82 bool veto;
77 bool (*is_supported)(struct tb *tb); 83 bool (*is_supported)(struct tb *tb);
78 int (*cio_reset)(struct tb *tb); 84 int (*cio_reset)(struct tb *tb);
79 int (*get_mode)(struct tb *tb); 85 int (*get_mode)(struct tb *tb);
@@ -82,6 +88,7 @@ struct icm {
82 int (*driver_ready)(struct tb *tb, 88 int (*driver_ready)(struct tb *tb,
83 enum tb_security_level *security_level, 89 enum tb_security_level *security_level,
84 size_t *nboot_acl, bool *rpm); 90 size_t *nboot_acl, bool *rpm);
91 void (*set_uuid)(struct tb *tb);
85 void (*device_connected)(struct tb *tb, 92 void (*device_connected)(struct tb *tb,
86 const struct icm_pkg_header *hdr); 93 const struct icm_pkg_header *hdr);
87 void (*device_disconnected)(struct tb *tb, 94 void (*device_disconnected)(struct tb *tb,
@@ -90,6 +97,7 @@ struct icm {
90 const struct icm_pkg_header *hdr); 97 const struct icm_pkg_header *hdr);
91 void (*xdomain_disconnected)(struct tb *tb, 98 void (*xdomain_disconnected)(struct tb *tb,
92 const struct icm_pkg_header *hdr); 99 const struct icm_pkg_header *hdr);
100 void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
93}; 101};
94 102
95struct icm_notification { 103struct icm_notification {
@@ -294,6 +302,43 @@ static int icm_request(struct tb *tb, const void *request, size_t request_size,
294 return -ETIMEDOUT; 302 return -ETIMEDOUT;
295} 303}
296 304
305/*
306 * If rescan is queued to run (we are resuming), postpone it to give the
307 * firmware some more time to send device connected notifications for the
308 * next devices in the chain.
309 */
310static void icm_postpone_rescan(struct tb *tb)
311{
312 struct icm *icm = tb_priv(tb);
313
314 if (delayed_work_pending(&icm->rescan_work))
315 mod_delayed_work(tb->wq, &icm->rescan_work,
316 msecs_to_jiffies(500));
317}
318
319static void icm_veto_begin(struct tb *tb)
320{
321 struct icm *icm = tb_priv(tb);
322
323 if (!icm->veto) {
324 icm->veto = true;
325 /* Keep the domain powered while veto is in effect */
326 pm_runtime_get(&tb->dev);
327 }
328}
329
330static void icm_veto_end(struct tb *tb)
331{
332 struct icm *icm = tb_priv(tb);
333
334 if (icm->veto) {
335 icm->veto = false;
336 /* Allow the domain suspend now */
337 pm_runtime_mark_last_busy(&tb->dev);
338 pm_runtime_put_autosuspend(&tb->dev);
339 }
340}
341
297static bool icm_fr_is_supported(struct tb *tb) 342static bool icm_fr_is_supported(struct tb *tb)
298{ 343{
299 return !x86_apple_machine; 344 return !x86_apple_machine;
@@ -517,14 +562,16 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
517 return 0; 562 return 0;
518} 563}
519 564
520static void add_switch(struct tb_switch *parent_sw, u64 route, 565static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route,
521 const uuid_t *uuid, const u8 *ep_name, 566 const uuid_t *uuid, const u8 *ep_name,
522 size_t ep_name_size, u8 connection_id, u8 connection_key, 567 size_t ep_name_size, u8 connection_id,
523 u8 link, u8 depth, enum tb_security_level security_level, 568 u8 connection_key, u8 link, u8 depth,
524 bool authorized, bool boot) 569 enum tb_security_level security_level,
570 bool authorized, bool boot)
525{ 571{
526 const struct intel_vss *vss; 572 const struct intel_vss *vss;
527 struct tb_switch *sw; 573 struct tb_switch *sw;
574 int ret;
528 575
529 pm_runtime_get_sync(&parent_sw->dev); 576 pm_runtime_get_sync(&parent_sw->dev);
530 577
@@ -555,14 +602,18 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
555 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); 602 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
556 tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); 603 tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
557 604
558 if (tb_switch_add(sw)) { 605 ret = tb_switch_add(sw);
606 if (ret) {
559 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 607 tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
560 tb_switch_put(sw); 608 tb_switch_put(sw);
609 sw = ERR_PTR(ret);
561 } 610 }
562 611
563out: 612out:
564 pm_runtime_mark_last_busy(&parent_sw->dev); 613 pm_runtime_mark_last_busy(&parent_sw->dev);
565 pm_runtime_put_autosuspend(&parent_sw->dev); 614 pm_runtime_put_autosuspend(&parent_sw->dev);
615
616 return sw;
566} 617}
567 618
568static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, 619static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -654,6 +705,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
654 u64 route; 705 u64 route;
655 int ret; 706 int ret;
656 707
708 icm_postpone_rescan(tb);
709
657 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 710 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
658 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 711 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
659 ICM_LINK_INFO_DEPTH_SHIFT; 712 ICM_LINK_INFO_DEPTH_SHIFT;
@@ -1084,7 +1137,8 @@ static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
1084} 1137}
1085 1138
1086static void 1139static void
1087icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 1140__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
1141 bool force_rtd3)
1088{ 1142{
1089 const struct icm_tr_event_device_connected *pkg = 1143 const struct icm_tr_event_device_connected *pkg =
1090 (const struct icm_tr_event_device_connected *)hdr; 1144 (const struct icm_tr_event_device_connected *)hdr;
@@ -1094,6 +1148,8 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1094 bool authorized, boot; 1148 bool authorized, boot;
1095 u64 route; 1149 u64 route;
1096 1150
1151 icm_postpone_rescan(tb);
1152
1097 /* 1153 /*
1098 * Currently we don't use the QoS information coming with the 1154 * Currently we don't use the QoS information coming with the
1099 * device connected message so simply just ignore that extra 1155 * device connected message so simply just ignore that extra
@@ -1149,14 +1205,22 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1149 return; 1205 return;
1150 } 1206 }
1151 1207
1152 add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, 1208 sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
1153 sizeof(pkg->ep_name), pkg->connection_id, 1209 sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0,
1154 0, 0, 0, security_level, authorized, boot); 1210 security_level, authorized, boot);
1211 if (!IS_ERR(sw) && force_rtd3)
1212 sw->rpm = true;
1155 1213
1156 tb_switch_put(parent_sw); 1214 tb_switch_put(parent_sw);
1157} 1215}
1158 1216
1159static void 1217static void
1218icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1219{
1220 __icm_tr_device_connected(tb, hdr, false);
1221}
1222
1223static void
1160icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) 1224icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
1161{ 1225{
1162 const struct icm_tr_event_device_disconnected *pkg = 1226 const struct icm_tr_event_device_disconnected *pkg =
@@ -1466,6 +1530,61 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
1466 return 0; 1530 return 0;
1467} 1531}
1468 1532
1533static int
1534icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
1535 size_t *nboot_acl, bool *rpm)
1536{
1537 struct icm_tr_pkg_driver_ready_response reply;
1538 struct icm_pkg_driver_ready request = {
1539 .hdr.code = ICM_DRIVER_READY,
1540 };
1541 int ret;
1542
1543 memset(&reply, 0, sizeof(reply));
1544 ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1545 1, 20000);
1546 if (ret)
1547 return ret;
1548
1549 /* Ice Lake always supports RTD3 */
1550 if (rpm)
1551 *rpm = true;
1552
1553 return 0;
1554}
1555
1556static void icm_icl_set_uuid(struct tb *tb)
1557{
1558 struct tb_nhi *nhi = tb->nhi;
1559 u32 uuid[4];
1560
1561 pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
1562 pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
1563 uuid[2] = 0xffffffff;
1564 uuid[3] = 0xffffffff;
1565
1566 tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
1567}
1568
1569static void
1570icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1571{
1572 __icm_tr_device_connected(tb, hdr, true);
1573}
1574
1575static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
1576{
1577 const struct icm_icl_event_rtd3_veto *pkg =
1578 (const struct icm_icl_event_rtd3_veto *)hdr;
1579
1580 tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);
1581
1582 if (pkg->veto_reason)
1583 icm_veto_begin(tb);
1584 else
1585 icm_veto_end(tb);
1586}
1587
1469static void icm_handle_notification(struct work_struct *work) 1588static void icm_handle_notification(struct work_struct *work)
1470{ 1589{
1471 struct icm_notification *n = container_of(work, typeof(*n), work); 1590 struct icm_notification *n = container_of(work, typeof(*n), work);
@@ -1493,6 +1612,9 @@ static void icm_handle_notification(struct work_struct *work)
1493 case ICM_EVENT_XDOMAIN_DISCONNECTED: 1612 case ICM_EVENT_XDOMAIN_DISCONNECTED:
1494 icm->xdomain_disconnected(tb, n->pkg); 1613 icm->xdomain_disconnected(tb, n->pkg);
1495 break; 1614 break;
1615 case ICM_EVENT_RTD3_VETO:
1616 icm->rtd3_veto(tb, n->pkg);
1617 break;
1496 } 1618 }
1497 } 1619 }
1498 1620
@@ -1851,6 +1973,13 @@ static void icm_complete(struct tb *tb)
1851 if (tb->nhi->going_away) 1973 if (tb->nhi->going_away)
1852 return; 1974 return;
1853 1975
1976 /*
1977 * If RTD3 was vetoed before we entered system suspend, allow it
1978 * again now before driver ready is sent. Firmware sends a new RTD3
1979 * veto if it is still needed after we have sent it the driver
1980 * ready command.
1981 */
1982 icm_veto_end(tb);
1854 icm_unplug_children(tb->root_switch); 1983 icm_unplug_children(tb->root_switch);
1855 1984
1856 /* 1985 /*
@@ -1913,14 +2042,12 @@ static int icm_start(struct tb *tb)
1913 if (IS_ERR(tb->root_switch)) 2042 if (IS_ERR(tb->root_switch))
1914 return PTR_ERR(tb->root_switch); 2043 return PTR_ERR(tb->root_switch);
1915 2044
1916 /* 2045 tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
1917 * NVM upgrade has not been tested on Apple systems and they
1918 * don't provide images publicly either. To be on the safe side
1919 * prevent root switch NVM upgrade on Macs for now.
1920 */
1921 tb->root_switch->no_nvm_upgrade = x86_apple_machine;
1922 tb->root_switch->rpm = icm->rpm; 2046 tb->root_switch->rpm = icm->rpm;
1923 2047
2048 if (icm->set_uuid)
2049 icm->set_uuid(tb);
2050
1924 ret = tb_switch_add(tb->root_switch); 2051 ret = tb_switch_add(tb->root_switch);
1925 if (ret) { 2052 if (ret) {
1926 tb_switch_put(tb->root_switch); 2053 tb_switch_put(tb->root_switch);
@@ -2005,6 +2132,19 @@ static const struct tb_cm_ops icm_tr_ops = {
2005 .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, 2132 .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
2006}; 2133};
2007 2134
2135/* Ice Lake */
2136static const struct tb_cm_ops icm_icl_ops = {
2137 .driver_ready = icm_driver_ready,
2138 .start = icm_start,
2139 .stop = icm_stop,
2140 .complete = icm_complete,
2141 .runtime_suspend = icm_runtime_suspend,
2142 .runtime_resume = icm_runtime_resume,
2143 .handle_event = icm_handle_event,
2144 .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
2145 .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
2146};
2147
2008struct tb *icm_probe(struct tb_nhi *nhi) 2148struct tb *icm_probe(struct tb_nhi *nhi)
2009{ 2149{
2010 struct icm *icm; 2150 struct icm *icm;
@@ -2021,6 +2161,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
2021 switch (nhi->pdev->device) { 2161 switch (nhi->pdev->device) {
2022 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: 2162 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2023 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: 2163 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2164 icm->can_upgrade_nvm = true;
2024 icm->is_supported = icm_fr_is_supported; 2165 icm->is_supported = icm_fr_is_supported;
2025 icm->get_route = icm_fr_get_route; 2166 icm->get_route = icm_fr_get_route;
2026 icm->save_devices = icm_fr_save_devices; 2167 icm->save_devices = icm_fr_save_devices;
@@ -2038,6 +2179,13 @@ struct tb *icm_probe(struct tb_nhi *nhi)
2038 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: 2179 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
2039 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: 2180 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
2040 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; 2181 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2182 /*
2183 * NVM upgrade has not been tested on Apple systems and
2184 * they don't provide images publicly either. To be on
2185 * the safe side prevent root switch NVM upgrade on Macs
2186 * for now.
2187 */
2188 icm->can_upgrade_nvm = !x86_apple_machine;
2041 icm->is_supported = icm_ar_is_supported; 2189 icm->is_supported = icm_ar_is_supported;
2042 icm->cio_reset = icm_ar_cio_reset; 2190 icm->cio_reset = icm_ar_cio_reset;
2043 icm->get_mode = icm_ar_get_mode; 2191 icm->get_mode = icm_ar_get_mode;
@@ -2054,6 +2202,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
2054 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI: 2202 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
2055 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: 2203 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
2056 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; 2204 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2205 icm->can_upgrade_nvm = !x86_apple_machine;
2057 icm->is_supported = icm_ar_is_supported; 2206 icm->is_supported = icm_ar_is_supported;
2058 icm->cio_reset = icm_tr_cio_reset; 2207 icm->cio_reset = icm_tr_cio_reset;
2059 icm->get_mode = icm_ar_get_mode; 2208 icm->get_mode = icm_ar_get_mode;
@@ -2064,6 +2213,19 @@ struct tb *icm_probe(struct tb_nhi *nhi)
2064 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; 2213 icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2065 tb->cm_ops = &icm_tr_ops; 2214 tb->cm_ops = &icm_tr_ops;
2066 break; 2215 break;
2216
2217 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2218 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2219 icm->is_supported = icm_ar_is_supported;
2220 icm->driver_ready = icm_icl_driver_ready;
2221 icm->set_uuid = icm_icl_set_uuid;
2222 icm->device_connected = icm_icl_device_connected;
2223 icm->device_disconnected = icm_tr_device_disconnected;
2224 icm->xdomain_connected = icm_tr_xdomain_connected;
2225 icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2226 icm->rtd3_veto = icm_icl_rtd3_veto;
2227 tb->cm_ops = &icm_icl_ops;
2228 break;
2067 } 2229 }
2068 2230
2069 if (!icm->is_supported || !icm->is_supported(tb)) { 2231 if (!icm->is_supported || !icm->is_supported(tb)) {
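
The RTD3 veto plumbing added to icm.c amounts to a flag-guarded runtime-PM reference: however many veto notifications the firmware sends, at most one pm_runtime_get() is held, and ending the veto releases it through the autosuspend path. The invariant, reduced to a hypothetical helper:

#include <linux/pm_runtime.h>

static void example_veto_set(struct device *dev, bool *veto, bool on)
{
	if (on && !*veto) {
		*veto = true;
		pm_runtime_get(dev);		/* keep the domain powered */
	} else if (!on && *veto) {
		*veto = false;
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* may suspend again */
	}
}
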
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 27fbe62c7ddd..641b21b54460 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -16,6 +16,7 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/property.h>
19 20
20#include "nhi.h" 21#include "nhi.h"
21#include "nhi_regs.h" 22#include "nhi_regs.h"
@@ -143,9 +144,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
143 return io; 144 return io;
144} 145}
145 146
146static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset) 147static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
147{ 148{
148 iowrite16(value, ring_desc_base(ring) + offset); 149 /*
150 * The other 16 bits in the register are read-only and writes to
151 * them are ignored by the hardware, so we can save one ioread32()
152 * by filling the read-only bits with zeroes.
153 */
154 iowrite32(cons, ring_desc_base(ring) + 8);
155}
156
157static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
158{
159 /* See ring_iowrite_cons() above for explanation */
160 iowrite32(prod << 16, ring_desc_base(ring) + 8);
149} 161}
150 162
151static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) 163static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
@@ -197,7 +209,10 @@ static void ring_write_descriptors(struct tb_ring *ring)
197 descriptor->sof = frame->sof; 209 descriptor->sof = frame->sof;
198 } 210 }
199 ring->head = (ring->head + 1) % ring->size; 211 ring->head = (ring->head + 1) % ring->size;
200 ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8); 212 if (ring->is_tx)
213 ring_iowrite_prod(ring, ring->head);
214 else
215 ring_iowrite_cons(ring, ring->head);
201 } 216 }
202} 217}
203 218
@@ -662,7 +677,7 @@ void tb_ring_stop(struct tb_ring *ring)
662 677
663 ring_iowrite32options(ring, 0, 0); 678 ring_iowrite32options(ring, 0, 0);
664 ring_iowrite64desc(ring, 0, 0); 679 ring_iowrite64desc(ring, 0, 0);
665 ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8); 680 ring_iowrite32desc(ring, 0, 8);
666 ring_iowrite32desc(ring, 0, 12); 681 ring_iowrite32desc(ring, 0, 12);
667 ring->head = 0; 682 ring->head = 0;
668 ring->tail = 0; 683 ring->tail = 0;
@@ -845,12 +860,52 @@ static irqreturn_t nhi_msi(int irq, void *data)
845 return IRQ_HANDLED; 860 return IRQ_HANDLED;
846} 861}
847 862
848static int nhi_suspend_noirq(struct device *dev) 863static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
849{ 864{
850 struct pci_dev *pdev = to_pci_dev(dev); 865 struct pci_dev *pdev = to_pci_dev(dev);
851 struct tb *tb = pci_get_drvdata(pdev); 866 struct tb *tb = pci_get_drvdata(pdev);
867 struct tb_nhi *nhi = tb->nhi;
868 int ret;
869
870 ret = tb_domain_suspend_noirq(tb);
871 if (ret)
872 return ret;
873
874 if (nhi->ops && nhi->ops->suspend_noirq) {
875 ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
876 if (ret)
877 return ret;
878 }
879
880 return 0;
881}
882
883static int nhi_suspend_noirq(struct device *dev)
884{
885 return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
886}
887
888static bool nhi_wake_supported(struct pci_dev *pdev)
889{
890 u8 val;
891
892 /*
893 * If power rails are sustainable for wakeup from S4, this
894 * property is set by the BIOS.
895 */
896 if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
897 return !!val;
898
899 return true;
900}
901
902static int nhi_poweroff_noirq(struct device *dev)
903{
904 struct pci_dev *pdev = to_pci_dev(dev);
905 bool wakeup;
852 906
853 return tb_domain_suspend_noirq(tb); 907 wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
908 return __nhi_suspend_noirq(dev, wakeup);
854} 909}
855 910
856static void nhi_enable_int_throttling(struct tb_nhi *nhi) 911static void nhi_enable_int_throttling(struct tb_nhi *nhi)
@@ -873,16 +928,24 @@ static int nhi_resume_noirq(struct device *dev)
873{ 928{
874 struct pci_dev *pdev = to_pci_dev(dev); 929 struct pci_dev *pdev = to_pci_dev(dev);
875 struct tb *tb = pci_get_drvdata(pdev); 930 struct tb *tb = pci_get_drvdata(pdev);
931 struct tb_nhi *nhi = tb->nhi;
932 int ret;
876 933
877 /* 934 /*
878 * Check that the device is still there. It may be that the user 935 * Check that the device is still there. It may be that the user
879 * unplugged last device which causes the host controller to go 936 * unplugged last device which causes the host controller to go
880 * away on PCs. 937 * away on PCs.
881 */ 938 */
882 if (!pci_device_is_present(pdev)) 939 if (!pci_device_is_present(pdev)) {
883 tb->nhi->going_away = true; 940 nhi->going_away = true;
884 else 941 } else {
942 if (nhi->ops && nhi->ops->resume_noirq) {
943 ret = nhi->ops->resume_noirq(nhi);
944 if (ret)
945 return ret;
946 }
885 nhi_enable_int_throttling(tb->nhi); 947 nhi_enable_int_throttling(tb->nhi);
948 }
886 949
887 return tb_domain_resume_noirq(tb); 950 return tb_domain_resume_noirq(tb);
888} 951}
@@ -915,16 +978,35 @@ static int nhi_runtime_suspend(struct device *dev)
915{ 978{
916 struct pci_dev *pdev = to_pci_dev(dev); 979 struct pci_dev *pdev = to_pci_dev(dev);
917 struct tb *tb = pci_get_drvdata(pdev); 980 struct tb *tb = pci_get_drvdata(pdev);
981 struct tb_nhi *nhi = tb->nhi;
982 int ret;
983
984 ret = tb_domain_runtime_suspend(tb);
985 if (ret)
986 return ret;
918 987
919 return tb_domain_runtime_suspend(tb); 988 if (nhi->ops && nhi->ops->runtime_suspend) {
989 ret = nhi->ops->runtime_suspend(tb->nhi);
990 if (ret)
991 return ret;
992 }
993 return 0;
920} 994}
921 995
922static int nhi_runtime_resume(struct device *dev) 996static int nhi_runtime_resume(struct device *dev)
923{ 997{
924 struct pci_dev *pdev = to_pci_dev(dev); 998 struct pci_dev *pdev = to_pci_dev(dev);
925 struct tb *tb = pci_get_drvdata(pdev); 999 struct tb *tb = pci_get_drvdata(pdev);
1000 struct tb_nhi *nhi = tb->nhi;
1001 int ret;
926 1002
927 nhi_enable_int_throttling(tb->nhi); 1003 if (nhi->ops && nhi->ops->runtime_resume) {
1004 ret = nhi->ops->runtime_resume(nhi);
1005 if (ret)
1006 return ret;
1007 }
1008
1009 nhi_enable_int_throttling(nhi);
928 return tb_domain_runtime_resume(tb); 1010 return tb_domain_runtime_resume(tb);
929} 1011}
930 1012
@@ -952,6 +1034,9 @@ static void nhi_shutdown(struct tb_nhi *nhi)
952 flush_work(&nhi->interrupt_work); 1034 flush_work(&nhi->interrupt_work);
953 } 1035 }
954 ida_destroy(&nhi->msix_ida); 1036 ida_destroy(&nhi->msix_ida);
1037
1038 if (nhi->ops && nhi->ops->shutdown)
1039 nhi->ops->shutdown(nhi);
955} 1040}
956 1041
957static int nhi_init_msi(struct tb_nhi *nhi) 1042static int nhi_init_msi(struct tb_nhi *nhi)
@@ -996,12 +1081,27 @@ static int nhi_init_msi(struct tb_nhi *nhi)
996 return 0; 1081 return 0;
997} 1082}
998 1083
1084static bool nhi_imr_valid(struct pci_dev *pdev)
1085{
1086 u8 val;
1087
1088 if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
1089 return !!val;
1090
1091 return true;
1092}
1093
999static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1094static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1000{ 1095{
1001 struct tb_nhi *nhi; 1096 struct tb_nhi *nhi;
1002 struct tb *tb; 1097 struct tb *tb;
1003 int res; 1098 int res;
1004 1099
1100 if (!nhi_imr_valid(pdev)) {
1101 dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
1102 return -ENODEV;
1103 }
1104
1005 res = pcim_enable_device(pdev); 1105 res = pcim_enable_device(pdev);
1006 if (res) { 1106 if (res) {
1007 dev_err(&pdev->dev, "cannot enable PCI device, aborting\n"); 1107 dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
@@ -1019,6 +1119,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1019 return -ENOMEM; 1119 return -ENOMEM;
1020 1120
1021 nhi->pdev = pdev; 1121 nhi->pdev = pdev;
1122 nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
1022 /* cannot fail - table is allocated in pcim_iomap_regions */ 1123 /* cannot fail - table is allocated in pcim_iomap_regions */
1023 nhi->iobase = pcim_iomap_table(pdev)[0]; 1124 nhi->iobase = pcim_iomap_table(pdev)[0];
1024 nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; 1125 nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
@@ -1051,6 +1152,12 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1051 1152
1052 pci_set_master(pdev); 1153 pci_set_master(pdev);
1053 1154
1155 if (nhi->ops && nhi->ops->init) {
1156 res = nhi->ops->init(nhi);
1157 if (res)
1158 return res;
1159 }
1160
1054 tb = icm_probe(nhi); 1161 tb = icm_probe(nhi);
1055 if (!tb) 1162 if (!tb)
1056 tb = tb_probe(nhi); 1163 tb = tb_probe(nhi);
@@ -1111,6 +1218,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
1111 .restore_noirq = nhi_resume_noirq, 1218 .restore_noirq = nhi_resume_noirq,
1112 .suspend = nhi_suspend, 1219 .suspend = nhi_suspend,
1113 .freeze = nhi_suspend, 1220 .freeze = nhi_suspend,
1221 .poweroff_noirq = nhi_poweroff_noirq,
1114 .poweroff = nhi_suspend, 1222 .poweroff = nhi_suspend,
1115 .complete = nhi_complete, 1223 .complete = nhi_complete,
1116 .runtime_suspend = nhi_runtime_suspend, 1224 .runtime_suspend = nhi_runtime_suspend,
@@ -1158,6 +1266,10 @@ static struct pci_device_id nhi_ids[] = {
1158 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) }, 1266 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
1159 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) }, 1267 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
1160 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) }, 1268 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
1269 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
1270 .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1271 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
1272 .driver_data = (kernel_ulong_t)&icl_nhi_ops },
1161 1273
1162 { 0,} 1274 { 0,}
1163}; 1275};
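
The ring_iowrite_cons()/ring_iowrite_prod() pair earlier in this file replaces two 16-bit descriptor writes with one 32-bit write each: the register at offset 8 carries the consumer index in its low 16 bits and the producer index in its high 16, and per the comment the half that is read-only for a given ring direction ignores writes, so filling it with zeroes saves the ioread32() a read-modify-write would need. Condensed:

#include <linux/io.h>

static void example_write_cons(void __iomem *desc_base, u16 cons)
{
	iowrite32(cons, desc_base + 8);			/* prod half ignored */
}

static void example_write_prod(void __iomem *desc_base, u16 prod)
{
	iowrite32((u32)prod << 16, desc_base + 8);	/* cons half ignored */
}
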
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 1b5d47ecd3ed..b7b973949f8e 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -30,6 +30,26 @@ enum nhi_mailbox_cmd {
30int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data); 30int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
31enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi); 31enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
32 32
33/**
34 * struct tb_nhi_ops - NHI specific optional operations
35 * @init: NHI specific initialization
36 * @suspend_noirq: NHI specific suspend_noirq hook
37 * @resume_noirq: NHI specific resume_noirq hook
38 * @runtime_suspend: NHI specific runtime_suspend hook
39 * @runtime_resume: NHI specific runtime_resume hook
40 * @shutdown: NHI specific shutdown
41 */
42struct tb_nhi_ops {
43 int (*init)(struct tb_nhi *nhi);
44 int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup);
45 int (*resume_noirq)(struct tb_nhi *nhi);
46 int (*runtime_suspend)(struct tb_nhi *nhi);
47 int (*runtime_resume)(struct tb_nhi *nhi);
48 void (*shutdown)(struct tb_nhi *nhi);
49};
50
51extern const struct tb_nhi_ops icl_nhi_ops;
52
33/* 53/*
34 * PCI IDs used in this driver from Win Ridge forward. There is no 54 * PCI IDs used in this driver from Win Ridge forward. There is no
35 * need for the PCI quirk anymore as we will use ICM also on Apple 55 * need for the PCI quirk anymore as we will use ICM also on Apple
@@ -51,5 +71,7 @@ enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
51#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea 71#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea
52#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb 72#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb
53#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef 73#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
74#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
75#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
54 76
55#endif 77#endif
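
Every tb_nhi_ops hook is optional, and the struct only reaches a device through its PCI ID: nhi.c stashes the ops pointer in pci_device_id.driver_data and casts it back in probe, NULL-checking each hook before the call. Condensed from the nhi.c hunks above:

static const struct pci_device_id example_ids[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ 0, }
};

/* in probe: */
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
if (nhi->ops && nhi->ops->init) {	/* all hooks are optional */
	res = nhi->ops->init(nhi);
	if (res)
		return res;
}
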
diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c
new file mode 100644
index 000000000000..61cd09cef943
--- /dev/null
+++ b/drivers/thunderbolt/nhi_ops.c
@@ -0,0 +1,179 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NHI specific operations
4 *
5 * Copyright (C) 2019, Intel Corporation
6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
7 */
8
9#include <linux/delay.h>
10#include <linux/suspend.h>
11
12#include "nhi.h"
13#include "nhi_regs.h"
14#include "tb.h"
15
16/* Ice Lake specific NHI operations */
17
18#define ICL_LC_MAILBOX_TIMEOUT 500 /* ms */
19
20static int check_for_device(struct device *dev, void *data)
21{
22 return tb_is_switch(dev);
23}
24
25static bool icl_nhi_is_device_connected(struct tb_nhi *nhi)
26{
27 struct tb *tb = pci_get_drvdata(nhi->pdev);
28 int ret;
29
30 ret = device_for_each_child(&tb->root_switch->dev, NULL,
31 check_for_device);
32 return ret > 0;
33}
34
35static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
36{
37 u32 vs_cap;
38
39 /*
40 * The Thunderbolt host controller is always present in Ice Lake,
41 * but the firmware may not be loaded and running (depending on
42 * whether a device is connected and so on). Each time the
43 * controller is used, we need to "Force Power" it first and wait
44 * for the firmware to indicate it is up and running. This "Force
45 * Power" is really not about actually powering on/off the
46 * controller, so it is accessible even if "Force Power" is off.
47 *
48 * The actual power management happens inside shared ACPI power
49 * resources using standard ACPI methods.
50 */
51 pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap);
52 if (power) {
53 vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK;
54 vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT;
55 vs_cap |= VS_CAP_22_FORCE_POWER;
56 } else {
57 vs_cap &= ~VS_CAP_22_FORCE_POWER;
58 }
59 pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
60
61 if (power) {
62 unsigned int retries = 10;
63 u32 val;
64
65 /* Wait until the firmware tells it is up and running */
66 do {
67 pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
68 if (val & VS_CAP_9_FW_READY)
69 return 0;
70 msleep(250);
71 } while (--retries);
72
73 return -ETIMEDOUT;
74 }
75
76 return 0;
77}
78
79static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd)
80{
81 u32 data;
82
83 pci_read_config_dword(nhi->pdev, VS_CAP_19, &data);
84 data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
85 pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
86}
87
88static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
89{
90 unsigned long end;
91 u32 data;
92
93 if (!timeout)
94 goto clear;
95
96 end = jiffies + msecs_to_jiffies(timeout);
97 do {
98 pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
99 if (data & VS_CAP_18_DONE)
100 goto clear;
101 msleep(100);
102 } while (time_before(jiffies, end));
103
104 return -ETIMEDOUT;
105
106clear:
107 /* Clear the valid bit */
108 pci_write_config_dword(nhi->pdev, VS_CAP_19, 0);
109 return 0;
110}
111
112static void icl_nhi_set_ltr(struct tb_nhi *nhi)
113{
114 u32 max_ltr, ltr;
115
116 pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr);
117 max_ltr &= 0xffff;
118 /* Program the same value for both snoop and no-snoop */
119 ltr = max_ltr << 16 | max_ltr;
120 pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr);
121}
122
123static int icl_nhi_suspend(struct tb_nhi *nhi)
124{
125 int ret;
126
127 if (icl_nhi_is_device_connected(nhi))
128 return 0;
129
130 /*
131 * If there is no device connected, we need to perform both a
132 * handshake through the LC mailbox and a forced power down
133 * before entering D3.
134 */
135 icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
136 ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
137 if (ret)
138 return ret;
139
140 return icl_nhi_force_power(nhi, false);
141}
142
143static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
144{
145 enum icl_lc_mailbox_cmd cmd;
146
147 if (!pm_suspend_via_firmware())
148 return icl_nhi_suspend(nhi);
149
150 cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
151 icl_nhi_lc_mailbox_cmd(nhi, cmd);
152 return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
153}
154
155static int icl_nhi_resume(struct tb_nhi *nhi)
156{
157 int ret;
158
159 ret = icl_nhi_force_power(nhi, true);
160 if (ret)
161 return ret;
162
163 icl_nhi_set_ltr(nhi);
164 return 0;
165}
166
167static void icl_nhi_shutdown(struct tb_nhi *nhi)
168{
169 icl_nhi_force_power(nhi, false);
170}
171
172const struct tb_nhi_ops icl_nhi_ops = {
173 .init = icl_nhi_resume,
174 .suspend_noirq = icl_nhi_suspend_noirq,
175 .resume_noirq = icl_nhi_resume,
176 .runtime_suspend = icl_nhi_suspend,
177 .runtime_resume = icl_nhi_resume,
178 .shutdown = icl_nhi_shutdown,
179};
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index a60bd98c1d04..0d4970dcef84 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -124,4 +124,41 @@ struct ring_desc {
124#define REG_FW_STS_ICM_EN_INVERT BIT(1) 124#define REG_FW_STS_ICM_EN_INVERT BIT(1)
125#define REG_FW_STS_ICM_EN BIT(0) 125#define REG_FW_STS_ICM_EN BIT(0)
126 126
127/* ICL NHI VSEC registers */
128
129/* FW ready */
130#define VS_CAP_9 0xc8
131#define VS_CAP_9_FW_READY BIT(31)
132/* UUID */
133#define VS_CAP_10 0xcc
134#define VS_CAP_11 0xd0
135/* LTR */
136#define VS_CAP_15 0xe0
137#define VS_CAP_16 0xe4
138/* TBT2PCIe */
139#define VS_CAP_18 0xec
140#define VS_CAP_18_DONE BIT(0)
141/* PCIe2TBT */
142#define VS_CAP_19 0xf0
143#define VS_CAP_19_VALID BIT(0)
144#define VS_CAP_19_CMD_SHIFT 1
145#define VS_CAP_19_CMD_MASK GENMASK(7, 1)
146/* Force power */
147#define VS_CAP_22 0xfc
148#define VS_CAP_22_FORCE_POWER BIT(1)
149#define VS_CAP_22_DMA_DELAY_MASK GENMASK(31, 24)
150#define VS_CAP_22_DMA_DELAY_SHIFT 24
151
152/**
153 * enum icl_lc_mailbox_cmd - ICL specific LC mailbox commands
154 * @ICL_LC_GO2SX: Ask LC to enter Sx without wake
155 * @ICL_LC_GO2SX_NO_WAKE: Ask LC to enter Sx with wake
156 * @ICL_LC_PREPARE_FOR_RESET: Prepare LC for reset
157 */
158enum icl_lc_mailbox_cmd {
159 ICL_LC_GO2SX = 0x02,
160 ICL_LC_GO2SX_NO_WAKE = 0x03,
161 ICL_LC_PREPARE_FOR_RESET = 0x21,
162};
163
127#endif 164#endif
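
The PCIe2TBT mailbox word at VS_CAP_19 is a 7-bit command in bits [7:1] plus a valid bit at bit 0; nhi_ops.c composes it with the shift/mask pair defined above. As a sketch (the helper is hypothetical, the constants come from this header):

static u32 icl_mailbox_word(enum icl_lc_mailbox_cmd cmd)
{
	/* command into bits [7:1], then mark the word valid (bit 0) */
	return ((cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK) |
	       VS_CAP_19_VALID;
}
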
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 5668a44e0653..410bf1bceeee 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -364,12 +364,14 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
364 nvm->active = nvm_dev; 364 nvm->active = nvm_dev;
365 } 365 }
366 366
367 nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false); 367 if (!sw->no_nvm_upgrade) {
368 if (IS_ERR(nvm_dev)) { 368 nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
369 ret = PTR_ERR(nvm_dev); 369 if (IS_ERR(nvm_dev)) {
370 goto err_nvm_active; 370 ret = PTR_ERR(nvm_dev);
371 goto err_nvm_active;
372 }
373 nvm->non_active = nvm_dev;
371 } 374 }
372 nvm->non_active = nvm_dev;
373 375
374 sw->nvm = nvm; 376 sw->nvm = nvm;
375 return 0; 377 return 0;
@@ -398,7 +400,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
398 if (!nvm->authenticating) 400 if (!nvm->authenticating)
399 nvm_clear_auth_status(sw); 401 nvm_clear_auth_status(sw);
400 402
401 nvmem_unregister(nvm->non_active); 403 if (nvm->non_active)
404 nvmem_unregister(nvm->non_active);
402 if (nvm->active) 405 if (nvm->active)
403 nvmem_unregister(nvm->active); 406 nvmem_unregister(nvm->active);
404 ida_simple_remove(&nvm_ida, nvm->id); 407 ida_simple_remove(&nvm_ida, nvm->id);
@@ -611,8 +614,14 @@ static int tb_init_port(struct tb_port *port)
611 int cap; 614 int cap;
612 615
613 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8); 616 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
614 if (res) 617 if (res) {
618 if (res == -ENODEV) {
619 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
620 port->port);
621 return 0;
622 }
615 return res; 623 return res;
624 }
616 625
617 /* Port 0 is the switch itself and has no PHY. */ 626 /* Port 0 is the switch itself and has no PHY. */
618 if (port->config.type == TB_TYPE_PORT && port->port != 0) { 627 if (port->config.type == TB_TYPE_PORT && port->port != 0) {
@@ -1331,14 +1340,29 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
1331 struct device *dev = container_of(kobj, struct device, kobj); 1340 struct device *dev = container_of(kobj, struct device, kobj);
1332 struct tb_switch *sw = tb_to_switch(dev); 1341 struct tb_switch *sw = tb_to_switch(dev);
1333 1342
1334 if (attr == &dev_attr_key.attr) { 1343 if (attr == &dev_attr_device.attr) {
1344 if (!sw->device)
1345 return 0;
1346 } else if (attr == &dev_attr_device_name.attr) {
1347 if (!sw->device_name)
1348 return 0;
1349 } else if (attr == &dev_attr_vendor.attr) {
1350 if (!sw->vendor)
1351 return 0;
1352 } else if (attr == &dev_attr_vendor_name.attr) {
1353 if (!sw->vendor_name)
1354 return 0;
1355 } else if (attr == &dev_attr_key.attr) {
1335 if (tb_route(sw) && 1356 if (tb_route(sw) &&
1336 sw->tb->security_level == TB_SECURITY_SECURE && 1357 sw->tb->security_level == TB_SECURITY_SECURE &&
1337 sw->security_level == TB_SECURITY_SECURE) 1358 sw->security_level == TB_SECURITY_SECURE)
1338 return attr->mode; 1359 return attr->mode;
1339 return 0; 1360 return 0;
1340 } else if (attr == &dev_attr_nvm_authenticate.attr || 1361 } else if (attr == &dev_attr_nvm_authenticate.attr) {
1341 attr == &dev_attr_nvm_version.attr) { 1362 if (sw->dma_port && !sw->no_nvm_upgrade)
1363 return attr->mode;
1364 return 0;
1365 } else if (attr == &dev_attr_nvm_version.attr) {
1342 if (sw->dma_port) 1366 if (sw->dma_port)
1343 return attr->mode; 1367 return attr->mode;
1344 return 0; 1368 return 0;
@@ -1446,6 +1470,8 @@ static int tb_switch_get_generation(struct tb_switch *sw)
1446 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 1470 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1447 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 1471 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1448 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: 1472 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
1473 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
1474 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
1449 return 3; 1475 return 3;
1450 1476
1451 default: 1477 default:
@@ -1689,13 +1715,17 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
1689 break; 1715 break;
1690 } 1716 }
1691 1717
1692 if (sw->no_nvm_upgrade) 1718 /* Root switch DMA port requires running firmware */
1719 if (!tb_route(sw) && sw->config.enabled)
1693 return 0; 1720 return 0;
1694 1721
1695 sw->dma_port = dma_port_alloc(sw); 1722 sw->dma_port = dma_port_alloc(sw);
1696 if (!sw->dma_port) 1723 if (!sw->dma_port)
1697 return 0; 1724 return 0;
1698 1725
1726 if (sw->no_nvm_upgrade)
1727 return 0;
1728
1699 /* 1729 /*
1700 * Check status of the previous flash authentication. If there 1730 * Check status of the previous flash authentication. If there
1701 * is one we need to power cycle the switch in any case to make 1731 * is one we need to power cycle the switch in any case to make
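
switch_attr_is_visible() now hides sysfs attributes whose backing data is absent (NULL device/vendor strings, no DMA port, NVM upgrade disallowed) instead of exposing empty files. The mechanism is the standard attribute_group .is_visible callback, sketched here with a single rule; kobj_to_dev() is the stock helper equivalent to the container_of() the hunk spells out:

static umode_t example_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_device.attr && !sw->device)
		return 0;		/* hide: nothing to show */
	return attr->mode;		/* otherwise keep the declared mode */
}

static const struct attribute_group example_group = {
	.attrs		= example_attrs,	/* assumed attribute array */
	.is_visible	= example_attr_is_visible,
};
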
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index afbe1d29bb03..4b641e4ee0c5 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -104,10 +104,11 @@ enum icm_pkg_code {
104}; 104};
105 105
106enum icm_event_code { 106enum icm_event_code {
107 ICM_EVENT_DEVICE_CONNECTED = 3, 107 ICM_EVENT_DEVICE_CONNECTED = 0x3,
108 ICM_EVENT_DEVICE_DISCONNECTED = 4, 108 ICM_EVENT_DEVICE_DISCONNECTED = 0x4,
109 ICM_EVENT_XDOMAIN_CONNECTED = 6, 109 ICM_EVENT_XDOMAIN_CONNECTED = 0x6,
110 ICM_EVENT_XDOMAIN_DISCONNECTED = 7, 110 ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7,
111 ICM_EVENT_RTD3_VETO = 0xa,
111}; 112};
112 113
113struct icm_pkg_header { 114struct icm_pkg_header {
@@ -463,6 +464,13 @@ struct icm_tr_pkg_disconnect_xdomain_response {
463 uuid_t remote_uuid; 464 uuid_t remote_uuid;
464}; 465};
465 466
467/* Ice Lake messages */
468
469struct icm_icl_event_rtd3_veto {
470 struct icm_pkg_header hdr;
471 u32 veto_reason;
472};
473
466/* XDomain messages */ 474/* XDomain messages */
467 475
468struct tb_xdomain_header { 476struct tb_xdomain_header {
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 31d0234837e4..5a99234826e7 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -211,7 +211,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
211 return NULL; 211 return NULL;
212 } 212 }
213 tb_pci_init_path(path); 213 tb_pci_init_path(path);
214 tunnel->paths[TB_PCI_PATH_UP] = path; 214 tunnel->paths[TB_PCI_PATH_DOWN] = path;
215 215
216 path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0, 216 path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
217 "PCIe Up"); 217 "PCIe Up");
@@ -220,7 +220,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
220 return NULL; 220 return NULL;
221 } 221 }
222 tb_pci_init_path(path); 222 tb_pci_init_path(path);
223 tunnel->paths[TB_PCI_PATH_DOWN] = path; 223 tunnel->paths[TB_PCI_PATH_UP] = path;
224 224
225 return tunnel; 225 return tunnel;
226} 226}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 5118d46702d5..4e17a7c7bf0a 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -636,7 +636,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
636 * It should be null terminated but anything else is pretty much 636 * It should be null terminated but anything else is pretty much
637 * allowed. 637 * allowed.
638 */ 638 */
639 return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key); 639 return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
640} 640}
641static DEVICE_ATTR_RO(key); 641static DEVICE_ATTR_RO(key);
642 642
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index f32cef94aa82..ebcf1434e296 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -200,10 +200,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
200 200
201 if (!uioinfo->irq) { 201 if (!uioinfo->irq) {
202 ret = platform_get_irq(pdev, 0); 202 ret = platform_get_irq(pdev, 0);
203 if (ret < 0) { 203 if (ret < 0)
204 dev_err(&pdev->dev, "failed to get IRQ\n");
205 goto bad1; 204 goto bad1;
206 }
207 uioinfo->irq = ret; 205 uioinfo->irq = ret;
208 } 206 }
209 uiomem = &uioinfo->mem[0]; 207 uiomem = &uioinfo->mem[0];
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 10688d79d180..1303b165055b 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -102,12 +102,15 @@ static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
102static int uio_pdrv_genirq_probe(struct platform_device *pdev) 102static int uio_pdrv_genirq_probe(struct platform_device *pdev)
103{ 103{
104 struct uio_info *uioinfo = dev_get_platdata(&pdev->dev); 104 struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
105 struct device_node *node = pdev->dev.of_node;
105 struct uio_pdrv_genirq_platdata *priv; 106 struct uio_pdrv_genirq_platdata *priv;
106 struct uio_mem *uiomem; 107 struct uio_mem *uiomem;
107 int ret = -EINVAL; 108 int ret = -EINVAL;
108 int i; 109 int i;
109 110
110 if (pdev->dev.of_node) { 111 if (node) {
112 const char *name;
113
111 /* alloc uioinfo for one device */ 114 /* alloc uioinfo for one device */
112 uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), 115 uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
113 GFP_KERNEL); 116 GFP_KERNEL);
@@ -115,8 +118,13 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
115 dev_err(&pdev->dev, "unable to kmalloc\n"); 118 dev_err(&pdev->dev, "unable to kmalloc\n");
116 return -ENOMEM; 119 return -ENOMEM;
117 } 120 }
118 uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", 121
119 pdev->dev.of_node); 122 if (!of_property_read_string(node, "linux,uio-name", &name))
123 uioinfo->name = devm_kstrdup(&pdev->dev, name, GFP_KERNEL);
124 else
125 uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
126 "%pOFn", node);
127
120 uioinfo->version = "devicetree"; 128 uioinfo->version = "devicetree";
121 /* Multiple IRQs are not supported */ 129 /* Multiple IRQs are not supported */
122 } 130 }
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 7ae260577901..24b9a8e05f64 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -65,5 +65,14 @@ config HDQ_MASTER_OMAP
65 Say Y here if you want support for the 1-wire or HDQ Interface 65 Say Y here if you want support for the 1-wire or HDQ Interface
66 on an OMAP processor. 66 on an OMAP processor.
67 67
68config W1_MASTER_SGI
69 tristate "SGI ASIC driver"
70 help
71	  Say Y here if you want support for your 1-wire devices using
72	  the SGI ASIC 1-Wire interface.
73
74	  This support is also available as a module. If built as a module,
75	  it will be called sgi_w1.
76
68endmenu 77endmenu
69 78
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index 18954cae4256..dae629b7ab49 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o
11obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o 11obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
12obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o 12obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
13obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o 13obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
14obj-$(CONFIG_W1_MASTER_SGI) += sgi_w1.o
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index c3b2095ef6a9..1ca880e01476 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -92,7 +92,6 @@ static int mxc_w1_probe(struct platform_device *pdev)
92{ 92{
93 struct mxc_w1_device *mdev; 93 struct mxc_w1_device *mdev;
94 unsigned long clkrate; 94 unsigned long clkrate;
95 struct resource *res;
96 unsigned int clkdiv; 95 unsigned int clkdiv;
97 int err; 96 int err;
98 97
@@ -120,8 +119,7 @@ static int mxc_w1_probe(struct platform_device *pdev)
120 dev_warn(&pdev->dev, 119 dev_warn(&pdev->dev,
121 "Incorrect time base frequency %lu Hz\n", clkrate); 120 "Incorrect time base frequency %lu Hz\n", clkrate);
122 121
123 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 122 mdev->regs = devm_platform_ioremap_resource(pdev, 0);
124 mdev->regs = devm_ioremap_resource(&pdev->dev, res);
125 if (IS_ERR(mdev->regs)) { 123 if (IS_ERR(mdev->regs)) {
126 err = PTR_ERR(mdev->regs); 124 err = PTR_ERR(mdev->regs);
127 goto out_disable_clk; 125 goto out_disable_clk;
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3099052e1243..4164045866b3 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -660,7 +660,6 @@ static int omap_hdq_probe(struct platform_device *pdev)
660{ 660{
661 struct device *dev = &pdev->dev; 661 struct device *dev = &pdev->dev;
662 struct hdq_data *hdq_data; 662 struct hdq_data *hdq_data;
663 struct resource *res;
664 int ret, irq; 663 int ret, irq;
665 u8 rev; 664 u8 rev;
666 const char *mode; 665 const char *mode;
@@ -674,8 +673,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
674 hdq_data->dev = dev; 673 hdq_data->dev = dev;
675 platform_set_drvdata(pdev, hdq_data); 674 platform_set_drvdata(pdev, hdq_data);
676 675
677 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 676 hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
678 hdq_data->hdq_base = devm_ioremap_resource(dev, res);
679 if (IS_ERR(hdq_data->hdq_base)) 677 if (IS_ERR(hdq_data->hdq_base))
680 return PTR_ERR(hdq_data->hdq_base); 678 return PTR_ERR(hdq_data->hdq_base);
681 679
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
new file mode 100644
index 000000000000..1b2d96b945be
--- /dev/null
+++ b/drivers/w1/masters/sgi_w1.c
@@ -0,0 +1,130 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * sgi_w1.c - w1 master driver for one wire support in SGI ASICs
4 */
5
6#include <linux/clk.h>
7#include <linux/delay.h>
8#include <linux/io.h>
9#include <linux/jiffies.h>
10#include <linux/module.h>
11#include <linux/mod_devicetable.h>
12#include <linux/platform_device.h>
13#include <linux/platform_data/sgi-w1.h>
14
15#include <linux/w1.h>
16
17#define MCR_RD_DATA BIT(0)
18#define MCR_DONE BIT(1)
19
20#define MCR_PACK(pulse, sample) (((pulse) << 10) | ((sample) << 2))
21
22struct sgi_w1_device {
23 u32 __iomem *mcr;
24 struct w1_bus_master bus_master;
25 char dev_id[64];
26};
27
28static u8 sgi_w1_wait(u32 __iomem *mcr)
29{
30 u32 mcr_val;
31
32 do {
33 mcr_val = readl(mcr);
34 } while (!(mcr_val & MCR_DONE));
35
36 return (mcr_val & MCR_RD_DATA) ? 1 : 0;
37}
38
39/*
40 * this is the low level routine to
41 * reset the device on the One Wire interface
42 * on the hardware
43 */
44static u8 sgi_w1_reset_bus(void *data)
45{
46 struct sgi_w1_device *dev = data;
47 u8 ret;
48
49 writel(MCR_PACK(520, 65), dev->mcr);
50 ret = sgi_w1_wait(dev->mcr);
51 udelay(500); /* recovery time */
52 return ret;
53}
54
55/*
56 * this is the low level routine to read/write a bit on the One Wire
57 * interface on the hardware. It writes a 0 slot if the bit parameter
58 * is 0; otherwise it performs a write-1/read slot.
59 */
60static u8 sgi_w1_touch_bit(void *data, u8 bit)
61{
62 struct sgi_w1_device *dev = data;
63 u8 ret;
64
65 if (bit)
66 writel(MCR_PACK(6, 13), dev->mcr);
67 else
68 writel(MCR_PACK(80, 30), dev->mcr);
69
70 ret = sgi_w1_wait(dev->mcr);
71 if (bit)
72 udelay(100); /* recovery */
73 return ret;
74}
75
76static int sgi_w1_probe(struct platform_device *pdev)
77{
78 struct sgi_w1_device *sdev;
79 struct sgi_w1_platform_data *pdata;
80 struct resource *res;
81
82 sdev = devm_kzalloc(&pdev->dev, sizeof(struct sgi_w1_device),
83 GFP_KERNEL);
84 if (!sdev)
85 return -ENOMEM;
86
87 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
88 sdev->mcr = devm_ioremap_resource(&pdev->dev, res);
89 if (IS_ERR(sdev->mcr))
90 return PTR_ERR(sdev->mcr);
91
92 sdev->bus_master.data = sdev;
93 sdev->bus_master.reset_bus = sgi_w1_reset_bus;
94 sdev->bus_master.touch_bit = sgi_w1_touch_bit;
95
96 pdata = dev_get_platdata(&pdev->dev);
97 if (pdata) {
98 strlcpy(sdev->dev_id, pdata->dev_id, sizeof(sdev->dev_id));
99 sdev->bus_master.dev_id = sdev->dev_id;
100 }
101
102 platform_set_drvdata(pdev, sdev);
103
104 return w1_add_master_device(&sdev->bus_master);
105}
106
107/*
108 * disassociate the w1 device from the driver
109 */
110static int sgi_w1_remove(struct platform_device *pdev)
111{
112 struct sgi_w1_device *sdev = platform_get_drvdata(pdev);
113
114 w1_remove_master_device(&sdev->bus_master);
115
116 return 0;
117}
118
119static struct platform_driver sgi_w1_driver = {
120 .driver = {
121 .name = "sgi_w1",
122 },
123 .probe = sgi_w1_probe,
124 .remove = sgi_w1_remove,
125};
126module_platform_driver(sgi_w1_driver);
127
128MODULE_LICENSE("GPL");
129MODULE_AUTHOR("Thomas Bogendoerfer");
130MODULE_DESCRIPTION("Driver for One-Wire IP in SGI ASICs");
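For reference, a minimal board-file sketch that would bind this driver; the MCR
address and dev_id below are placeholder assumptions, not values from this series:

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/platform_data/sgi-w1.h>

static struct resource sgi_w1_resources[] = {
	{
		.start = 0x1f600000,	/* placeholder MCR address */
		.end   = 0x1f600003,
		.flags = IORESOURCE_MEM,
	},
};

static struct sgi_w1_platform_data sgi_w1_pdata = {
	.dev_id = "onewire-0",		/* placeholder id used for slave names */
};

static struct platform_device sgi_w1_device = {
	.name          = "sgi_w1",	/* matches the platform_driver name */
	.id            = -1,
	.resource      = sgi_w1_resources,
	.num_resources = ARRAY_SIZE(sgi_w1_resources),
	.dev = {
		.platform_data = &sgi_w1_pdata,
	},
};

Board setup code would then call platform_device_register(&sgi_w1_device).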
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 37aaad26b373..ebed495b9e69 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -101,6 +101,12 @@ config W1_SLAVE_DS2438
101 Say Y here if you want to use a 1-wire 101 Say Y here if you want to use a 1-wire
102 DS2438 Smart Battery Monitor device support 102 DS2438 Smart Battery Monitor device support
103 103
104config W1_SLAVE_DS250X
105 tristate "512b/1kb/16kb EPROM family support"
106 help
107 Say Y here if you want to use a 1-wire
108 512b/1kb/16kb EPROM family device (DS250x).
109
104config W1_SLAVE_DS2780 110config W1_SLAVE_DS2780
105 tristate "Dallas 2780 battery monitor chip" 111 tristate "Dallas 2780 battery monitor chip"
106 help 112 help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index eab29f151413..8e9655eaa478 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
14obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o 14obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o
15obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o 15obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
16obj-$(CONFIG_W1_SLAVE_DS2438) += w1_ds2438.o 16obj-$(CONFIG_W1_SLAVE_DS2438) += w1_ds2438.o
17obj-$(CONFIG_W1_SLAVE_DS250X) += w1_ds250x.o
17obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o 18obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
18obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o 19obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
19obj-$(CONFIG_W1_SLAVE_DS28E04) += w1_ds28e04.o 20obj-$(CONFIG_W1_SLAVE_DS28E04) += w1_ds28e04.o
diff --git a/drivers/w1/slaves/w1_ds250x.c b/drivers/w1/slaves/w1_ds250x.c
new file mode 100644
index 000000000000..e507117444d8
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds250x.c
@@ -0,0 +1,290 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * w1_ds250x.c - w1 family 09/0b/89/91 (DS250x) driver
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/moduleparam.h>
9#include <linux/device.h>
10#include <linux/types.h>
11#include <linux/delay.h>
12#include <linux/slab.h>
13#include <linux/crc16.h>
14
15#include <linux/w1.h>
16#include <linux/nvmem-provider.h>
17
18#define W1_DS2501_UNW_FAMILY 0x91
19#define W1_DS2501_SIZE 64
20
21#define W1_DS2502_FAMILY 0x09
22#define W1_DS2502_UNW_FAMILY 0x89
23#define W1_DS2502_SIZE 128
24
25#define W1_DS2505_FAMILY 0x0b
26#define W1_DS2505_SIZE 2048
27
28#define W1_PAGE_SIZE 32
29
30#define W1_EXT_READ_MEMORY 0xA5
31#define W1_READ_DATA_CRC 0xC3
32
33#define OFF2PG(off) ((off) / W1_PAGE_SIZE)
34
35#define CRC16_INIT 0
36#define CRC16_VALID 0xb001
37
38struct w1_eprom_data {
39 size_t size;
40 int (*read)(struct w1_slave *sl, int pageno);
41 u8 eprom[W1_DS2505_SIZE];
42 DECLARE_BITMAP(page_present, W1_DS2505_SIZE / W1_PAGE_SIZE);
43 char nvmem_name[64];
44};
45
46static int w1_ds2502_read_page(struct w1_slave *sl, int pageno)
47{
48 struct w1_eprom_data *data = sl->family_data;
49 int pgoff = pageno * W1_PAGE_SIZE;
50 int ret = -EIO;
51 u8 buf[3];
52 u8 crc8;
53
54 if (test_bit(pageno, data->page_present))
55 return 0; /* page already present */
56
57 mutex_lock(&sl->master->bus_mutex);
58
59 if (w1_reset_select_slave(sl))
60 goto err;
61
62 buf[0] = W1_READ_DATA_CRC;
63 buf[1] = pgoff & 0xff;
64 buf[2] = pgoff >> 8;
65 w1_write_block(sl->master, buf, 3);
66
67 crc8 = w1_read_8(sl->master);
68 if (w1_calc_crc8(buf, 3) != crc8)
69 goto err;
70
71 w1_read_block(sl->master, &data->eprom[pgoff], W1_PAGE_SIZE);
72
73 crc8 = w1_read_8(sl->master);
74 if (w1_calc_crc8(&data->eprom[pgoff], W1_PAGE_SIZE) != crc8)
75 goto err;
76
77 set_bit(pageno, data->page_present); /* mark page present */
78 ret = 0;
79err:
80 mutex_unlock(&sl->master->bus_mutex);
81 return ret;
82}
83
84static int w1_ds2505_read_page(struct w1_slave *sl, int pageno)
85{
86 struct w1_eprom_data *data = sl->family_data;
87 int redir_retries = 16;
88 int pgoff, epoff;
89 int ret = -EIO;
90 u8 buf[6];
91 u8 redir;
92 u16 crc;
93
94 if (test_bit(pageno, data->page_present))
95 return 0; /* page already present */
96
97 epoff = pgoff = pageno * W1_PAGE_SIZE;
98 mutex_lock(&sl->master->bus_mutex);
99
100retry:
101 if (w1_reset_select_slave(sl))
102 goto err;
103
104 buf[0] = W1_EXT_READ_MEMORY;
105 buf[1] = pgoff & 0xff;
106 buf[2] = pgoff >> 8;
107 w1_write_block(sl->master, buf, 3);
108 w1_read_block(sl->master, buf + 3, 3); /* redir, crc16 */
109 redir = buf[3];
110 crc = crc16(CRC16_INIT, buf, 6);
111
112 if (crc != CRC16_VALID)
113 goto err;
114
115
116 if (redir != 0xff) {
117 redir_retries--;
118 if (redir_retries < 0)
119 goto err;
120
121 pgoff = (redir ^ 0xff) * W1_PAGE_SIZE;
122 goto retry;
123 }
124
125 w1_read_block(sl->master, &data->eprom[epoff], W1_PAGE_SIZE);
126 w1_read_block(sl->master, buf, 2); /* crc16 */
127 crc = crc16(CRC16_INIT, &data->eprom[epoff], W1_PAGE_SIZE);
128 crc = crc16(crc, buf, 2);
129
130 if (crc != CRC16_VALID)
131 goto err;
132
133 set_bit(pageno, data->page_present);
134 ret = 0;
135err:
136 mutex_unlock(&sl->master->bus_mutex);
137 return ret;
138}
139
140static int w1_nvmem_read(void *priv, unsigned int off, void *buf, size_t count)
141{
142 struct w1_slave *sl = priv;
143 struct w1_eprom_data *data = sl->family_data;
144 size_t eprom_size = data->size;
145 int ret;
146 int i;
147
148 if (off > eprom_size)
149 return -EINVAL;
150
151 if ((off + count) > eprom_size)
152 count = eprom_size - off;
153
154 i = OFF2PG(off);
155 do {
156 ret = data->read(sl, i++);
157 if (ret < 0)
158 return ret;
159 } while (i < OFF2PG(off + count));
160
161 memcpy(buf, &data->eprom[off], count);
162 return 0;
163}
164
165static int w1_eprom_add_slave(struct w1_slave *sl)
166{
167 struct w1_eprom_data *data;
168 struct nvmem_device *nvmem;
169 struct nvmem_config nvmem_cfg = {
170 .dev = &sl->dev,
171 .reg_read = w1_nvmem_read,
172 .type = NVMEM_TYPE_OTP,
173 .read_only = true,
174 .word_size = 1,
175 .priv = sl,
176 .id = -1
177 };
178
179 data = devm_kzalloc(&sl->dev, sizeof(struct w1_eprom_data), GFP_KERNEL);
180 if (!data)
181 return -ENOMEM;
182
183 sl->family_data = data;
184 switch (sl->family->fid) {
185 case W1_DS2501_UNW_FAMILY:
186 data->size = W1_DS2501_SIZE;
187 data->read = w1_ds2502_read_page;
188 break;
189 case W1_DS2502_FAMILY:
190 case W1_DS2502_UNW_FAMILY:
191 data->size = W1_DS2502_SIZE;
192 data->read = w1_ds2502_read_page;
193 break;
194 case W1_DS2505_FAMILY:
195 data->size = W1_DS2505_SIZE;
196 data->read = w1_ds2505_read_page;
197 break;
198 }
199
200 if (sl->master->bus_master->dev_id)
201 snprintf(data->nvmem_name, sizeof(data->nvmem_name),
202 "%s-%02x-%012llx",
203 sl->master->bus_master->dev_id, sl->reg_num.family,
204 (unsigned long long)sl->reg_num.id);
205 else
206 snprintf(data->nvmem_name, sizeof(data->nvmem_name),
207 "%02x-%012llx",
208 sl->reg_num.family,
209 (unsigned long long)sl->reg_num.id);
210
211 nvmem_cfg.name = data->nvmem_name;
212 nvmem_cfg.size = data->size;
213
214 nvmem = devm_nvmem_register(&sl->dev, &nvmem_cfg);
215 return PTR_ERR_OR_ZERO(nvmem);
216}
217
218static struct w1_family_ops w1_eprom_fops = {
219 .add_slave = w1_eprom_add_slave,
220};
221
222static struct w1_family w1_family_09 = {
223 .fid = W1_DS2502_FAMILY,
224 .fops = &w1_eprom_fops,
225};
226
227static struct w1_family w1_family_0b = {
228 .fid = W1_DS2505_FAMILY,
229 .fops = &w1_eprom_fops,
230};
231
232static struct w1_family w1_family_89 = {
233 .fid = W1_DS2502_UNW_FAMILY,
234 .fops = &w1_eprom_fops,
235};
236
237static struct w1_family w1_family_91 = {
238 .fid = W1_DS2501_UNW_FAMILY,
239 .fops = &w1_eprom_fops,
240};
241
242static int __init w1_ds250x_init(void)
243{
244 int err;
245
246 err = w1_register_family(&w1_family_09);
247 if (err)
248 return err;
249
250 err = w1_register_family(&w1_family_0b);
251 if (err)
252 goto err_0b;
253
254 err = w1_register_family(&w1_family_89);
255 if (err)
256 goto err_89;
257
258 err = w1_register_family(&w1_family_91);
259 if (err)
260 goto err_91;
261
262 return 0;
263
264err_91:
265 w1_unregister_family(&w1_family_89);
266err_89:
267 w1_unregister_family(&w1_family_0b);
268err_0b:
269 w1_unregister_family(&w1_family_09);
270 return err;
271}
272
273static void __exit w1_ds250x_exit(void)
274{
275 w1_unregister_family(&w1_family_09);
276 w1_unregister_family(&w1_family_0b);
277 w1_unregister_family(&w1_family_89);
278 w1_unregister_family(&w1_family_91);
279}
280
281module_init(w1_ds250x_init);
282module_exit(w1_ds250x_exit);
283
284MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfe@suse.de>");
285MODULE_DESCRIPTION("w1 family driver for DS250x Add Only Memory");
286MODULE_LICENSE("GPL");
287MODULE_ALIAS("w1-family-" __stringify(W1_DS2502_FAMILY));
288MODULE_ALIAS("w1-family-" __stringify(W1_DS2505_FAMILY));
289MODULE_ALIAS("w1-family-" __stringify(W1_DS2501_UNW_FAMILY));
290MODULE_ALIAS("w1-family-" __stringify(W1_DS2502_UNW_FAMILY));
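Since the EPROM is exposed through nvmem, reading it from userspace is a plain
file read; a sketch, assuming a hypothetical slave id and the usual nvmem sysfs
location:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical DS2502 (family 09) slave */
	const char *path = "/sys/bus/nvmem/devices/09-00080012345a/nvmem";
	unsigned char page[32];		/* one W1_PAGE_SIZE worth */
	int fd = open(path, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, page, sizeof(page));	/* triggers a paged, CRC-checked bus read */
	if (n > 0)
		printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}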
diff --git a/include/dt-bindings/interconnect/qcom,qcs404.h b/include/dt-bindings/interconnect/qcom,qcs404.h
new file mode 100644
index 000000000000..960f6e39c5f2
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qcs404.h
@@ -0,0 +1,88 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Qualcomm interconnect IDs
4 *
5 * Copyright (c) 2019, Linaro Ltd.
6 * Author: Georgi Djakov <georgi.djakov@linaro.org>
7 */
8
9#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCS404_H
10#define __DT_BINDINGS_INTERCONNECT_QCOM_QCS404_H
11
12#define MASTER_AMPSS_M0 0
13#define MASTER_OXILI 1
14#define MASTER_MDP_PORT0 2
15#define MASTER_SNOC_BIMC_1 3
16#define MASTER_TCU_0 4
17#define SLAVE_EBI_CH0 5
18#define SLAVE_BIMC_SNOC 6
19
20#define MASTER_SPDM 0
21#define MASTER_BLSP_1 1
22#define MASTER_BLSP_2 2
23#define MASTER_XI_USB_HS1 3
24#define MASTER_CRYPT0 4
25#define MASTER_SDCC_1 5
26#define MASTER_SDCC_2 6
27#define MASTER_SNOC_PCNOC 7
28#define MASTER_QPIC 8
29#define PCNOC_INT_0 9
30#define PCNOC_INT_2 10
31#define PCNOC_INT_3 11
32#define PCNOC_S_0 12
33#define PCNOC_S_1 13
34#define PCNOC_S_2 14
35#define PCNOC_S_3 15
36#define PCNOC_S_4 16
37#define PCNOC_S_6 17
38#define PCNOC_S_7 18
39#define PCNOC_S_8 19
40#define PCNOC_S_9 20
41#define PCNOC_S_10 21
42#define PCNOC_S_11 22
43#define SLAVE_SPDM 23
44#define SLAVE_PDM 24
45#define SLAVE_PRNG 25
46#define SLAVE_TCSR 26
47#define SLAVE_SNOC_CFG 27
48#define SLAVE_MESSAGE_RAM 28
49#define SLAVE_DISP_SS_CFG 29
50#define SLAVE_GPU_CFG 30
51#define SLAVE_BLSP_1 31
52#define SLAVE_BLSP_2 32
53#define SLAVE_TLMM_NORTH 33
54#define SLAVE_PCIE 34
55#define SLAVE_ETHERNET 35
56#define SLAVE_TLMM_EAST 36
57#define SLAVE_TCU 37
58#define SLAVE_PMIC_ARB 38
59#define SLAVE_SDCC_1 39
60#define SLAVE_SDCC_2 40
61#define SLAVE_TLMM_SOUTH 41
62#define SLAVE_USB_HS 42
63#define SLAVE_USB3 43
64#define SLAVE_CRYPTO_0_CFG 44
65#define SLAVE_PCNOC_SNOC 45
66
67#define MASTER_QDSS_BAM 0
68#define MASTER_BIMC_SNOC 1
69#define MASTER_PCNOC_SNOC 2
70#define MASTER_QDSS_ETR 3
71#define MASTER_EMAC 4
72#define MASTER_PCIE 5
73#define MASTER_USB3 6
74#define QDSS_INT 7
75#define SNOC_INT_0 8
76#define SNOC_INT_1 9
77#define SNOC_INT_2 10
78#define SLAVE_KPSS_AHB 11
79#define SLAVE_WCSS 12
80#define SLAVE_SNOC_BIMC_1 13
81#define SLAVE_IMEM 14
82#define SLAVE_SNOC_PCNOC 15
83#define SLAVE_QDSS_STM 16
84#define SLAVE_CATS_0 17
85#define SLAVE_CATS_1 18
86#define SLAVE_LPASS 19
87
88#endif
diff --git a/include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h b/include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h
new file mode 100644
index 000000000000..95a7896356d6
--- /dev/null
+++ b/include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h
@@ -0,0 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
4 */
5
6#define LANTIQ_PCIE_PHY_MODE_25MHZ 0
7#define LANTIQ_PCIE_PHY_MODE_25MHZ_SSC 1
8#define LANTIQ_PCIE_PHY_MODE_36MHZ 2
9#define LANTIQ_PCIE_PHY_MODE_36MHZ_SSC 3
10#define LANTIQ_PCIE_PHY_MODE_100MHZ 4
11#define LANTIQ_PCIE_PHY_MODE_100MHZ_SSC 5
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index 01684d935580..013ae4819deb 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -210,7 +210,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
210#define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \ 210#define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \
211 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK) 211 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK)
212 212
213/* 213/**
214 * Request INTEL_SIP_SMC_REG_READ 214 * Request INTEL_SIP_SMC_REG_READ
215 * 215 *
216 * Read a protected register at EL3 216 * Read a protected register at EL3
@@ -229,7 +229,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
229#define INTEL_SIP_SMC_REG_READ \ 229#define INTEL_SIP_SMC_REG_READ \
230 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ) 230 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ)
231 231
232/* 232/**
233 * Request INTEL_SIP_SMC_REG_WRITE 233 * Request INTEL_SIP_SMC_REG_WRITE
234 * 234 *
235 * Write a protected register at EL3 235 * Write a protected register at EL3
@@ -248,7 +248,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
248#define INTEL_SIP_SMC_REG_WRITE \ 248#define INTEL_SIP_SMC_REG_WRITE \
249 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE) 249 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
250 250
251/* 251/**
252 * Request INTEL_SIP_SMC_FUNCID_REG_UPDATE 252 * Request INTEL_SIP_SMC_FUNCID_REG_UPDATE
253 * 253 *
254 * Update one or more bits in a protected register at EL3 using a 254 * Update one or more bits in a protected register at EL3 using a
@@ -269,7 +269,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
269#define INTEL_SIP_SMC_REG_UPDATE \ 269#define INTEL_SIP_SMC_REG_UPDATE \
270 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_UPDATE) 270 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_UPDATE)
271 271
272/* 272/**
273 * Request INTEL_SIP_SMC_RSU_STATUS 273 * Request INTEL_SIP_SMC_RSU_STATUS
274 * 274 *
275 * Request remote status update boot log, call is synchronous. 275 * Request remote status update boot log, call is synchronous.
@@ -292,7 +292,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
292#define INTEL_SIP_SMC_RSU_STATUS \ 292#define INTEL_SIP_SMC_RSU_STATUS \
293 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_STATUS) 293 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_STATUS)
294 294
295/* 295/**
296 * Request INTEL_SIP_SMC_RSU_UPDATE 296 * Request INTEL_SIP_SMC_RSU_UPDATE
297 * 297 *
298 * Request to set the offset of the bitstream to boot after reboot, call 298 * Request to set the offset of the bitstream to boot after reboot, call
@@ -310,7 +310,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
310#define INTEL_SIP_SMC_RSU_UPDATE \ 310#define INTEL_SIP_SMC_RSU_UPDATE \
311 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE) 311 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE)
312 312
313/* 313/**
314 * Request INTEL_SIP_SMC_ECC_DBE 314 * Request INTEL_SIP_SMC_ECC_DBE
315 * 315 *
316 * Sync call used by service driver at EL1 to alert EL3 that a Double 316 * Sync call used by service driver at EL1 to alert EL3 that a Double
@@ -329,3 +329,42 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
329 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE) 329 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)
330 330
331#endif 331#endif
332
333/**
334 * Request INTEL_SIP_SMC_RSU_NOTIFY
335 *
336 * Sync call used by service driver at EL1 to report hard processor
337 * system execution stage to firmware
338 *
339 * Call register usage:
340 * a0 INTEL_SIP_SMC_RSU_NOTIFY
341 * a1 32bit value representing hard processor system execution stage
342 * a2-7 not used
343 *
344 * Return status
345 * a0 INTEL_SIP_SMC_STATUS_OK
346 */
347#define INTEL_SIP_SMC_FUNCID_RSU_NOTIFY 14
348#define INTEL_SIP_SMC_RSU_NOTIFY \
349 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_NOTIFY)
350
351/**
352 * Request INTEL_SIP_SMC_RSU_RETRY_COUNTER
353 *
354 * Sync call used by service driver at EL1 to query RSU retry counter
355 *
356 * Call register usage:
357 * a0 INTEL_SIP_SMC_RSU_RETRY_COUNTER
358 * a1-7 not used
359 *
360 * Return status
361 * a0 INTEL_SIP_SMC_STATUS_OK
362 * a1 the retry counter
363 *
364 * Or
365 *
366 * a0 INTEL_SIP_SMC_RSU_ERROR
367 */
368#define INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER 15
369#define INTEL_SIP_SMC_RSU_RETRY_COUNTER \
370 INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER)
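To illustrate the calling convention only (in-tree consumers go through the
Stratix10 service layer rather than issuing raw SMCs), a sketch of the
retry-counter query using the generic SMCCC helper:

#include <linux/arm-smccc.h>

static unsigned long rsu_retry_counter_query(void)
{
	struct arm_smccc_res res;

	/* a0 = function id, a1-a7 unused per the description above */
	arm_smccc_smc(INTEL_SIP_SMC_RSU_RETRY_COUNTER,
		      0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
		return 0;	/* INTEL_SIP_SMC_RSU_ERROR case */
	return res.a1;		/* the retry counter */
}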
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index e521f172a47a..b6c4302a39e0 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -95,6 +95,13 @@ struct stratix10_svc_chan;
95 * 95 *
96 * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot, 96 * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot,
97 * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR 97 * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
98 *
99 * @COMMAND_RSU_NOTIFY: report the status of hard processor system
100 * software to firmware, return status is SVC_STATUS_RSU_OK or
101 * SVC_STATUS_RSU_ERROR
102 *
103 * @COMMAND_RSU_RETRY: query firmware for the current image's retry counter,
104 * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
98 */ 105 */
99enum stratix10_svc_command_code { 106enum stratix10_svc_command_code {
100 COMMAND_NOOP = 0, 107 COMMAND_NOOP = 0,
@@ -103,7 +110,9 @@ enum stratix10_svc_command_code {
103 COMMAND_RECONFIG_DATA_CLAIM, 110 COMMAND_RECONFIG_DATA_CLAIM,
104 COMMAND_RECONFIG_STATUS, 111 COMMAND_RECONFIG_STATUS,
105 COMMAND_RSU_STATUS, 112 COMMAND_RSU_STATUS,
106 COMMAND_RSU_UPDATE 113 COMMAND_RSU_UPDATE,
114 COMMAND_RSU_NOTIFY,
115 COMMAND_RSU_RETRY,
107}; 116};
108 117
109/** 118/**
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
index 7d4664730d60..0b08ac20ab16 100644
--- a/include/linux/fpga/altera-pr-ip-core.h
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -13,6 +13,6 @@
13#include <linux/io.h> 13#include <linux/io.h>
14 14
15int alt_pr_register(struct device *dev, void __iomem *reg_base); 15int alt_pr_register(struct device *dev, void __iomem *reg_base);
16int alt_pr_unregister(struct device *dev); 16void alt_pr_unregister(struct device *dev);
17 17
18#endif /* _ALT_PR_IP_CORE_H */ 18#endif /* _ALT_PR_IP_CORE_H */
diff --git a/include/linux/intel_th.h b/include/linux/intel_th.h
new file mode 100644
index 000000000000..9b7f4c22499c
--- /dev/null
+++ b/include/linux/intel_th.h
@@ -0,0 +1,79 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Intel(R) Trace Hub data structures for implementing buffer sinks.
4 *
5 * Copyright (C) 2019 Intel Corporation.
6 */
7
8#ifndef _INTEL_TH_H_
9#define _INTEL_TH_H_
10
11#include <linux/scatterlist.h>
12
13/* MSC operating modes (MSC_MODE) */
14enum {
15 MSC_MODE_SINGLE = 0,
16 MSC_MODE_MULTI,
17 MSC_MODE_EXI,
18 MSC_MODE_DEBUG,
19};
20
21struct msu_buffer {
22 const char *name;
23 /*
24 * ->assign() called when buffer 'mode' is set to this driver
25 * (aka mode_store())
26 * @dev: struct device * of the msc
27 * @mode: allows the driver to set HW mode (see the enum above)
28 * Returns: a pointer to a private structure associated with this
29 * msc or NULL in case of error. This private structure
30 * will then be passed into all other callbacks.
31 */
32 void *(*assign)(struct device *dev, int *mode);
33 /* ->unassign(): some other mode is selected, clean up */
34 void (*unassign)(void *priv);
35 /*
36 * ->alloc_window(): allocate memory for the window of a given
37 * size
38 * @sgt: pointer to sg_table, can be overridden by the buffer
39 * driver, or kept intact
40 * Returns: number of sg table entries <= number of pages;
41 * 0 is treated as an allocation failure.
42 */
43 int (*alloc_window)(void *priv, struct sg_table **sgt,
44 size_t size);
45 void (*free_window)(void *priv, struct sg_table *sgt);
46 /* ->activate(): trace has started */
47 void (*activate)(void *priv);
48 /* ->deactivate(): trace is about to stop */
49 void (*deactivate)(void *priv);
50 /*
51 * ->ready(): window @sgt is filled up to the last block OR
52 * tracing is stopped by the user; this window contains
53 * @bytes data. The window in question transitions into
54 * the "LOCKED" state, indicating that it can't be used
55 * by hardware. To clear this state and make the window
56 * available to the hardware again, call
57 * intel_th_msc_window_unlock().
58 */
59 int (*ready)(void *priv, struct sg_table *sgt, size_t bytes);
60};
61
62int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
63 struct module *owner);
64void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf);
65void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt);
66
67#define module_intel_th_msu_buffer(__buffer) \
68static int __init __buffer##_init(void) \
69{ \
70 return intel_th_msu_buffer_register(&(__buffer), THIS_MODULE); \
71} \
72module_init(__buffer##_init); \
73static void __exit __buffer##_exit(void) \
74{ \
75 intel_th_msu_buffer_unregister(&(__buffer)); \
76} \
77module_exit(__buffer##_exit);
78
79#endif /* _INTEL_TH_H_ */
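A skeletal sink built on this interface might look like the following sketch;
it assumes a buffer driver may leave the window callbacks unset and rely on the
MSC core's default window allocation:

#include <linux/intel_th.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_buf {
	int mode;
};

static void *demo_assign(struct device *dev, int *mode)
{
	struct demo_buf *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return NULL;
	priv->mode = *mode;	/* accept the mode proposed by the core */
	return priv;
}

static void demo_unassign(void *priv)
{
	kfree(priv);
}

static int demo_ready(void *priv, struct sg_table *sgt, size_t bytes)
{
	/* consume @bytes from @sgt here, then call
	 * intel_th_msc_window_unlock() to hand the window back */
	return 0;
}

static const struct msu_buffer demo_mbuf = {
	.name     = "demo",
	.assign   = demo_assign,
	.unassign = demo_unassign,
	.ready    = demo_ready,
};
module_intel_th_msu_buffer(demo_mbuf);

MODULE_LICENSE("GPL");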
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 63caccadc2db..b16f9effa555 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -36,6 +36,8 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
36 * @nodes: internal list of the interconnect provider nodes 36 * @nodes: internal list of the interconnect provider nodes
37 * @set: pointer to device specific set operation function 37 * @set: pointer to device specific set operation function
38 * @aggregate: pointer to device specific aggregate operation function 38 * @aggregate: pointer to device specific aggregate operation function
39 * @pre_aggregate: pointer to device specific function that is called
40 * before the aggregation begins (optional)
39 * @xlate: provider-specific callback for mapping nodes from phandle arguments 41 * @xlate: provider-specific callback for mapping nodes from phandle arguments
40 * @dev: the device this interconnect provider belongs to 42 * @dev: the device this interconnect provider belongs to
41 * @users: count of active users 43 * @users: count of active users
@@ -45,8 +47,9 @@ struct icc_provider {
45 struct list_head provider_list; 47 struct list_head provider_list;
46 struct list_head nodes; 48 struct list_head nodes;
47 int (*set)(struct icc_node *src, struct icc_node *dst); 49 int (*set)(struct icc_node *src, struct icc_node *dst);
48 int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw, 50 int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
49 u32 *agg_avg, u32 *agg_peak); 51 u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
52 void (*pre_aggregate)(struct icc_node *node);
50 struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); 53 struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
51 struct device *dev; 54 struct device *dev;
52 int users; 55 int users;
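Under the new signature a provider's aggregate callback receives the path tag
as well; a hedged sketch (the sum/max policy is illustrative, and a tag-aware
provider would bucket votes by @tag instead of ignoring it):

#include <linux/interconnect-provider.h>
#include <linux/kernel.h>

static int demo_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
			  u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	/* this sketch ignores @tag and aggregates all votes together */
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);
	return 0;
}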
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index dc25864755ba..d70a914cba11 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -30,6 +30,7 @@ struct icc_path *icc_get(struct device *dev, const int src_id,
30struct icc_path *of_icc_get(struct device *dev, const char *name); 30struct icc_path *of_icc_get(struct device *dev, const char *name);
31void icc_put(struct icc_path *path); 31void icc_put(struct icc_path *path);
32int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); 32int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
33void icc_set_tag(struct icc_path *path, u32 tag);
33 34
34#else 35#else
35 36
@@ -54,6 +55,10 @@ static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
54 return 0; 55 return 0;
55} 56}
56 57
58static inline void icc_set_tag(struct icc_path *path, u32 tag)
59{
60}
61
57#endif /* CONFIG_INTERCONNECT */ 62#endif /* CONFIG_INTERCONNECT */
58 63
59#endif /* __LINUX_INTERCONNECT_H */ 64#endif /* __LINUX_INTERCONNECT_H */
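On the consumer side the new call slots in between path lookup and the
bandwidth request; a fragment sketch for a driver with struct device *dev in
scope, with placeholder path name, tag and bandwidth values:

struct icc_path *path;

path = of_icc_get(dev, "memory");	/* placeholder path name */
if (!IS_ERR_OR_NULL(path)) {
	icc_set_tag(path, 0x1);		/* placeholder tag value */
	icc_set_bw(path, 1000, 2000);	/* avg/peak, units per the framework */
	icc_put(path);
}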
diff --git a/include/linux/platform_data/sgi-w1.h b/include/linux/platform_data/sgi-w1.h
new file mode 100644
index 000000000000..e28c8a90ff84
--- /dev/null
+++ b/include/linux/platform_data/sgi-w1.h
@@ -0,0 +1,13 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * SGI One-Wire (W1) IP
4 */
5
6#ifndef PLATFORM_DATA_SGI_W1_H
7#define PLATFORM_DATA_SGI_W1_H
8
9struct sgi_w1_platform_data {
10 char dev_id[64];
11};
12
13#endif /* PLATFORM_DATA_SGI_W1_H */
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 2d7e012db03f..ece782ef5466 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -429,6 +429,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
429 * @lock: Must be held during ring creation/destruction. Is acquired by 429 * @lock: Must be held during ring creation/destruction. Is acquired by
430 * interrupt_work when dispatching interrupts to individual rings. 430 * interrupt_work when dispatching interrupts to individual rings.
431 * @pdev: Pointer to the PCI device 431 * @pdev: Pointer to the PCI device
432 * @ops: NHI specific optional ops
432 * @iobase: MMIO space of the NHI 433 * @iobase: MMIO space of the NHI
433 * @tx_rings: All Tx rings available on this host controller 434 * @tx_rings: All Tx rings available on this host controller
434 * @rx_rings: All Rx rings available on this host controller 435 * @rx_rings: All Rx rings available on this host controller
@@ -442,6 +443,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
442struct tb_nhi { 443struct tb_nhi {
443 spinlock_t lock; 444 spinlock_t lock;
444 struct pci_dev *pdev; 445 struct pci_dev *pdev;
446 const struct tb_nhi_ops *ops;
445 void __iomem *iobase; 447 void __iomem *iobase;
446 struct tb_ring **tx_rings; 448 struct tb_ring **tx_rings;
447 struct tb_ring **rx_rings; 449 struct tb_ring **rx_rings;
diff --git a/include/linux/w1.h b/include/linux/w1.h
index e0b5156f78fd..7da0c7588e04 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -118,6 +118,9 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
118 * w1_master* is passed to the slave found callback. 118 * w1_master* is passed to the slave found callback.
119 * u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH 119 * u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH
120 * 120 *
121 * @dev_id: Optional device id string, which w1 slaves can use to
122 * create names that tie them back to their w1 master
123 *
121 * Note: read_bit and write_bit are very low level functions and should only 124 * Note: read_bit and write_bit are very low level functions and should only
122 * be used with hardware that doesn't really support 1-wire operations, 125 * be used with hardware that doesn't really support 1-wire operations,
123 * like a parallel/serial port. 126 * like a parallel/serial port.
@@ -150,6 +153,8 @@ struct w1_bus_master {
150 153
151 void (*search)(void *, struct w1_master *, 154 void (*search)(void *, struct w1_master *,
152 u8, w1_slave_found_callback); 155 u8, w1_slave_found_callback);
156
157 char *dev_id;
153}; 158};
154 159
155/** 160/**
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
index 262876a59e86..7a2a055ba6b0 100644
--- a/include/soc/qcom/tcs.h
+++ b/include/soc/qcom/tcs.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
4 */ 4 */
5 5
6#ifndef __SOC_QCOM_TCS_H__ 6#ifndef __SOC_QCOM_TCS_H__
@@ -53,4 +53,22 @@ struct tcs_request {
53 struct tcs_cmd *cmds; 53 struct tcs_cmd *cmds;
54}; 54};
55 55
56#define BCM_TCS_CMD_COMMIT_SHFT 30
57#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
58#define BCM_TCS_CMD_VALID_SHFT 29
59#define BCM_TCS_CMD_VALID_MASK 0x20000000
60#define BCM_TCS_CMD_VOTE_X_SHFT 14
61#define BCM_TCS_CMD_VOTE_MASK 0x3fff
62#define BCM_TCS_CMD_VOTE_Y_SHFT 0
63#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
64
65/* Construct a Bus Clock Manager (BCM) specific TCS command */
66#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
67 (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
68 ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
69 ((cpu_to_le32(vote_x) & \
70 BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
71 ((cpu_to_le32(vote_y) & \
72 BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
73
56#endif /* __SOC_QCOM_TCS_H__ */ 74#endif /* __SOC_QCOM_TCS_H__ */
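Filling a TCS command with the new helper might look like this in-kernel
fragment; the BCM register offset and vote values are placeholders:

struct tcs_cmd cmd = {
	.addr = 0x30,					/* placeholder BCM register offset */
	.data = BCM_TCS_CMD(true, true, 0x100, 0x80),	/* commit+valid, x/y votes */
	.wait = true,
};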
diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h
index 2e324e515c41..ec70a0746e59 100644
--- a/include/uapi/linux/fpga-dfl.h
+++ b/include/uapi/linux/fpga-dfl.h
@@ -176,4 +176,22 @@ struct dfl_fpga_fme_port_pr {
176 176
177#define DFL_FPGA_FME_PORT_PR _IO(DFL_FPGA_MAGIC, DFL_FME_BASE + 0) 177#define DFL_FPGA_FME_PORT_PR _IO(DFL_FPGA_MAGIC, DFL_FME_BASE + 0)
178 178
179/**
180 * DFL_FPGA_FME_PORT_RELEASE - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 1,
181 * int port_id)
182 *
183 * Driver releases the port per Port ID provided by caller.
184 * Return: 0 on success, -errno on failure.
185 */
186#define DFL_FPGA_FME_PORT_RELEASE _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 1, int)
187
188/**
189 * DFL_FPGA_FME_PORT_ASSIGN - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2,
190 * int port_id)
191 *
192 * Driver assigns the port back per Port ID provided by caller.
193 * Return: 0 on success, -errno on failure.
194 */
195#define DFL_FPGA_FME_PORT_ASSIGN _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2, int)
196
179#endif /* _UAPI_LINUX_FPGA_DFL_H */ 197#endif /* _UAPI_LINUX_FPGA_DFL_H */
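A userspace sketch of the new pair, assuming the port id is passed by pointer
as the _IOW encoding suggests and using a placeholder FME device node:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fpga-dfl.h>

int release_and_reassign_port(void)
{
	int fd = open("/dev/dfl-fme.0", O_RDWR);	/* placeholder node */
	int port_id = 0;
	int ret = -1;

	if (fd < 0)
		return -1;
	/* detach the port from its driver... */
	if (!ioctl(fd, DFL_FPGA_FME_PORT_RELEASE, &port_id))
		/* ...and hand it back */
		ret = ioctl(fd, DFL_FPGA_FME_PORT_ASSIGN, &port_id);
	close(fd);
	return ret;
}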
diff --git a/include/uapi/linux/ppdev.h b/include/uapi/linux/ppdev.h
index 8fe3c64d149e..eb895b83f2bd 100644
--- a/include/uapi/linux/ppdev.h
+++ b/include/uapi/linux/ppdev.h
@@ -15,6 +15,9 @@
15 * Added PPGETMODES/PPGETMODE/PPGETPHASE, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001 15 * Added PPGETMODES/PPGETMODE/PPGETPHASE, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001
16 */ 16 */
17 17
18#ifndef _UAPI_LINUX_PPDEV_H
19#define _UAPI_LINUX_PPDEV_H
20
18#define PP_IOCTL 'p' 21#define PP_IOCTL 'p'
19 22
20/* Set mode for read/write (e.g. IEEE1284_MODE_EPP) */ 23/* Set mode for read/write (e.g. IEEE1284_MODE_EPP) */
@@ -97,4 +100,4 @@ struct ppdev_frob_struct {
97/* only masks user-visible flags */ 100/* only masks user-visible flags */
98#define PP_FLAGMASK (PP_FASTWRITE | PP_FASTREAD | PP_W91284PIC) 101#define PP_FLAGMASK (PP_FASTWRITE | PP_FASTREAD | PP_W91284PIC)
99 102
100 103#endif /* _UAPI_LINUX_PPDEV_H */
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 3956c226ca35..39c4ea51a719 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note 1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 * 2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd. 3 * Copyright 2016-2019 HabanaLabs, Ltd.
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 */ 6 */
@@ -28,20 +28,20 @@
28 28
29enum goya_queue_id { 29enum goya_queue_id {
30 GOYA_QUEUE_ID_DMA_0 = 0, 30 GOYA_QUEUE_ID_DMA_0 = 0,
31 GOYA_QUEUE_ID_DMA_1, 31 GOYA_QUEUE_ID_DMA_1 = 1,
32 GOYA_QUEUE_ID_DMA_2, 32 GOYA_QUEUE_ID_DMA_2 = 2,
33 GOYA_QUEUE_ID_DMA_3, 33 GOYA_QUEUE_ID_DMA_3 = 3,
34 GOYA_QUEUE_ID_DMA_4, 34 GOYA_QUEUE_ID_DMA_4 = 4,
35 GOYA_QUEUE_ID_CPU_PQ, 35 GOYA_QUEUE_ID_CPU_PQ = 5,
36 GOYA_QUEUE_ID_MME, /* Internal queues start here */ 36 GOYA_QUEUE_ID_MME = 6, /* Internal queues start here */
37 GOYA_QUEUE_ID_TPC0, 37 GOYA_QUEUE_ID_TPC0 = 7,
38 GOYA_QUEUE_ID_TPC1, 38 GOYA_QUEUE_ID_TPC1 = 8,
39 GOYA_QUEUE_ID_TPC2, 39 GOYA_QUEUE_ID_TPC2 = 9,
40 GOYA_QUEUE_ID_TPC3, 40 GOYA_QUEUE_ID_TPC3 = 10,
41 GOYA_QUEUE_ID_TPC4, 41 GOYA_QUEUE_ID_TPC4 = 11,
42 GOYA_QUEUE_ID_TPC5, 42 GOYA_QUEUE_ID_TPC5 = 12,
43 GOYA_QUEUE_ID_TPC6, 43 GOYA_QUEUE_ID_TPC6 = 13,
44 GOYA_QUEUE_ID_TPC7, 44 GOYA_QUEUE_ID_TPC7 = 14,
45 GOYA_QUEUE_ID_SIZE 45 GOYA_QUEUE_ID_SIZE
46}; 46};
47 47
@@ -75,12 +75,34 @@ enum hl_device_status {
75 HL_DEVICE_STATUS_MALFUNCTION 75 HL_DEVICE_STATUS_MALFUNCTION
76}; 76};
77 77
78/* Opcode for management ioctl */ 78/* Opcode for management ioctl
79#define HL_INFO_HW_IP_INFO 0 79 *
80#define HL_INFO_HW_EVENTS 1 80 * HW_IP_INFO - Receive information about different IP blocks in the
81#define HL_INFO_DRAM_USAGE 2 81 * device.
82#define HL_INFO_HW_IDLE 3 82 * HL_INFO_HW_EVENTS - Receive an array describing how many times each event
83#define HL_INFO_DEVICE_STATUS 4 83 * occurred since the last hard reset.
84 * HL_INFO_DRAM_USAGE - Retrieve the dram usage inside the device and of the
85 * specific context. This is relevant only for devices
86 * where the dram is managed by the kernel driver.
87 * HL_INFO_HW_IDLE - Retrieve information about the idle status of each
88 * internal engine.
89 * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
90 * require an open context.
91 * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
92 * over the last period specified by the user.
93 * The period can be between 100ms to 1s, in
94 * resolution of 100ms. The return value is a
95 * percentage of the utilization rate.
96 * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each
97 * event occurred since the driver was loaded.
98 */
99#define HL_INFO_HW_IP_INFO 0
100#define HL_INFO_HW_EVENTS 1
101#define HL_INFO_DRAM_USAGE 2
102#define HL_INFO_HW_IDLE 3
103#define HL_INFO_DEVICE_STATUS 4
104#define HL_INFO_DEVICE_UTILIZATION 6
105#define HL_INFO_HW_EVENTS_AGGREGATE 7
84 106
85#define HL_INFO_VERSION_MAX_LEN 128 107#define HL_INFO_VERSION_MAX_LEN 128
86 108
@@ -122,6 +144,11 @@ struct hl_info_device_status {
122 __u32 pad; 144 __u32 pad;
123}; 145};
124 146
147struct hl_info_device_utilization {
148 __u32 utilization;
149 __u32 pad;
150};
151
125struct hl_info_args { 152struct hl_info_args {
126 /* Location of relevant struct in userspace */ 153 /* Location of relevant struct in userspace */
127 __u64 return_pointer; 154 __u64 return_pointer;
@@ -137,8 +164,15 @@ struct hl_info_args {
137 /* HL_INFO_* */ 164 /* HL_INFO_* */
138 __u32 op; 165 __u32 op;
139 166
140 /* Context ID - Currently not in use */ 167 union {
141 __u32 ctx_id; 168 /* Context ID - Currently not in use */
169 __u32 ctx_id;
170 /* Period value for utilization rate (100ms - 1000ms, in 100ms
171 * resolution).
172 */
173 __u32 period_ms;
174 };
175
142 __u32 pad; 176 __u32 pad;
143}; 177};
144 178
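In userspace terms the new opcode is exercised as in this sketch; it assumes
the HL_IOCTL_INFO ioctl number and the return_size field defined elsewhere in
this header:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static uint32_t query_utilization(int fd)
{
	struct hl_info_device_utilization util;
	struct hl_info_args args;

	memset(&util, 0, sizeof(util));
	memset(&args, 0, sizeof(args));
	args.return_pointer = (uint64_t)(uintptr_t)&util;
	args.return_size = sizeof(util);
	args.op = HL_INFO_DEVICE_UTILIZATION;
	args.period_ms = 500;	/* within the 100ms-1000ms window above */

	if (ioctl(fd, HL_IOCTL_INFO, &args))
		return 0;
	return util.utilization;	/* percentage */
}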
@@ -295,12 +329,12 @@ struct hl_mem_in {
295 struct { 329 struct {
296 /* 330 /*
297 * Requested virtual address of mapped memory. 331 * Requested virtual address of mapped memory.
298 * KMD will try to map the requested region to this 332 * The driver will try to map the requested region to
299 * hint address, as long as the address is valid and 333 * this hint address, as long as the address is valid
300 * not already mapped. The user should check the 334 * and not already mapped. The user should check the
301 * returned address of the IOCTL to make sure he got 335 * returned address of the IOCTL to make sure he got
302 * the hint address. Passing 0 here means that KMD 336 * the hint address. Passing 0 here means that the
303 * will choose the address itself. 337 * driver will choose the address itself.
304 */ 338 */
305 __u64 hint_addr; 339 __u64 hint_addr;
306 /* Handle returned from HL_MEM_OP_ALLOC */ 340 /* Handle returned from HL_MEM_OP_ALLOC */
@@ -313,12 +347,12 @@ struct hl_mem_in {
313 __u64 host_virt_addr; 347 __u64 host_virt_addr;
314 /* 348 /*
315 * Requested virtual address of mapped memory. 349 * Requested virtual address of mapped memory.
316 * KMD will try to map the requested region to this 350 * The driver will try to map the requested region to
317 * hint address, as long as the address is valid and 351 * this hint address, as long as the address is valid
318 * not already mapped. The user should check the 352 * and not already mapped. The user should check the
319 * returned address of the IOCTL to make sure he got 353 * returned address of the IOCTL to make sure he got
320 * the hint address. Passing 0 here means that KMD 354 * the hint address. Passing 0 here means that the
321 * will choose the address itself. 355 * driver will choose the address itself.
322 */ 356 */
323 __u64 hint_addr; 357 __u64 hint_addr;
324 /* Size of allocated host memory */ 358 /* Size of allocated host memory */
@@ -439,7 +473,7 @@ struct hl_debug_params_spmu {
439#define HL_DEBUG_OP_BMON 4 473#define HL_DEBUG_OP_BMON 4
440/* Opcode for SPMU component */ 474/* Opcode for SPMU component */
441#define HL_DEBUG_OP_SPMU 5 475#define HL_DEBUG_OP_SPMU 5
442/* Opcode for timestamp */ 476/* Opcode for timestamp (deprecated) */
443#define HL_DEBUG_OP_TIMESTAMP 6 477#define HL_DEBUG_OP_TIMESTAMP 6
444/* Opcode for setting the device into or out of debug mode. The enable 478/* Opcode for setting the device into or out of debug mode. The enable
445 * variable should be 1 for enabling debug mode and 0 for disabling it 479 * variable should be 1 for enabling debug mode and 0 for disabling it
diff --git a/include/uapi/misc/xilinx_sdfec.h b/include/uapi/misc/xilinx_sdfec.h
new file mode 100644
index 000000000000..ee1a42ae6f46
--- /dev/null
+++ b/include/uapi/misc/xilinx_sdfec.h
@@ -0,0 +1,448 @@
1/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
2/*
3 * Xilinx SD-FEC
4 *
5 * Copyright (C) 2019 Xilinx, Inc.
6 *
7 * Description:
8 * This driver is developed for the SDFEC16 IP. It provides a char
9 * device and supports file operations like open(), close() and ioctl().
10 */
11#ifndef __XILINX_SDFEC_H__
12#define __XILINX_SDFEC_H__
13
14#include <linux/types.h>
15
16/* Shared LDPC Tables */
17#define XSDFEC_LDPC_SC_TABLE_ADDR_BASE (0x10000)
18#define XSDFEC_LDPC_SC_TABLE_ADDR_HIGH (0x10400)
19#define XSDFEC_LDPC_LA_TABLE_ADDR_BASE (0x18000)
20#define XSDFEC_LDPC_LA_TABLE_ADDR_HIGH (0x19000)
21#define XSDFEC_LDPC_QC_TABLE_ADDR_BASE (0x20000)
22#define XSDFEC_LDPC_QC_TABLE_ADDR_HIGH (0x28000)
23
24/* LDPC tables depth */
25#define XSDFEC_SC_TABLE_DEPTH \
26 (XSDFEC_LDPC_SC_TABLE_ADDR_HIGH - XSDFEC_LDPC_SC_TABLE_ADDR_BASE)
27#define XSDFEC_LA_TABLE_DEPTH \
28 (XSDFEC_LDPC_LA_TABLE_ADDR_HIGH - XSDFEC_LDPC_LA_TABLE_ADDR_BASE)
29#define XSDFEC_QC_TABLE_DEPTH \
30 (XSDFEC_LDPC_QC_TABLE_ADDR_HIGH - XSDFEC_LDPC_QC_TABLE_ADDR_BASE)
31
32/**
33 * enum xsdfec_code - Code Type.
34 * @XSDFEC_TURBO_CODE: Driver is configured for Turbo mode.
35 * @XSDFEC_LDPC_CODE: Driver is configured for LDPC mode.
36 *
37 * This enum is used to indicate the mode of the driver. The mode is determined
38 * by checking which codes are set in the driver. Note that the mode cannot be
39 * changed by the driver.
40 */
41enum xsdfec_code {
42 XSDFEC_TURBO_CODE = 0,
43 XSDFEC_LDPC_CODE,
44};
45
46/**
47 * enum xsdfec_order - Order
48 * @XSDFEC_MAINTAIN_ORDER: Maintain order execution of blocks.
49 * @XSDFEC_OUT_OF_ORDER: Out-of-order execution of blocks.
50 *
51 * This enum is used to indicate whether the order of blocks can change from
52 * input to output.
53 */
54enum xsdfec_order {
55 XSDFEC_MAINTAIN_ORDER = 0,
56 XSDFEC_OUT_OF_ORDER,
57};
58
59/**
60 * enum xsdfec_turbo_alg - Turbo Algorithm Type.
61 * @XSDFEC_MAX_SCALE: Max Log-Map algorithm with extrinsic scaling. When
62 * the scale factor is 1 this is equivalent to the plain
63 * Max Log-Map algorithm.
64 * @XSDFEC_MAX_STAR: Log-Map algorithm.
65 * @XSDFEC_TURBO_ALG_MAX: Used to indicate out of bound Turbo algorithms.
66 *
67 * This enum specifies which Turbo Decode algorithm is in use.
68 */
69enum xsdfec_turbo_alg {
70 XSDFEC_MAX_SCALE = 0,
71 XSDFEC_MAX_STAR,
72 XSDFEC_TURBO_ALG_MAX,
73};
74
75/**
76 * enum xsdfec_state - State.
77 * @XSDFEC_INIT: Driver is initialized.
78 * @XSDFEC_STARTED: Driver is started.
79 * @XSDFEC_STOPPED: Driver is stopped.
80 * @XSDFEC_NEEDS_RESET: Driver needs to be reset.
81 * @XSDFEC_PL_RECONFIGURE: Programmable Logic needs to be reconfigured.
82 *
83 * This enum is used to indicate the state of the driver.
84 */
85enum xsdfec_state {
86 XSDFEC_INIT = 0,
87 XSDFEC_STARTED,
88 XSDFEC_STOPPED,
89 XSDFEC_NEEDS_RESET,
90 XSDFEC_PL_RECONFIGURE,
91};
92
93/**
94 * enum xsdfec_axis_width - AXIS_WIDTH.DIN Setting for 128-bit width.
95 * @XSDFEC_1x128b: DIN data input stream consists of a 128-bit lane
96 * @XSDFEC_2x128b: DIN data input stream consists of two 128-bit lanes
97 * @XSDFEC_4x128b: DIN data input stream consists of four 128-bit lanes
98 *
99 * This enum is used to indicate the AXIS_WIDTH.DIN setting for 128-bit width.
100 * The number of lanes of the DIN data input stream depends upon the
101 * AXIS_WIDTH.DIN parameter.
102 */
103enum xsdfec_axis_width {
104 XSDFEC_1x128b = 1,
105 XSDFEC_2x128b = 2,
106 XSDFEC_4x128b = 4,
107};
108
109/**
110 * enum xsdfec_axis_word_include - Words Configuration.
111 * @XSDFEC_FIXED_VALUE: Fixed, the DIN_WORDS AXI4-Stream interface is removed
112 * from the IP instance and is driven with the specified
113 * number of words.
114 * @XSDFEC_IN_BLOCK: In Block, configures the IP instance to expect a single
115 * DIN_WORDS value per input code block. The DIN_WORDS
116 * interface is present.
117 * @XSDFEC_PER_AXI_TRANSACTION: Per Transaction, configures the IP instance to
118 * expect one DIN_WORDS value per input transaction on the DIN interface. The
119 * DIN_WORDS interface is present.
120 * @XSDFEC_AXIS_WORDS_INCLUDE_MAX: Used to indicate out of bound Words
121 * Configurations.
122 *
123 * This enum is used to specify the DIN_WORDS configuration.
124 */
125enum xsdfec_axis_word_include {
126 XSDFEC_FIXED_VALUE = 0,
127 XSDFEC_IN_BLOCK,
128 XSDFEC_PER_AXI_TRANSACTION,
129 XSDFEC_AXIS_WORDS_INCLUDE_MAX,
130};
131
132/**
133 * struct xsdfec_turbo - User data for Turbo codes.
134 * @alg: Specifies which Turbo decode algorithm to use
135 * @scale: Specifies the extrinsic scaling to apply when the Max Scale algorithm
136 * has been selected
137 *
138 * Turbo code structure to communicate parameters to XSDFEC driver.
139 */
140struct xsdfec_turbo {
141 __u32 alg;
142 __u8 scale;
143};
144
145/**
146 * struct xsdfec_ldpc_params - User data for LDPC codes.
147 * @n: Number of code word bits
148 * @k: Number of information bits
149 * @psize: Size of sub-matrix
150 * @nlayers: Number of layers in code
151 * @nqc: Quasi Cyclic Number
152 * @nmqc: Number of M-sized QC operations in parity check matrix
153 * @nm: Number of M-size vectors in N
154 * @norm_type: Normalization required or not
155 * @no_packing: Determines if multiple QC ops should be performed
156 * @special_qc: Sub-Matrix property for Circulant weight > 0
157 * @no_final_parity: Decide if final parity check needs to be performed
158 * @max_schedule: Experimental code word scheduling limit
159 * @sc_off: SC offset
160 * @la_off: LA offset
161 * @qc_off: QC offset
162 * @sc_table: Pointer to SC Table which must be page aligned
163 * @la_table: Pointer to LA Table which must be page aligned
164 * @qc_table: Pointer to QC Table which must be page aligned
165 * @code_id: LDPC Code
166 *
167 * This structure describes the LDPC code that is passed to the driver by the
168 * application.
169 */
170struct xsdfec_ldpc_params {
171 __u32 n;
172 __u32 k;
173 __u32 psize;
174 __u32 nlayers;
175 __u32 nqc;
176 __u32 nmqc;
177 __u32 nm;
178 __u32 norm_type;
179 __u32 no_packing;
180 __u32 special_qc;
181 __u32 no_final_parity;
182 __u32 max_schedule;
183 __u32 sc_off;
184 __u32 la_off;
185 __u32 qc_off;
186 __u32 *sc_table;
187 __u32 *la_table;
188 __u32 *qc_table;
189 __u16 code_id;
190};
191
192/**
193 * struct xsdfec_status - Status of SD-FEC core.
194 * @state: State of the SD-FEC core
195 * @activity: Describes if the SD-FEC instance is Active
196 */
197struct xsdfec_status {
198 __u32 state;
199 __s8 activity;
200};
201
202/**
203 * struct xsdfec_irq - Enabling or Disabling Interrupts.
204 * @enable_isr: If true enables the ISR
205 * @enable_ecc_isr: If true enables the ECC ISR
206 */
207struct xsdfec_irq {
208 __s8 enable_isr;
209 __s8 enable_ecc_isr;
210};
211
212/**
213 * struct xsdfec_config - Configuration of SD-FEC core.
214 * @code: The codes being used by the SD-FEC instance
215 * @order: Order of Operation
216 * @din_width: Width of the DIN AXI4-Stream
217 * @din_word_include: How DIN_WORDS are input
218 * @dout_width: Width of the DOUT AXI4-Stream
219 * @dout_word_include: How DOUT_WORDS are output
220 * @irq: Enabling or disabling interrupts
221 * @bypass: Is the core being bypassed
222 * @code_wr_protect: Is write protection of LDPC codes enabled
223 */
224struct xsdfec_config {
225 __u32 code;
226 __u32 order;
227 __u32 din_width;
228 __u32 din_word_include;
229 __u32 dout_width;
230 __u32 dout_word_include;
231 struct xsdfec_irq irq;
232 __s8 bypass;
233 __s8 code_wr_protect;
234};
235
236/**
237 * struct xsdfec_stats - Stats retrieved by ioctl XSDFEC_GET_STATS. Used
238 * to buffer atomic_t variables from struct
239 * xsdfec_dev. Counts are accumulated until
240 * the user clears them.
241 * @isr_err_count: Count of ISR errors
242 * @cecc_count: Count of Correctable ECC errors (SBE)
243 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
244 */
245struct xsdfec_stats {
246 __u32 isr_err_count;
247 __u32 cecc_count;
248 __u32 uecc_count;
249};
250
251/**
252 * struct xsdfec_ldpc_param_table_sizes - Used to store sizes of SD-FEC table
253 * entries for an individual LPDC code
254 * parameter.
255 * @sc_size: Size of SC table used
256 * @la_size: Size of LA table used
257 * @qc_size: Size of QC table used
258 */
259struct xsdfec_ldpc_param_table_sizes {
260 __u32 sc_size;
261 __u32 la_size;
262 __u32 qc_size;
263};
+
+/*
+ * XSDFEC IOCTL List
+ */
+#define XSDFEC_MAGIC	'f'
+/**
+ * DOC: XSDFEC_START_DEV
+ *
+ * @Description
+ *
+ * ioctl to start SD-FEC core
+ *
+ * This fails if the XSDFEC_SET_ORDER ioctl has not been previously called
+ */
+#define XSDFEC_START_DEV	_IO(XSDFEC_MAGIC, 0)
+/**
+ * DOC: XSDFEC_STOP_DEV
+ *
+ * @Description
+ *
+ * ioctl to stop the SD-FEC core
+ */
+#define XSDFEC_STOP_DEV		_IO(XSDFEC_MAGIC, 1)
+/**
+ * DOC: XSDFEC_GET_STATUS
+ *
+ * @Description
+ *
+ * ioctl that returns status of SD-FEC core
+ */
+#define XSDFEC_GET_STATUS	_IOR(XSDFEC_MAGIC, 2, struct xsdfec_status)
+/**
+ * DOC: XSDFEC_SET_IRQ
+ * @Parameters
+ *
+ * @struct xsdfec_irq *
+ *	Pointer to the &struct xsdfec_irq that contains the interrupt settings
+ *	for the SD-FEC core
+ *
+ * @Description
+ *
+ * ioctl to enable or disable irq
+ */
+#define XSDFEC_SET_IRQ		_IOW(XSDFEC_MAGIC, 3, struct xsdfec_irq)
+/**
+ * DOC: XSDFEC_SET_TURBO
+ * @Parameters
+ *
+ * @struct xsdfec_turbo *
+ *	Pointer to the &struct xsdfec_turbo that contains the Turbo decode
+ *	settings for the SD-FEC core
+ *
+ * @Description
+ *
+ * ioctl that sets the SD-FEC Turbo parameter values
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_TURBO	_IOW(XSDFEC_MAGIC, 4, struct xsdfec_turbo)
+/**
+ * DOC: XSDFEC_ADD_LDPC_CODE_PARAMS
+ * @Parameters
+ *
+ * @struct xsdfec_ldpc_params *
+ *	Pointer to the &struct xsdfec_ldpc_params that contains the LDPC code
+ *	parameters to be added to the SD-FEC Block
+ *
+ * @Description
+ * ioctl to add an LDPC code to the SD-FEC LDPC codes
+ *
+ * This can only be used when:
+ *
+ * - Driver is in the XSDFEC_STOPPED state
+ *
+ * - SD-FEC core is configured as LDPC
+ *
+ * - SD-FEC Code Write Protection is disabled
+ */
+#define XSDFEC_ADD_LDPC_CODE_PARAMS \
+	_IOW(XSDFEC_MAGIC, 5, struct xsdfec_ldpc_params)
+/**
+ * DOC: XSDFEC_GET_CONFIG
+ * @Parameters
+ *
+ * @struct xsdfec_config *
+ *	Pointer to the &struct xsdfec_config that contains the current
+ *	configuration settings of the SD-FEC Block
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC core configuration
+ */
+#define XSDFEC_GET_CONFIG	_IOR(XSDFEC_MAGIC, 6, struct xsdfec_config)
+/**
+ * DOC: XSDFEC_GET_TURBO
+ * @Parameters
+ *
+ * @struct xsdfec_turbo *
+ *	Pointer to the &struct xsdfec_turbo that contains the current Turbo
+ *	decode settings of the SD-FEC Block
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC turbo param values
+ */
+#define XSDFEC_GET_TURBO	_IOR(XSDFEC_MAGIC, 7, struct xsdfec_turbo)
+/**
+ * DOC: XSDFEC_SET_ORDER
+ * @Parameters
+ *
+ * @struct unsigned long *
+ *	Pointer to the unsigned long that contains a value from the
+ *	@enum xsdfec_order
+ *
+ * @Description
+ *
+ * ioctl that sets order, if order of blocks can change from input to output
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_ORDER	_IOW(XSDFEC_MAGIC, 8, unsigned long)
+/**
+ * DOC: XSDFEC_SET_BYPASS
+ * @Parameters
+ *
+ * @struct bool *
+ *	Pointer to bool that sets the bypass value, where false results in
+ *	normal operation and true results in the SD-FEC performing the
+ *	configured operations (same number of cycles) but output data matches
+ *	the input data
+ *
+ * @Description
+ *
+ * ioctl that sets bypass.
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_BYPASS	_IOW(XSDFEC_MAGIC, 9, bool)
+/**
+ * DOC: XSDFEC_IS_ACTIVE
+ * @Parameters
+ *
+ * @struct bool *
+ *	Pointer to bool that returns true if the SD-FEC is processing data
+ *
+ * @Description
+ *
+ * ioctl that determines if SD-FEC is processing data
+ */
+#define XSDFEC_IS_ACTIVE	_IOR(XSDFEC_MAGIC, 10, bool)
+/**
+ * DOC: XSDFEC_CLEAR_STATS
+ *
+ * @Description
+ *
+ * ioctl that clears error stats collected during interrupts
+ */
+#define XSDFEC_CLEAR_STATS	_IO(XSDFEC_MAGIC, 11)
+/**
+ * DOC: XSDFEC_GET_STATS
+ * @Parameters
+ *
+ * @struct xsdfec_stats *
+ *	Pointer to the &struct xsdfec_stats that will contain the updated stats
+ *	values
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC core stats
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_GET_STATS	_IOR(XSDFEC_MAGIC, 12, struct xsdfec_stats)
+/**
+ * DOC: XSDFEC_SET_DEFAULT_CONFIG
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC core to default config, use after a reset
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_DEFAULT_CONFIG	_IO(XSDFEC_MAGIC, 13)
+
+#endif /* __XILINX_SDFEC_H__ */
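
The DOC blocks above define the driver's full ioctl surface. A minimal user-space sketch of the documented set-order/start/status flow follows; the /dev/xsdfec0 node name, the installed header path, and the order value of 0 are illustrative assumptions, not taken from the header itself:

/*
 * Minimal sketch: set the order, start the core, poll status, stop.
 * Assumes a char device at /dev/xsdfec0 (hypothetical node name) and
 * the uapi header installed as <misc/xilinx_sdfec.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/xilinx_sdfec.h>

int main(void)
{
	struct xsdfec_status status;
	unsigned long order = 0;	/* assumed value from enum xsdfec_order */
	int fd = open("/dev/xsdfec0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* XSDFEC_SET_ORDER must precede XSDFEC_START_DEV (see above) */
	if (ioctl(fd, XSDFEC_SET_ORDER, &order) < 0)
		perror("XSDFEC_SET_ORDER");

	if (ioctl(fd, XSDFEC_START_DEV) < 0)
		perror("XSDFEC_START_DEV");

	if (ioctl(fd, XSDFEC_GET_STATUS, &status) == 0)
		printf("state=%u activity=%d\n", status.state, status.activity);

	ioctl(fd, XSDFEC_STOP_DEV);
	close(fd);
	return 0;
}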
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 6ca97a63b3d6..251213c872b5 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -18,6 +18,7 @@
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/delay.h>
@@ -26,6 +27,7 @@
 
 #define TEST_FIRMWARE_NAME	"test-firmware.bin"
 #define TEST_FIRMWARE_NUM_REQS	4
+#define TEST_FIRMWARE_BUF_SIZE	SZ_1K
 
 static DEFINE_MUTEX(test_fw_mutex);
 static const struct firmware *test_firmware;
@@ -45,6 +47,8 @@ struct test_batched_req {
  * test_config - represents configuration for the test for different triggers
  *
  * @name: the name of the firmware file to look for
+ * @into_buf: if this is true, request_firmware_into_buf() will be used
+ *	instead when the sync trigger fires.
  * @sync_direct: when the sync trigger is used if this is true
  *	request_firmware_direct() will be used instead.
  * @send_uevent: whether or not to send a uevent for async requests
@@ -83,6 +87,7 @@ struct test_batched_req {
  */
 struct test_config {
 	char *name;
+	bool into_buf;
 	bool sync_direct;
 	bool send_uevent;
 	u8 num_requests;
@@ -176,6 +181,7 @@ static int __test_firmware_config_init(void)
 
 	test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
 	test_fw_config->send_uevent = true;
+	test_fw_config->into_buf = false;
 	test_fw_config->sync_direct = false;
 	test_fw_config->req_firmware = request_firmware;
 	test_fw_config->test_result = 0;
@@ -245,6 +251,9 @@ static ssize_t config_show(struct device *dev,
 					"FW_ACTION_HOTPLUG" :
 					"FW_ACTION_NOHOTPLUG");
 	len += scnprintf(buf+len, PAGE_SIZE - len,
+			"into_buf:\t\t%s\n",
+			test_fw_config->into_buf ? "true" : "false");
+	len += scnprintf(buf+len, PAGE_SIZE - len,
 			"sync_direct:\t\t%s\n",
 			test_fw_config->sync_direct ? "true" : "false");
 	len += scnprintf(buf+len, PAGE_SIZE - len,
@@ -393,6 +402,23 @@ static ssize_t config_num_requests_show(struct device *dev,
 }
 static DEVICE_ATTR_RW(config_num_requests);
 
+static ssize_t config_into_buf_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	return test_dev_config_update_bool(buf,
+					   count,
+					   &test_fw_config->into_buf);
+}
+
+static ssize_t config_into_buf_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	return test_dev_config_show_bool(buf, test_fw_config->into_buf);
+}
+static DEVICE_ATTR_RW(config_into_buf);
+
 static ssize_t config_sync_direct_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t count)
@@ -522,7 +548,7 @@ static ssize_t trigger_async_request_store(struct device *dev,
 		rc = count;
 	} else {
 		pr_err("failed to async load firmware\n");
-		rc = -ENODEV;
+		rc = -ENOMEM;
 	}
 
 out:
@@ -585,7 +611,26 @@ static int test_fw_run_batch_request(void *data)
 		return -EINVAL;
 	}
 
-	req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
+	if (test_fw_config->into_buf) {
+		void *test_buf;
+
+		test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
+		if (!test_buf)
+			return -ENOSPC;
+
+		req->rc = request_firmware_into_buf(&req->fw,
+						    req->name,
+						    req->dev,
+						    test_buf,
+						    TEST_FIRMWARE_BUF_SIZE);
+		if (!req->fw)
+			kfree(test_buf);
+	} else {
+		req->rc = test_fw_config->req_firmware(&req->fw,
+						       req->name,
+						       req->dev);
+	}
+
 	if (req->rc) {
 		pr_info("#%u: batched sync load failed: %d\n",
 			req->idx, req->rc);
@@ -849,6 +894,7 @@ static struct attribute *test_dev_attrs[] = {
 	TEST_FW_DEV_ATTR(config),
 	TEST_FW_DEV_ATTR(config_name),
 	TEST_FW_DEV_ATTR(config_num_requests),
+	TEST_FW_DEV_ATTR(config_into_buf),
 	TEST_FW_DEV_ATTR(config_sync_direct),
 	TEST_FW_DEV_ATTR(config_send_uevent),
 	TEST_FW_DEV_ATTR(config_read_fw_idx),
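
The into_buf path added above exercises request_firmware_into_buf(), which loads the image into a caller-supplied buffer rather than one the firmware core allocates. A condensed sketch of that calling pattern follows; the load_into_buf() wrapper is illustrative, not the driver's literal code, and the buffer-lifetime comments mirror the test's own handling:

#include <linux/firmware.h>
#include <linux/slab.h>

/* Sketch of the request_firmware_into_buf() pattern used in the test. */
static int load_into_buf(struct device *dev)
{
	const struct firmware *fw;
	void *buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
	int rc;

	if (!buf)
		return -ENOMEM;

	rc = request_firmware_into_buf(&fw, TEST_FIRMWARE_NAME, dev,
				       buf, TEST_FIRMWARE_BUF_SIZE);
	if (rc) {
		kfree(buf);	/* no struct firmware was handed back */
		return rc;
	}

	/* fw->data points into buf; fw->size is the number of bytes loaded */

	release_firmware(fw);
	kfree(buf);		/* the caller-supplied buffer stays the caller's */
	return 0;
}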
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index f901076aa2ea..56894477c8bd 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -116,6 +116,16 @@ config_set_name()
 	echo -n $1 > $DIR/config_name
 }
 
+config_set_into_buf()
+{
+	echo 1 > $DIR/config_into_buf
+}
+
+config_unset_into_buf()
+{
+	echo 0 > $DIR/config_into_buf
+}
+
 config_set_sync_direct()
 {
 	echo 1 > $DIR/config_sync_direct
@@ -153,11 +163,14 @@ config_set_read_fw_idx()
 
 read_firmwares()
 {
-	if [ "$1" = "xzonly" ]; then
-		fwfile="${FW}-orig"
+	if [ "$(cat $DIR/config_into_buf)" == "1" ]; then
+		fwfile="$FW_INTO_BUF"
 	else
 		fwfile="$FW"
 	fi
+	if [ "$1" = "xzonly" ]; then
+		fwfile="${fwfile}-orig"
+	fi
 	for i in $(seq 0 3); do
 		config_set_read_fw_idx $i
 		# Verify the contents are what we expect.
@@ -194,6 +207,18 @@ test_batched_request_firmware_nofile()
 	echo "OK"
 }
 
+test_batched_request_firmware_into_buf_nofile()
+{
+	echo -n "Batched request_firmware_into_buf() nofile try #$1: "
+	config_reset
+	config_set_name nope-test-firmware.bin
+	config_set_into_buf
+	config_trigger_sync
+	read_firmwares_expect_nofile
+	release_all_firmware
+	echo "OK"
+}
+
 test_batched_request_firmware_direct_nofile()
 {
 	echo -n "Batched request_firmware_direct() nofile try #$1: "
@@ -259,6 +284,18 @@ test_batched_request_firmware()
 	echo "OK"
 }
 
+test_batched_request_firmware_into_buf()
+{
+	echo -n "Batched request_firmware_into_buf() $2 try #$1: "
+	config_reset
+	config_set_name $TEST_FIRMWARE_INTO_BUF_FILENAME
+	config_set_into_buf
+	config_trigger_sync
+	read_firmwares $2
+	release_all_firmware
+	echo "OK"
+}
+
 test_batched_request_firmware_direct()
 {
 	echo -n "Batched request_firmware_direct() $2 try #$1: "
@@ -308,6 +345,10 @@ for i in $(seq 1 5); do
 done
 
 for i in $(seq 1 5); do
+	test_batched_request_firmware_into_buf $i normal
+done
+
+for i in $(seq 1 5); do
 	test_batched_request_firmware_direct $i normal
 done
 
@@ -328,6 +369,10 @@ for i in $(seq 1 5); do
 done
 
 for i in $(seq 1 5); do
+	test_batched_request_firmware_into_buf_nofile $i
+done
+
+for i in $(seq 1 5); do
 	test_batched_request_firmware_direct_nofile $i
 done
 
@@ -351,6 +396,10 @@ for i in $(seq 1 5); do
 done
 
 for i in $(seq 1 5); do
+	test_batched_request_firmware_into_buf $i both
+done
+
+for i in $(seq 1 5); do
 	test_batched_request_firmware_direct $i both
 done
 
@@ -371,6 +420,10 @@ for i in $(seq 1 5); do
 done
 
 for i in $(seq 1 5); do
+	test_batched_request_firmware_into_buf $i xzonly
+done
+
+for i in $(seq 1 5); do
 	test_batched_request_firmware_direct $i xzonly
 done
 
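
The same into_buf path can be driven without the script by writing the sysfs knobs directly; a small sketch follows. The trigger_batched_requests file name is assumed from the selftest's config_trigger_sync helper, which is not shown in these hunks:

/* Sketch: enable into_buf and fire a batched sync request via sysfs. */
#include <stdio.h>

static int sysfs_write(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/virtual/misc/test_firmware/%s", file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	sysfs_write("config_name", "test-firmware-into-buf.bin");
	sysfs_write("config_into_buf", "1");
	sysfs_write("trigger_batched_requests", "1");	/* assumed knob name */
	return 0;
}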
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index f236cc295450..b879305a766d 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -9,6 +9,12 @@ DIR=/sys/devices/virtual/misc/test_firmware
 PROC_CONFIG="/proc/config.gz"
 TEST_DIR=$(dirname $0)
 
+# We need to load a different file to test request_firmware_into_buf().
+# The issue appears to be that loading cached vs. non-cached firmware
+# with the same filename gets bungled.
+# To reproduce, rename this file to test-firmware.bin.
+TEST_FIRMWARE_INTO_BUF_FILENAME=test-firmware-into-buf.bin
+
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
@@ -108,6 +114,8 @@ setup_tmp_file()
 	FWPATH=$(mktemp -d)
 	FW="$FWPATH/test-firmware.bin"
 	echo "ABCD0123" >"$FW"
+	FW_INTO_BUF="$FWPATH/$TEST_FIRMWARE_INTO_BUF_FILENAME"
+	echo "EFGH4567" >"$FW_INTO_BUF"
 	NAME=$(basename "$FW")
 	if [ "$TEST_REQS_FW_SET_CUSTOM_PATH" = "yes" ]; then
 		echo -n "$FWPATH" >/sys/module/firmware_class/parameters/path
@@ -175,6 +183,9 @@ test_finish()
 	if [ -f $FW ]; then
 		rm -f "$FW"
 	fi
+	if [ -f $FW_INTO_BUF ]; then
+		rm -f "$FW_INTO_BUF"
+	fi
 	if [ -d $FWPATH ]; then
 		rm -rf "$FWPATH"
 	fi