 Documentation/ABI/testing/configfs-stp-policy-p_sys-t      |   41
 Documentation/ABI/testing/sysfs-bus-vmbus                  |   21
 Documentation/admin-guide/security-bugs.rst                |   47
 Documentation/devicetree/bindings/arm/coresight.txt        |  120
 Documentation/driver-api/fpga/fpga-bridge.rst              |   37
 Documentation/driver-api/fpga/fpga-mgr.rst                 |  126
 Documentation/driver-api/fpga/fpga-programming.rst         |  107
 Documentation/driver-api/fpga/fpga-region.rst              |   91
 Documentation/driver-api/fpga/index.rst                    |    2
 Documentation/driver-api/fpga/intro.rst                    |    2
 Documentation/driver-api/soundwire/stream.rst              |   36
 Documentation/driver-api/uio-howto.rst                     |    4
 Documentation/nvmem/nvmem.txt                              |   31
 Documentation/trace/stm.rst                                |   38
 Documentation/trace/sys-t.rst                              |   62
 MAINTAINERS                                                |   11
 drivers/android/Kconfig                                    |    2
 drivers/android/binder.c                                   |  489
 drivers/android/binder_trace.h                             |   36
 drivers/extcon/extcon-intel-cht-wc.c                       |   60
 drivers/extcon/extcon-intel-int3496.c                      |   12
 drivers/extcon/extcon-max14577.c                           |   24
 drivers/extcon/extcon-max77693.c                           |   22
 drivers/extcon/extcon-max77843.c                           |   19
 drivers/extcon/extcon-max8997.c                            |   22
 drivers/extcon/extcon.c                                    |   15
 drivers/firmware/google/Kconfig                            |   32
 drivers/firmware/google/Makefile                           |    2
 drivers/firmware/google/coreboot_table-acpi.c              |   88
 drivers/firmware/google/coreboot_table-of.c                |   82
 drivers/firmware/google/coreboot_table.c                   |  126
 drivers/firmware/google/coreboot_table.h                   |    6
 drivers/firmware/google/gsmi.c                             |  122
 drivers/firmware/google/vpd.c                              |    2
 drivers/fpga/altera-cvp.c                                  |    8
 drivers/fpga/altera-fpga2sdram.c                           |    8
 drivers/fpga/altera-freeze-bridge.c                        |   13
 drivers/fpga/altera-hps2fpga.c                             |    7
 drivers/fpga/altera-pr-ip-core.c                           |    9
 drivers/fpga/altera-ps-spi.c                               |   11
 drivers/fpga/dfl-afu-dma-region.c                          |    2
 drivers/fpga/dfl-fme-br.c                                  |   11
 drivers/fpga/dfl-fme-mgr.c                                 |   13
 drivers/fpga/dfl-fme-region.c                              |    6
 drivers/fpga/dfl.c                                         |    6
 drivers/fpga/fpga-bridge.c                                 |   68
 drivers/fpga/fpga-mgr.c                                    |   64
 drivers/fpga/fpga-region.c                                 |   65
 drivers/fpga/ice40-spi.c                                   |   10
 drivers/fpga/machxo2-spi.c                                 |   11
 drivers/fpga/of-fpga-region.c                              |    6
 drivers/fpga/socfpga-a10.c                                 |    5
 drivers/fpga/socfpga.c                                     |   10
 drivers/fpga/ts73xx-fpga.c                                 |   11
 drivers/fpga/xilinx-pr-decoupler.c                         |    4
 drivers/fpga/xilinx-spi.c                                  |   12
 drivers/fpga/zynq-fpga.c                                   |    5
 drivers/hv/channel.c                                       |  300
 drivers/hv/channel_mgmt.c                                  |   54
 drivers/hv/hv.c                                            |   15
 drivers/hv/hv_balloon.c                                    |    2
 drivers/hv/hv_kvp.c                                        |   14
 drivers/hv/ring_buffer.c                                   |    1
 drivers/hv/vmbus_drv.c                                     |  118
 drivers/hwtracing/coresight/coresight-catu.c               |    6
 drivers/hwtracing/coresight/coresight-dynamic-replicator.c |   81
 drivers/hwtracing/coresight/coresight-etb10.c              |  183
 drivers/hwtracing/coresight/coresight-etm-perf.c           |  132
 drivers/hwtracing/coresight/coresight-etm-perf.h           |   26
 drivers/hwtracing/coresight/coresight-etm3x.c              |   58
 drivers/hwtracing/coresight/coresight-etm4x.c              |   93
 drivers/hwtracing/coresight/coresight-funnel.c             |   28
 drivers/hwtracing/coresight/coresight-priv.h               |    9
 drivers/hwtracing/coresight/coresight-replicator.c         |    4
 drivers/hwtracing/coresight/coresight-stm.c                |    4
 drivers/hwtracing/coresight/coresight-tmc-etf.c            |  198
 drivers/hwtracing/coresight/coresight-tmc-etr.c            |  385
 drivers/hwtracing/coresight/coresight-tmc.c                |    4
 drivers/hwtracing/coresight/coresight-tmc.h                |    4
 drivers/hwtracing/coresight/coresight-tpiu.c               |    6
 drivers/hwtracing/coresight/coresight.c                    |  184
 drivers/hwtracing/coresight/of_coresight.c                 |  262
 drivers/hwtracing/stm/Kconfig                              |   29
 drivers/hwtracing/stm/Makefile                             |    6
 drivers/hwtracing/stm/core.c                               |  292
 drivers/hwtracing/stm/heartbeat.c                          |    2
 drivers/hwtracing/stm/p_basic.c                            |   48
 drivers/hwtracing/stm/p_sys-t.c                            |  382
 drivers/hwtracing/stm/policy.c                             |  147
 drivers/hwtracing/stm/stm.h                                |   56
 drivers/misc/ad525x_dpot-i2c.c                             |    2
 drivers/misc/ad525x_dpot-spi.c                             |    2
 drivers/misc/ad525x_dpot.c                                 |    6
 drivers/misc/apds990x.c                                    |    1
 drivers/misc/bh1770glc.c                                   |    3
 drivers/misc/cxl/flash.c                                   |    4
 drivers/misc/cxl/guest.c                                   |    2
 drivers/misc/echo/echo.c                                   |    2
 drivers/misc/eeprom/Kconfig                                |   11
 drivers/misc/eeprom/Makefile                               |    1
 drivers/misc/eeprom/at25.c                                 |   13
 drivers/misc/eeprom/ee1004.c                               |  281
 drivers/misc/eeprom/eeprom_93xx46.c                        |   19
 drivers/misc/genwqe/card_base.c                            |    1
 drivers/misc/genwqe/card_ddcb.c                            |    1
 drivers/misc/genwqe/card_utils.c                           |   15
 drivers/misc/kgdbts.c                                      |   16
 drivers/misc/lkdtm/usercopy.c                              |    2
 drivers/misc/mei/bus-fixup.c                               |    1
 drivers/misc/mei/main.c                                    |    4
 drivers/misc/mic/scif/scif_dma.c                           |    9
 drivers/misc/mic/scif/scif_fence.c                         |    2
 drivers/misc/sgi-gru/grukservices.c                        |    4
 drivers/misc/sgi-xp/xpc_channel.c                          |    6
 drivers/misc/sgi-xp/xpc_partition.c                        |    3
 drivers/misc/sgi-xp/xpc_sn2.c                              |    2
 drivers/misc/sgi-xp/xpc_uv.c                               |    2
 drivers/misc/sram.c                                        |    6
 drivers/misc/vmw_balloon.c                                 | 1802
 drivers/misc/vmw_vmci/vmci_driver.c                        |    2
 drivers/misc/vmw_vmci/vmci_host.c                          |    4
 drivers/misc/vmw_vmci/vmci_resource.c                      |    3
 drivers/nvmem/core.c                                       |  533
 drivers/nvmem/lpc18xx_eeprom.c                             |    7
 drivers/nvmem/mxs-ocotp.c                                  |    4
 drivers/nvmem/sunxi_sid.c                                  |   22
 drivers/platform/goldfish/goldfish_pipe.c                  |  399
 drivers/platform/goldfish/goldfish_pipe_qemu.h             |   98
 drivers/slimbus/core.c                                     |   37
 drivers/slimbus/qcom-ngd-ctrl.c                            |   34
 drivers/soundwire/bus.c                                    |    6
 drivers/soundwire/bus.h                                    |    4
 drivers/soundwire/intel.c                                  |   68
 drivers/soundwire/intel_init.c                             |    2
 drivers/soundwire/stream.c                                 |  488
 drivers/thunderbolt/cap.c                                  |    3
 drivers/thunderbolt/ctl.c                                  |   12
 drivers/thunderbolt/ctl.h                                  |    3
 drivers/thunderbolt/dma_port.c                             |    5
 drivers/thunderbolt/dma_port.h                             |    5
 drivers/thunderbolt/domain.c                               |    7
 drivers/thunderbolt/eeprom.c                               |    5
 drivers/thunderbolt/icm.c                                  |    5
 drivers/thunderbolt/nhi.c                                  |   33
 drivers/thunderbolt/nhi.h                                  |    3
 drivers/thunderbolt/nhi_regs.h                             |    1
 drivers/thunderbolt/path.c                                 |   26
 drivers/thunderbolt/property.c                             |    5
 drivers/thunderbolt/switch.c                               |   71
 drivers/thunderbolt/tb.c                                   |   10
 drivers/thunderbolt/tb.h                                   |    9
 drivers/thunderbolt/tb_msgs.h                              |    5
 drivers/thunderbolt/tb_regs.h                              |    3
 drivers/thunderbolt/xdomain.c                              |    5
 drivers/uio/uio.c                                          |   35
 drivers/uio/uio_dmem_genirq.c                              |    3
 drivers/uio/uio_fsl_elbc_gpcm.c                            |    3
 drivers/uio/uio_hv_generic.c                               |  116
 drivers/uio/uio_pdrv_genirq.c                              |    3
 drivers/vme/vme.c                                          |    1
 drivers/w1/masters/omap_hdq.c                              |    2
 drivers/w1/slaves/w1_ds2438.c                              |   66
 include/linux/coresight.h                                  |   41
 include/linux/fpga/fpga-bridge.h                           |    4
 include/linux/fpga/fpga-mgr.h                              |    4
 include/linux/fpga/fpga-region.h                           |    4
 include/linux/hyperv.h                                     |   14
 include/linux/nvmem-consumer.h                             |  100
 include/linux/nvmem-provider.h                             |   50
 include/linux/soundwire/sdw.h                              |   12
 include/linux/string.h                                     |    7
 include/linux/thunderbolt.h                                |    5
 include/linux/uio_driver.h                                 |    1
 include/uapi/linux/android/binder.h                        |   10
 lib/Kconfig.debug                                          |    8
 lib/Makefile                                               |    3
 lib/memcat_p.c                                             |   34
 lib/string.c                                               |    1
 lib/test_memcat_p.c                                        |  115
 samples/mei/mei-amt-version.c                              |    2
 180 files changed, 6878 insertions(+), 3351 deletions(-)
diff --git a/Documentation/ABI/testing/configfs-stp-policy-p_sys-t b/Documentation/ABI/testing/configfs-stp-policy-p_sys-t
new file mode 100644
index 000000000000..b290d1c00dcf
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-stp-policy-p_sys-t
@@ -0,0 +1,41 @@
+What:		/config/stp-policy/<device>:p_sys-t.<policy>/<node>/uuid
+Date:		June 2018
+KernelVersion:	4.19
+Description:
+		UUID source identifier string, RW.
+		Default value is randomly generated at the mkdir <node> time.
+		Data coming from trace sources that use this <node> will be
+		tagged with this UUID in the MIPI SyS-T packet stream, to
+		allow the decoder to discern between different sources
+		within the same master/channel range, and identify the
+		higher level decoders that may be needed for each source.
+
+What:		/config/stp-policy/<device>:p_sys-t.<policy>/<node>/do_len
+Date:		June 2018
+KernelVersion:	4.19
+Description:
+		Include payload length in the MIPI SyS-T header, boolean.
+		If enabled, the SyS-T protocol encoder will include payload
+		length in each packet's metadata. This is normally redundant
+		if the underlying transport protocol supports marking message
+		boundaries (which STP does), so this is off by default.
+
+What:		/config/stp-policy/<device>:p_sys-t.<policy>/<node>/ts_interval
+Date:		June 2018
+KernelVersion:	4.19
+Description:
+		Time interval in milliseconds. Include a timestamp in the
+		MIPI SyS-T packet metadata, if this many milliseconds have
+		passed since the previous packet from this source. Zero is
+		the default and stands for "never send the timestamp".
+
+What:		/config/stp-policy/<device>:p_sys-t.<policy>/<node>/clocksync_interval
+Date:		June 2018
+KernelVersion:	4.19
+Description:
+		Time interval in milliseconds. Send a CLOCKSYNC packet if
+		this many milliseconds have passed since the previous
+		CLOCKSYNC packet from this source. Zero is the default and
+		stands for "never send the CLOCKSYNC". It makes sense to
+		use this option with sources that generate constant and/or
+		periodic data, like stm_heartbeat.
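
[Editor's note: the attributes above are ordinary configfs files, so a trace
tool can also set them programmatically. A minimal userspace sketch, not part
of the patch, assuming a hypothetical "dummy_stm.0" device with a "my-policy"
policy and a "user" node already created via mkdir::

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int write_attr(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		const char *node =
			"/config/stp-policy/dummy_stm.0:p_sys-t.my-policy/user";
		char path[256];

		/* include payload length in the SyS-T header */
		snprintf(path, sizeof(path), "%s/do_len", node);
		if (write_attr(path, "1"))
			perror("do_len");

		/* timestamp at most once a second */
		snprintf(path, sizeof(path), "%s/ts_interval", node);
		if (write_attr(path, "1000"))
			perror("ts_interval");

		return 0;
	}

]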
diff --git a/Documentation/ABI/testing/sysfs-bus-vmbus b/Documentation/ABI/testing/sysfs-bus-vmbus
new file mode 100644
index 000000000000..91e6c065973c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-vmbus
@@ -0,0 +1,21 @@
+What:		/sys/bus/vmbus/devices/.../driver_override
+Date:		August 2019
+Contact:	Stephen Hemminger <sthemmin@microsoft.com>
+Description:
+		This file allows the driver for a device to be specified which
+		will override standard static and dynamic ID matching. When
+		specified, only a driver with a name matching the value written
+		to driver_override will have an opportunity to bind to the
+		device. The override is specified by writing a string to the
+		driver_override file (echo uio_hv_generic > driver_override) and
+		may be cleared with an empty string (echo > driver_override).
+		This returns the device to standard matching rules binding.
+		Writing to driver_override does not automatically unbind the
+		device from its current driver or make any attempt to
+		automatically load the specified driver. If no driver with a
+		matching name is currently loaded in the kernel, the device
+		will not bind to any driver. This also allows devices to
+		opt-out of driver binding using a driver_override name such as
+		"none". Only a single driver may be specified in the override,
+		there is no support for parsing delimiters.
+
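[Editor's note: since driver_override neither unbinds nor binds by itself, a
rebind takes three sysfs writes. A hedged sketch, not part of the patch; the
device instance GUID below is a placeholder for a real entry under
/sys/bus/vmbus/devices/::

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	#define DEV "deadbeef-0000-0000-0000-000000000000"	/* hypothetical */

	static int sysfs_write(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		/* only uio_hv_generic may bind from now on */
		sysfs_write("/sys/bus/vmbus/devices/" DEV "/driver_override",
			    "uio_hv_generic");
		/* detach the current driver, if any */
		sysfs_write("/sys/bus/vmbus/devices/" DEV "/driver/unbind", DEV);
		/* attach the override driver */
		sysfs_write("/sys/bus/vmbus/drivers/uio_hv_generic/bind", DEV);
		return 0;
	}

]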
diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst
index 30491d91e93d..164bf71149fd 100644
--- a/Documentation/admin-guide/security-bugs.rst
+++ b/Documentation/admin-guide/security-bugs.rst
@@ -26,23 +26,34 @@ information is helpful. Any exploit code is very helpful and will not
 be released without consent from the reporter unless it has already been
 made public.
 
-Disclosure
-----------
+Disclosure and embargoed information
+------------------------------------
 
-The goal of the Linux kernel security team is to work with the bug
-submitter to understand and fix the bug. We prefer to publish the fix as
-soon as possible, but try to avoid public discussion of the bug itself
-and leave that to others.
-
-Publishing the fix may be delayed when the bug or the fix is not yet
-fully understood, the solution is not well-tested or for vendor
-coordination. However, we expect these delays to be short, measurable in
-days, not weeks or months. A release date is negotiated by the security
-team working with the bug submitter as well as vendors. However, the
-kernel security team holds the final say when setting a timeframe. The
-timeframe varies from immediate (esp. if it's already publicly known bug)
-to a few weeks. As a basic default policy, we expect report date to
-release date to be on the order of 7 days.
+The security list is not a disclosure channel. For that, see Coordination
+below.
+
+Once a robust fix has been developed, our preference is to release the
+fix in a timely fashion, treating it no differently than any of the other
+thousands of changes and fixes the Linux kernel project releases every
+month.
+
+However, at the request of the reporter, we will postpone releasing the
+fix for up to 5 business days after the date of the report or after the
+embargo has lifted; whichever comes first. The only exception to that
+rule is if the bug is publicly known, in which case the preference is to
+release the fix as soon as it's available.
+
+Whilst embargoed information may be shared with trusted individuals in
+order to develop a fix, such information will not be published alongside
+the fix or on any other disclosure channel without the permission of the
+reporter. This includes but is not limited to the original bug report
+and followup discussions (if any), exploits, CVE information or the
+identity of the reporter.
+
+In other words our only interest is in getting bugs fixed. All other
+information submitted to the security list and any followup discussions
+of the report are treated confidentially even after the embargo has been
+lifted, in perpetuity.
 
 Coordination
 ------------
@@ -68,7 +79,7 @@ may delay the bug handling. If a reporter wishes to have a CVE identifier
 assigned ahead of public disclosure, they will need to contact the private
 linux-distros list, described above. When such a CVE identifier is known
 before a patch is provided, it is desirable to mention it in the commit
-message, though.
+message if the reporter agrees.
 
 Non-disclosure agreements
 -------------------------
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 5d1ad09bafb4..f8aff65ab921 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -54,9 +54,7 @@ its hardware characteristcs.
 	  clocks the core of that coresight component. The latter clock
 	  is optional.
 
-	* port or ports: The representation of the component's port
-	  layout using the generic DT graph presentation found in
-	  "bindings/graph.txt".
+	* port or ports: see "Graph bindings for Coresight" below.
 
 * Additional required properties for System Trace Macrocells (STM):
 	* reg: along with the physical base address and length of the register
@@ -73,7 +71,7 @@ its hardware characteristcs.
 	  AMBA markee):
 		- "arm,coresight-replicator"
 
-	* port or ports: same as above.
+	* port or ports: see "Graph bindings for Coresight" below.
 
 * Optional properties for ETM/PTMs:
 
@@ -96,6 +94,20 @@ its hardware characteristcs.
 * interrupts : Exactly one SPI may be listed for reporting the address
   error
 
+Graph bindings for Coresight
+-------------------------------
+
+Coresight components are interconnected to create a data path for the flow of
+trace data generated from the "sources" to their collection points "sink".
+Each coresight component must describe the "input" and "output" connections.
+The connections must be described via generic DT graph bindings as described
+by the "bindings/graph.txt", where each "port" along with an "endpoint"
+component represents a hardware port and the connection.
+
+ * All output ports must be listed inside a child node named "out-ports"
+ * All input ports must be listed inside a child node named "in-ports".
+ * Port address must match the hardware port number.
+
 Example:
 
 1. Sinks
@@ -105,10 +117,11 @@ Example:
 
 		clocks = <&oscclk6a>;
 		clock-names = "apb_pclk";
-		port {
-			etb_in_port: endpoint@0 {
-				slave-mode;
-				remote-endpoint = <&replicator_out_port0>;
+		in-ports {
+			port {
+				etb_in_port: endpoint@0 {
+					remote-endpoint = <&replicator_out_port0>;
+				};
 			};
 		};
 	};
@@ -119,10 +132,11 @@ Example:
 
 		clocks = <&oscclk6a>;
 		clock-names = "apb_pclk";
-		port {
-			tpiu_in_port: endpoint@0 {
-				slave-mode;
-				remote-endpoint = <&replicator_out_port1>;
+		in-ports {
+			port {
+				tpiu_in_port: endpoint@0 {
+					remote-endpoint = <&replicator_out_port1>;
+				};
 			};
 		};
 	};
@@ -133,22 +147,16 @@ Example:
 
 		clocks = <&oscclk6a>;
 		clock-names = "apb_pclk";
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			/* input port */
-			port@0 {
-				reg = <0>;
+		in-ports {
+			port {
 				etr_in_port: endpoint {
-					slave-mode;
 					remote-endpoint = <&replicator2_out_port0>;
 				};
 			};
+		};
 
-			/* CATU link represented by output port */
-			port@1 {
-				reg = <1>;
+		out-ports {
+			port {
 				etr_out_port: endpoint {
 					remote-endpoint = <&catu_in_port>;
 				};
@@ -163,7 +171,7 @@ Example:
 	 */
 	compatible = "arm,coresight-replicator";
 
-	ports {
+	out-ports {
 		#address-cells = <1>;
 		#size-cells = <0>;
 
@@ -181,12 +189,11 @@ Example:
 				remote-endpoint = <&tpiu_in_port>;
 			};
 		};
+	};
 
-		/* replicator input port */
-		port@2 {
-			reg = <0>;
+	in-ports {
+		port {
 			replicator_in_port0: endpoint {
-				slave-mode;
 				remote-endpoint = <&funnel_out_port0>;
 			};
 		};
@@ -199,40 +206,36 @@ Example:
 
 		clocks = <&oscclk6a>;
 		clock-names = "apb_pclk";
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			/* funnel output port */
-			port@0 {
-				reg = <0>;
+		out-ports {
+			port {
 				funnel_out_port0: endpoint {
 					remote-endpoint =
 						<&replicator_in_port0>;
 				};
 			};
+		};
 
-			/* funnel input ports */
-			port@1 {
+		in-ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
 				reg = <0>;
 				funnel_in_port0: endpoint {
-					slave-mode;
 					remote-endpoint = <&ptm0_out_port>;
 				};
 			};
 
-			port@2 {
+			port@1 {
 				reg = <1>;
 				funnel_in_port1: endpoint {
-					slave-mode;
 					remote-endpoint = <&ptm1_out_port>;
 				};
 			};
 
-			port@3 {
+			port@2 {
 				reg = <2>;
 				funnel_in_port2: endpoint {
-					slave-mode;
 					remote-endpoint = <&etm0_out_port>;
 				};
 			};
@@ -248,9 +251,11 @@ Example:
 	cpu = <&cpu0>;
 	clocks = <&oscclk6a>;
 	clock-names = "apb_pclk";
-	port {
-		ptm0_out_port: endpoint {
-			remote-endpoint = <&funnel_in_port0>;
+	out-ports {
+		port {
+			ptm0_out_port: endpoint {
+				remote-endpoint = <&funnel_in_port0>;
+			};
 		};
 	};
 };
@@ -262,9 +267,11 @@ Example:
 	cpu = <&cpu1>;
 	clocks = <&oscclk6a>;
 	clock-names = "apb_pclk";
-	port {
-		ptm1_out_port: endpoint {
-			remote-endpoint = <&funnel_in_port1>;
+	out-ports {
+		port {
+			ptm1_out_port: endpoint {
+				remote-endpoint = <&funnel_in_port1>;
+			};
 		};
 	};
 };
@@ -278,9 +285,11 @@ Example:
 
 	clocks = <&soc_smc50mhz>;
 	clock-names = "apb_pclk";
-	port {
-		stm_out_port: endpoint {
-			remote-endpoint = <&main_funnel_in_port2>;
+	out-ports {
+		port {
+			stm_out_port: endpoint {
+				remote-endpoint = <&main_funnel_in_port2>;
+			};
 		};
 	};
 };
@@ -295,10 +304,11 @@ Example:
 	clock-names = "apb_pclk";
 
 	interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
-	port {
-		catu_in_port: endpoint {
-			slave-mode;
-			remote-endpoint = <&etr_out_port>;
+	in-ports {
+		port {
+			catu_in_port: endpoint {
+				remote-endpoint = <&etr_out_port>;
+			};
 		};
 	};
 };
diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst
index 2c2aaca894bf..71c5a40da320 100644
--- a/Documentation/driver-api/fpga/fpga-bridge.rst
+++ b/Documentation/driver-api/fpga/fpga-bridge.rst
@@ -4,6 +4,12 @@ FPGA Bridge
 API to implement a new FPGA bridge
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
+* struct :c:type:`fpga_bridge` — The FPGA Bridge structure
+* struct :c:type:`fpga_bridge_ops` — Low level Bridge driver ops
+* :c:func:`devm_fpga_bridge_create()` — Allocate and init a bridge struct
+* :c:func:`fpga_bridge_register()` — Register a bridge
+* :c:func:`fpga_bridge_unregister()` — Unregister a bridge
+
 .. kernel-doc:: include/linux/fpga/fpga-bridge.h
    :functions: fpga_bridge
 
@@ -11,39 +17,10 @@ API to implement a new FPGA bridge
    :functions: fpga_bridge_ops
 
 .. kernel-doc:: drivers/fpga/fpga-bridge.c
-   :functions: fpga_bridge_create
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
-   :functions: fpga_bridge_free
+   :functions: devm_fpga_bridge_create
 
 .. kernel-doc:: drivers/fpga/fpga-bridge.c
    :functions: fpga_bridge_register
 
 .. kernel-doc:: drivers/fpga/fpga-bridge.c
    :functions: fpga_bridge_unregister
-
-API to control an FPGA bridge
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You probably won't need these directly. FPGA regions should handle this.
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
-   :functions: of_fpga_bridge_get
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
-   :functions: fpga_bridge_get
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
-   :functions: fpga_bridge_put
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
-   :functions: fpga_bridge_get_to_list
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
-   :functions: of_fpga_bridge_get_to_list
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
-   :functions: fpga_bridge_enable
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
-   :functions: fpga_bridge_disable
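
[Editor's note: to show the devm_-based registration this patch documents, here
is a hedged sketch of a minimal bridge driver, modeled on the socfpga manager
example elsewhere in this series; the soc_bridge_* names are hypothetical::

	#include <linux/fpga/fpga-bridge.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int soc_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
	{
		/* gate or ungate the bus paths into the reconfigurable region */
		return 0;
	}

	static const struct fpga_bridge_ops soc_bridge_ops = {
		.enable_set = soc_bridge_enable_set,
	};

	static int soc_bridge_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct fpga_bridge *br;

		br = devm_fpga_bridge_create(dev, "SoC FPGA Bridge",
					     &soc_bridge_ops, NULL);
		if (!br)
			return -ENOMEM;

		platform_set_drvdata(pdev, br);

		/* devm_ allocation: no fpga_bridge_free() on the error path */
		return fpga_bridge_register(br);
	}

	static int soc_bridge_remove(struct platform_device *pdev)
	{
		fpga_bridge_unregister(platform_get_drvdata(pdev));
		return 0;
	}

]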
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 82b6dbbd31cd..576f1945eacd 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -49,18 +49,14 @@ probe function calls fpga_mgr_register(), such as::
 	 * them in priv
 	 */
 
-	mgr = fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
+	mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
 			      &socfpga_fpga_ops, priv);
 	if (!mgr)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, mgr);
 
-	ret = fpga_mgr_register(mgr);
-	if (ret)
-		fpga_mgr_free(mgr);
-
-	return ret;
+	return fpga_mgr_register(mgr);
 }
 
 static int socfpga_fpga_remove(struct platform_device *pdev)
@@ -102,67 +98,19 @@ The ops include a .state function which will determine the state the FPGA is in
 and return a code of type enum fpga_mgr_states. It doesn't result in a change
 in state.
 
-How to write an image buffer to a supported FPGA
-------------------------------------------------
-
-Some sample code::
-
-	#include <linux/fpga/fpga-mgr.h>
-
-	struct fpga_manager *mgr;
-	struct fpga_image_info *info;
-	int ret;
-
-	/*
-	 * Get a reference to FPGA manager. The manager is not locked, so you can
-	 * hold onto this reference without it preventing programming.
-	 *
-	 * This example uses the device node of the manager. Alternatively, use
-	 * fpga_mgr_get(dev) instead if you have the device.
-	 */
-	mgr = of_fpga_mgr_get(mgr_node);
-
-	/* struct with information about the FPGA image to program. */
-	info = fpga_image_info_alloc(dev);
-
-	/* flags indicates whether to do full or partial reconfiguration */
-	info->flags = FPGA_MGR_PARTIAL_RECONFIG;
-
-	/*
-	 * At this point, indicate where the image is. This is pseudo-code; you're
-	 * going to use one of these three.
-	 */
-	if (image is in a scatter gather table) {
-
-		info->sgt = [your scatter gather table]
-
-	} else if (image is in a buffer) {
-
-		info->buf = [your image buffer]
-		info->count = [image buffer size]
-
-	} else if (image is in a firmware file) {
-
-		info->firmware_name = devm_kstrdup(dev, firmware_name, GFP_KERNEL);
-
-	}
-
-	/* Get exclusive control of FPGA manager */
-	ret = fpga_mgr_lock(mgr);
-
-	/* Load the buffer to the FPGA */
-	ret = fpga_mgr_buf_load(mgr, &info, buf, count);
-
-	/* Release the FPGA manager */
-	fpga_mgr_unlock(mgr);
-	fpga_mgr_put(mgr);
-
-	/* Deallocate the image info if you're done with it */
-	fpga_image_info_free(info);
-
 API for implementing a new FPGA Manager driver
 ----------------------------------------------
 
+* ``fpga_mgr_states`` — Values for :c:member:`fpga_manager->state`.
+* struct :c:type:`fpga_manager` — the FPGA manager struct
+* struct :c:type:`fpga_manager_ops` — Low level FPGA manager driver ops
+* :c:func:`devm_fpga_mgr_create` — Allocate and init a manager struct
+* :c:func:`fpga_mgr_register` — Register an FPGA manager
+* :c:func:`fpga_mgr_unregister` — Unregister an FPGA manager
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+   :functions: fpga_mgr_states
+
 .. kernel-doc:: include/linux/fpga/fpga-mgr.h
    :functions: fpga_manager
 
@@ -170,56 +118,10 @@ API for implementing a new FPGA Manager driver
    :functions: fpga_manager_ops
 
 .. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: fpga_mgr_create
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: fpga_mgr_free
+   :functions: devm_fpga_mgr_create
 
 .. kernel-doc:: drivers/fpga/fpga-mgr.c
    :functions: fpga_mgr_register
 
 .. kernel-doc:: drivers/fpga/fpga-mgr.c
    :functions: fpga_mgr_unregister
-
-API for programming an FPGA
----------------------------
-
-FPGA Manager flags
-
-.. kernel-doc:: include/linux/fpga/fpga-mgr.h
-   :doc: FPGA Manager flags
-
-.. kernel-doc:: include/linux/fpga/fpga-mgr.h
-   :functions: fpga_image_info
-
-.. kernel-doc:: include/linux/fpga/fpga-mgr.h
-   :functions: fpga_mgr_states
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: fpga_image_info_alloc
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: fpga_image_info_free
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: of_fpga_mgr_get
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: fpga_mgr_get
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: fpga_mgr_put
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: fpga_mgr_lock
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: fpga_mgr_unlock
-
-.. kernel-doc:: include/linux/fpga/fpga-mgr.h
-   :functions: fpga_mgr_states
-
-Note - use :c:func:`fpga_region_program_fpga()` instead of :c:func:`fpga_mgr_load()`
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
-   :functions: fpga_mgr_load
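
[Editor's note: the file now documents only the implementer-facing API. As a
rough companion sketch, not from the patch, a minimal fpga_manager_ops could
look like this; the socfpga_* stubs are hypothetical, and .state plus
.write_complete are the ops the core requires::

	#include <linux/fpga/fpga-mgr.h>

	static enum fpga_mgr_states socfpga_fpga_state(struct fpga_manager *mgr)
	{
		return FPGA_MGR_STATE_UNKNOWN;	/* or decode a status register */
	}

	static int socfpga_fpga_write_init(struct fpga_manager *mgr,
					   struct fpga_image_info *info,
					   const char *buf, size_t count)
	{
		/* put the device into configuration mode */
		return 0;
	}

	static int socfpga_fpga_write(struct fpga_manager *mgr, const char *buf,
				      size_t count)
	{
		/* stream `count` bytes of the image to the configuration port */
		return 0;
	}

	static int socfpga_fpga_write_complete(struct fpga_manager *mgr,
					       struct fpga_image_info *info)
	{
		/* wait for the device to report a successful configuration */
		return 0;
	}

	static const struct fpga_manager_ops socfpga_fpga_ops = {
		.state		= socfpga_fpga_state,
		.write_init	= socfpga_fpga_write_init,
		.write		= socfpga_fpga_write,
		.write_complete	= socfpga_fpga_write_complete,
	};

]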
diff --git a/Documentation/driver-api/fpga/fpga-programming.rst b/Documentation/driver-api/fpga/fpga-programming.rst
new file mode 100644
index 000000000000..b5484df6ff0f
--- /dev/null
+++ b/Documentation/driver-api/fpga/fpga-programming.rst
@@ -0,0 +1,107 @@
+In-kernel API for FPGA Programming
+==================================
+
+Overview
+--------
+
+The in-kernel API for FPGA programming is a combination of APIs from
+FPGA manager, bridge, and regions. The actual function used to
+trigger FPGA programming is :c:func:`fpga_region_program_fpga()`.
+
+:c:func:`fpga_region_program_fpga()` uses functionality supplied by
+the FPGA manager and bridges. It will:
+
+ * lock the region's mutex
+ * lock the mutex of the region's FPGA manager
+ * build a list of FPGA bridges if a method has been specified to do so
+ * disable the bridges
+ * program the FPGA using info passed in :c:member:`fpga_region->info`.
+ * re-enable the bridges
+ * release the locks
+
+The struct fpga_image_info specifies what FPGA image to program. It is
+allocated/freed by :c:func:`fpga_image_info_alloc()` and freed with
+:c:func:`fpga_image_info_free()`
+
+How to program an FPGA using a region
+-------------------------------------
+
+When the FPGA region driver probed, it was given a pointer to an FPGA manager
+driver so it knows which manager to use. The region also either has a list of
+bridges to control during programming or it has a pointer to a function that
+will generate that list. Here's some sample code of what to do next::
+
+	#include <linux/fpga/fpga-mgr.h>
+	#include <linux/fpga/fpga-region.h>
+
+	struct fpga_image_info *info;
+	int ret;
+
+	/*
+	 * First, alloc the struct with information about the FPGA image to
+	 * program.
+	 */
+	info = fpga_image_info_alloc(dev);
+	if (!info)
+		return -ENOMEM;
+
+	/* Set flags as needed, such as: */
+	info->flags = FPGA_MGR_PARTIAL_RECONFIG;
+
+	/*
+	 * Indicate where the FPGA image is. This is pseudo-code; you're
+	 * going to use one of these three.
+	 */
+	if (image is in a scatter gather table) {
+
+		info->sgt = [your scatter gather table]
+
+	} else if (image is in a buffer) {
+
+		info->buf = [your image buffer]
+		info->count = [image buffer size]
+
+	} else if (image is in a firmware file) {
+
+		info->firmware_name = devm_kstrdup(dev, firmware_name,
+						   GFP_KERNEL);
+
+	}
+
+	/* Add info to region and do the programming */
+	region->info = info;
+	ret = fpga_region_program_fpga(region);
+
+	/* Deallocate the image info if you're done with it */
+	region->info = NULL;
+	fpga_image_info_free(info);
+
+	if (ret)
+		return ret;
+
+	/* Now enumerate whatever hardware has appeared in the FPGA. */
+
+API for programming an FPGA
+---------------------------
+
+* :c:func:`fpga_region_program_fpga` — Program an FPGA
+* :c:type:`fpga_image_info` — Specifies what FPGA image to program
+* :c:func:`fpga_image_info_alloc()` — Allocate an FPGA image info struct
+* :c:func:`fpga_image_info_free()` — Free an FPGA image info struct
+
+.. kernel-doc:: drivers/fpga/fpga-region.c
+   :functions: fpga_region_program_fpga
+
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+   :doc: FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+   :functions: fpga_image_info
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+   :functions: fpga_image_info_alloc
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+   :functions: fpga_image_info_free
diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst
index f30333ce828e..0529b2d2231a 100644
--- a/Documentation/driver-api/fpga/fpga-region.rst
+++ b/Documentation/driver-api/fpga/fpga-region.rst
@@ -34,41 +34,6 @@ fpga_image_info including:
  * flags indicating specifics such as whether the image is for partial
    reconfiguration.
 
-How to program an FPGA using a region
--------------------------------------
-
-First, allocate the info struct::
-
-	info = fpga_image_info_alloc(dev);
-	if (!info)
-		return -ENOMEM;
-
-Set flags as needed, i.e.::
-
-	info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
-
-Point to your FPGA image, such as::
-
-	info->sgt = &sgt;
-
-Add info to region and do the programming::
-
-	region->info = info;
-	ret = fpga_region_program_fpga(region);
-
-:c:func:`fpga_region_program_fpga()` operates on info passed in the
-fpga_image_info (region->info). This function will attempt to:
-
- * lock the region's mutex
- * lock the region's FPGA manager
- * build a list of FPGA bridges if a method has been specified to do so
- * disable the bridges
- * program the FPGA
- * re-enable the bridges
- * release the locks
-
-Then you will want to enumerate whatever hardware has appeared in the FPGA.
-
 How to add a new FPGA region
 ----------------------------
 
@@ -77,26 +42,62 @@ An example of usage can be seen in the probe function of [#f2]_.
 .. [#f1] ../devicetree/bindings/fpga/fpga-region.txt
 .. [#f2] ../../drivers/fpga/of-fpga-region.c
 
-API to program an FPGA
-----------------------
-
-.. kernel-doc:: drivers/fpga/fpga-region.c
-   :functions: fpga_region_program_fpga
-
 API to add a new FPGA region
 ----------------------------
 
+* struct :c:type:`fpga_region` — The FPGA region struct
+* :c:func:`devm_fpga_region_create` — Allocate and init a region struct
+* :c:func:`fpga_region_register` — Register an FPGA region
+* :c:func:`fpga_region_unregister` — Unregister an FPGA region
+
+The FPGA region's probe function will need to get a reference to the FPGA
+Manager it will be using to do the programming. This usually would happen
+during the region's probe function.
+
+* :c:func:`fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count
+* :c:func:`of_fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count,
+  given a device node.
+* :c:func:`fpga_mgr_put` — Put an FPGA manager
+
+The FPGA region will need to specify which bridges to control while programming
+the FPGA. The region driver can build a list of bridges during probe time
+(:c:member:`fpga_region->bridge_list`) or it can have a function that creates
+the list of bridges to program just before programming
+(:c:member:`fpga_region->get_bridges`). The FPGA bridge framework supplies the
+following APIs to handle building or tearing down that list.
+
+* :c:func:`fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a
+  list
+* :c:func:`of_fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a
+  list, given a device node
+* :c:func:`fpga_bridges_put` — Given a list of bridges, put them
+
 .. kernel-doc:: include/linux/fpga/fpga-region.h
    :functions: fpga_region
 
 .. kernel-doc:: drivers/fpga/fpga-region.c
-   :functions: fpga_region_create
-
-.. kernel-doc:: drivers/fpga/fpga-region.c
-   :functions: fpga_region_free
+   :functions: devm_fpga_region_create
 
 .. kernel-doc:: drivers/fpga/fpga-region.c
    :functions: fpga_region_register
 
 .. kernel-doc:: drivers/fpga/fpga-region.c
    :functions: fpga_region_unregister
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+   :functions: fpga_mgr_get
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+   :functions: of_fpga_mgr_get
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+   :functions: fpga_mgr_put
+
+.. kernel-doc:: drivers/fpga/fpga-bridge.c
+   :functions: fpga_bridge_get_to_list
+
+.. kernel-doc:: drivers/fpga/fpga-bridge.c
+   :functions: of_fpga_bridge_get_to_list
+
+.. kernel-doc:: drivers/fpga/fpga-bridge.c
+   :functions: fpga_bridges_put
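
[Editor's note: tying the pieces above together, a hedged region-probe sketch,
not from the patch; mgr_node would come from your DT binding and
soc_get_bridges is a hypothetical fpga_region->get_bridges callback::

	#include <linux/fpga/fpga-mgr.h>
	#include <linux/fpga/fpga-region.h>
	#include <linux/platform_device.h>

	static int soc_region_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct fpga_region *region;
		struct fpga_manager *mgr;
		int ret;

		/* raises the manager's ref count; paired with fpga_mgr_put() */
		mgr = of_fpga_mgr_get(mgr_node);
		if (IS_ERR(mgr))
			return PTR_ERR(mgr);

		region = devm_fpga_region_create(dev, mgr, soc_get_bridges);
		if (!region) {
			ret = -ENOMEM;
			goto err_put;
		}

		platform_set_drvdata(pdev, region);

		ret = fpga_region_register(region);
		if (ret)
			goto err_put;

		return 0;

	err_put:
		fpga_mgr_put(mgr);
		return ret;
	}

]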
diff --git a/Documentation/driver-api/fpga/index.rst b/Documentation/driver-api/fpga/index.rst
index c51e5ebd544a..31a4773bd2e6 100644
--- a/Documentation/driver-api/fpga/index.rst
+++ b/Documentation/driver-api/fpga/index.rst
@@ -11,3 +11,5 @@ FPGA Subsystem
    fpga-mgr
    fpga-bridge
    fpga-region
+   fpga-programming
+
diff --git a/Documentation/driver-api/fpga/intro.rst b/Documentation/driver-api/fpga/intro.rst
index 50d1cab84950..f54c7dabcc7d 100644
--- a/Documentation/driver-api/fpga/intro.rst
+++ b/Documentation/driver-api/fpga/intro.rst
@@ -44,7 +44,7 @@ FPGA Region
 -----------
 
 If you are adding a new interface to the FPGA framework, add it on top
-of an FPGA region to allow the most reuse of your interface.
+of an FPGA region.
 
 The FPGA Region framework (fpga-region.c) associates managers and
 bridges as reconfigurable regions. A region may refer to the whole
diff --git a/Documentation/driver-api/soundwire/stream.rst b/Documentation/driver-api/soundwire/stream.rst
index 29121aa55fb9..26a6064503fd 100644
--- a/Documentation/driver-api/soundwire/stream.rst
+++ b/Documentation/driver-api/soundwire/stream.rst
@@ -101,6 +101,34 @@ interface. ::
 	+--------------------+          |                |
 	                                +----------------+
 
+Example 5: Stereo Stream with L and R channel is rendered by 2 Masters, each
+rendering one channel, and is received by two different Slaves, each
+receiving one channel. Both Masters and both Slaves are using single port. ::
+
+	+---------------+                    Clock Signal  +---------------+
+	|    Master     +----------------------------------+     Slave     |
+	|   Interface   |                                  |   Interface   |
+	|       1       |                                  |       1       |
+	|               |                    Data Signal   |               |
+	|       L       +----------------------------------+       L       |
+	|    (Data)     |     Data Direction               |    (Data)     |
+	+---------------+  +----------------------->       +---------------+
+
+	+---------------+                    Clock Signal  +---------------+
+	|    Master     +----------------------------------+     Slave     |
+	|   Interface   |                                  |   Interface   |
+	|       2       |                                  |       2       |
+	|               |                    Data Signal   |               |
+	|       R       +----------------------------------+       R       |
+	+    (Data)     |     Data Direction               |    (Data)     |
+	+---------------+  +----------------------->       +---------------+
+
+Note: In multi-link cases like above, to lock, one would acquire a global
+lock and then go on locking bus instances. But, in this case the caller
+framework(ASoC DPCM) guarantees that stream operations on a card are
+always serialized. So, there is no race condition and hence no need for
+global lock.
+
 SoundWire Stream Management flow
 ================================
 
@@ -174,6 +202,7 @@ per stream. From ASoC DPCM framework, this stream state maybe linked to
 .startup() operation.
 
   .. code-block:: c
+
      int sdw_alloc_stream(char * stream_name);
 
 
@@ -200,6 +229,7 @@ only be invoked once by respective Master(s) and Slave(s). From ASoC DPCM
 framework, this stream state is linked to .hw_params() operation.
 
   .. code-block:: c
+
      int sdw_stream_add_master(struct sdw_bus * bus,
 		struct sdw_stream_config * stream_config,
 		struct sdw_ports_config * ports_config,
@@ -245,6 +275,7 @@ stream. From ASoC DPCM framework, this stream state is linked to
 .prepare() operation.
 
   .. code-block:: c
+
      int sdw_prepare_stream(struct sdw_stream_runtime * stream);
 
 
@@ -274,6 +305,7 @@ stream. From ASoC DPCM framework, this stream state is linked to
 .trigger() start operation.
 
   .. code-block:: c
+
      int sdw_enable_stream(struct sdw_stream_runtime * stream);
 
 SDW_STREAM_DISABLED
@@ -301,6 +333,7 @@ per stream. From ASoC DPCM framework, this stream state is linked to
 .trigger() stop operation.
 
   .. code-block:: c
+
      int sdw_disable_stream(struct sdw_stream_runtime * stream);
 
 
@@ -325,6 +358,7 @@ per stream. From ASoC DPCM framework, this stream state is linked to
 .trigger() stop operation.
 
   .. code-block:: c
+
      int sdw_deprepare_stream(struct sdw_stream_runtime * stream);
 
 
@@ -349,6 +383,7 @@ all the Master(s) and Slave(s) associated with stream. From ASoC DPCM
 framework, this stream state is linked to .hw_free() operation.
 
   .. code-block:: c
+
      int sdw_stream_remove_master(struct sdw_bus * bus,
 		struct sdw_stream_runtime * stream);
      int sdw_stream_remove_slave(struct sdw_slave * slave,
@@ -361,6 +396,7 @@ stream assigned as part of ALLOCATED state.
 In .shutdown() the data structure maintaining stream state are freed up.
 
   .. code-block:: c
+
      void sdw_release_stream(struct sdw_stream_runtime * stream);
 
 Not Supported
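
[Editor's note: assembling the per-state calls above into one Master-side
sequence. This is a hedged sketch, not from the patch; the add_master
prototype below follows drivers/soundwire/stream.c rather than the abbreviated
form quoted in the document, and bus/sconfig/pconfig are assumed to be filled
in elsewhere::

	#include <linux/soundwire/sdw.h>

	static int start_playback(struct sdw_bus *bus,
				  struct sdw_stream_config *sconfig,
				  struct sdw_port_config *pconfig)
	{
		struct sdw_stream_runtime *stream;
		int ret;

		stream = sdw_alloc_stream("playback");	/* ALLOCATED */
		if (!stream)
			return -ENOMEM;

		ret = sdw_stream_add_master(bus, sconfig, pconfig, 1, stream);
		if (ret)				/* CONFIGURED on success */
			goto release;

		ret = sdw_prepare_stream(stream);	/* PREPARED */
		if (ret)
			goto remove;

		ret = sdw_enable_stream(stream);	/* ENABLED */
		if (ret)
			goto deprepare;

		return 0;

	deprepare:
		sdw_deprepare_stream(stream);
	remove:
		sdw_stream_remove_master(bus, stream);
	release:
		sdw_release_stream(stream);
		return ret;
	}

]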
diff --git a/Documentation/driver-api/uio-howto.rst b/Documentation/driver-api/uio-howto.rst
index fb2eb73be4a3..25f50eace28b 100644
--- a/Documentation/driver-api/uio-howto.rst
+++ b/Documentation/driver-api/uio-howto.rst
@@ -463,8 +463,8 @@ Getting information about your UIO device
 
 Information about all UIO devices is available in sysfs. The first thing
 you should do in your driver is check ``name`` and ``version`` to make
-sure your talking to the right device and that its kernel driver has the
-version you expect.
+sure you're talking to the right device and that its kernel driver has
+the version you expect.
 
 You should also make sure that the memory mapping you need exists and
 has the size you expect.
diff --git a/Documentation/nvmem/nvmem.txt b/Documentation/nvmem/nvmem.txt
index 8d8d8f58f96f..fc2fe4b18655 100644
--- a/Documentation/nvmem/nvmem.txt
+++ b/Documentation/nvmem/nvmem.txt
@@ -58,6 +58,37 @@ static int qfprom_probe(struct platform_device *pdev)
 It is mandatory that the NVMEM provider has a regmap associated with its
 struct device. Failure to do would return error code from nvmem_register().
 
+Users of board files can define and register nvmem cells using the
+nvmem_cell_table struct:
+
+static struct nvmem_cell_info foo_nvmem_cells[] = {
+	{
+		.name		= "macaddr",
+		.offset		= 0x7f00,
+		.bytes		= ETH_ALEN,
+	}
+};
+
+static struct nvmem_cell_table foo_nvmem_cell_table = {
+	.nvmem_name	= "i2c-eeprom",
+	.cells		= foo_nvmem_cells,
+	.ncells		= ARRAY_SIZE(foo_nvmem_cells),
+};
+
+nvmem_add_cell_table(&foo_nvmem_cell_table);
+
+Additionally it is possible to create nvmem cell lookup entries and register
+them with the nvmem framework from machine code as shown in the example below:
+
+static struct nvmem_cell_lookup foo_nvmem_lookup = {
+	.nvmem_name	= "i2c-eeprom",
+	.cell_name	= "macaddr",
+	.dev_id		= "foo_mac.0",
+	.con_id		= "mac-address",
+};
+
+nvmem_add_cell_lookups(&foo_nvmem_lookup, 1);
+
 NVMEM Consumers
 +++++++++++++++
 
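
[Editor's note: the consumer-side counterpart, a hedged sketch not from the
patch, picking up the cell registered above through the lookup's con_id::

	#include <linux/if_ether.h>
	#include <linux/nvmem-consumer.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int foo_mac_read_addr(struct device *dev, u8 *addr)
	{
		struct nvmem_cell *cell;
		size_t len;
		void *buf;

		/* "mac-address" matches .con_id in the lookup above */
		cell = nvmem_cell_get(dev, "mac-address");
		if (IS_ERR(cell))
			return PTR_ERR(cell);

		/* nvmem_cell_read() returns a kmalloc'ed copy of the cell */
		buf = nvmem_cell_read(cell, &len);
		nvmem_cell_put(cell);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		if (len < ETH_ALEN) {
			kfree(buf);
			return -EINVAL;
		}
		memcpy(addr, buf, ETH_ALEN);
		kfree(buf);
		return 0;
	}

]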
diff --git a/Documentation/trace/stm.rst b/Documentation/trace/stm.rst
index 2c22ddb7fd3e..99f99963e5e7 100644
--- a/Documentation/trace/stm.rst
+++ b/Documentation/trace/stm.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ===================
 System Trace Module
 ===================
@@ -53,12 +55,30 @@ under "user" directory from the example above and this new rule will
 be used for trace sources with the id string of "user/dummy".
 
 Trace sources have to open the stm class device's node and write their
-trace data into its file descriptor. In order to identify themselves
-to the policy, they need to do a STP_POLICY_ID_SET ioctl on this file
-descriptor providing their id string. Otherwise, they will be
-automatically allocated a master/channel pair upon first write to this
-file descriptor according to the "default" rule of the policy, if such
-exists.
+trace data into its file descriptor.
+
+In order to find an appropriate policy node for a given trace source,
+several mechanisms can be used. First, a trace source can explicitly
+identify itself by calling an STP_POLICY_ID_SET ioctl on the character
+device's file descriptor, providing their id string, before they write
+any data there. Secondly, if they chose not to perform the explicit
+identification (because you may not want to patch existing software
+to do this), they can just start writing the data, at which point the
+stm core will try to find a policy node with the name matching the
+task's name (e.g., "syslogd") and if one exists, it will be used.
+Thirdly, if the task name can't be found among the policy nodes, the
+catch-all entry "default" will be used, if it exists. This entry also
+needs to be created and configured by the system administrator or
+whatever tools are taking care of the policy configuration. Finally,
+if all the above steps failed, the write() to an stm file descriptor
+will return a error (EINVAL).
+
+Previously, if no policy nodes were found for a trace source, the stm
+class would silently fall back to allocating the first available
+contiguous range of master/channels from the beginning of the device's
+master/channel range. The new requirement for a policy node to exist
+will help programmers and sysadmins identify gaps in configuration
+and have better control over the un-identified sources.
 
 Some STM devices may allow direct mapping of the channel mmio regions
 to userspace for zero-copy writing. One mappable page (in terms of
@@ -92,9 +112,9 @@ allocated for the device according to the policy configuration. If
 there's a node in the root of the policy directory that matches the
 stm_source device's name (for example, "console"), this node will be
 used to allocate master and channel numbers. If there's no such policy
-node, the stm core will pick the first contiguous chunk of channels
-within the first available master. Note that the node must exist
-before the stm_source device is connected to its stm device.
+node, the stm core will use the catch-all entry "default", if one
+exists. If neither policy nodes exist, the write() to stm_source_link
+will return an error.
 
 stm_console
 ===========
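
[Editor's note: to make the explicit identification step concrete, a hedged
userspace sketch, not part of the patch, of a trace source issuing
STP_POLICY_ID_SET before writing; the /dev/0-sth node and the "user/dummy"
id string are illustrative::

	#include <fcntl.h>
	#include <linux/stm.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		static const char id[] = "user/dummy";
		struct stp_policy_id *pol;
		size_t sz = sizeof(*pol) + sizeof(id);
		int fd;

		fd = open("/dev/0-sth", O_RDWR);
		if (fd < 0)
			return 1;

		pol = calloc(1, sz);
		if (!pol)
			return 1;
		pol->size = sz;
		pol->width = 1;			/* one channel is enough here */
		memcpy(pol->id, id, sizeof(id));

		/* ask the policy for a master/channel matching "user/dummy" */
		if (ioctl(fd, STP_POLICY_ID_SET, pol) < 0) {
			free(pol);
			return 1;
		}

		write(fd, "hello", 5);		/* trace data */
		close(fd);
		free(pol);
		return 0;
	}

]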
diff --git a/Documentation/trace/sys-t.rst b/Documentation/trace/sys-t.rst
new file mode 100644
index 000000000000..3d8eb92735e9
--- /dev/null
+++ b/Documentation/trace/sys-t.rst
@@ -0,0 +1,62 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+MIPI SyS-T over STP
+===================
+
+The MIPI SyS-T protocol driver can be used with STM class devices to
+generate standardized trace stream. Aside from being a standard, it
+provides better trace source identification and timestamp correlation.
+
+In order to use the MIPI SyS-T protocol driver with your STM device,
+first, you'll need CONFIG_STM_PROTO_SYS_T.
+
+Now, you can select which protocol driver you want to use when you create
+a policy for your STM device, by specifying it in the policy name:
+
+# mkdir /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/
+
+In other words, the policy name format is extended like this:
+
+  <device_name>:<protocol_name>.<policy_name>
+
+With Intel TH, therefore it can look like "0-sth:p_sys-t.my-policy".
+
+If the protocol name is omitted, the STM class will chose whichever
+protocol driver was loaded first.
+
+You can also double check that everything is working as expected by
+
+# cat /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/protocol
+p_sys-t
+
+Now, with the MIPI SyS-T protocol driver, each policy node in the
+configfs gets a few additional attributes, which determine per-source
+parameters specific to the protocol:
+
+# mkdir /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/default
+# ls /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/default
+channels
+clocksync_interval
+do_len
+masters
+ts_interval
+uuid
+
+The most important one here is the "uuid", which determines the UUID
+that will be used to tag all data coming from this source. It is
+automatically generated when a new node is created, but it is likely
+that you would want to change it.
+
+do_len switches on/off the additional "payload length" field in the
+MIPI SyS-T message header. It is off by default as the STP already
+marks message boundaries.
+
+ts_interval and clocksync_interval determine how much time in milliseconds
+can pass before we need to include a protocol (not transport, aka STP)
+timestamp in a message header or send a CLOCKSYNC packet, respectively.
+
+See Documentation/ABI/testing/configfs-stp-policy-p_sys-t for more
+details.
+
+* [1] https://www.mipi.org/specifications/sys-t
diff --git a/MAINTAINERS b/MAINTAINERS
index 554941e05171..fdb6a298c7e7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -932,6 +932,7 @@ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:	Arve Hjønnevåg <arve@android.com>
 M:	Todd Kjos <tkjos@android.com>
 M:	Martijn Coenen <maco@android.com>
+M:	Joel Fernandes <joel@joelfernandes.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 L:	devel@driverdev.osuosl.org
 S:	Supported
@@ -13757,7 +13758,7 @@ F: sound/soc/
 F:	include/sound/soc*
 
 SOUNDWIRE SUBSYSTEM
-M:	Vinod Koul <vinod.koul@intel.com>
+M:	Vinod Koul <vkoul@kernel.org>
 M:	Sanyog Kale <sanyog.r.kale@intel.com>
 R:	Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -15523,13 +15524,19 @@ F: arch/x86/um/
 F:	fs/hostfs/
 F:	fs/hppfs/
 
+USERSPACE COPYIN/COPYOUT (UIOVEC)
+M:	Alexander Viro <viro@zeniv.linux.org.uk>
+S:	Maintained
+F:	lib/iov_iter.c
+F:	include/linux/uio.h
+
 USERSPACE I/O (UIO)
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:	Documentation/driver-api/uio-howto.rst
 F:	drivers/uio/
-F:	include/linux/uio*.h
+F:	include/linux/uio_driver.h
 
 UTIL-LINUX PACKAGE
 M:	Karel Zak <kzak@redhat.com>
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 432e9ad77070..51e8250d113f 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
 
 config ANDROID_BINDER_IPC
 	bool "Android Binder IPC Driver"
-	depends on MMU
+	depends on MMU && !CPU_CACHE_VIVT
 	default n
 	---help---
 	  Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d58763b6b009..cb30a524d16d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -71,6 +71,7 @@
71#include <linux/security.h> 71#include <linux/security.h>
72#include <linux/spinlock.h> 72#include <linux/spinlock.h>
73#include <linux/ratelimit.h> 73#include <linux/ratelimit.h>
74#include <linux/syscalls.h>
74 75
75#include <uapi/linux/android/binder.h> 76#include <uapi/linux/android/binder.h>
76 77
@@ -457,9 +458,8 @@ struct binder_ref {
457}; 458};
458 459
459enum binder_deferred_state { 460enum binder_deferred_state {
460 BINDER_DEFERRED_PUT_FILES = 0x01, 461 BINDER_DEFERRED_FLUSH = 0x01,
461 BINDER_DEFERRED_FLUSH = 0x02, 462 BINDER_DEFERRED_RELEASE = 0x02,
462 BINDER_DEFERRED_RELEASE = 0x04,
463}; 463};
464 464
465/** 465/**
@@ -480,9 +480,6 @@ enum binder_deferred_state {
480 * (invariant after initialized) 480 * (invariant after initialized)
481 * @tsk task_struct for group_leader of process 481 * @tsk task_struct for group_leader of process
482 * (invariant after initialized) 482 * (invariant after initialized)
483 * @files files_struct for process
484 * (protected by @files_lock)
485 * @files_lock mutex to protect @files
486 * @deferred_work_node: element for binder_deferred_list 483 * @deferred_work_node: element for binder_deferred_list
487 * (protected by binder_deferred_lock) 484 * (protected by binder_deferred_lock)
488 * @deferred_work: bitmap of deferred work to perform 485 * @deferred_work: bitmap of deferred work to perform
@@ -527,8 +524,6 @@ struct binder_proc {
527 struct list_head waiting_threads; 524 struct list_head waiting_threads;
528 int pid; 525 int pid;
529 struct task_struct *tsk; 526 struct task_struct *tsk;
530 struct files_struct *files;
531 struct mutex files_lock;
532 struct hlist_node deferred_work_node; 527 struct hlist_node deferred_work_node;
533 int deferred_work; 528 int deferred_work;
534 bool is_dead; 529 bool is_dead;
@@ -611,6 +606,23 @@ struct binder_thread {
611 bool is_dead; 606 bool is_dead;
612}; 607};
613 608
609/**
610 * struct binder_txn_fd_fixup - transaction fd fixup list element
611 * @fixup_entry: list entry
612 * @file: struct file to be associated with new fd
613 * @offset: offset in buffer data to this fixup
614 *
615 * List element for fd fixups in a transaction. Since file
616 * descriptors need to be allocated in the context of the
617 * target process, we pass each fd to be processed in this
618 * struct.
619 */
620struct binder_txn_fd_fixup {
621 struct list_head fixup_entry;
622 struct file *file;
623 size_t offset;
624};
625
614struct binder_transaction { 626struct binder_transaction {
615 int debug_id; 627 int debug_id;
616 struct binder_work work; 628 struct binder_work work;
@@ -628,6 +640,7 @@ struct binder_transaction {
628 long priority; 640 long priority;
629 long saved_priority; 641 long saved_priority;
630 kuid_t sender_euid; 642 kuid_t sender_euid;
643 struct list_head fd_fixups;
631 /** 644 /**
632 * @lock: protects @from, @to_proc, and @to_thread 645 * @lock: protects @from, @to_proc, and @to_thread
633 * 646 *
@@ -822,6 +835,7 @@ static void
822binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, 835binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
823 struct binder_work *work) 836 struct binder_work *work)
824{ 837{
838 WARN_ON(!list_empty(&thread->waiting_thread_node));
825 binder_enqueue_work_ilocked(work, &thread->todo); 839 binder_enqueue_work_ilocked(work, &thread->todo);
826} 840}
827 841
@@ -839,6 +853,7 @@ static void
839binder_enqueue_thread_work_ilocked(struct binder_thread *thread, 853binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
840 struct binder_work *work) 854 struct binder_work *work)
841{ 855{
856 WARN_ON(!list_empty(&thread->waiting_thread_node));
842 binder_enqueue_work_ilocked(work, &thread->todo); 857 binder_enqueue_work_ilocked(work, &thread->todo);
843 thread->process_todo = true; 858 thread->process_todo = true;
844} 859}
@@ -920,66 +935,6 @@ static void binder_free_thread(struct binder_thread *thread);
920static void binder_free_proc(struct binder_proc *proc); 935static void binder_free_proc(struct binder_proc *proc);
921static void binder_inc_node_tmpref_ilocked(struct binder_node *node); 936static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
922 937
923static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
924{
925 unsigned long rlim_cur;
926 unsigned long irqs;
927 int ret;
928
929 mutex_lock(&proc->files_lock);
930 if (proc->files == NULL) {
931 ret = -ESRCH;
932 goto err;
933 }
934 if (!lock_task_sighand(proc->tsk, &irqs)) {
935 ret = -EMFILE;
936 goto err;
937 }
938 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
939 unlock_task_sighand(proc->tsk, &irqs);
940
941 ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
942err:
943 mutex_unlock(&proc->files_lock);
944 return ret;
945}
946
947/*
948 * copied from fd_install
949 */
950static void task_fd_install(
951 struct binder_proc *proc, unsigned int fd, struct file *file)
952{
953 mutex_lock(&proc->files_lock);
954 if (proc->files)
955 __fd_install(proc->files, fd, file);
956 mutex_unlock(&proc->files_lock);
957}
958
959/*
960 * copied from sys_close
961 */
962static long task_close_fd(struct binder_proc *proc, unsigned int fd)
963{
964 int retval;
965
966 mutex_lock(&proc->files_lock);
967 if (proc->files == NULL) {
968 retval = -ESRCH;
969 goto err;
970 }
971 retval = __close_fd(proc->files, fd);
972 /* can't restart close syscall because file table entry was cleared */
973 if (unlikely(retval == -ERESTARTSYS ||
974 retval == -ERESTARTNOINTR ||
975 retval == -ERESTARTNOHAND ||
976 retval == -ERESTART_RESTARTBLOCK))
977 retval = -EINTR;
978err:
979 mutex_unlock(&proc->files_lock);
980 return retval;
981}
982
983static bool binder_has_work_ilocked(struct binder_thread *thread, 938static bool binder_has_work_ilocked(struct binder_thread *thread,
984 bool do_proc_work) 939 bool do_proc_work)
985{ 940{
@@ -1270,19 +1225,12 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1270 } else 1225 } else
1271 node->local_strong_refs++; 1226 node->local_strong_refs++;
1272 if (!node->has_strong_ref && target_list) { 1227 if (!node->has_strong_ref && target_list) {
1228 struct binder_thread *thread = container_of(target_list,
1229 struct binder_thread, todo);
1273 binder_dequeue_work_ilocked(&node->work); 1230 binder_dequeue_work_ilocked(&node->work);
1274 /* 1231 BUG_ON(&thread->todo != target_list);
1275 * Note: this function is the only place where we queue 1232 binder_enqueue_deferred_thread_work_ilocked(thread,
1276 * directly to a thread->todo without using the 1233 &node->work);
1277 * corresponding binder_enqueue_thread_work() helper
1278 * functions; in this case it's ok to not set the
1279 * process_todo flag, since we know this node work will
1280 * always be followed by other work that starts queue
1281 * processing: in case of synchronous transactions, a
1282 * BR_REPLY or BR_ERROR; in case of oneway
1283 * transactions, a BR_TRANSACTION_COMPLETE.
1284 */
1285 binder_enqueue_work_ilocked(&node->work, target_list);
1286 } 1234 }
1287 } else { 1235 } else {
1288 if (!internal) 1236 if (!internal)
@@ -1958,10 +1906,32 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
1958 return NULL; 1906 return NULL;
1959} 1907}
1960 1908
1909/**
1910 * binder_free_txn_fixups() - free unprocessed fd fixups
1911 * @t: binder transaction for t->from
1912 *
1913 * If the transaction is being torn down prior to being
1914 * processed by the target process, free all of the
1915 * fd fixups and fput the file structs. It is safe to
1916 * call this function after the fixups have been
1917 * processed -- in that case, the list will be empty.
1918 */
1919static void binder_free_txn_fixups(struct binder_transaction *t)
1920{
1921 struct binder_txn_fd_fixup *fixup, *tmp;
1922
1923 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1924 fput(fixup->file);
1925 list_del(&fixup->fixup_entry);
1926 kfree(fixup);
1927 }
1928}
1929
1961static void binder_free_transaction(struct binder_transaction *t) 1930static void binder_free_transaction(struct binder_transaction *t)
1962{ 1931{
1963 if (t->buffer) 1932 if (t->buffer)
1964 t->buffer->transaction = NULL; 1933 t->buffer->transaction = NULL;
1934 binder_free_txn_fixups(t);
1965 kfree(t); 1935 kfree(t);
1966 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1936 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1967} 1937}
@@ -2262,12 +2232,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2262 } break; 2232 } break;
2263 2233
2264 case BINDER_TYPE_FD: { 2234 case BINDER_TYPE_FD: {
2265 struct binder_fd_object *fp = to_binder_fd_object(hdr); 2235 /*
2266 2236 * No need to close the file here since user-space
2267 binder_debug(BINDER_DEBUG_TRANSACTION, 2237 * closes it for for successfully delivered
2268 " fd %d\n", fp->fd); 2238 * transactions. For transactions that weren't
2269 if (failed_at) 2239 * delivered, the new fd was never allocated so
2270 task_close_fd(proc, fp->fd); 2240 * there is no need to close and the fput on the
2241 * file is done when the transaction is torn
2242 * down.
2243 */
2244 WARN_ON(failed_at &&
2245 proc->tsk == current->group_leader);
2271 } break; 2246 } break;
2272 case BINDER_TYPE_PTR: 2247 case BINDER_TYPE_PTR:
2273 /* 2248 /*
@@ -2283,6 +2258,15 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2283 size_t fd_index; 2258 size_t fd_index;
2284 binder_size_t fd_buf_size; 2259 binder_size_t fd_buf_size;
2285 2260
2261 if (proc->tsk != current->group_leader) {
2262 /*
2263 * Nothing to do if running in sender context
2264 * The fd fixups have not been applied so no
2265 * fds need to be closed.
2266 */
2267 continue;
2268 }
2269
2286 fda = to_binder_fd_array_object(hdr); 2270 fda = to_binder_fd_array_object(hdr);
2287 parent = binder_validate_ptr(buffer, fda->parent, 2271 parent = binder_validate_ptr(buffer, fda->parent,
2288 off_start, 2272 off_start,
@@ -2315,7 +2299,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2315 } 2299 }
2316 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); 2300 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2317 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2301 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2318 task_close_fd(proc, fd_array[fd_index]); 2302 ksys_close(fd_array[fd_index]);
2319 } break; 2303 } break;
2320 default: 2304 default:
2321 pr_err("transaction release %d bad object type %x\n", 2305 pr_err("transaction release %d bad object type %x\n",
@@ -2447,17 +2431,18 @@ done:
2447 return ret; 2431 return ret;
2448} 2432}
2449 2433
2450static int binder_translate_fd(int fd, 2434static int binder_translate_fd(u32 *fdp,
2451 struct binder_transaction *t, 2435 struct binder_transaction *t,
2452 struct binder_thread *thread, 2436 struct binder_thread *thread,
2453 struct binder_transaction *in_reply_to) 2437 struct binder_transaction *in_reply_to)
2454{ 2438{
2455 struct binder_proc *proc = thread->proc; 2439 struct binder_proc *proc = thread->proc;
2456 struct binder_proc *target_proc = t->to_proc; 2440 struct binder_proc *target_proc = t->to_proc;
2457 int target_fd; 2441 struct binder_txn_fd_fixup *fixup;
2458 struct file *file; 2442 struct file *file;
2459 int ret; 2443 int ret = 0;
2460 bool target_allows_fd; 2444 bool target_allows_fd;
2445 int fd = *fdp;
2461 2446
2462 if (in_reply_to) 2447 if (in_reply_to)
2463 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); 2448 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
@@ -2485,19 +2470,24 @@ static int binder_translate_fd(int fd,
2485 goto err_security; 2470 goto err_security;
2486 } 2471 }
2487 2472
2488 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 2473 /*
2489 if (target_fd < 0) { 2474 * Add fixup record for this transaction. The allocation
2475 * of the fd in the target needs to be done from a
2476 * target thread.
2477 */
2478 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2479 if (!fixup) {
2490 ret = -ENOMEM; 2480 ret = -ENOMEM;
2491 goto err_get_unused_fd; 2481 goto err_alloc;
2492 } 2482 }
2493 task_fd_install(target_proc, target_fd, file); 2483 fixup->file = file;
2494 trace_binder_transaction_fd(t, fd, target_fd); 2484 fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
2495 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n", 2485 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2496 fd, target_fd); 2486 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2497 2487
2498 return target_fd; 2488 return ret;
2499 2489
2500err_get_unused_fd: 2490err_alloc:
2501err_security: 2491err_security:
2502 fput(file); 2492 fput(file);
2503err_fget: 2493err_fget:
@@ -2511,8 +2501,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2511 struct binder_thread *thread, 2501 struct binder_thread *thread,
2512 struct binder_transaction *in_reply_to) 2502 struct binder_transaction *in_reply_to)
2513{ 2503{
2514 binder_size_t fdi, fd_buf_size, num_installed_fds; 2504 binder_size_t fdi, fd_buf_size;
2515 int target_fd;
2516 uintptr_t parent_buffer; 2505 uintptr_t parent_buffer;
2517 u32 *fd_array; 2506 u32 *fd_array;
2518 struct binder_proc *proc = thread->proc; 2507 struct binder_proc *proc = thread->proc;
@@ -2544,23 +2533,12 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2544 return -EINVAL; 2533 return -EINVAL;
2545 } 2534 }
2546 for (fdi = 0; fdi < fda->num_fds; fdi++) { 2535 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2547 target_fd = binder_translate_fd(fd_array[fdi], t, thread, 2536 int ret = binder_translate_fd(&fd_array[fdi], t, thread,
2548 in_reply_to); 2537 in_reply_to);
2549 if (target_fd < 0) 2538 if (ret < 0)
2550 goto err_translate_fd_failed; 2539 return ret;
2551 fd_array[fdi] = target_fd;
2552 } 2540 }
2553 return 0; 2541 return 0;
2554
2555err_translate_fd_failed:
2556 /*
2557 * Failed to allocate fd or security error, free fds
2558 * installed so far.
2559 */
2560 num_installed_fds = fdi;
2561 for (fdi = 0; fdi < num_installed_fds; fdi++)
2562 task_close_fd(target_proc, fd_array[fdi]);
2563 return target_fd;
2564} 2542}
2565 2543
2566static int binder_fixup_parent(struct binder_transaction *t, 2544static int binder_fixup_parent(struct binder_transaction *t,
@@ -2723,6 +2701,7 @@ static void binder_transaction(struct binder_proc *proc,
2723{ 2701{
2724 int ret; 2702 int ret;
2725 struct binder_transaction *t; 2703 struct binder_transaction *t;
2704 struct binder_work *w;
2726 struct binder_work *tcomplete; 2705 struct binder_work *tcomplete;
2727 binder_size_t *offp, *off_end, *off_start; 2706 binder_size_t *offp, *off_end, *off_start;
2728 binder_size_t off_min; 2707 binder_size_t off_min;
@@ -2864,6 +2843,29 @@ static void binder_transaction(struct binder_proc *proc,
2864 goto err_invalid_target_handle; 2843 goto err_invalid_target_handle;
2865 } 2844 }
2866 binder_inner_proc_lock(proc); 2845 binder_inner_proc_lock(proc);
2846
2847 w = list_first_entry_or_null(&thread->todo,
2848 struct binder_work, entry);
2849 if (!(tr->flags & TF_ONE_WAY) && w &&
2850 w->type == BINDER_WORK_TRANSACTION) {
2851 /*
2852 * Do not allow new outgoing transaction from a
2853 * thread that has a transaction at the head of
2854 * its todo list. Only need to check the head
2855 * because binder_select_thread_ilocked picks a
2856 * thread from proc->waiting_threads to enqueue
2857 * the transaction, and nothing is queued to the
2858 * todo list while the thread is on waiting_threads.
2859 */
2860 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2861 proc->pid, thread->pid);
2862 binder_inner_proc_unlock(proc);
2863 return_error = BR_FAILED_REPLY;
2864 return_error_param = -EPROTO;
2865 return_error_line = __LINE__;
2866 goto err_bad_todo_list;
2867 }
2868
2867 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 2869 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2868 struct binder_transaction *tmp; 2870 struct binder_transaction *tmp;
2869 2871
@@ -2911,6 +2913,7 @@ static void binder_transaction(struct binder_proc *proc,
2911 return_error_line = __LINE__; 2913 return_error_line = __LINE__;
2912 goto err_alloc_t_failed; 2914 goto err_alloc_t_failed;
2913 } 2915 }
2916 INIT_LIST_HEAD(&t->fd_fixups);
2914 binder_stats_created(BINDER_STAT_TRANSACTION); 2917 binder_stats_created(BINDER_STAT_TRANSACTION);
2915 spin_lock_init(&t->lock); 2918 spin_lock_init(&t->lock);
2916 2919
@@ -3066,17 +3069,16 @@ static void binder_transaction(struct binder_proc *proc,
3066 3069
3067 case BINDER_TYPE_FD: { 3070 case BINDER_TYPE_FD: {
3068 struct binder_fd_object *fp = to_binder_fd_object(hdr); 3071 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3069 int target_fd = binder_translate_fd(fp->fd, t, thread, 3072 int ret = binder_translate_fd(&fp->fd, t, thread,
3070 in_reply_to); 3073 in_reply_to);
3071 3074
3072 if (target_fd < 0) { 3075 if (ret < 0) {
3073 return_error = BR_FAILED_REPLY; 3076 return_error = BR_FAILED_REPLY;
3074 return_error_param = target_fd; 3077 return_error_param = ret;
3075 return_error_line = __LINE__; 3078 return_error_line = __LINE__;
3076 goto err_translate_failed; 3079 goto err_translate_failed;
3077 } 3080 }
3078 fp->pad_binder = 0; 3081 fp->pad_binder = 0;
3079 fp->fd = target_fd;
3080 } break; 3082 } break;
3081 case BINDER_TYPE_FDA: { 3083 case BINDER_TYPE_FDA: {
3082 struct binder_fd_array_object *fda = 3084 struct binder_fd_array_object *fda =
@@ -3233,6 +3235,7 @@ err_bad_object_type:
3233err_bad_offset: 3235err_bad_offset:
3234err_bad_parent: 3236err_bad_parent:
3235err_copy_data_failed: 3237err_copy_data_failed:
3238 binder_free_txn_fixups(t);
3236 trace_binder_transaction_failed_buffer_release(t->buffer); 3239 trace_binder_transaction_failed_buffer_release(t->buffer);
3237 binder_transaction_buffer_release(target_proc, t->buffer, offp); 3240 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3238 if (target_node) 3241 if (target_node)
@@ -3247,6 +3250,7 @@ err_alloc_tcomplete_failed:
3247 kfree(t); 3250 kfree(t);
3248 binder_stats_deleted(BINDER_STAT_TRANSACTION); 3251 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3249err_alloc_t_failed: 3252err_alloc_t_failed:
3253err_bad_todo_list:
3250err_bad_call_stack: 3254err_bad_call_stack:
3251err_empty_call_stack: 3255err_empty_call_stack:
3252err_dead_binder: 3256err_dead_binder:
@@ -3294,6 +3298,47 @@ err_invalid_target_handle:
3294 } 3298 }
3295} 3299}
3296 3300
3301/**
3302 * binder_free_buf() - free the specified buffer
3303 * @proc: binder proc that owns buffer
3304 * @buffer: buffer to be freed
3305 *
3306 * If buffer for an async transaction, enqueue the next async
3307 * transaction from the node.
3308 *
3309 * Cleanup buffer and free it.
3310 */
3311static void
3312binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3313{
3314 if (buffer->transaction) {
3315 buffer->transaction->buffer = NULL;
3316 buffer->transaction = NULL;
3317 }
3318 if (buffer->async_transaction && buffer->target_node) {
3319 struct binder_node *buf_node;
3320 struct binder_work *w;
3321
3322 buf_node = buffer->target_node;
3323 binder_node_inner_lock(buf_node);
3324 BUG_ON(!buf_node->has_async_transaction);
3325 BUG_ON(buf_node->proc != proc);
3326 w = binder_dequeue_work_head_ilocked(
3327 &buf_node->async_todo);
3328 if (!w) {
3329 buf_node->has_async_transaction = false;
3330 } else {
3331 binder_enqueue_work_ilocked(
3332 w, &proc->todo);
3333 binder_wakeup_proc_ilocked(proc);
3334 }
3335 binder_node_inner_unlock(buf_node);
3336 }
3337 trace_binder_transaction_buffer_release(buffer);
3338 binder_transaction_buffer_release(proc, buffer, NULL);
3339 binder_alloc_free_buf(&proc->alloc, buffer);
3340}
3341
3297static int binder_thread_write(struct binder_proc *proc, 3342static int binder_thread_write(struct binder_proc *proc,
3298 struct binder_thread *thread, 3343 struct binder_thread *thread,
3299 binder_uintptr_t binder_buffer, size_t size, 3344 binder_uintptr_t binder_buffer, size_t size,
@@ -3480,33 +3525,7 @@ static int binder_thread_write(struct binder_proc *proc,
3480 proc->pid, thread->pid, (u64)data_ptr, 3525 proc->pid, thread->pid, (u64)data_ptr,
3481 buffer->debug_id, 3526 buffer->debug_id,
3482 buffer->transaction ? "active" : "finished"); 3527 buffer->transaction ? "active" : "finished");
3483 3528 binder_free_buf(proc, buffer);
3484 if (buffer->transaction) {
3485 buffer->transaction->buffer = NULL;
3486 buffer->transaction = NULL;
3487 }
3488 if (buffer->async_transaction && buffer->target_node) {
3489 struct binder_node *buf_node;
3490 struct binder_work *w;
3491
3492 buf_node = buffer->target_node;
3493 binder_node_inner_lock(buf_node);
3494 BUG_ON(!buf_node->has_async_transaction);
3495 BUG_ON(buf_node->proc != proc);
3496 w = binder_dequeue_work_head_ilocked(
3497 &buf_node->async_todo);
3498 if (!w) {
3499 buf_node->has_async_transaction = false;
3500 } else {
3501 binder_enqueue_work_ilocked(
3502 w, &proc->todo);
3503 binder_wakeup_proc_ilocked(proc);
3504 }
3505 binder_node_inner_unlock(buf_node);
3506 }
3507 trace_binder_transaction_buffer_release(buffer);
3508 binder_transaction_buffer_release(proc, buffer, NULL);
3509 binder_alloc_free_buf(&proc->alloc, buffer);
3510 break; 3529 break;
3511 } 3530 }
3512 3531
@@ -3829,6 +3848,76 @@ static int binder_wait_for_work(struct binder_thread *thread,
3829 return ret; 3848 return ret;
3830} 3849}
3831 3850
3851/**
3852 * binder_apply_fd_fixups() - finish fd translation
3853 * @t: binder transaction with list of fd fixups
3854 *
3855 * Now that we are in the context of the transaction target
3856 * process, we can allocate and install fds. Process the
3857 * list of fds to translate and fixup the buffer with the
3858 * new fds.
3859 *
3860 * If we fail to allocate an fd, then free the resources by
3861 * fput'ing files that have not been processed and ksys_close'ing
3862 * any fds that have already been allocated.
3863 */
3864static int binder_apply_fd_fixups(struct binder_transaction *t)
3865{
3866 struct binder_txn_fd_fixup *fixup, *tmp;
3867 int ret = 0;
3868
3869 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3870 int fd = get_unused_fd_flags(O_CLOEXEC);
3871 u32 *fdp;
3872
3873 if (fd < 0) {
3874 binder_debug(BINDER_DEBUG_TRANSACTION,
3875 "failed fd fixup txn %d fd %d\n",
3876 t->debug_id, fd);
3877 ret = -ENOMEM;
3878 break;
3879 }
3880 binder_debug(BINDER_DEBUG_TRANSACTION,
3881 "fd fixup txn %d fd %d\n",
3882 t->debug_id, fd);
3883 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3884 fd_install(fd, fixup->file);
3885 fixup->file = NULL;
3886 fdp = (u32 *)(t->buffer->data + fixup->offset);
3887 /*
3888 * This store can cause problems for CPUs with a
3889 * VIVT cache (eg ARMv5) since the cache cannot
3890 * detect virtual aliases to the same physical cacheline.
3891 * To support VIVT, this address and the user-space VA
3892 * would both need to be flushed. Since this kernel
3893 * VA is not constructed via page_to_virt(), we can't
3894 * use flush_dcache_page() on it, so we'd have to use
3895 * an internal function. If devices with VIVT ever
3896 * need to run Android, we'll either need to go back
3897 * to patching the translated fd from the sender side
3898 * (using the non-standard kernel functions), or rework
3899 * how the kernel uses the buffer to use page_to_virt()
3900 * addresses instead of allocating in our own vm area.
3901 *
3902 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
3903 */
3904 *fdp = fd;
3905 }
3906 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3907 if (fixup->file) {
3908 fput(fixup->file);
3909 } else if (ret) {
3910 u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
3911
3912 ksys_close(*fdp);
3913 }
3914 list_del(&fixup->fixup_entry);
3915 kfree(fixup);
3916 }
3917
3918 return ret;
3919}
3920
3832static int binder_thread_read(struct binder_proc *proc, 3921static int binder_thread_read(struct binder_proc *proc,
3833 struct binder_thread *thread, 3922 struct binder_thread *thread,
3834 binder_uintptr_t binder_buffer, size_t size, 3923 binder_uintptr_t binder_buffer, size_t size,
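To make the new two-stage translation concrete, here is a hedged
userspace-side sketch of how a sender lays out an fd object in the
transaction buffer; the helper name and buffer handling are illustrative,
and only struct binder_fd_object and BINDER_TYPE_FD come from the uapi.
With this patch the sender's kernel records a binder_txn_fd_fixup (the
struct file plus this offset) instead of installing a target fd at once,
and binder_apply_fd_fixups() above later patches the receiver's fd into
the same word:

    #include <string.h>
    #include <linux/android/binder.h>  /* uapi: struct binder_fd_object */

    /* Illustrative helper, not part of the uapi. */
    static void pack_fd_object(void *txn_buf, size_t offset, int fd)
    {
            struct binder_fd_object obj;

            memset(&obj, 0, sizeof(obj));
            obj.hdr.type = BINDER_TYPE_FD;
            obj.fd = fd;  /* sender-side fd; the receiver gets its own */
            memcpy((char *)txn_buf + offset, &obj, sizeof(obj));
    }

The offset stored in the fixup is exactly this position of the fd word
relative to the start of the buffer data, which is also what the
binder_transaction_fd_send/recv tracepoints report.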
@@ -4110,6 +4199,34 @@ retry:
4110 tr.sender_pid = 0; 4199 tr.sender_pid = 0;
4111 } 4200 }
4112 4201
4202 ret = binder_apply_fd_fixups(t);
4203 if (ret) {
4204 struct binder_buffer *buffer = t->buffer;
4205 bool oneway = !!(t->flags & TF_ONE_WAY);
4206 int tid = t->debug_id;
4207
4208 if (t_from)
4209 binder_thread_dec_tmpref(t_from);
4210 buffer->transaction = NULL;
4211 binder_cleanup_transaction(t, "fd fixups failed",
4212 BR_FAILED_REPLY);
4213 binder_free_buf(proc, buffer);
4214 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4215 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4216 proc->pid, thread->pid,
4217 oneway ? "async " :
4218 (cmd == BR_REPLY ? "reply " : ""),
4219 tid, BR_FAILED_REPLY, ret, __LINE__);
4220 if (cmd == BR_REPLY) {
4221 cmd = BR_FAILED_REPLY;
4222 if (put_user(cmd, (uint32_t __user *)ptr))
4223 return -EFAULT;
4224 ptr += sizeof(uint32_t);
4225 binder_stat_br(proc, thread, cmd);
4226 break;
4227 }
4228 continue;
4229 }
4113 tr.data_size = t->buffer->data_size; 4230 tr.data_size = t->buffer->data_size;
4114 tr.offsets_size = t->buffer->offsets_size; 4231 tr.offsets_size = t->buffer->offsets_size;
4115 tr.data.ptr.buffer = (binder_uintptr_t) 4232 tr.data.ptr.buffer = (binder_uintptr_t)
@@ -4544,6 +4661,42 @@ out:
4544 return ret; 4661 return ret;
4545} 4662}
4546 4663
4664static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4665 struct binder_node_info_for_ref *info)
4666{
4667 struct binder_node *node;
4668 struct binder_context *context = proc->context;
4669 __u32 handle = info->handle;
4670
4671 if (info->strong_count || info->weak_count || info->reserved1 ||
4672 info->reserved2 || info->reserved3) {
4673 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4674 proc->pid);
4675 return -EINVAL;
4676 }
4677
4678 /* This ioctl may only be used by the context manager */
4679 mutex_lock(&context->context_mgr_node_lock);
4680 if (!context->binder_context_mgr_node ||
4681 context->binder_context_mgr_node->proc != proc) {
4682 mutex_unlock(&context->context_mgr_node_lock);
4683 return -EPERM;
4684 }
4685 mutex_unlock(&context->context_mgr_node_lock);
4686
4687 node = binder_get_node_from_ref(proc, handle, true, NULL);
4688 if (!node)
4689 return -EINVAL;
4690
4691 info->strong_count = node->local_strong_refs +
4692 node->internal_strong_refs;
4693 info->weak_count = node->local_weak_refs;
4694
4695 binder_put_node(node);
4696
4697 return 0;
4698}
4699
4547static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, 4700static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4548 struct binder_node_debug_info *info) 4701 struct binder_node_debug_info *info)
4549{ 4702{
@@ -4638,6 +4791,25 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4638 } 4791 }
4639 break; 4792 break;
4640 } 4793 }
4794 case BINDER_GET_NODE_INFO_FOR_REF: {
4795 struct binder_node_info_for_ref info;
4796
4797 if (copy_from_user(&info, ubuf, sizeof(info))) {
4798 ret = -EFAULT;
4799 goto err;
4800 }
4801
4802 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4803 if (ret < 0)
4804 goto err;
4805
4806 if (copy_to_user(ubuf, &info, sizeof(info))) {
4807 ret = -EFAULT;
4808 goto err;
4809 }
4810
4811 break;
4812 }
4641 case BINDER_GET_NODE_DEBUG_INFO: { 4813 case BINDER_GET_NODE_DEBUG_INFO: {
4642 struct binder_node_debug_info info; 4814 struct binder_node_debug_info info;
4643 4815
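A hedged sketch of how the context manager might call the new ioctl from
userspace; the wrapper is illustrative, but the requirement that every
field other than handle be zero on input comes directly from the check
above:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>  /* uapi: BINDER_GET_NODE_INFO_FOR_REF */

    /* Illustrative wrapper; only the context manager may call this. */
    static int query_ref_counts(int binder_fd, __u32 handle,
                                __u32 *strong, __u32 *weak)
    {
            struct binder_node_info_for_ref info;

            memset(&info, 0, sizeof(info));  /* all but handle must be zero */
            info.handle = handle;
            if (ioctl(binder_fd, BINDER_GET_NODE_INFO_FOR_REF, &info) < 0)
                    return -1;
            *strong = info.strong_count;
            *weak = info.weak_count;
            return 0;
    }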
@@ -4693,7 +4865,6 @@ static void binder_vma_close(struct vm_area_struct *vma)
4693 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 4865 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4694 (unsigned long)pgprot_val(vma->vm_page_prot)); 4866 (unsigned long)pgprot_val(vma->vm_page_prot));
4695 binder_alloc_vma_close(&proc->alloc); 4867 binder_alloc_vma_close(&proc->alloc);
4696 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4697} 4868}
4698 4869
4699static vm_fault_t binder_vm_fault(struct vm_fault *vmf) 4870static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
@@ -4739,9 +4910,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4739 ret = binder_alloc_mmap_handler(&proc->alloc, vma); 4910 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4740 if (ret) 4911 if (ret)
4741 return ret; 4912 return ret;
4742 mutex_lock(&proc->files_lock);
4743 proc->files = get_files_struct(current);
4744 mutex_unlock(&proc->files_lock);
4745 return 0; 4913 return 0;
4746 4914
4747err_bad_arg: 4915err_bad_arg:
@@ -4765,7 +4933,6 @@ static int binder_open(struct inode *nodp, struct file *filp)
4765 spin_lock_init(&proc->outer_lock); 4933 spin_lock_init(&proc->outer_lock);
4766 get_task_struct(current->group_leader); 4934 get_task_struct(current->group_leader);
4767 proc->tsk = current->group_leader; 4935 proc->tsk = current->group_leader;
4768 mutex_init(&proc->files_lock);
4769 INIT_LIST_HEAD(&proc->todo); 4936 INIT_LIST_HEAD(&proc->todo);
4770 proc->default_priority = task_nice(current); 4937 proc->default_priority = task_nice(current);
4771 binder_dev = container_of(filp->private_data, struct binder_device, 4938 binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4915,8 +5082,6 @@ static void binder_deferred_release(struct binder_proc *proc)
4915 struct rb_node *n; 5082 struct rb_node *n;
4916 int threads, nodes, incoming_refs, outgoing_refs, active_transactions; 5083 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4917 5084
4918 BUG_ON(proc->files);
4919
4920 mutex_lock(&binder_procs_lock); 5085 mutex_lock(&binder_procs_lock);
4921 hlist_del(&proc->proc_node); 5086 hlist_del(&proc->proc_node);
4922 mutex_unlock(&binder_procs_lock); 5087 mutex_unlock(&binder_procs_lock);
@@ -4998,7 +5163,6 @@ static void binder_deferred_release(struct binder_proc *proc)
4998static void binder_deferred_func(struct work_struct *work) 5163static void binder_deferred_func(struct work_struct *work)
4999{ 5164{
5000 struct binder_proc *proc; 5165 struct binder_proc *proc;
5001 struct files_struct *files;
5002 5166
5003 int defer; 5167 int defer;
5004 5168
@@ -5016,23 +5180,11 @@ static void binder_deferred_func(struct work_struct *work)
5016 } 5180 }
5017 mutex_unlock(&binder_deferred_lock); 5181 mutex_unlock(&binder_deferred_lock);
5018 5182
5019 files = NULL;
5020 if (defer & BINDER_DEFERRED_PUT_FILES) {
5021 mutex_lock(&proc->files_lock);
5022 files = proc->files;
5023 if (files)
5024 proc->files = NULL;
5025 mutex_unlock(&proc->files_lock);
5026 }
5027
5028 if (defer & BINDER_DEFERRED_FLUSH) 5183 if (defer & BINDER_DEFERRED_FLUSH)
5029 binder_deferred_flush(proc); 5184 binder_deferred_flush(proc);
5030 5185
5031 if (defer & BINDER_DEFERRED_RELEASE) 5186 if (defer & BINDER_DEFERRED_RELEASE)
5032 binder_deferred_release(proc); /* frees proc */ 5187 binder_deferred_release(proc); /* frees proc */
5033
5034 if (files)
5035 put_files_struct(files);
5036 } while (proc); 5188 } while (proc);
5037} 5189}
5038static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 5190static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
@@ -5667,12 +5819,11 @@ static int __init binder_init(void)
5667 * Copy the module_parameter string, because we don't want to 5819 * Copy the module_parameter string, because we don't want to
5668 * tokenize it in-place. 5820 * tokenize it in-place.
5669 */ 5821 */
5670 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); 5822 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5671 if (!device_names) { 5823 if (!device_names) {
5672 ret = -ENOMEM; 5824 ret = -ENOMEM;
5673 goto err_alloc_device_names_failed; 5825 goto err_alloc_device_names_failed;
5674 } 5826 }
5675 strcpy(device_names, binder_devices_param);
5676 5827
5677 device_tmp = device_names; 5828 device_tmp = device_names;
5678 while ((device_name = strsep(&device_tmp, ","))) { 5829 while ((device_name = strsep(&device_tmp, ","))) {
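The last hunk above swaps an open-coded allocate-and-copy for kstrdup()
before tokenizing with strsep(). A userspace analogue of the resulting
pattern, with strdup()/strsep() standing in for the kernel helpers and
the device list assumed for the example:

    #define _DEFAULT_SOURCE  /* for strsep() on glibc */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            /* Duplicate the parameter string, then tokenize the copy
             * in place -- strsep() advances tmp past each comma. */
            char *names = strdup("binder,hwbinder,vndbinder");
            char *tmp = names, *name;

            if (!names)
                    return 1;
            while ((name = strsep(&tmp, ",")))
                    printf("device: %s\n", name);
            free(names);
            return 0;
    }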
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 588eb3ec3507..14de7ac57a34 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -223,22 +223,40 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
223 __entry->dest_ref_debug_id, __entry->dest_ref_desc) 223 __entry->dest_ref_debug_id, __entry->dest_ref_desc)
224); 224);
225 225
226TRACE_EVENT(binder_transaction_fd, 226TRACE_EVENT(binder_transaction_fd_send,
227 TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), 227 TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
228 TP_ARGS(t, src_fd, dest_fd), 228 TP_ARGS(t, fd, offset),
229 229
230 TP_STRUCT__entry( 230 TP_STRUCT__entry(
231 __field(int, debug_id) 231 __field(int, debug_id)
232 __field(int, src_fd) 232 __field(int, fd)
233 __field(int, dest_fd) 233 __field(size_t, offset)
234 ),
235 TP_fast_assign(
236 __entry->debug_id = t->debug_id;
237 __entry->fd = fd;
238 __entry->offset = offset;
239 ),
240 TP_printk("transaction=%d src_fd=%d offset=%zu",
241 __entry->debug_id, __entry->fd, __entry->offset)
242);
243
244TRACE_EVENT(binder_transaction_fd_recv,
245 TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
246 TP_ARGS(t, fd, offset),
247
248 TP_STRUCT__entry(
249 __field(int, debug_id)
250 __field(int, fd)
251 __field(size_t, offset)
234 ), 252 ),
235 TP_fast_assign( 253 TP_fast_assign(
236 __entry->debug_id = t->debug_id; 254 __entry->debug_id = t->debug_id;
237 __entry->src_fd = src_fd; 255 __entry->fd = fd;
238 __entry->dest_fd = dest_fd; 256 __entry->offset = offset;
239 ), 257 ),
240 TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", 258 TP_printk("transaction=%d dest_fd=%d offset=%zu",
241 __entry->debug_id, __entry->src_fd, __entry->dest_fd) 259 __entry->debug_id, __entry->fd, __entry->offset)
242); 260);
243 261
244DECLARE_EVENT_CLASS(binder_buffer_class, 262DECLARE_EVENT_CLASS(binder_buffer_class,
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 5e1dd2772278..5ef215297101 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -1,18 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Extcon charger detection driver for Intel Cherrytrail Whiskey Cove PMIC 3 * Extcon charger detection driver for Intel Cherrytrail Whiskey Cove PMIC
3 * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com> 4 * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
4 * 5 *
5 * Based on various non upstream patches to support the CHT Whiskey Cove PMIC: 6 * Based on various non upstream patches to support the CHT Whiskey Cove PMIC:
6 * Copyright (C) 2013-2015 Intel Corporation. All rights reserved. 7 * Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 */ 8 */
17 9
18#include <linux/extcon-provider.h> 10#include <linux/extcon-provider.h>
@@ -32,10 +24,10 @@
32#define CHT_WC_CHGRCTRL0_EMRGCHREN BIT(1) 24#define CHT_WC_CHGRCTRL0_EMRGCHREN BIT(1)
33#define CHT_WC_CHGRCTRL0_EXTCHRDIS BIT(2) 25#define CHT_WC_CHGRCTRL0_EXTCHRDIS BIT(2)
34#define CHT_WC_CHGRCTRL0_SWCONTROL BIT(3) 26#define CHT_WC_CHGRCTRL0_SWCONTROL BIT(3)
35#define CHT_WC_CHGRCTRL0_TTLCK_MASK BIT(4) 27#define CHT_WC_CHGRCTRL0_TTLCK BIT(4)
36#define CHT_WC_CHGRCTRL0_CCSM_OFF_MASK BIT(5) 28#define CHT_WC_CHGRCTRL0_CCSM_OFF BIT(5)
37#define CHT_WC_CHGRCTRL0_DBPOFF_MASK BIT(6) 29#define CHT_WC_CHGRCTRL0_DBPOFF BIT(6)
38#define CHT_WC_CHGRCTRL0_WDT_NOKICK BIT(7) 30#define CHT_WC_CHGRCTRL0_CHR_WDT_NOKICK BIT(7)
39 31
40#define CHT_WC_CHGRCTRL1 0x5e17 32#define CHT_WC_CHGRCTRL1 0x5e17
41 33
@@ -52,7 +44,7 @@
52#define CHT_WC_USBSRC_TYPE_ACA 4 44#define CHT_WC_USBSRC_TYPE_ACA 4
53#define CHT_WC_USBSRC_TYPE_SE1 5 45#define CHT_WC_USBSRC_TYPE_SE1 5
54#define CHT_WC_USBSRC_TYPE_MHL 6 46#define CHT_WC_USBSRC_TYPE_MHL 6
55#define CHT_WC_USBSRC_TYPE_FLOAT_DP_DN 7 47#define CHT_WC_USBSRC_TYPE_FLOATING 7
56#define CHT_WC_USBSRC_TYPE_OTHER 8 48#define CHT_WC_USBSRC_TYPE_OTHER 8
57#define CHT_WC_USBSRC_TYPE_DCP_EXTPHY 9 49#define CHT_WC_USBSRC_TYPE_DCP_EXTPHY 9
58 50
@@ -61,9 +53,12 @@
61#define CHT_WC_PWRSRC_STS 0x6e1e 53#define CHT_WC_PWRSRC_STS 0x6e1e
62#define CHT_WC_PWRSRC_VBUS BIT(0) 54#define CHT_WC_PWRSRC_VBUS BIT(0)
63#define CHT_WC_PWRSRC_DC BIT(1) 55#define CHT_WC_PWRSRC_DC BIT(1)
64#define CHT_WC_PWRSRC_BAT BIT(2) 56#define CHT_WC_PWRSRC_BATT BIT(2)
65#define CHT_WC_PWRSRC_ID_GND BIT(3) 57#define CHT_WC_PWRSRC_USBID_MASK GENMASK(4, 3)
66#define CHT_WC_PWRSRC_ID_FLOAT BIT(4) 58#define CHT_WC_PWRSRC_USBID_SHIFT 3
59#define CHT_WC_PWRSRC_RID_ACA 0
60#define CHT_WC_PWRSRC_RID_GND 1
61#define CHT_WC_PWRSRC_RID_FLOAT 2
67 62
68#define CHT_WC_VBUS_GPIO_CTLO 0x6e2d 63#define CHT_WC_VBUS_GPIO_CTLO 0x6e2d
69#define CHT_WC_VBUS_GPIO_CTLO_OUTPUT BIT(0) 64#define CHT_WC_VBUS_GPIO_CTLO_OUTPUT BIT(0)
@@ -104,16 +99,20 @@ struct cht_wc_extcon_data {
104 99
105static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts) 100static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
106{ 101{
107 if (pwrsrc_sts & CHT_WC_PWRSRC_ID_GND) 102 switch ((pwrsrc_sts & CHT_WC_PWRSRC_USBID_MASK) >> CHT_WC_PWRSRC_USBID_SHIFT) {
103 case CHT_WC_PWRSRC_RID_GND:
108 return USB_ID_GND; 104 return USB_ID_GND;
109 if (pwrsrc_sts & CHT_WC_PWRSRC_ID_FLOAT) 105 case CHT_WC_PWRSRC_RID_FLOAT:
110 return USB_ID_FLOAT; 106 return USB_ID_FLOAT;
111 107 case CHT_WC_PWRSRC_RID_ACA:
112 /* 108 default:
113 * Once we have iio support for the gpadc we should read the USBID 109 /*
114 * gpadc channel here and determine ACA role based on that. 110 * Once we have IIO support for the GPADC we should read
115 */ 111 * the USBID GPADC channel here and determine ACA role
116 return USB_ID_FLOAT; 112 * based on that.
113 */
114 return USB_ID_FLOAT;
115 }
117} 116}
118 117
119static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext, 118static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext,
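The reworked cht_wc_extcon_get_id() above treats USBID as a two-bit
field rather than two independent bits. A standalone sketch of the same
extraction, with GENMASK(4, 3) expanded by hand; the RID codes match the
definitions added earlier in this patch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PWRSRC_USBID_MASK   (0x3u << 3)  /* GENMASK(4, 3) expanded */
    #define PWRSRC_USBID_SHIFT  3

    int main(void)
    {
            uint32_t pwrsrc_sts = 1u << 3;  /* example status: RID_GND */
            uint32_t rid = (pwrsrc_sts & PWRSRC_USBID_MASK) >>
                           PWRSRC_USBID_SHIFT;

            printf("rid=%" PRIu32 "\n", rid);  /* 0 = ACA, 1 = GND, 2 = FLOAT */
            return 0;
    }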
@@ -156,9 +155,9 @@ static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext,
156 dev_warn(ext->dev, 155 dev_warn(ext->dev,
157 "Unhandled charger type %d, defaulting to SDP\n", 156 "Unhandled charger type %d, defaulting to SDP\n",
158 ret); 157 ret);
159 /* Fall through, treat as SDP */ 158 return EXTCON_CHG_USB_SDP;
160 case CHT_WC_USBSRC_TYPE_SDP: 159 case CHT_WC_USBSRC_TYPE_SDP:
161 case CHT_WC_USBSRC_TYPE_FLOAT_DP_DN: 160 case CHT_WC_USBSRC_TYPE_FLOATING:
162 case CHT_WC_USBSRC_TYPE_OTHER: 161 case CHT_WC_USBSRC_TYPE_OTHER:
163 return EXTCON_CHG_USB_SDP; 162 return EXTCON_CHG_USB_SDP;
164 case CHT_WC_USBSRC_TYPE_CDP: 163 case CHT_WC_USBSRC_TYPE_CDP:
@@ -279,7 +278,7 @@ static int cht_wc_extcon_sw_control(struct cht_wc_extcon_data *ext, bool enable)
279{ 278{
280 int ret, mask, val; 279 int ret, mask, val;
281 280
282 mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF_MASK; 281 mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF;
283 val = enable ? mask : 0; 282 val = enable ? mask : 0;
284 ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL0, mask, val); 283 ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL0, mask, val);
285 if (ret) 284 if (ret)
@@ -292,6 +291,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
292{ 291{
293 struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); 292 struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
294 struct cht_wc_extcon_data *ext; 293 struct cht_wc_extcon_data *ext;
294 unsigned long mask = ~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_USBID_MASK);
295 int irq, ret; 295 int irq, ret;
296 296
297 irq = platform_get_irq(pdev, 0); 297 irq = platform_get_irq(pdev, 0);
@@ -352,9 +352,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
352 } 352 }
353 353
354 /* Unmask irqs */ 354 /* Unmask irqs */
355 ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ_MASK, 355 ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ_MASK, mask);
356 (int)~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_ID_GND |
357 CHT_WC_PWRSRC_ID_FLOAT));
358 if (ret) { 356 if (ret) {
359 dev_err(ext->dev, "Error writing irq-mask: %d\n", ret); 357 dev_err(ext->dev, "Error writing irq-mask: %d\n", ret);
360 goto disable_sw_control; 358 goto disable_sw_control;
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index fd24debe58a3..80c9abcc3f97 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Intel INT3496 ACPI device extcon driver 3 * Intel INT3496 ACPI device extcon driver
3 * 4 *
@@ -7,15 +8,6 @@
7 * 8 *
8 * Copyright (c) 2014, Intel Corporation. 9 * Copyright (c) 2014, Intel Corporation.
9 * Author: David Cohen <david.a.cohen@linux.intel.com> 10 * Author: David Cohen <david.a.cohen@linux.intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */ 11 */
20 12
21#include <linux/acpi.h> 13#include <linux/acpi.h>
@@ -192,4 +184,4 @@ module_platform_driver(int3496_driver);
192 184
193MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); 185MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
194MODULE_DESCRIPTION("Intel INT3496 ACPI device extcon driver"); 186MODULE_DESCRIPTION("Intel INT3496 ACPI device extcon driver");
195MODULE_LICENSE("GPL"); 187MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index b871836da8a4..22d2feb1f8bc 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -1,20 +1,10 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC 2//
3 * 3// extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC
4 * Copyright (C) 2013,2014 Samsung Electronics 4//
5 * Chanwoo Choi <cw00.choi@samsung.com> 5// Copyright (C) 2013,2014 Samsung Electronics
6 * Krzysztof Kozlowski <krzk@kernel.org> 6// Chanwoo Choi <cw00.choi@samsung.com>
7 * 7// Krzysztof Kozlowski <krzk@kernel.org>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18 8
19#include <linux/kernel.h> 9#include <linux/kernel.h>
20#include <linux/module.h> 10#include <linux/module.h>
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 227651ff9666..a79537ebb671 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1,19 +1,9 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * extcon-max77693.c - MAX77693 extcon driver to support MAX77693 MUIC 2//
3 * 3// extcon-max77693.c - MAX77693 extcon driver to support MAX77693 MUIC
4 * Copyright (C) 2012 Samsung Electronics 4//
5 * Chanwoo Choi <cw00.choi@samsung.com> 5// Copyright (C) 2012 Samsung Electronics
6 * 6// Chanwoo Choi <cw00.choi@samsung.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17 7
18#include <linux/kernel.h> 8#include <linux/kernel.h>
19#include <linux/module.h> 9#include <linux/module.h>
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index c9fcd6cd41cb..b98cbd0362f5 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -1,15 +1,10 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * extcon-max77843.c - Maxim MAX77843 extcon driver to support 2//
3 * MUIC(Micro USB Interface Controller) 3// extcon-max77843.c - Maxim MAX77843 extcon driver to support
4 * 4// MUIC(Micro USB Interface Controller)
5 * Copyright (C) 2015 Samsung Electronics 5//
6 * Author: Jaewon Kim <jaewon02.kim@samsung.com> 6// Copyright (C) 2015 Samsung Electronics
7 * 7// Author: Jaewon Kim <jaewon02.kim@samsung.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13 8
14#include <linux/extcon-provider.h> 9#include <linux/extcon-provider.h>
15#include <linux/i2c.h> 10#include <linux/i2c.h>
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 9f30f4929b72..bdabb2479e0d 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -1,19 +1,9 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC 2//
3 * 3// extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
4 * Copyright (C) 2012 Samsung Electronics 4//
5 * Donggeun Kim <dg77.kim@samsung.com> 5// Copyright (C) 2012 Samsung Electronics
6 * 6// Donggeun Kim <dg77.kim@samsung.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17 7
18#include <linux/kernel.h> 8#include <linux/kernel.h>
19#include <linux/module.h> 9#include <linux/module.h>
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index b9d27c8fe57e..5ab0498be652 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -628,7 +628,7 @@ int extcon_get_property(struct extcon_dev *edev, unsigned int id,
628 unsigned long flags; 628 unsigned long flags;
629 int index, ret = 0; 629 int index, ret = 0;
630 630
631 *prop_val = (union extcon_property_value)(0); 631 *prop_val = (union extcon_property_value){0};
632 632
633 if (!edev) 633 if (!edev)
634 return -EINVAL; 634 return -EINVAL;
@@ -1123,7 +1123,6 @@ int extcon_dev_register(struct extcon_dev *edev)
1123 (unsigned long)atomic_inc_return(&edev_no)); 1123 (unsigned long)atomic_inc_return(&edev_no));
1124 1124
1125 if (edev->max_supported) { 1125 if (edev->max_supported) {
1126 char buf[10];
1127 char *str; 1126 char *str;
1128 struct extcon_cable *cable; 1127 struct extcon_cable *cable;
1129 1128
@@ -1137,9 +1136,7 @@ int extcon_dev_register(struct extcon_dev *edev)
1137 for (index = 0; index < edev->max_supported; index++) { 1136 for (index = 0; index < edev->max_supported; index++) {
1138 cable = &edev->cables[index]; 1137 cable = &edev->cables[index];
1139 1138
1140 snprintf(buf, 10, "cable.%d", index); 1139 str = kasprintf(GFP_KERNEL, "cable.%d", index);
1141 str = kzalloc(strlen(buf) + 1,
1142 GFP_KERNEL);
1143 if (!str) { 1140 if (!str) {
1144 for (index--; index >= 0; index--) { 1141 for (index--; index >= 0; index--) {
1145 cable = &edev->cables[index]; 1142 cable = &edev->cables[index];
@@ -1149,7 +1146,6 @@ int extcon_dev_register(struct extcon_dev *edev)
1149 1146
1150 goto err_alloc_cables; 1147 goto err_alloc_cables;
1151 } 1148 }
1152 strcpy(str, buf);
1153 1149
1154 cable->edev = edev; 1150 cable->edev = edev;
1155 cable->cable_index = index; 1151 cable->cable_index = index;
@@ -1172,7 +1168,6 @@ int extcon_dev_register(struct extcon_dev *edev)
1172 } 1168 }
1173 1169
1174 if (edev->max_supported && edev->mutually_exclusive) { 1170 if (edev->max_supported && edev->mutually_exclusive) {
1175 char buf[80];
1176 char *name; 1171 char *name;
1177 1172
1178 /* Count the size of mutually_exclusive array */ 1173 /* Count the size of mutually_exclusive array */
@@ -1197,9 +1192,8 @@ int extcon_dev_register(struct extcon_dev *edev)
1197 } 1192 }
1198 1193
1199 for (index = 0; edev->mutually_exclusive[index]; index++) { 1194 for (index = 0; edev->mutually_exclusive[index]; index++) {
1200 sprintf(buf, "0x%x", edev->mutually_exclusive[index]); 1195 name = kasprintf(GFP_KERNEL, "0x%x",
1201 name = kzalloc(strlen(buf) + 1, 1196 edev->mutually_exclusive[index]);
1202 GFP_KERNEL);
1203 if (!name) { 1197 if (!name) {
1204 for (index--; index >= 0; index--) { 1198 for (index--; index >= 0; index--) {
1205 kfree(edev->d_attrs_muex[index].attr. 1199 kfree(edev->d_attrs_muex[index].attr.
@@ -1210,7 +1204,6 @@ int extcon_dev_register(struct extcon_dev *edev)
1210 ret = -ENOMEM; 1204 ret = -ENOMEM;
1211 goto err_muex; 1205 goto err_muex;
1212 } 1206 }
1213 strcpy(name, buf);
1214 sysfs_attr_init(&edev->d_attrs_muex[index].attr); 1207 sysfs_attr_init(&edev->d_attrs_muex[index].attr);
1215 edev->d_attrs_muex[index].attr.name = name; 1208 edev->d_attrs_muex[index].attr.name = name;
1216 edev->d_attrs_muex[index].attr.mode = 0000; 1209 edev->d_attrs_muex[index].attr.mode = 0000;
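Both extcon.c hunks above collapse a format-into-stack-buffer followed by
kzalloc() and strcpy() into a single kasprintf() call. A userspace
analogue using asprintf(); the GNU feature macro and the sample index are
assumptions of the example:

    #define _GNU_SOURCE  /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char *str;

            /* Format and allocate in one call, replacing snprintf()
             * into a stack buffer plus kzalloc() + strcpy(). */
            if (asprintf(&str, "cable.%d", 3) < 0)
                    return 1;
            printf("%s\n", str);
            free(str);
            return 0;
    }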
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index a456a000048b..91a0404affe2 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -10,37 +10,31 @@ if GOOGLE_FIRMWARE
10 10
11config GOOGLE_SMI 11config GOOGLE_SMI
12 tristate "SMI interface for Google platforms" 12 tristate "SMI interface for Google platforms"
13 depends on X86 && ACPI && DMI && EFI 13 depends on X86 && ACPI && DMI
14 select EFI_VARS
15 help 14 help
16 Say Y here if you want to enable SMI callbacks for Google 15 Say Y here if you want to enable SMI callbacks for Google
17 platforms. This provides an interface for writing to and 16 platforms. This provides an interface for writing to and
18 clearing the EFI event log and reading and writing NVRAM 17 clearing the event log. If EFI_VARS is also enabled this
18 driver provides an interface for reading and writing NVRAM
19 variables. 19 variables.
20 20
21config GOOGLE_COREBOOT_TABLE 21config GOOGLE_COREBOOT_TABLE
22 tristate 22 tristate "Coreboot Table Access"
23 depends on GOOGLE_COREBOOT_TABLE_ACPI || GOOGLE_COREBOOT_TABLE_OF 23 depends on ACPI || OF
24
25config GOOGLE_COREBOOT_TABLE_ACPI
26 tristate "Coreboot Table Access - ACPI"
27 depends on ACPI
28 select GOOGLE_COREBOOT_TABLE
29 help 24 help
30 This option enables the coreboot_table module, which provides other 25 This option enables the coreboot_table module, which provides other
31 firmware modules to access to the coreboot table. The coreboot table 26 firmware modules access to the coreboot table. The coreboot table
32 pointer is accessed through the ACPI "GOOGCB00" object. 27 pointer is accessed through the ACPI "GOOGCB00" object or the
28 device tree node /firmware/coreboot.
33 If unsure say N. 29 If unsure say N.
34 30
31config GOOGLE_COREBOOT_TABLE_ACPI
32 tristate
33 select GOOGLE_COREBOOT_TABLE
34
35config GOOGLE_COREBOOT_TABLE_OF 35config GOOGLE_COREBOOT_TABLE_OF
36 tristate "Coreboot Table Access - Device Tree" 36 tristate
37 depends on OF
38 select GOOGLE_COREBOOT_TABLE 37 select GOOGLE_COREBOOT_TABLE
39 help
40 This option enable the coreboot_table module, which provide other
41 firmware modules to access coreboot table. The coreboot table pointer
42 is accessed through the device tree node /firmware/coreboot.
43 If unsure say N.
44 38
45config GOOGLE_MEMCONSOLE 39config GOOGLE_MEMCONSOLE
46 tristate 40 tristate
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
index d0b3fba96194..d17caded5d88 100644
--- a/drivers/firmware/google/Makefile
+++ b/drivers/firmware/google/Makefile
@@ -2,8 +2,6 @@
2 2
3obj-$(CONFIG_GOOGLE_SMI) += gsmi.o 3obj-$(CONFIG_GOOGLE_SMI) += gsmi.o
4obj-$(CONFIG_GOOGLE_COREBOOT_TABLE) += coreboot_table.o 4obj-$(CONFIG_GOOGLE_COREBOOT_TABLE) += coreboot_table.o
5obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_ACPI) += coreboot_table-acpi.o
6obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_OF) += coreboot_table-of.o
7obj-$(CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT) += framebuffer-coreboot.o 5obj-$(CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT) += framebuffer-coreboot.o
8obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o 6obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o
9obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT) += memconsole-coreboot.o 7obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT) += memconsole-coreboot.o
diff --git a/drivers/firmware/google/coreboot_table-acpi.c b/drivers/firmware/google/coreboot_table-acpi.c
deleted file mode 100644
index 77197fe3d42f..000000000000
--- a/drivers/firmware/google/coreboot_table-acpi.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * coreboot_table-acpi.c
3 *
4 * Using ACPI to locate Coreboot table and provide coreboot table access.
5 *
6 * Copyright 2017 Google Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License v2.0 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/acpi.h>
19#include <linux/device.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/io.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26
27#include "coreboot_table.h"
28
29static int coreboot_table_acpi_probe(struct platform_device *pdev)
30{
31 phys_addr_t phyaddr;
32 resource_size_t len;
33 struct coreboot_table_header __iomem *header = NULL;
34 struct resource *res;
35 void __iomem *ptr = NULL;
36
37 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
38 if (!res)
39 return -EINVAL;
40
41 len = resource_size(res);
42 if (!res->start || !len)
43 return -EINVAL;
44
45 phyaddr = res->start;
46 header = ioremap_cache(phyaddr, sizeof(*header));
47 if (header == NULL)
48 return -ENOMEM;
49
50 ptr = ioremap_cache(phyaddr,
51 header->header_bytes + header->table_bytes);
52 iounmap(header);
53 if (!ptr)
54 return -ENOMEM;
55
56 return coreboot_table_init(&pdev->dev, ptr);
57}
58
59static int coreboot_table_acpi_remove(struct platform_device *pdev)
60{
61 return coreboot_table_exit();
62}
63
64static const struct acpi_device_id cros_coreboot_acpi_match[] = {
65 { "GOOGCB00", 0 },
66 { "BOOT0000", 0 },
67 { }
68};
69MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
70
71static struct platform_driver coreboot_table_acpi_driver = {
72 .probe = coreboot_table_acpi_probe,
73 .remove = coreboot_table_acpi_remove,
74 .driver = {
75 .name = "coreboot_table_acpi",
76 .acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
77 },
78};
79
80static int __init coreboot_table_acpi_init(void)
81{
82 return platform_driver_register(&coreboot_table_acpi_driver);
83}
84
85module_init(coreboot_table_acpi_init);
86
87MODULE_AUTHOR("Google, Inc.");
88MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table-of.c b/drivers/firmware/google/coreboot_table-of.c
deleted file mode 100644
index f15bf404c579..000000000000
--- a/drivers/firmware/google/coreboot_table-of.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * coreboot_table-of.c
- *
- * Coreboot table access through open firmware.
- *
- * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include "coreboot_table.h"
-
-static int coreboot_table_of_probe(struct platform_device *pdev)
-{
-	struct device_node *fw_dn = pdev->dev.of_node;
-	void __iomem *ptr;
-
-	ptr = of_iomap(fw_dn, 0);
-	of_node_put(fw_dn);
-	if (!ptr)
-		return -ENOMEM;
-
-	return coreboot_table_init(&pdev->dev, ptr);
-}
-
-static int coreboot_table_of_remove(struct platform_device *pdev)
-{
-	return coreboot_table_exit();
-}
-
-static const struct of_device_id coreboot_of_match[] = {
-	{ .compatible = "coreboot" },
-	{},
-};
-
-static struct platform_driver coreboot_table_of_driver = {
-	.probe = coreboot_table_of_probe,
-	.remove = coreboot_table_of_remove,
-	.driver = {
-		.name = "coreboot_table_of",
-		.of_match_table = coreboot_of_match,
-	},
-};
-
-static int __init platform_coreboot_table_of_init(void)
-{
-	struct platform_device *pdev;
-	struct device_node *of_node;
-
-	/* Limit device creation to the presence of /firmware/coreboot node */
-	of_node = of_find_node_by_path("/firmware/coreboot");
-	if (!of_node)
-		return -ENODEV;
-
-	if (!of_match_node(coreboot_of_match, of_node))
-		return -ENODEV;
-
-	pdev = of_platform_device_create(of_node, "coreboot_table_of", NULL);
-	if (!pdev)
-		return -ENODEV;
-
-	return platform_driver_register(&coreboot_table_of_driver);
-}
-
-module_init(platform_coreboot_table_of_init);
-
-MODULE_AUTHOR("Google, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 19db5709ae28..078d3bbe632f 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -16,12 +16,15 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/acpi.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 
 #include "coreboot_table.h"
@@ -29,8 +32,6 @@
 #define CB_DEV(d) container_of(d, struct coreboot_device, dev)
 #define CB_DRV(d) container_of(d, struct coreboot_driver, drv)
 
-static struct coreboot_table_header __iomem *ptr_header;
-
 static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
 {
 	struct coreboot_device *device = CB_DEV(dev);
@@ -70,12 +71,6 @@ static struct bus_type coreboot_bus_type = {
 	.remove		= coreboot_bus_remove,
 };
 
-static int __init coreboot_bus_init(void)
-{
-	return bus_register(&coreboot_bus_type);
-}
-module_init(coreboot_bus_init);
-
 static void coreboot_device_release(struct device *dev)
 {
 	struct coreboot_device *device = CB_DEV(dev);
@@ -97,62 +92,117 @@ void coreboot_driver_unregister(struct coreboot_driver *driver)
 }
 EXPORT_SYMBOL(coreboot_driver_unregister);
 
-int coreboot_table_init(struct device *dev, void __iomem *ptr)
+static int coreboot_table_populate(struct device *dev, void *ptr)
 {
 	int i, ret;
 	void *ptr_entry;
 	struct coreboot_device *device;
-	struct coreboot_table_entry entry;
-	struct coreboot_table_header header;
-
-	ptr_header = ptr;
-	memcpy_fromio(&header, ptr_header, sizeof(header));
-
-	if (strncmp(header.signature, "LBIO", sizeof(header.signature))) {
-		pr_warn("coreboot_table: coreboot table missing or corrupt!\n");
-		return -ENODEV;
-	}
+	struct coreboot_table_entry *entry;
+	struct coreboot_table_header *header = ptr;
 
-	ptr_entry = (void *)ptr_header + header.header_bytes;
-	for (i = 0; i < header.table_entries; i++) {
-		memcpy_fromio(&entry, ptr_entry, sizeof(entry));
+	ptr_entry = ptr + header->header_bytes;
+	for (i = 0; i < header->table_entries; i++) {
+		entry = ptr_entry;
 
-		device = kzalloc(sizeof(struct device) + entry.size, GFP_KERNEL);
-		if (!device) {
-			ret = -ENOMEM;
-			break;
-		}
+		device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+		if (!device)
+			return -ENOMEM;
 
 		dev_set_name(&device->dev, "coreboot%d", i);
 		device->dev.parent = dev;
 		device->dev.bus = &coreboot_bus_type;
 		device->dev.release = coreboot_device_release;
-		memcpy_fromio(&device->entry, ptr_entry, entry.size);
+		memcpy(&device->entry, ptr_entry, entry->size);
 
 		ret = device_register(&device->dev);
 		if (ret) {
 			put_device(&device->dev);
-			break;
+			return ret;
 		}
 
-		ptr_entry += entry.size;
+		ptr_entry += entry->size;
 	}
 
-	return ret;
+	return 0;
 }
-EXPORT_SYMBOL(coreboot_table_init);
 
-int coreboot_table_exit(void)
+static int coreboot_table_probe(struct platform_device *pdev)
 {
-	if (ptr_header) {
-		bus_unregister(&coreboot_bus_type);
-		iounmap(ptr_header);
-		ptr_header = NULL;
+	resource_size_t len;
+	struct coreboot_table_header *header;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	void *ptr;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	len = resource_size(res);
+	if (!res->start || !len)
+		return -EINVAL;
+
+	/* Check just the header first to make sure things are sane */
+	header = memremap(res->start, sizeof(*header), MEMREMAP_WB);
+	if (!header)
+		return -ENOMEM;
+
+	len = header->header_bytes + header->table_bytes;
+	ret = strncmp(header->signature, "LBIO", sizeof(header->signature));
+	memunmap(header);
+	if (ret) {
+		dev_warn(dev, "coreboot table missing or corrupt!\n");
+		return -ENODEV;
 	}
 
+	ptr = memremap(res->start, len, MEMREMAP_WB);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = bus_register(&coreboot_bus_type);
+	if (!ret) {
+		ret = coreboot_table_populate(dev, ptr);
+		if (ret)
+			bus_unregister(&coreboot_bus_type);
+	}
+	memunmap(ptr);
+
+	return ret;
+}
+
+static int coreboot_table_remove(struct platform_device *pdev)
+{
+	bus_unregister(&coreboot_bus_type);
 	return 0;
 }
-EXPORT_SYMBOL(coreboot_table_exit);
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_coreboot_acpi_match[] = {
+	{ "GOOGCB00", 0 },
+	{ "BOOT0000", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id coreboot_of_match[] = {
+	{ .compatible = "coreboot" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, coreboot_of_match);
+#endif
+
+static struct platform_driver coreboot_table_driver = {
+	.probe = coreboot_table_probe,
	.remove = coreboot_table_remove,
+	.driver = {
+		.name = "coreboot_table",
+		.acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
+		.of_match_table = of_match_ptr(coreboot_of_match),
+	},
+};
+module_platform_driver(coreboot_table_driver);
 MODULE_AUTHOR("Google, Inc.");
 MODULE_LICENSE("GPL");
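[Editor's sketch, not part of the patch] With the ACPI and OF front ends folded into this single platform driver, consumers keep using the coreboot bus API declared in coreboot_table.h. A minimal, hypothetical bus driver would look like the following; the tag value and all example_* names are illustrative only, while the coreboot_driver field names follow the existing in-tree users such as memconsole-coreboot.c:

/* example-coreboot-driver.c - hypothetical consumer of the coreboot bus */
#include <linux/module.h>

#include "coreboot_table.h"

#define CB_TAG_EXAMPLE	0x99	/* hypothetical table tag */

static int example_probe(struct coreboot_device *dev)
{
	/* dev->entry holds the table entry copied by coreboot_table_populate() */
	return 0;
}

static int example_remove(struct coreboot_device *dev)
{
	return 0;
}

static struct coreboot_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.drv = {
		.name = "example",
	},
	.tag = CB_TAG_EXAMPLE,
};

static int __init example_init(void)
{
	return coreboot_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	coreboot_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");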
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 8ad95a94481b..71a9de6b15fa 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -91,10 +91,4 @@ int coreboot_driver_register(struct coreboot_driver *driver);
 /* Unregister a driver that uses the data from a coreboot table. */
 void coreboot_driver_unregister(struct coreboot_driver *driver);
 
-/* Initialize coreboot table module given a pointer to iomem */
-int coreboot_table_init(struct device *dev, void __iomem *ptr);
-
-/* Cleanup coreboot table module */
-int coreboot_table_exit(void);
-
 #endif /* __COREBOOT_TABLE_H */
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index c8f169bf2e27..82ce1e6d261e 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -29,6 +29,7 @@
 #include <linux/efi.h>
 #include <linux/module.h>
 #include <linux/ucs2_string.h>
+#include <linux/suspend.h>
 
 #define GSMI_SHUTDOWN_CLEAN	0	/* Clean Shutdown */
 /* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
@@ -70,6 +71,8 @@
 #define GSMI_CMD_SET_NVRAM_VAR		0x03
 #define GSMI_CMD_SET_EVENT_LOG		0x08
 #define GSMI_CMD_CLEAR_EVENT_LOG	0x09
+#define GSMI_CMD_LOG_S0IX_SUSPEND	0x0a
+#define GSMI_CMD_LOG_S0IX_RESUME	0x0b
 #define GSMI_CMD_CLEAR_CONFIG		0x20
 #define GSMI_CMD_HANDSHAKE_TYPE		0xC1
 
@@ -84,7 +87,7 @@ struct gsmi_buf {
 	u32 address;			/* physical address of buffer */
 };
 
-struct gsmi_device {
+static struct gsmi_device {
 	struct platform_device *pdev;	/* platform device */
 	struct gsmi_buf *name_buf;	/* variable name buffer */
 	struct gsmi_buf *data_buf;	/* generic data buffer */
@@ -122,7 +125,6 @@ struct gsmi_log_entry_type_1 {
 	u32 instance;
 } __packed;
 
-
 /*
  * Some platforms don't have explicit SMI handshake
  * and need to wait for SMI to complete.
@@ -133,6 +135,15 @@ module_param(spincount, uint, 0600);
 MODULE_PARM_DESC(spincount,
 	"The number of loop iterations to use when using the spin handshake.");
 
+/*
+ * Platforms might not support S0ix logging in their GSMI handlers. In order to
+ * avoid any side-effects of generating an SMI for S0ix logging, use the S0ix
+ * related GSMI commands only for those platforms that explicitly enable this
+ * option.
+ */
+static bool s0ix_logging_enable;
+module_param(s0ix_logging_enable, bool, 0600);
+
 static struct gsmi_buf *gsmi_buf_alloc(void)
 {
 	struct gsmi_buf *smibuf;
@@ -289,6 +300,10 @@ static int gsmi_exec(u8 func, u8 sub)
 	return rc;
 }
 
+#ifdef CONFIG_EFI_VARS
+
+static struct efivars efivars;
+
 static efi_status_t gsmi_get_variable(efi_char16_t *name,
 				      efi_guid_t *vendor, u32 *attr,
 				      unsigned long *data_size,
@@ -466,6 +481,8 @@ static const struct efivar_operations efivar_ops = {
 	.get_next_variable = gsmi_get_next_variable,
 };
 
+#endif /* CONFIG_EFI_VARS */
+
 static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
 			       struct bin_attribute *bin_attr,
 			       char *buf, loff_t pos, size_t count)
@@ -480,11 +497,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
 	if (count < sizeof(u32))
 		return -EINVAL;
 	param.type = *(u32 *)buf;
-	count -= sizeof(u32);
 	buf += sizeof(u32);
 
 	/* The remaining buffer is the data payload */
-	if (count > gsmi_dev.data_buf->length)
+	if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
 		return -EINVAL;
 	param.data_len = count - sizeof(u32);
 
@@ -504,7 +520,7 @@
 
 	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
 
-	return rc;
+	return (rc == 0) ? count : rc;
 
 }
 
@@ -716,6 +732,12 @@ static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
 		},
 	},
+	{
+		.ident = "Coreboot Firmware",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+		},
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
@@ -762,7 +784,6 @@ static __init int gsmi_system_valid(void)
 }
 
 static struct kobject *gsmi_kobj;
-static struct efivars efivars;
 
 static const struct platform_device_info gsmi_dev_info = {
 	.name		= "gsmi",
@@ -771,6 +792,78 @@ static const struct platform_device_info gsmi_dev_info = {
 	.dma_mask	= DMA_BIT_MASK(32),
 };
 
+#ifdef CONFIG_PM
+static void gsmi_log_s0ix_info(u8 cmd)
+{
+	unsigned long flags;
+
+	/*
+	 * If platform has not enabled S0ix logging, then no action is
+	 * necessary.
+	 */
+	if (!s0ix_logging_enable)
+		return;
+
+	spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+	memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+
+	gsmi_exec(GSMI_CALLBACK, cmd);
+
+	spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+}
+
+static int gsmi_log_s0ix_suspend(struct device *dev)
+{
+	/*
+	 * If system is not suspending via firmware using the standard ACPI Sx
+	 * types, then make a GSMI call to log the suspend info.
+	 */
+	if (!pm_suspend_via_firmware())
+		gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_SUSPEND);
+
+	/*
+	 * Always return success, since we do not want suspend
+	 * to fail just because of logging failure.
+	 */
+	return 0;
+}
+
+static int gsmi_log_s0ix_resume(struct device *dev)
+{
+	/*
+	 * If system did not resume via firmware, then make a GSMI call to log
+	 * the resume info and wake source.
+	 */
+	if (!pm_resume_via_firmware())
+		gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_RESUME);
+
+	/*
+	 * Always return success, since we do not want resume
+	 * to fail just because of logging failure.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops gsmi_pm_ops = {
+	.suspend_noirq = gsmi_log_s0ix_suspend,
+	.resume_noirq = gsmi_log_s0ix_resume,
+};
+
+static int gsmi_platform_driver_probe(struct platform_device *dev)
+{
+	return 0;
+}
+
+static struct platform_driver gsmi_driver_info = {
+	.driver = {
+		.name = "gsmi",
+		.pm = &gsmi_pm_ops,
+	},
+	.probe = gsmi_platform_driver_probe,
+};
+#endif
+
 static __init int gsmi_init(void)
 {
 	unsigned long flags;
@@ -782,6 +875,14 @@ static __init int gsmi_init(void)
 
 	gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
 
+#ifdef CONFIG_PM
+	ret = platform_driver_register(&gsmi_driver_info);
+	if (unlikely(ret)) {
+		printk(KERN_ERR "gsmi: unable to register platform driver\n");
+		return ret;
+	}
+#endif
+
 	/* register device */
 	gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
 	if (IS_ERR(gsmi_dev.pdev)) {
@@ -886,11 +987,14 @@
 		goto out_remove_bin_file;
 	}
 
+#ifdef CONFIG_EFI_VARS
 	ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
 	if (ret) {
 		printk(KERN_INFO "gsmi: Failed to register efivars\n");
-		goto out_remove_sysfs_files;
+		sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+		goto out_remove_bin_file;
 	}
+#endif
 
 	register_reboot_notifier(&gsmi_reboot_notifier);
 	register_die_notifier(&gsmi_die_notifier);
@@ -901,8 +1005,6 @@
 
 	return 0;
 
-out_remove_sysfs_files:
-	sysfs_remove_files(gsmi_kobj, gsmi_attrs);
 out_remove_bin_file:
 	sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
 out_err:
@@ -922,7 +1024,9 @@ static void __exit gsmi_exit(void)
 	unregister_die_notifier(&gsmi_die_notifier);
 	atomic_notifier_chain_unregister(&panic_notifier_list,
 					 &gsmi_panic_notifier);
+#ifdef CONFIG_EFI_VARS
 	efivars_unregister(&efivars);
+#endif
 
 	sysfs_remove_files(gsmi_kobj, gsmi_attrs);
 	sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
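[Editor's note, not part of the patch] Because s0ix_logging_enable is declared above as a writable module parameter (mode 0600), the S0ix hooks stay inert unless the platform opts in, e.g. by booting with gsmi.s0ix_logging_enable=1 on the kernel command line or by writing 1 to /sys/module/gsmi/parameters/s0ix_logging_enable at runtime. The parameter path is inferred from the module_param() declaration; it is not spelled out in this patch.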
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1aa67bb5d8c0..c0c0b4e4e281 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -198,7 +198,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
 
 	sec->name = name;
 
-	/* We want to export the raw partion with name ${name}_raw */
+	/* We want to export the raw partition with name ${name}_raw */
 	sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
 	if (!sec->raw_name) {
 		err = -ENOMEM;
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 7fa793672a7a..610a1558e0ed 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -453,8 +453,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
 	snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
 		 ALTERA_CVP_MGR_NAME, pci_name(pdev));
 
-	mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
-			      &altera_cvp_ops, conf);
+	mgr = devm_fpga_mgr_create(&pdev->dev, conf->mgr_name,
+				   &altera_cvp_ops, conf);
 	if (!mgr) {
 		ret = -ENOMEM;
 		goto err_unmap;
@@ -463,10 +463,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
 	pci_set_drvdata(pdev, mgr);
 
 	ret = fpga_mgr_register(mgr);
-	if (ret) {
-		fpga_mgr_free(mgr);
+	if (ret)
 		goto err_unmap;
-	}
 
 	ret = driver_create_file(&altera_cvp_driver.driver,
 				 &driver_attr_chkcfg);
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index 23660ccd634b..a78e49c63c64 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -121,18 +121,16 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
 	/* Get f2s bridge configuration saved in handoff register */
 	regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
 
-	br = fpga_bridge_create(dev, F2S_BRIDGE_NAME,
-				&altera_fpga2sdram_br_ops, priv);
+	br = devm_fpga_bridge_create(dev, F2S_BRIDGE_NAME,
+				     &altera_fpga2sdram_br_ops, priv);
 	if (!br)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, br);
 
 	ret = fpga_bridge_register(br);
-	if (ret) {
-		fpga_bridge_free(br);
+	if (ret)
 		return ret;
-	}
 
 	dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
 
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index ffd586c48ecf..dd58c4aea92e 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -213,7 +213,6 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
 	struct fpga_bridge *br;
 	struct resource *res;
 	u32 status, revision;
-	int ret;
 
 	if (!np)
 		return -ENODEV;
@@ -245,20 +244,14 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
 
 	priv->base_addr = base_addr;
 
-	br = fpga_bridge_create(dev, FREEZE_BRIDGE_NAME,
-				&altera_freeze_br_br_ops, priv);
+	br = devm_fpga_bridge_create(dev, FREEZE_BRIDGE_NAME,
				     &altera_freeze_br_br_ops, priv);
 	if (!br)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, br);
 
-	ret = fpga_bridge_register(br);
-	if (ret) {
-		fpga_bridge_free(br);
-		return ret;
-	}
-
-	return 0;
+	return fpga_bridge_register(br);
 }
 
 static int altera_freeze_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index a974d3f60321..77b95f251821 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -180,7 +180,8 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
 		}
 	}
 
-	br = fpga_bridge_create(dev, priv->name, &altera_hps2fpga_br_ops, priv);
+	br = devm_fpga_bridge_create(dev, priv->name,
+				     &altera_hps2fpga_br_ops, priv);
 	if (!br) {
 		ret = -ENOMEM;
 		goto err;
@@ -190,12 +191,10 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
 
 	ret = fpga_bridge_register(br);
 	if (ret)
-		goto err_free;
+		goto err;
 
 	return 0;
 
-err_free:
-	fpga_bridge_free(br);
 err:
 	clk_disable_unprepare(priv->clk);
 
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index 65e0b6a2c031..a7a3bf0b5202 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -177,7 +177,6 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
 {
 	struct alt_pr_priv *priv;
 	struct fpga_manager *mgr;
-	int ret;
 	u32 val;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -192,17 +191,13 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
 		(val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT,
 		(int)(val & ALT_PR_CSR_PR_START));
 
-	mgr = fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv);
+	mgr = devm_fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv);
 	if (!mgr)
 		return -ENOMEM;
 
 	dev_set_drvdata(dev, mgr);
 
-	ret = fpga_mgr_register(mgr);
-	if (ret)
-		fpga_mgr_free(mgr);
-
-	return ret;
+	return fpga_mgr_register(mgr);
 }
 EXPORT_SYMBOL_GPL(alt_pr_register);
 
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 24b25c626036..33aafda50af5 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -239,7 +239,6 @@ static int altera_ps_probe(struct spi_device *spi)
 	struct altera_ps_conf *conf;
 	const struct of_device_id *of_id;
 	struct fpga_manager *mgr;
-	int ret;
 
 	conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
 	if (!conf)
@@ -275,18 +274,14 @@ static int altera_ps_probe(struct spi_device *spi)
 	snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
 		 dev_driver_string(&spi->dev), dev_name(&spi->dev));
 
-	mgr = fpga_mgr_create(&spi->dev, conf->mgr_name,
-			      &altera_ps_ops, conf);
+	mgr = devm_fpga_mgr_create(&spi->dev, conf->mgr_name,
				   &altera_ps_ops, conf);
 	if (!mgr)
 		return -ENOMEM;
 
 	spi_set_drvdata(spi, mgr);
 
-	ret = fpga_mgr_register(mgr);
-	if (ret)
-		fpga_mgr_free(mgr);
-
-	return ret;
+	return fpga_mgr_register(mgr);
 }
 
 static int altera_ps_remove(struct spi_device *spi)
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 0e81d33af856..025aba3ea76c 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -70,7 +70,7 @@ static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
 	dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
 		incr ? '+' : '-', npages << PAGE_SHIFT,
 		current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
-		ret ? "- execeeded" : "");
+		ret ? "- exceeded" : "");
 
 	up_write(&current->mm->mmap_sem);
 
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 7cc041def8b3..3ff9f3a687ce 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -61,7 +61,6 @@ static int fme_br_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct fme_br_priv *priv;
 	struct fpga_bridge *br;
-	int ret;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -69,18 +68,14 @@ static int fme_br_probe(struct platform_device *pdev)
 
 	priv->pdata = dev_get_platdata(dev);
 
-	br = fpga_bridge_create(dev, "DFL FPGA FME Bridge",
-				&fme_bridge_ops, priv);
+	br = devm_fpga_bridge_create(dev, "DFL FPGA FME Bridge",
				     &fme_bridge_ops, priv);
 	if (!br)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, br);
 
-	ret = fpga_bridge_register(br);
-	if (ret)
-		fpga_bridge_free(br);
-
-	return ret;
+	return fpga_bridge_register(br);
 }
 
 static int fme_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
index b5ef405b6d88..76f37709dd1a 100644
--- a/drivers/fpga/dfl-fme-mgr.c
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -201,7 +201,7 @@ static int fme_mgr_write(struct fpga_manager *mgr,
 	}
 
 	if (count < 4) {
-		dev_err(dev, "Invaild PR bitstream size\n");
+		dev_err(dev, "Invalid PR bitstream size\n");
 		return -EINVAL;
 	}
 
@@ -287,7 +287,6 @@ static int fme_mgr_probe(struct platform_device *pdev)
 	struct fme_mgr_priv *priv;
 	struct fpga_manager *mgr;
 	struct resource *res;
-	int ret;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -309,19 +308,15 @@
 
 	fme_mgr_get_compat_id(priv->ioaddr, compat_id);
 
-	mgr = fpga_mgr_create(dev, "DFL FME FPGA Manager",
-			      &fme_mgr_ops, priv);
+	mgr = devm_fpga_mgr_create(dev, "DFL FME FPGA Manager",
				   &fme_mgr_ops, priv);
 	if (!mgr)
 		return -ENOMEM;
 
 	mgr->compat_id = compat_id;
 	platform_set_drvdata(pdev, mgr);
 
-	ret = fpga_mgr_register(mgr);
-	if (ret)
-		fpga_mgr_free(mgr);
-
-	return ret;
+	return fpga_mgr_register(mgr);
 }
 
 static int fme_mgr_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 51a5ac2293a7..ec134ec93f08 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -39,7 +39,7 @@ static int fme_region_probe(struct platform_device *pdev)
 	if (IS_ERR(mgr))
 		return -EPROBE_DEFER;
 
-	region = fpga_region_create(dev, mgr, fme_region_get_bridges);
+	region = devm_fpga_region_create(dev, mgr, fme_region_get_bridges);
 	if (!region) {
 		ret = -ENOMEM;
 		goto eprobe_mgr_put;
@@ -51,14 +51,12 @@ static int fme_region_probe(struct platform_device *pdev)
 
 	ret = fpga_region_register(region);
 	if (ret)
-		goto region_free;
+		goto eprobe_mgr_put;
 
 	dev_dbg(dev, "DFL FME FPGA Region probed\n");
 
 	return 0;
 
-region_free:
-	fpga_region_free(region);
 eprobe_mgr_put:
 	fpga_mgr_put(mgr);
 	return ret;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index a9b521bccb06..2c09e502e721 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -899,7 +899,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
 	if (!cdev)
 		return ERR_PTR(-ENOMEM);
 
-	cdev->region = fpga_region_create(info->dev, NULL, NULL);
+	cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
 	if (!cdev->region) {
 		ret = -ENOMEM;
 		goto free_cdev_exit;
@@ -911,7 +911,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
 
 	ret = fpga_region_register(cdev->region);
 	if (ret)
-		goto free_region_exit;
+		goto free_cdev_exit;
 
 	/* create and init build info for enumeration */
 	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
@@ -942,8 +942,6 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
 
 unregister_region_exit:
 	fpga_region_unregister(cdev->region);
-free_region_exit:
-	fpga_region_free(cdev->region);
 free_cdev_exit:
 	devm_kfree(info->dev, cdev);
 	return ERR_PTR(ret);
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index c983dac97501..80bd8f1b2aa6 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -324,6 +324,9 @@ ATTRIBUTE_GROUPS(fpga_bridge);
  * @br_ops: pointer to structure of fpga bridge ops
  * @priv: FPGA bridge private data
  *
+ * The caller of this function is responsible for freeing the bridge with
+ * fpga_bridge_free(). Using devm_fpga_bridge_create() instead is recommended.
+ *
  * Return: struct fpga_bridge or NULL
  */
 struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
@@ -378,8 +381,8 @@ error_kfree:
 EXPORT_SYMBOL_GPL(fpga_bridge_create);
 
 /**
- * fpga_bridge_free - free a fpga bridge and its id
- * @bridge: FPGA bridge struct created by fpga_bridge_create
+ * fpga_bridge_free - free a fpga bridge created by fpga_bridge_create()
+ * @bridge: FPGA bridge struct
  */
 void fpga_bridge_free(struct fpga_bridge *bridge)
 {
@@ -388,9 +391,56 @@ void fpga_bridge_free(struct fpga_bridge *bridge)
 }
 EXPORT_SYMBOL_GPL(fpga_bridge_free);
 
+static void devm_fpga_bridge_release(struct device *dev, void *res)
+{
+	struct fpga_bridge *bridge = *(struct fpga_bridge **)res;
+
+	fpga_bridge_free(bridge);
+}
+
 /**
- * fpga_bridge_register - register a fpga bridge
- * @bridge: FPGA bridge struct created by fpga_bridge_create
+ * devm_fpga_bridge_create - create and init a managed struct fpga_bridge
+ * @dev: FPGA bridge device from pdev
+ * @name: FPGA bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: FPGA bridge private data
+ *
+ * This function is intended for use in a FPGA bridge driver's probe function.
+ * After the bridge driver creates the struct with devm_fpga_bridge_create(), it
+ * should register the bridge with fpga_bridge_register(). The bridge driver's
+ * remove function should call fpga_bridge_unregister(). The bridge struct
+ * allocated with this function will be freed automatically on driver detach.
+ * This includes the case of a probe function returning error before calling
+ * fpga_bridge_register(), the struct will still get cleaned up.
+ *
+ * Return: struct fpga_bridge or NULL
+ */
+struct fpga_bridge
+*devm_fpga_bridge_create(struct device *dev, const char *name,
+			 const struct fpga_bridge_ops *br_ops, void *priv)
+{
+	struct fpga_bridge **ptr, *bridge;
+
+	ptr = devres_alloc(devm_fpga_bridge_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	bridge = fpga_bridge_create(dev, name, br_ops, priv);
+	if (!bridge) {
+		devres_free(ptr);
+	} else {
+		*ptr = bridge;
+		devres_add(dev, ptr);
+	}
+
+	return bridge;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_bridge_create);
+
+/**
+ * fpga_bridge_register - register a FPGA bridge
+ *
+ * @bridge: FPGA bridge struct
  *
  * Return: 0 for success, error code otherwise.
  */
@@ -412,8 +462,11 @@ int fpga_bridge_register(struct fpga_bridge *bridge)
 EXPORT_SYMBOL_GPL(fpga_bridge_register);
 
 /**
- * fpga_bridge_unregister - unregister and free a fpga bridge
- * @bridge: FPGA bridge struct created by fpga_bridge_create
+ * fpga_bridge_unregister - unregister a FPGA bridge
+ *
+ * @bridge: FPGA bridge struct
+ *
+ * This function is intended for use in a FPGA bridge driver's remove function.
  */
 void fpga_bridge_unregister(struct fpga_bridge *bridge)
 {
@@ -430,9 +483,6 @@ EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
 
 static void fpga_bridge_dev_release(struct device *dev)
 {
-	struct fpga_bridge *bridge = to_fpga_bridge(dev);
-
-	fpga_bridge_free(bridge);
 }
 
 static int __init fpga_bridge_dev_init(void)
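[Editor's sketch, not part of the patch] The kernel-doc added above spells out the intended pairing. As a sketch, a bridge driver's probe/remove pair under the managed API would look like this, mirroring the dfl-fme-br conversion earlier in this series; all example_* names are hypothetical, and a real driver fills in the ops:

static const struct fpga_bridge_ops example_br_ops = {
	/* .enable_set / .enable_show filled in by a real driver */
};

static int example_br_probe(struct platform_device *pdev)
{
	struct fpga_bridge *br;

	br = devm_fpga_bridge_create(&pdev->dev, "Example FPGA Bridge",
				     &example_br_ops, NULL);
	if (!br)
		return -ENOMEM;

	platform_set_drvdata(pdev, br);

	/* No fpga_bridge_free() on failure: devres cleans up the struct. */
	return fpga_bridge_register(br);
}

static int example_br_remove(struct platform_device *pdev)
{
	fpga_bridge_unregister(platform_get_drvdata(pdev));
	return 0;
}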
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index a41b07e37884..c3866816456a 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -558,6 +558,9 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
  * @mops: pointer to structure of fpga manager ops
  * @priv: fpga manager private data
  *
+ * The caller of this function is responsible for freeing the struct with
+ * fpga_mgr_free(). Using devm_fpga_mgr_create() instead is recommended.
+ *
  * Return: pointer to struct fpga_manager or NULL
  */
 struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
@@ -618,8 +621,8 @@ error_kfree:
 EXPORT_SYMBOL_GPL(fpga_mgr_create);
 
 /**
- * fpga_mgr_free - deallocate a FPGA manager
- * @mgr: fpga manager struct created by fpga_mgr_create
+ * fpga_mgr_free - free a FPGA manager created with fpga_mgr_create()
+ * @mgr: fpga manager struct
  */
 void fpga_mgr_free(struct fpga_manager *mgr)
 {
@@ -628,9 +631,55 @@ void fpga_mgr_free(struct fpga_manager *mgr)
 }
 EXPORT_SYMBOL_GPL(fpga_mgr_free);
 
+static void devm_fpga_mgr_release(struct device *dev, void *res)
+{
+	struct fpga_manager *mgr = *(struct fpga_manager **)res;
+
+	fpga_mgr_free(mgr);
+}
+
+/**
+ * devm_fpga_mgr_create - create and initialize a managed FPGA manager struct
+ * @dev: fpga manager device from pdev
+ * @name: fpga manager name
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * This function is intended for use in a FPGA manager driver's probe function.
+ * After the manager driver creates the manager struct with
+ * devm_fpga_mgr_create(), it should register it with fpga_mgr_register(). The
+ * manager driver's remove function should call fpga_mgr_unregister(). The
+ * manager struct allocated with this function will be freed automatically on
+ * driver detach. This includes the case of a probe function returning error
+ * before calling fpga_mgr_register(), the struct will still get cleaned up.
+ *
+ * Return: pointer to struct fpga_manager or NULL
+ */
+struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
+					  const struct fpga_manager_ops *mops,
+					  void *priv)
+{
+	struct fpga_manager **ptr, *mgr;
+
+	ptr = devres_alloc(devm_fpga_mgr_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	mgr = fpga_mgr_create(dev, name, mops, priv);
+	if (!mgr) {
+		devres_free(ptr);
+	} else {
+		*ptr = mgr;
+		devres_add(dev, ptr);
+	}
+
+	return mgr;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
+
 /**
  * fpga_mgr_register - register a FPGA manager
- * @mgr: fpga manager struct created by fpga_mgr_create
+ * @mgr: fpga manager struct
  *
  * Return: 0 on success, negative error code otherwise.
  */
@@ -661,8 +710,10 @@ error_device:
 EXPORT_SYMBOL_GPL(fpga_mgr_register);
 
 /**
- * fpga_mgr_unregister - unregister and free a FPGA manager
+ * fpga_mgr_unregister - unregister a FPGA manager
  * @mgr: fpga manager struct
+ *
+ * This function is intended for use in a FPGA manager driver's remove function.
  */
 void fpga_mgr_unregister(struct fpga_manager *mgr)
 {
@@ -681,9 +732,6 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unregister);
 
 static void fpga_mgr_dev_release(struct device *dev)
 {
-	struct fpga_manager *mgr = to_fpga_manager(dev);
-
-	fpga_mgr_free(mgr);
 }
 
 static int __init fpga_mgr_class_init(void)
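[Editor's sketch, not part of the patch] Managers follow the same devres pattern as bridges. The one behavioral point drivers must observe is on the fpga_mgr_register() error path: the explicit fpga_mgr_free() goes away, while unrelated cleanup (clocks, mappings) stays, as in the socfpga-a10 and zynq hunks later in this series. A hypothetical sketch:

static const struct fpga_manager_ops example_mgr_ops = {
	/* .write_init / .write / .write_complete filled in by a real driver */
};

static int example_mgr_probe(struct platform_device *pdev)
{
	struct fpga_manager *mgr;

	mgr = devm_fpga_mgr_create(&pdev->dev, "Example FPGA Manager",
				   &example_mgr_ops, NULL);
	if (!mgr)
		return -ENOMEM;

	platform_set_drvdata(pdev, mgr);

	/*
	 * No fpga_mgr_free() needed on failure: the devres release handler
	 * frees the struct when probe fails or the driver detaches.
	 */
	return fpga_mgr_register(mgr);
}

static int example_mgr_remove(struct platform_device *pdev)
{
	fpga_mgr_unregister(platform_get_drvdata(pdev));
	return 0;
}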
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 0d65220d5ec5..bde5a9d460c5 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -185,6 +185,10 @@ ATTRIBUTE_GROUPS(fpga_region);
  * @mgr: manager that programs this region
  * @get_bridges: optional function to get bridges to a list
  *
+ * The caller of this function is responsible for freeing the resulting region
+ * struct with fpga_region_free(). Using devm_fpga_region_create() instead is
+ * recommended.
+ *
  * Return: struct fpga_region or NULL
  */
 struct fpga_region
@@ -230,8 +234,8 @@ err_free:
 EXPORT_SYMBOL_GPL(fpga_region_create);
 
 /**
- * fpga_region_free - free a struct fpga_region
- * @region: FPGA region created by fpga_region_create
+ * fpga_region_free - free a FPGA region created by fpga_region_create()
+ * @region: FPGA region
  */
 void fpga_region_free(struct fpga_region *region)
 {
@@ -240,21 +244,69 @@ void fpga_region_free(struct fpga_region *region)
 }
 EXPORT_SYMBOL_GPL(fpga_region_free);
 
+static void devm_fpga_region_release(struct device *dev, void *res)
+{
+	struct fpga_region *region = *(struct fpga_region **)res;
+
+	fpga_region_free(region);
+}
+
+/**
+ * devm_fpga_region_create - create and initialize a managed FPGA region struct
+ * @dev: device parent
+ * @mgr: manager that programs this region
+ * @get_bridges: optional function to get bridges to a list
+ *
+ * This function is intended for use in a FPGA region driver's probe function.
+ * After the region driver creates the region struct with
+ * devm_fpga_region_create(), it should register it with fpga_region_register().
+ * The region driver's remove function should call fpga_region_unregister().
+ * The region struct allocated with this function will be freed automatically on
+ * driver detach. This includes the case of a probe function returning error
+ * before calling fpga_region_register(), the struct will still get cleaned up.
+ *
+ * Return: struct fpga_region or NULL
+ */
+struct fpga_region
+*devm_fpga_region_create(struct device *dev,
+			 struct fpga_manager *mgr,
+			 int (*get_bridges)(struct fpga_region *))
+{
+	struct fpga_region **ptr, *region;
+
+	ptr = devres_alloc(devm_fpga_region_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	region = fpga_region_create(dev, mgr, get_bridges);
+	if (!region) {
+		devres_free(ptr);
+	} else {
+		*ptr = region;
+		devres_add(dev, ptr);
+	}
+
+	return region;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_region_create);
+
 /**
  * fpga_region_register - register a FPGA region
- * @region: FPGA region created by fpga_region_create
+ * @region: FPGA region
+ *
  * Return: 0 or -errno
  */
 int fpga_region_register(struct fpga_region *region)
 {
 	return device_add(&region->dev);
-
 }
 EXPORT_SYMBOL_GPL(fpga_region_register);
 
 /**
- * fpga_region_unregister - unregister and free a FPGA region
+ * fpga_region_unregister - unregister a FPGA region
  * @region: FPGA region
+ *
+ * This function is intended for use in a FPGA region driver's remove function.
  */
 void fpga_region_unregister(struct fpga_region *region)
 {
@@ -264,9 +316,6 @@ EXPORT_SYMBOL_GPL(fpga_region_unregister);
 
 static void fpga_region_dev_release(struct device *dev)
 {
-	struct fpga_region *region = to_fpga_region(dev);
-
-	fpga_region_free(region);
 }
 
 /**
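[Editor's sketch, not part of the patch] Region drivers follow the same scheme, with the extra wrinkle that the manager reference is still taken and released by the driver itself, mirroring the dfl-fme-region change earlier in this series. A hypothetical sketch; the fpga_mgr_get(dev->parent) lookup assumes the parent device registered a manager:

static int example_region_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fpga_region *region;
	struct fpga_manager *mgr;
	int ret;

	mgr = fpga_mgr_get(dev->parent);
	if (IS_ERR(mgr))
		return -EPROBE_DEFER;

	region = devm_fpga_region_create(dev, mgr, NULL);
	if (!region) {
		ret = -ENOMEM;
		goto err_put;
	}

	platform_set_drvdata(pdev, region);

	ret = fpga_region_register(region);
	if (ret)
		goto err_put;	/* the region struct itself is devres-managed */

	return 0;

err_put:
	fpga_mgr_put(mgr);
	return ret;
}

static int example_region_remove(struct platform_device *pdev)
{
	struct fpga_region *region = platform_get_drvdata(pdev);
	struct fpga_manager *mgr = region->mgr;

	fpga_region_unregister(region);
	fpga_mgr_put(mgr);
	return 0;
}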
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 5981c7ee7a7d..6154661b8f76 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -175,18 +175,14 @@ static int ice40_fpga_probe(struct spi_device *spi)
 		return ret;
 	}
 
-	mgr = fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager",
-			      &ice40_fpga_ops, priv);
+	mgr = devm_fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager",
				   &ice40_fpga_ops, priv);
 	if (!mgr)
 		return -ENOMEM;
 
 	spi_set_drvdata(spi, mgr);
 
-	ret = fpga_mgr_register(mgr);
-	if (ret)
-		fpga_mgr_free(mgr);
-
-	return ret;
+	return fpga_mgr_register(mgr);
 }
 
 static int ice40_fpga_remove(struct spi_device *spi)
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index a582e0000c97..4d8a87641587 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -356,25 +356,20 @@ static int machxo2_spi_probe(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
 	struct fpga_manager *mgr;
-	int ret;
 
 	if (spi->max_speed_hz > MACHXO2_MAX_SPEED) {
 		dev_err(dev, "Speed is too high\n");
 		return -EINVAL;
 	}
 
-	mgr = fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager",
-			      &machxo2_ops, spi);
+	mgr = devm_fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager",
				   &machxo2_ops, spi);
 	if (!mgr)
 		return -ENOMEM;
 
 	spi_set_drvdata(spi, mgr);
 
-	ret = fpga_mgr_register(mgr);
-	if (ret)
-		fpga_mgr_free(mgr);
-
-	return ret;
+	return fpga_mgr_register(mgr);
 }
 
 static int machxo2_spi_remove(struct spi_device *spi)
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 052a1342ab7e..122286fd255a 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -410,7 +410,7 @@ static int of_fpga_region_probe(struct platform_device *pdev)
 	if (IS_ERR(mgr))
 		return -EPROBE_DEFER;
 
-	region = fpga_region_create(dev, mgr, of_fpga_region_get_bridges);
+	region = devm_fpga_region_create(dev, mgr, of_fpga_region_get_bridges);
 	if (!region) {
 		ret = -ENOMEM;
 		goto eprobe_mgr_put;
@@ -418,7 +418,7 @@ static int of_fpga_region_probe(struct platform_device *pdev)
 
 	ret = fpga_region_register(region);
 	if (ret)
-		goto eprobe_free;
+		goto eprobe_mgr_put;
 
 	of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
 	dev_set_drvdata(dev, region);
@@ -427,8 +427,6 @@ static int of_fpga_region_probe(struct platform_device *pdev)
 
 	return 0;
 
-eprobe_free:
-	fpga_region_free(region);
 eprobe_mgr_put:
 	fpga_mgr_put(mgr);
 	return ret;
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index be30c48eb6e4..573d88bdf730 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -508,8 +508,8 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
 		return -EBUSY;
 	}
 
-	mgr = fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager",
-			      &socfpga_a10_fpga_mgr_ops, priv);
+	mgr = devm_fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager",
				   &socfpga_a10_fpga_mgr_ops, priv);
 	if (!mgr)
 		return -ENOMEM;
 
@@ -517,7 +517,6 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
 
 	ret = fpga_mgr_register(mgr);
 	if (ret) {
-		fpga_mgr_free(mgr);
 		clk_disable_unprepare(priv->clk);
 		return ret;
 	}
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 959d71f26896..4a8a2fcd4e6c 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -571,18 +571,14 @@ static int socfpga_fpga_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	mgr = fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
-			      &socfpga_fpga_ops, priv);
+	mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
				   &socfpga_fpga_ops, priv);
 	if (!mgr)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, mgr);
 
-	ret = fpga_mgr_register(mgr);
-	if (ret)
-		fpga_mgr_free(mgr);
-
-	return ret;
+	return fpga_mgr_register(mgr);
 }
 
 static int socfpga_fpga_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 08efd1895b1b..dc22a5842609 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -118,7 +118,6 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
 	struct ts73xx_fpga_priv *priv;
 	struct fpga_manager *mgr;
 	struct resource *res;
-	int ret;
 
 	priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -133,18 +132,14 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
 		return PTR_ERR(priv->io_base);
 	}
 
-	mgr = fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
-			      &ts73xx_fpga_ops, priv);
+	mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
				   &ts73xx_fpga_ops, priv);
 	if (!mgr)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, mgr);
 
-	ret = fpga_mgr_register(mgr);
-	if (ret)
-		fpga_mgr_free(mgr);
-
-	return ret;
+	return fpga_mgr_register(mgr);
 }
 
 static int ts73xx_fpga_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 07ba1539e82c..641036135207 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -121,8 +121,8 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
 
 	clk_disable(priv->clk);
 
-	br = fpga_bridge_create(&pdev->dev, "Xilinx PR Decoupler",
-				&xlnx_pr_decoupler_br_ops, priv);
+	br = devm_fpga_bridge_create(&pdev->dev, "Xilinx PR Decoupler",
				     &xlnx_pr_decoupler_br_ops, priv);
 	if (!br) {
 		err = -ENOMEM;
 		goto err_clk;
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 8d1945966533..469486be20c4 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -144,7 +144,6 @@ static int xilinx_spi_probe(struct spi_device *spi)
 {
 	struct xilinx_spi_conf *conf;
 	struct fpga_manager *mgr;
-	int ret;
 
 	conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
 	if (!conf)
@@ -167,18 +166,15 @@ static int xilinx_spi_probe(struct spi_device *spi)
 		return PTR_ERR(conf->done);
 	}
 
-	mgr = fpga_mgr_create(&spi->dev, "Xilinx Slave Serial FPGA Manager",
-			      &xilinx_spi_ops, conf);
+	mgr = devm_fpga_mgr_create(&spi->dev,
				   "Xilinx Slave Serial FPGA Manager",
				   &xilinx_spi_ops, conf);
 	if (!mgr)
 		return -ENOMEM;
 
 	spi_set_drvdata(spi, mgr);
 
-	ret = fpga_mgr_register(mgr);
-	if (ret)
-		fpga_mgr_free(mgr);
-
-	return ret;
+	return fpga_mgr_register(mgr);
 }
 
 static int xilinx_spi_remove(struct spi_device *spi)
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 3110e00121ca..bb82efeebb9d 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -614,8 +614,8 @@ static int zynq_fpga_probe(struct platform_device *pdev)
614 614
615 clk_disable(priv->clk); 615 clk_disable(priv->clk);
616 616
617 mgr = fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager", 617 mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
618 &zynq_fpga_ops, priv); 618 &zynq_fpga_ops, priv);
619 if (!mgr) 619 if (!mgr)
620 return -ENOMEM; 620 return -ENOMEM;
621 621
@@ -624,7 +624,6 @@ static int zynq_fpga_probe(struct platform_device *pdev)
 	err = fpga_mgr_register(mgr);
 	if (err) {
 		dev_err(dev, "unable to register FPGA manager\n");
-		fpga_mgr_free(mgr);
 		clk_unprepare(priv->clk);
 		return err;
 	}
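
All of the FPGA conversions above follow the same pattern: devm_fpga_mgr_create() (and devm_fpga_bridge_create()) tie the object's lifetime to the struct device, so every fpga_mgr_free() call in the error paths simply disappears. A minimal probe sketch showing the resulting shape; foo_fpga_probe and foo_fpga_ops are hypothetical names, not part of this series:

	static int foo_fpga_probe(struct platform_device *pdev)
	{
		struct fpga_manager *mgr;

		/* freed automatically when the underlying device goes away */
		mgr = devm_fpga_mgr_create(&pdev->dev, "Foo FPGA Manager",
					   &foo_fpga_ops, NULL);
		if (!mgr)
			return -ENOMEM;

		platform_set_drvdata(pdev, mgr);

		/* nothing to unwind by hand if registration fails */
		return fpga_mgr_register(mgr);
	}
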
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 741857d80da1..de8193f3b838 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -79,85 +79,96 @@ void vmbus_setevent(struct vmbus_channel *channel)
 }
 EXPORT_SYMBOL_GPL(vmbus_setevent);
 
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
-	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
-	       void (*onchannelcallback)(void *context), void *context)
+/* vmbus_free_ring - drop mapping of ring buffer */
+void vmbus_free_ring(struct vmbus_channel *channel)
+{
+	hv_ringbuffer_cleanup(&channel->outbound);
+	hv_ringbuffer_cleanup(&channel->inbound);
+
+	if (channel->ringbuffer_page) {
+		__free_pages(channel->ringbuffer_page,
+			     get_order(channel->ringbuffer_pagecount
+				       << PAGE_SHIFT));
+		channel->ringbuffer_page = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(vmbus_free_ring);
+
+/* vmbus_alloc_ring - allocate and map pages for ring buffer */
+int vmbus_alloc_ring(struct vmbus_channel *newchannel,
+		     u32 send_size, u32 recv_size)
+{
+	struct page *page;
+	int order;
+
+	if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
+		return -EINVAL;
+
+	/* Allocate the ring buffer */
+	order = get_order(send_size + recv_size);
+	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
+				GFP_KERNEL|__GFP_ZERO, order);
+
+	if (!page)
+		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
+
+	if (!page)
+		return -ENOMEM;
+
+	newchannel->ringbuffer_page = page;
+	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
+	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
+
+static int __vmbus_open(struct vmbus_channel *newchannel,
+		       void *userdata, u32 userdatalen,
+		       void (*onchannelcallback)(void *context), void *context)
 {
 	struct vmbus_channel_open_channel *open_msg;
 	struct vmbus_channel_msginfo *open_info = NULL;
+	struct page *page = newchannel->ringbuffer_page;
+	u32 send_pages, recv_pages;
 	unsigned long flags;
-	int ret, err = 0;
-	struct page *page;
+	int err;
 
-	if (send_ringbuffer_size % PAGE_SIZE ||
-	    recv_ringbuffer_size % PAGE_SIZE)
+	if (userdatalen > MAX_USER_DEFINED_BYTES)
 		return -EINVAL;
 
+	send_pages = newchannel->ringbuffer_send_offset;
+	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
 	spin_lock_irqsave(&newchannel->lock, flags);
-	if (newchannel->state == CHANNEL_OPEN_STATE) {
-		newchannel->state = CHANNEL_OPENING_STATE;
-	} else {
+	if (newchannel->state != CHANNEL_OPEN_STATE) {
 		spin_unlock_irqrestore(&newchannel->lock, flags);
 		return -EINVAL;
 	}
 	spin_unlock_irqrestore(&newchannel->lock, flags);
 
+	newchannel->state = CHANNEL_OPENING_STATE;
 	newchannel->onchannel_callback = onchannelcallback;
 	newchannel->channel_callback_context = context;
 
-	/* Allocate the ring buffer */
-	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
-				GFP_KERNEL|__GFP_ZERO,
-				get_order(send_ringbuffer_size +
-				recv_ringbuffer_size));
-
-	if (!page)
-		page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
-				   get_order(send_ringbuffer_size +
-				   recv_ringbuffer_size));
-
-	if (!page) {
-		err = -ENOMEM;
-		goto error_set_chnstate;
-	}
-
-	newchannel->ringbuffer_pages = page_address(page);
-	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
-					   recv_ringbuffer_size) >> PAGE_SHIFT;
-
-	ret = hv_ringbuffer_init(&newchannel->outbound, page,
-				 send_ringbuffer_size >> PAGE_SHIFT);
-
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
-
-	ret = hv_ringbuffer_init(&newchannel->inbound,
-				 &page[send_ringbuffer_size >> PAGE_SHIFT],
-				 recv_ringbuffer_size >> PAGE_SHIFT);
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
+	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+	if (err)
+		goto error_clean_ring;
 
+	err = hv_ringbuffer_init(&newchannel->inbound,
+				 &page[send_pages], recv_pages);
+	if (err)
+		goto error_clean_ring;
 
 	/* Establish the gpadl for the ring buffer */
 	newchannel->ringbuffer_gpadlhandle = 0;
 
-	ret = vmbus_establish_gpadl(newchannel,
-				    page_address(page),
-				    send_ringbuffer_size +
-				    recv_ringbuffer_size,
+	err = vmbus_establish_gpadl(newchannel,
+				    page_address(newchannel->ringbuffer_page),
+				    (send_pages + recv_pages) << PAGE_SHIFT,
 				    &newchannel->ringbuffer_gpadlhandle);
-
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
+	if (err)
+		goto error_clean_ring;
 
 	/* Create and init the channel open message */
 	open_info = kmalloc(sizeof(*open_info) +
@@ -176,15 +187,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	open_msg->openid = newchannel->offermsg.child_relid;
 	open_msg->child_relid = newchannel->offermsg.child_relid;
 	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
-	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
-						     PAGE_SHIFT;
+	open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
 	open_msg->target_vp = newchannel->target_vp;
 
-	if (userdatalen > MAX_USER_DEFINED_BYTES) {
-		err = -EINVAL;
-		goto error_free_gpadl;
-	}
-
 	if (userdatalen)
 		memcpy(open_msg->userdata, userdata, userdatalen);
 
@@ -195,18 +200,16 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	if (newchannel->rescind) {
 		err = -ENODEV;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
-	ret = vmbus_post_msg(open_msg,
+	err = vmbus_post_msg(open_msg,
 			     sizeof(struct vmbus_channel_open_channel), true);
 
-	trace_vmbus_open(open_msg, ret);
+	trace_vmbus_open(open_msg, err);
 
-	if (ret != 0) {
-		err = ret;
+	if (err != 0)
 		goto error_clean_msglist;
-	}
 
 	wait_for_completion(&open_info->waitevent);
 
@@ -216,12 +219,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	if (newchannel->rescind) {
 		err = -ENODEV;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
 	if (open_info->response.open_result.status) {
 		err = -EAGAIN;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
 	newchannel->state = CHANNEL_OPENED_STATE;
@@ -232,19 +235,50 @@ error_clean_msglist:
 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 	list_del(&open_info->msglistentry);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
+error_free_info:
+	kfree(open_info);
 error_free_gpadl:
 	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
-	kfree(open_info);
-error_free_pages:
+	newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
 	hv_ringbuffer_cleanup(&newchannel->outbound);
 	hv_ringbuffer_cleanup(&newchannel->inbound);
-	__free_pages(page,
-		     get_order(send_ringbuffer_size + recv_ringbuffer_size));
-error_set_chnstate:
 	newchannel->state = CHANNEL_OPEN_STATE;
 	return err;
 }
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+		       void (*onchannelcallback)(void *context), void *context)
+{
+	return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+	       void *userdata, u32 userdatalen,
+	       void (*onchannelcallback)(void *context), void *context)
+{
+	int err;
+
+	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+			       recv_ringbuffer_size);
+	if (err)
+		return err;
+
+	err = __vmbus_open(newchannel, userdata, userdatalen,
+			   onchannelcallback, context);
+	if (err)
+		vmbus_free_ring(newchannel);
+
+	return err;
+}
 EXPORT_SYMBOL_GPL(vmbus_open);
 
 /* Used for Hyper-V Socket: a guest client's connect() to the host */
@@ -612,10 +646,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
 	 * here we should skip most of the below cleanup work.
 	 */
-	if (channel->state != CHANNEL_OPENED_STATE) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (channel->state != CHANNEL_OPENED_STATE)
+		return -EINVAL;
 
 	channel->state = CHANNEL_OPEN_STATE;
 
@@ -637,11 +669,10 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 		 * If we failed to post the close msg,
 		 * it is perhaps better to leak memory.
 		 */
-		goto out;
 	}
 
 	/* Tear down the gpadl for the channel's ring buffer */
-	if (channel->ringbuffer_gpadlhandle) {
+	else if (channel->ringbuffer_gpadlhandle) {
 		ret = vmbus_teardown_gpadl(channel,
 					   channel->ringbuffer_gpadlhandle);
 		if (ret) {
@@ -650,74 +681,78 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 			 * If we failed to teardown gpadl,
 			 * it is perhaps better to leak memory.
 			 */
-			goto out;
 		}
-	}
-
-	/* Cleanup the ring buffers for this channel */
-	hv_ringbuffer_cleanup(&channel->outbound);
-	hv_ringbuffer_cleanup(&channel->inbound);
 
-	free_pages((unsigned long)channel->ringbuffer_pages,
-		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+		channel->ringbuffer_gpadlhandle = 0;
+	}
 
-out:
 	return ret;
 }
 
-/*
- * vmbus_close - Close the specified channel
- */
-void vmbus_close(struct vmbus_channel *channel)
+/* disconnect ring - close all channels */
+int vmbus_disconnect_ring(struct vmbus_channel *channel)
 {
-	struct list_head *cur, *tmp;
-	struct vmbus_channel *cur_channel;
+	struct vmbus_channel *cur_channel, *tmp;
+	unsigned long flags;
+	LIST_HEAD(list);
+	int ret;
 
-	if (channel->primary_channel != NULL) {
-		/*
-		 * We will only close sub-channels when
-		 * the primary is closed.
-		 */
-		return;
-	}
-	/*
-	 * Close all the sub-channels first and then close the
-	 * primary channel.
-	 */
-	list_for_each_safe(cur, tmp, &channel->sc_list) {
-		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-		if (cur_channel->rescind) {
+	if (channel->primary_channel != NULL)
+		return -EINVAL;
+
+	/* Snapshot the list of subchannels */
+	spin_lock_irqsave(&channel->lock, flags);
+	list_splice_init(&channel->sc_list, &list);
+	channel->num_sc = 0;
+	spin_unlock_irqrestore(&channel->lock, flags);
+
+	list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+		if (cur_channel->rescind)
 			wait_for_completion(&cur_channel->rescind_event);
-			mutex_lock(&vmbus_connection.channel_mutex);
-			vmbus_close_internal(cur_channel);
-			hv_process_channel_removal(
-					   cur_channel->offermsg.child_relid);
-		} else {
-			mutex_lock(&vmbus_connection.channel_mutex);
-			vmbus_close_internal(cur_channel);
+
+		mutex_lock(&vmbus_connection.channel_mutex);
+		if (vmbus_close_internal(cur_channel) == 0) {
+			vmbus_free_ring(cur_channel);
+
+			if (cur_channel->rescind)
+				hv_process_channel_removal(cur_channel);
 		}
 		mutex_unlock(&vmbus_connection.channel_mutex);
 	}
+
 	/*
 	 * Now close the primary.
 	 */
 	mutex_lock(&vmbus_connection.channel_mutex);
-	vmbus_close_internal(channel);
+	ret = vmbus_close_internal(channel);
 	mutex_unlock(&vmbus_connection.channel_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+	if (vmbus_disconnect_ring(channel) == 0)
+		vmbus_free_ring(channel);
 }
 EXPORT_SYMBOL_GPL(vmbus_close);
 
 /**
  * vmbus_sendpacket() - Send the specified buffer on the given channel
- * @channel: Pointer to vmbus_channel structure.
- * @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
+ * @channel: Pointer to vmbus_channel structure
+ * @buffer: Pointer to the buffer you want to send the data from.
+ * @bufferlen: Maximum size of what the buffer holds.
  * @requestid: Identifier of the request
- * @type: Type of packet that is being send e.g. negotiate, time
+ * @type: Type of packet that is being sent e.g. negotiate, time
  *	packet etc.
+ * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
  *
- * Sends data in @buffer directly to hyper-v via the vmbus
- * This will send the data unparsed to hyper-v.
+ * Sends data in @buffer directly to Hyper-V via the vmbus.
+ * This will send the data unparsed to Hyper-V.
  *
  * Mainly used by Hyper-V drivers.
  */
@@ -850,12 +885,13 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
 
 /**
- * vmbus_recvpacket() - Retrieve the user packet on the specified channel
- * @channel: Pointer to vmbus_channel structure.
+ * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
+ * @channel: Pointer to vmbus_channel structure
  * @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
- * @buffer_actual_len: The actual size of the data after it was received
+ * @bufferlen: Maximum size of what the buffer can hold.
+ * @buffer_actual_len: The actual size of the data after it was received.
  * @requestid: Identifier of the request
+ * @raw: true means keep the vmpacket_descriptor header in the received data.
  *
  * Receives directly from the hyper-v vmbus and puts the data it received
  * into Buffer. This will receive the data unparsed from hyper-v.
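
Taken together, the channel.c changes split the old monolithic vmbus_open() into vmbus_alloc_ring(), __vmbus_open()/vmbus_connect_ring(), and vmbus_disconnect_ring()/vmbus_free_ring(), so a driver can keep its ring buffer pages across a close/re-open cycle. A sketch of that usage; example_reconnect and the 16-page sizes are illustrative, not from this series:

	static int example_reconnect(struct vmbus_channel *chan,
				     void (*cb)(void *), void *ctx)
	{
		int err;

		/* allocate and map the pages once; sizes must be page-aligned */
		err = vmbus_alloc_ring(chan, 16 * PAGE_SIZE, 16 * PAGE_SIZE);
		if (err)
			return err;

		err = vmbus_connect_ring(chan, cb, ctx); /* open, reusing the ring */
		if (err)
			goto free_ring;

		/* ... do I/O on the channel ... */

		err = vmbus_disconnect_ring(chan); /* close; the ring survives */
		if (err)
			goto free_ring;

		/* the same ring can now be passed to vmbus_connect_ring() again */
		return 0;

	free_ring:
		vmbus_free_ring(chan);
		return err;
	}
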
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0f0e091c117c..6277597d3d58 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -198,24 +198,19 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
 }
 
 /**
- * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
+ * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
  * @icmsghdrp: Pointer to msg header structure
- * @icmsg_negotiate: Pointer to negotiate message structure
  * @buf: Raw buffer channel data
+ * @fw_version: The framework versions we can support.
+ * @fw_vercnt: The size of @fw_version.
+ * @srv_version: The service versions we can support.
+ * @srv_vercnt: The size of @srv_version.
+ * @nego_fw_version: The selected framework version.
+ * @nego_srv_version: The selected service version.
  *
- * @icmsghdrp is of type &struct icmsg_hdr.
- * Set up and fill in default negotiate response message.
- *
- * The fw_version and fw_vercnt specifies the framework version that
- * we can support.
- *
- * The srv_version and srv_vercnt specifies the service
- * versions we can support.
- *
- * Versions are given in decreasing order.
- *
- * nego_fw_version and nego_srv_version store the selected protocol versions.
+ * Note: Versions are given in decreasing order.
  *
+ * Set up and fill in default negotiate response message.
  * Mainly used by Hyper-V drivers.
  */
 bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
@@ -385,21 +380,14 @@ static void vmbus_release_relid(u32 relid)
 	trace_vmbus_release_relid(&msg, ret);
 }
 
-void hv_process_channel_removal(u32 relid)
+void hv_process_channel_removal(struct vmbus_channel *channel)
 {
+	struct vmbus_channel *primary_channel;
 	unsigned long flags;
-	struct vmbus_channel *primary_channel, *channel;
 
 	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
-
-	/*
-	 * Make sure channel is valid as we may have raced.
-	 */
-	channel = relid2channel(relid);
-	if (!channel)
-		return;
-
 	BUG_ON(!channel->rescind);
+
 	if (channel->target_cpu != get_cpu()) {
 		put_cpu();
 		smp_call_function_single(channel->target_cpu,
@@ -429,7 +417,7 @@ void hv_process_channel_removal(u32 relid)
 		cpumask_clear_cpu(channel->target_cpu,
 				  &primary_channel->alloced_cpus_in_node);
 
-	vmbus_release_relid(relid);
+	vmbus_release_relid(channel->offermsg.child_relid);
 
 	free_channel(channel);
 }
@@ -606,16 +594,18 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 	bool perf_chn = vmbus_devs[dev_type].perf_device;
 	struct vmbus_channel *primary = channel->primary_channel;
 	int next_node;
-	struct cpumask available_mask;
+	cpumask_var_t available_mask;
 	struct cpumask *alloced_mask;
 
 	if ((vmbus_proto_version == VERSION_WS2008) ||
-	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
+	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
+	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
 		/*
 		 * Prior to win8, all channel interrupts are
 		 * delivered on cpu 0.
 		 * Also if the channel is not a performance critical
 		 * channel, bind it to cpu 0.
+		 * In case alloc_cpumask_var() fails, bind it to cpu 0.
 		 */
 		channel->numa_node = 0;
 		channel->target_cpu = 0;
@@ -653,7 +643,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 		cpumask_clear(alloced_mask);
 	}
 
-	cpumask_xor(&available_mask, alloced_mask,
+	cpumask_xor(available_mask, alloced_mask,
 		    cpumask_of_node(primary->numa_node));
 
 	cur_cpu = -1;
@@ -671,10 +661,10 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 	}
 
 	while (true) {
-		cur_cpu = cpumask_next(cur_cpu, &available_mask);
+		cur_cpu = cpumask_next(cur_cpu, available_mask);
 		if (cur_cpu >= nr_cpu_ids) {
 			cur_cpu = -1;
-			cpumask_copy(&available_mask,
+			cpumask_copy(available_mask,
 				     cpumask_of_node(primary->numa_node));
 			continue;
 		}
@@ -704,6 +694,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 
 	channel->target_cpu = cur_cpu;
 	channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
+
+	free_cpumask_var(available_mask);
 }
 
 static void vmbus_wait_for_unload(void)
@@ -943,7 +935,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 		 * The channel is currently not open;
 		 * it is safe for us to cleanup the channel.
 		 */
-		hv_process_channel_removal(rescind->child_relid);
+		hv_process_channel_removal(channel);
 	} else {
 		complete(&channel->rescind_event);
 	}
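
The init_vp_index() part of this patch swaps an on-stack struct cpumask for a cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK the mask lives on the heap, which keeps large-NR_CPUS kernels from blowing the stack, but it then needs explicit allocation and freeing. A minimal sketch of the pattern; example_pick_cpu is a hypothetical name:

	static int example_pick_cpu(void)
	{
		cpumask_var_t mask;
		int cpu;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return 0; /* degrade gracefully, as init_vp_index() does */

		cpumask_copy(mask, cpu_online_mask);
		cpu = cpumask_first(mask); /* note: no '&', it is already a pointer */

		free_cpumask_var(mask);
		return cpu;
	}
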
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 748a1c4172a6..332d7c34be5c 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -189,6 +189,17 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
 int hv_synic_alloc(void)
 {
 	int cpu;
+	struct hv_per_cpu_context *hv_cpu;
+
+	/*
+	 * First, zero all per-cpu memory areas so hv_synic_free() can
+	 * detect what memory has been allocated and cleanup properly
+	 * after any failures.
+	 */
+	for_each_present_cpu(cpu) {
+		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
+		memset(hv_cpu, 0, sizeof(*hv_cpu));
+	}
 
 	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
 					 GFP_KERNEL);
@@ -198,10 +209,8 @@ int hv_synic_alloc(void)
 	}
 
 	for_each_present_cpu(cpu) {
-		struct hv_per_cpu_context *hv_cpu
-			= per_cpu_ptr(hv_context.cpu_context, cpu);
+		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
 
-		memset(hv_cpu, 0, sizeof(*hv_cpu));
 		tasklet_init(&hv_cpu->msg_dpc,
 			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);
 
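
Zeroing every per-cpu area before any allocation lets the free path treat "pointer is NULL" as "never allocated", so a mid-loop failure can be unwound safely. A small sketch of the same idea outside Hyper-V; all names here are hypothetical:

	struct example_state {
		void *buf; /* NULL means "never allocated" */
	};

	static void example_free(struct example_state *s)
	{
		kfree(s->buf); /* kfree(NULL) is a no-op */
		s->buf = NULL;
	}

	static int example_alloc(struct example_state *s)
	{
		memset(s, 0, sizeof(*s)); /* zero everything up front */

		s->buf = kzalloc(64, GFP_KERNEL);
		if (!s->buf) {
			example_free(s); /* safe even after partial failure */
			return -ENOMEM;
		}
		return 0;
	}
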
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b1b788082793..41631512ae97 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -689,7 +689,7 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
 	__online_page_increment_counters(pg);
 	__online_page_free(pg);
 
-	WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
+	lockdep_assert_held(&dm_device.ha_lock);
 	dm_device.num_pages_onlined++;
 }
 
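
lockdep_assert_held() is the better assertion here: spin_is_locked() only says *somebody* holds the lock (and is always false on !SMP), while lockdep checks that the current context holds it and compiles away entirely without CONFIG_LOCKDEP. A minimal sketch with hypothetical names:

	static DEFINE_SPINLOCK(example_lock);
	static unsigned long example_count;

	/* caller must hold example_lock */
	static void example_bump(void)
	{
		lockdep_assert_held(&example_lock); /* warns if *we* don't hold it */
		example_count++;
	}
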
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 5eed1e7da15c..a7513a8a8e37 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -353,7 +353,6 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
 
 	out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
 
-	default:
 	utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
 			MAX_ADAPTER_ID_SIZE,
 			UTF16_LITTLE_ENDIAN,
@@ -406,7 +405,7 @@ kvp_send_key(struct work_struct *dummy)
 		process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
 		break;
 	case KVP_OP_GET_IP_INFO:
-		process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
+		/* We only need to pass on message->kvp_hdr.operation. */
 		break;
 	case KVP_OP_SET:
 		switch (in_msg->body.kvp_set.data.value_type) {
@@ -421,7 +420,7 @@ kvp_send_key(struct work_struct *dummy)
 				UTF16_LITTLE_ENDIAN,
 				message->body.kvp_set.data.value,
 				HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1;
-				break;
+			break;
 
 		case REG_U32:
 			/*
@@ -446,6 +445,9 @@ kvp_send_key(struct work_struct *dummy)
 			break;
 
 		}
+
+		break;
+
 	case KVP_OP_GET:
 		message->body.kvp_set.data.key_size =
 			utf16s_to_utf8s(
@@ -454,7 +456,7 @@ kvp_send_key(struct work_struct *dummy)
 				UTF16_LITTLE_ENDIAN,
 				message->body.kvp_set.data.key,
 				HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
-			break;
+		break;
 
 	case KVP_OP_DELETE:
 		message->body.kvp_delete.key_size =
@@ -464,12 +466,12 @@ kvp_send_key(struct work_struct *dummy)
 				UTF16_LITTLE_ENDIAN,
 				message->body.kvp_delete.key,
 				HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
-			break;
+		break;
 
 	case KVP_OP_ENUMERATE:
 		message->body.kvp_enum_data.index =
 			in_msg->body.kvp_enum_data.index;
 		break;
 	}
 
 	kvp_transaction.state = HVUTIL_USERSPACE_REQ;
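
The hv_kvp.c hunks fix two fallthrough bugs: a stray "default:" label in process_ib_ipinfo() and a missing "break" that let KVP_OP_SET fall into KVP_OP_GET after the inner switch. A minimal illustration of that bug class, with hypothetical enums:

	enum example_op { OP_SET, OP_GET };
	enum example_type { T_STRING, T_U32 };

	static void example_dispatch(enum example_op op, enum example_type type)
	{
		switch (op) {
		case OP_SET:
			switch (type) {
			case T_STRING:
				/* ... handle string ... */
				break;
			case T_U32:
				/* ... handle u32 ... */
				break;
			}
			break;	/* without this break, OP_SET falls into OP_GET */
		case OP_GET:
			/* ... */
			break;
		}
	}
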
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 3e90eb91db45..64d0c85d5161 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -241,6 +241,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 {
 	vunmap(ring_info->ring_buffer);
+	ring_info->ring_buffer = NULL;
 }
 
 /* Write to the ring buffer. */
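
Clearing the pointer makes the cleanup idempotent, which matters now that hv_ringbuffer_cleanup() can be reached from both vmbus_close() and vmbus_free_ring() for the same channel. The underlying idea, as a hypothetical helper:

	static void example_unmap(void **mapping)
	{
		vunmap(*mapping); /* vunmap(NULL) is a no-op */
		*mapping = NULL;  /* a second cleanup call is now harmless */
	}
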
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c71cc857b649..283d184280af 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -498,6 +498,54 @@ static ssize_t device_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(device);
 
+static ssize_t driver_override_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct hv_device *hv_dev = device_to_hv_device(dev);
+	char *driver_override, *old, *cp;
+
+	/* We need to keep extra room for a newline */
+	if (count >= (PAGE_SIZE - 1))
+		return -EINVAL;
+
+	driver_override = kstrndup(buf, count, GFP_KERNEL);
+	if (!driver_override)
+		return -ENOMEM;
+
+	cp = strchr(driver_override, '\n');
+	if (cp)
+		*cp = '\0';
+
+	device_lock(dev);
+	old = hv_dev->driver_override;
+	if (strlen(driver_override)) {
+		hv_dev->driver_override = driver_override;
+	} else {
+		kfree(driver_override);
+		hv_dev->driver_override = NULL;
+	}
+	device_unlock(dev);
+
+	kfree(old);
+
+	return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct hv_device *hv_dev = device_to_hv_device(dev);
+	ssize_t len;
+
+	device_lock(dev);
+	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
+	device_unlock(dev);
+
+	return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
 static struct attribute *vmbus_dev_attrs[] = {
 	&dev_attr_id.attr,
@@ -528,6 +576,7 @@ static struct attribute *vmbus_dev_attrs[] = {
 	&dev_attr_channel_vp_mapping.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_device.attr,
+	&dev_attr_driver_override.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(vmbus_dev);
@@ -563,17 +612,26 @@ static inline bool is_null_guid(const uuid_le *guid)
 	return true;
 }
 
-/*
- * Return a matching hv_vmbus_device_id pointer.
- * If there is no match, return NULL.
- */
-static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
-					const uuid_le *guid)
+static const struct hv_vmbus_device_id *
+hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const uuid_le *guid)
+
+{
+	if (id == NULL)
+		return NULL; /* empty device table */
+
+	for (; !is_null_guid(&id->guid); id++)
+		if (!uuid_le_cmp(id->guid, *guid))
+			return id;
+
+	return NULL;
+}
+
+static const struct hv_vmbus_device_id *
+hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid)
 {
 	const struct hv_vmbus_device_id *id = NULL;
 	struct vmbus_dynid *dynid;
 
-	/* Look at the dynamic ids first, before the static ones */
 	spin_lock(&drv->dynids.lock);
 	list_for_each_entry(dynid, &drv->dynids.list, node) {
 		if (!uuid_le_cmp(dynid->id.guid, *guid)) {
@@ -583,18 +641,37 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
 	}
 	spin_unlock(&drv->dynids.lock);
 
-	if (id)
-		return id;
+	return id;
+}
 
-	id = drv->id_table;
-	if (id == NULL)
-		return NULL; /* empty device table */
+static const struct hv_vmbus_device_id vmbus_device_null = {
+	.guid = NULL_UUID_LE,
+};
 
-	for (; !is_null_guid(&id->guid); id++)
-		if (!uuid_le_cmp(id->guid, *guid))
-			return id;
+/*
+ * Return a matching hv_vmbus_device_id pointer.
+ * If there is no match, return NULL.
+ */
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
+							struct hv_device *dev)
+{
+	const uuid_le *guid = &dev->dev_type;
+	const struct hv_vmbus_device_id *id;
 
-	return NULL;
+	/* When driver_override is set, only bind to the matching driver */
+	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+		return NULL;
+
+	/* Look at the dynamic ids first, before the static ones */
+	id = hv_vmbus_dynid_match(drv, guid);
+	if (!id)
+		id = hv_vmbus_dev_match(drv->id_table, guid);
+
+	/* driver_override will always match, send a dummy id */
+	if (!id && dev->driver_override)
+		id = &vmbus_device_null;
+
+	return id;
 }
 
 /* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
@@ -643,7 +720,7 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
 	if (retval)
 		return retval;
 
-	if (hv_vmbus_get_id(drv, &guid))
+	if (hv_vmbus_dynid_match(drv, &guid))
 		return -EEXIST;
 
 	retval = vmbus_add_dynid(drv, &guid);
@@ -708,7 +785,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
 	if (is_hvsock_channel(hv_dev->channel))
 		return drv->hvsock;
 
-	if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
+	if (hv_vmbus_get_id(drv, hv_dev))
 		return 1;
 
 	return 0;
@@ -725,7 +802,7 @@ static int vmbus_probe(struct device *child_device)
 	struct hv_device *dev = device_to_hv_device(child_device);
 	const struct hv_vmbus_device_id *dev_id;
 
-	dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
+	dev_id = hv_vmbus_get_id(drv, dev);
 	if (drv->probe) {
 		ret = drv->probe(dev, dev_id);
 		if (ret != 0)
@@ -787,10 +864,9 @@ static void vmbus_device_release(struct device *device)
 	struct vmbus_channel *channel = hv_dev->channel;
 
 	mutex_lock(&vmbus_connection.channel_mutex);
-	hv_process_channel_removal(channel->offermsg.child_relid);
+	hv_process_channel_removal(channel);
 	mutex_unlock(&vmbus_connection.channel_mutex);
 	kfree(hv_dev);
-
 }
 
 /* The one and only one */
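
The new attribute mirrors the long-standing PCI driver_override semantics: writing a driver name (for instance "echo uio_hv_generic > /sys/bus/vmbus/devices/<UUID>/driver_override") forces the device to bind only to that driver, and an override with no ID-table match still binds via the dummy vmbus_device_null entry. A sketch of the resulting precedence, not the literal kernel code:

	static bool example_would_bind(const char *override,
				       const char *drv_name, bool guid_matches)
	{
		if (override) /* override set: the name alone decides */
			return strcmp(override, drv_name) == 0;

		return guid_matches; /* otherwise fall back to the GUID tables */
	}
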
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index ff94e58845b7..170fbb66bda2 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -406,6 +406,7 @@ static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
 
 static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
 {
+	int rc;
 	u32 control, mode;
 	struct etr_buf *etr_buf = data;
 
@@ -418,6 +419,10 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
 		return -EBUSY;
 	}
 
+	rc = coresight_claim_device_unlocked(drvdata->base);
+	if (rc)
+		return rc;
+
 	control |= BIT(CATU_CONTROL_ENABLE);
 
 	if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
@@ -459,6 +464,7 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
 	int rc = 0;
 
 	catu_write_control(drvdata, 0);
+	coresight_disclaim_device_unlocked(drvdata->base);
 	if (catu_wait_for_ready(drvdata)) {
 		dev_info(drvdata->dev, "Timeout while waiting for READY\n");
 		rc = -EAGAIN;
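
The CATU (and replicator, below) changes adopt the CoreSight claim/disclaim protocol added in this series: before touching hardware, the driver claims the device so a self-hosted agent and an external debugger cannot program it at the same time. A sketch of the discipline, assuming a base that the caller has already CS_UNLOCK()ed; example names are hypothetical:

	static int example_enable(void __iomem *base)
	{
		int rc;

		/* tag the device for self-hosted use; fails if an
		 * external debugger already claimed it */
		rc = coresight_claim_device_unlocked(base);
		if (rc)
			return rc;

		/* ... program and start the component ... */
		return 0;
	}

	static void example_disable(void __iomem *base)
	{
		/* ... stop the component ... */
		coresight_disclaim_device_unlocked(base); /* drop the claim tag */
	}
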
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index f6d0571ab9dd..299667b887fc 100644
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -34,48 +34,87 @@ struct replicator_state {
 	struct coresight_device	*csdev;
 };
 
+/*
+ * replicator_reset : Reset the replicator configuration to sane values.
+ */
+static void replicator_reset(struct replicator_state *drvdata)
+{
+	CS_UNLOCK(drvdata->base);
+
+	if (!coresight_claim_device_unlocked(drvdata->base)) {
+		writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+		writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+		coresight_disclaim_device_unlocked(drvdata->base);
+	}
+
+	CS_LOCK(drvdata->base);
+}
+
 static int replicator_enable(struct coresight_device *csdev, int inport,
 			     int outport)
 {
+	int rc = 0;
+	u32 reg;
 	struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
 
+	switch (outport) {
+	case 0:
+		reg = REPLICATOR_IDFILTER0;
+		break;
+	case 1:
+		reg = REPLICATOR_IDFILTER1;
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
 	CS_UNLOCK(drvdata->base);
 
-	/*
-	 * Ensure that the other port is disabled
-	 * 0x00 - passing through the replicator unimpeded
-	 * 0xff - disable (or impede) the flow of ATB data
-	 */
-	if (outport == 0) {
-		writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER0);
-		writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
-	} else {
-		writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER1);
-		writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+	if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+	    (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+		rc = coresight_claim_device_unlocked(drvdata->base);
+
+	/* Ensure that the outport is enabled. */
+	if (!rc) {
+		writel_relaxed(0x00, drvdata->base + reg);
+		dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
 	}
 
 	CS_LOCK(drvdata->base);
 
-	dev_info(drvdata->dev, "REPLICATOR enabled\n");
-	return 0;
+	return rc;
 }
 
 static void replicator_disable(struct coresight_device *csdev, int inport,
 			       int outport)
 {
+	u32 reg;
 	struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
 
+	switch (outport) {
+	case 0:
+		reg = REPLICATOR_IDFILTER0;
+		break;
+	case 1:
+		reg = REPLICATOR_IDFILTER1;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
 	CS_UNLOCK(drvdata->base);
 
 	/* disable the flow of ATB data through port */
-	if (outport == 0)
-		writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
-	else
-		writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+	writel_relaxed(0xff, drvdata->base + reg);
 
+	if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+	    (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+		coresight_disclaim_device_unlocked(drvdata->base);
 	CS_LOCK(drvdata->base);
 
-	dev_info(drvdata->dev, "REPLICATOR disabled\n");
+	dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
 }
 
 static const struct coresight_ops_link replicator_link_ops = {
@@ -156,7 +195,11 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
 	desc.groups = replicator_groups;
 	drvdata->csdev = coresight_register(&desc);
 
-	return PTR_ERR_OR_ZERO(drvdata->csdev);
+	if (!IS_ERR(drvdata->csdev)) {
+		replicator_reset(drvdata);
+		return 0;
+	}
+	return PTR_ERR(drvdata->csdev);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 306119eaf16a..824be0c5f592 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -5,7 +5,6 @@
  * Description: CoreSight Embedded Trace Buffer driver
  */
 
-#include <asm/local.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -28,6 +27,7 @@
 
 
 #include "coresight-priv.h"
+#include "coresight-etm-perf.h"
 
 #define ETB_RAM_DEPTH_REG	0x004
 #define ETB_STATUS_REG		0x00c
@@ -71,8 +71,8 @@
  * @miscdev:	specifics to handle "/dev/xyz.etb" entry.
  * @spinlock:	only one at a time pls.
  * @reading:	synchronise user space access to etb buffer.
- * @mode:	this ETB is being used.
  * @buf:	area of memory where ETB buffer content gets sent.
+ * @mode:	this ETB is being used.
  * @buffer_depth: size of @buf.
  * @trigger_cntr: amount of words to store after a trigger.
  */
@@ -84,12 +84,15 @@ struct etb_drvdata {
 	struct miscdevice	miscdev;
 	spinlock_t		spinlock;
 	local_t			reading;
-	local_t			mode;
 	u8			*buf;
+	u32			mode;
 	u32			buffer_depth;
 	u32			trigger_cntr;
 };
 
+static int etb_set_buffer(struct coresight_device *csdev,
+			  struct perf_output_handle *handle);
+
 static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
 {
 	u32 depth = 0;
@@ -103,7 +106,7 @@ static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
 	return depth;
 }
 
-static void etb_enable_hw(struct etb_drvdata *drvdata)
+static void __etb_enable_hw(struct etb_drvdata *drvdata)
 {
 	int i;
 	u32 depth;
@@ -131,32 +134,92 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
 	CS_LOCK(drvdata->base);
 }
 
-static int etb_enable(struct coresight_device *csdev, u32 mode)
+static int etb_enable_hw(struct etb_drvdata *drvdata)
+{
+	__etb_enable_hw(drvdata);
+	return 0;
+}
+
+static int etb_enable_sysfs(struct coresight_device *csdev)
 {
-	u32 val;
+	int ret = 0;
 	unsigned long flags;
 	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	val = local_cmpxchg(&drvdata->mode,
-			    CS_MODE_DISABLED, mode);
-	/*
-	 * When accessing from Perf, a HW buffer can be handled
-	 * by a single trace entity. In sysFS mode many tracers
-	 * can be logging to the same HW buffer.
-	 */
-	if (val == CS_MODE_PERF)
-		return -EBUSY;
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+
+	/* Don't messup with perf sessions. */
+	if (drvdata->mode == CS_MODE_PERF) {
+		ret = -EBUSY;
+		goto out;
+	}
 
 	/* Nothing to do, the tracer is already enabled. */
-	if (val == CS_MODE_SYSFS)
+	if (drvdata->mode == CS_MODE_SYSFS)
 		goto out;
 
-	spin_lock_irqsave(&drvdata->spinlock, flags);
-	etb_enable_hw(drvdata);
+	ret = etb_enable_hw(drvdata);
+	if (!ret)
+		drvdata->mode = CS_MODE_SYSFS;
+
+out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	return ret;
+}
+
+static int etb_enable_perf(struct coresight_device *csdev, void *data)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+
+	/* No need to continue if the component is already in use. */
+	if (drvdata->mode != CS_MODE_DISABLED) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * We don't have an internal state to clean up if we fail to setup
+	 * the perf buffer. So we can perform the step before we turn the
+	 * ETB on and leave without cleaning up.
+	 */
+	ret = etb_set_buffer(csdev, (struct perf_output_handle *)data);
+	if (ret)
+		goto out;
+
+	ret = etb_enable_hw(drvdata);
+	if (!ret)
+		drvdata->mode = CS_MODE_PERF;
 
 out:
-	dev_info(drvdata->dev, "ETB enabled\n");
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	return ret;
+}
+
+static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
+{
+	int ret;
+	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	switch (mode) {
+	case CS_MODE_SYSFS:
+		ret = etb_enable_sysfs(csdev);
+		break;
+	case CS_MODE_PERF:
+		ret = etb_enable_perf(csdev, data);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		return ret;
+
+	dev_dbg(drvdata->dev, "ETB enabled\n");
 	return 0;
 }
 
@@ -256,13 +319,16 @@ static void etb_disable(struct coresight_device *csdev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
-	etb_disable_hw(drvdata);
-	etb_dump_hw(drvdata);
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	local_set(&drvdata->mode, CS_MODE_DISABLED);
+	/* Disable the ETB only if it needs to */
+	if (drvdata->mode != CS_MODE_DISABLED) {
+		etb_disable_hw(drvdata);
+		etb_dump_hw(drvdata);
+		drvdata->mode = CS_MODE_DISABLED;
+	}
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	dev_info(drvdata->dev, "ETB disabled\n");
+	dev_dbg(drvdata->dev, "ETB disabled\n");
 }
 
 static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
@@ -294,12 +360,14 @@ static void etb_free_buffer(void *config)
 }
 
 static int etb_set_buffer(struct coresight_device *csdev,
-			  struct perf_output_handle *handle,
-			  void *sink_config)
+			  struct perf_output_handle *handle)
 {
 	int ret = 0;
 	unsigned long head;
-	struct cs_buffers *buf = sink_config;
+	struct cs_buffers *buf = etm_perf_sink_config(handle);
+
+	if (!buf)
+		return -EINVAL;
 
 	/* wrap head around to the amount of space we have */
 	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
@@ -315,37 +383,7 @@ static int etb_set_buffer(struct coresight_device *csdev,
 	return ret;
 }
 
-static unsigned long etb_reset_buffer(struct coresight_device *csdev,
-				      struct perf_output_handle *handle,
-				      void *sink_config)
-{
-	unsigned long size = 0;
-	struct cs_buffers *buf = sink_config;
-
-	if (buf) {
-		/*
-		 * In snapshot mode ->data_size holds the new address of the
-		 * ring buffer's head. The size itself is the whole address
-		 * range since we want the latest information.
-		 */
-		if (buf->snapshot)
-			handle->head = local_xchg(&buf->data_size,
-						  buf->nr_pages << PAGE_SHIFT);
-
-		/*
-		 * Tell the tracer PMU how much we got in this run and if
-		 * something went wrong along the way. Nobody else can use
-		 * this cs_buffers instance until we are done. As such
-		 * resetting parameters here and squaring off with the ring
-		 * buffer API in the tracer PMU is fine.
-		 */
-		size = local_xchg(&buf->data_size, 0);
-	}
-
-	return size;
-}
-
-static void etb_update_buffer(struct coresight_device *csdev,
+static unsigned long etb_update_buffer(struct coresight_device *csdev,
 			      struct perf_output_handle *handle,
 			      void *sink_config)
 {
@@ -354,13 +392,13 @@ static void etb_update_buffer(struct coresight_device *csdev,
 	u8 *buf_ptr;
 	const u32 *barrier;
 	u32 read_ptr, write_ptr, capacity;
-	u32 status, read_data, to_read;
-	unsigned long offset;
+	u32 status, read_data;
+	unsigned long offset, to_read;
 	struct cs_buffers *buf = sink_config;
 	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
 	if (!buf)
-		return;
+		return 0;
 
 	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
 
@@ -465,18 +503,17 @@ static void etb_update_buffer(struct coresight_device *csdev,
 	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
 
 	/*
-	 * In snapshot mode all we have to do is communicate to
-	 * perf_aux_output_end() the address of the current head. In full
-	 * trace mode the same function expects a size to move rb->aux_head
-	 * forward.
+	 * In snapshot mode we have to update the handle->head to point
+	 * to the new location.
 	 */
-	if (buf->snapshot)
-		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
-	else
-		local_add(to_read, &buf->data_size);
-
+	if (buf->snapshot) {
+		handle->head = (cur * PAGE_SIZE) + offset;
+		to_read = buf->nr_pages << PAGE_SHIFT;
+	}
 	etb_enable_hw(drvdata);
 	CS_LOCK(drvdata->base);
+
+	return to_read;
 }
@@ -484,8 +521,6 @@ static const struct coresight_ops_sink etb_sink_ops = {
 	.disable	= etb_disable,
 	.alloc_buffer	= etb_alloc_buffer,
 	.free_buffer	= etb_free_buffer,
-	.set_buffer	= etb_set_buffer,
-	.reset_buffer	= etb_reset_buffer,
 	.update_buffer	= etb_update_buffer,
 };
 
@@ -498,14 +533,14 @@ static void etb_dump(struct etb_drvdata *drvdata)
 	unsigned long flags;
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
-	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+	if (drvdata->mode == CS_MODE_SYSFS) {
 		etb_disable_hw(drvdata);
 		etb_dump_hw(drvdata);
 		etb_enable_hw(drvdata);
 	}
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	dev_info(drvdata->dev, "ETB dumped\n");
+	dev_dbg(drvdata->dev, "ETB dumped\n");
 }
 
 static int etb_open(struct inode *inode, struct file *file)
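
With set_buffer and reset_buffer gone, update_buffer() becomes the single sink callback that both drains the hardware and reports how much trace was collected; its return value is what the ETM perf layer hands to perf_aux_output_end(). Roughly what the caller ends up doing, with names abbreviated and not the literal kernel code:

	static void example_event_stop(struct coresight_device *sink,
				       struct perf_output_handle *handle,
				       void *sink_config)
	{
		unsigned long size;

		size = sink_ops(sink)->update_buffer(sink, handle, sink_config);
		perf_aux_output_end(handle, size); /* advances aux_head by size */
	}
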
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 677695635211..abe8249b893b 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/perf_event.h>
+#include <linux/percpu-defs.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
@@ -22,20 +23,6 @@
 static struct pmu etm_pmu;
 static bool etm_perf_up;
 
-/**
- * struct etm_event_data - Coresight specifics associated to an event
- * @work:		Handle to free allocated memory outside IRQ context.
- * @mask:		Hold the CPU(s) this event was set for.
- * @snk_config:		The sink configuration.
- * @path:		An array of path, each slot for one CPU.
- */
-struct etm_event_data {
-	struct work_struct work;
-	cpumask_t mask;
-	void *snk_config;
-	struct list_head **path;
-};
-
 static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
 static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
 
@@ -61,6 +48,18 @@ static const struct attribute_group *etm_pmu_attr_groups[] = {
 	NULL,
 };
 
+static inline struct list_head **
+etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
+{
+	return per_cpu_ptr(data->path, cpu);
+}
+
+static inline struct list_head *
+etm_event_cpu_path(struct etm_event_data *data, int cpu)
+{
+	return *etm_event_cpu_path_ptr(data, cpu);
+}
+
 static void etm_event_read(struct perf_event *event) {}
 
 static int etm_addr_filters_alloc(struct perf_event *event)
@@ -114,29 +113,30 @@ static void free_event_data(struct work_struct *work)
 
 	event_data = container_of(work, struct etm_event_data, work);
 	mask = &event_data->mask;
-	/*
-	 * First deal with the sink configuration. See comment in
-	 * etm_setup_aux() about why we take the first available path.
-	 */
-	if (event_data->snk_config) {
+
+	/* Free the sink buffers, if there are any */
+	if (event_data->snk_config && !WARN_ON(cpumask_empty(mask))) {
 		cpu = cpumask_first(mask);
-		sink = coresight_get_sink(event_data->path[cpu]);
+		sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
 		if (sink_ops(sink)->free_buffer)
 			sink_ops(sink)->free_buffer(event_data->snk_config);
 	}
 
 	for_each_cpu(cpu, mask) {
-		if (!(IS_ERR_OR_NULL(event_data->path[cpu])))
-			coresight_release_path(event_data->path[cpu]);
+		struct list_head **ppath;
+
+		ppath = etm_event_cpu_path_ptr(event_data, cpu);
+		if (!(IS_ERR_OR_NULL(*ppath)))
+			coresight_release_path(*ppath);
+		*ppath = NULL;
 	}
 
-	kfree(event_data->path);
+	free_percpu(event_data->path);
 	kfree(event_data);
 }
 
 static void *alloc_event_data(int cpu)
 {
-	int size;
 	cpumask_t *mask;
 	struct etm_event_data *event_data;
 
@@ -145,16 +145,12 @@ static void *alloc_event_data(int cpu)
145 if (!event_data) 145 if (!event_data)
146 return NULL; 146 return NULL;
147 147
148 /* Make sure nothing disappears under us */
149 get_online_cpus();
150 size = num_online_cpus();
151 148
152 mask = &event_data->mask; 149 mask = &event_data->mask;
153 if (cpu != -1) 150 if (cpu != -1)
154 cpumask_set_cpu(cpu, mask); 151 cpumask_set_cpu(cpu, mask);
155 else 152 else
156 cpumask_copy(mask, cpu_online_mask); 153 cpumask_copy(mask, cpu_present_mask);
157 put_online_cpus();
158 154
159 /* 155 /*
160 * Each CPU has a single path between source and destination. As such 156 * Each CPU has a single path between source and destination. As such
@@ -164,8 +160,8 @@ static void *alloc_event_data(int cpu)
164 * unused memory when dealing with single CPU trace scenarios is small 160 * unused memory when dealing with single CPU trace scenarios is small
165 * compared to the cost of searching through an optimized array. 161 * compared to the cost of searching through an optimized array.
166 */ 162 */
167 event_data->path = kcalloc(size, 163 event_data->path = alloc_percpu(struct list_head *);
168 sizeof(struct list_head *), GFP_KERNEL); 164
169 if (!event_data->path) { 165 if (!event_data->path) {
170 kfree(event_data); 166 kfree(event_data);
171 return NULL; 167 return NULL;
@@ -206,34 +202,53 @@ static void *etm_setup_aux(int event_cpu, void **pages,
206 * on the cmd line. As such the "enable_sink" flag in sysFS is reset. 202 * on the cmd line. As such the "enable_sink" flag in sysFS is reset.
207 */ 203 */
208 sink = coresight_get_enabled_sink(true); 204 sink = coresight_get_enabled_sink(true);
209 if (!sink) 205 if (!sink || !sink_ops(sink)->alloc_buffer)
210 goto err; 206 goto err;
211 207
212 mask = &event_data->mask; 208 mask = &event_data->mask;
213 209
214 /* Setup the path for each CPU in a trace session */ 210 /*
211 * Set up the path for each CPU in a trace session. We try to build
212 * a trace path for each CPU in the mask. If we don't find an ETM
213 * for a CPU, or fail to build a path for it, we clear that CPU from
214 * the mask and continue with the rest. If we ever try to trace on
215 * one of those CPUs, we detect the missing path and fail the session.
216 */
215 for_each_cpu(cpu, mask) { 217 for_each_cpu(cpu, mask) {
218 struct list_head *path;
216 struct coresight_device *csdev; 219 struct coresight_device *csdev;
217 220
218 csdev = per_cpu(csdev_src, cpu); 221 csdev = per_cpu(csdev_src, cpu);
219 if (!csdev) 222 /*
220 goto err; 223 * If there is no ETM associated with this CPU, clear it from
224 * the mask and continue with the rest. If we ever try to trace
225 * on this CPU, we handle it accordingly.
226 */
227 if (!csdev) {
228 cpumask_clear_cpu(cpu, mask);
229 continue;
230 }
221 231
222 /* 232 /*
223 * Building a path doesn't enable it, it simply builds a 233 * Building a path doesn't enable it, it simply builds a
224 * list of devices from source to sink that can be 234 * list of devices from source to sink that can be
225 * referenced later when the path is actually needed. 235 * referenced later when the path is actually needed.
226 */ 236 */
227 event_data->path[cpu] = coresight_build_path(csdev, sink); 237 path = coresight_build_path(csdev, sink);
228 if (IS_ERR(event_data->path[cpu])) 238 if (IS_ERR(path)) {
229 goto err; 239 cpumask_clear_cpu(cpu, mask);
240 continue;
241 }
242
243 *etm_event_cpu_path_ptr(event_data, cpu) = path;
230 } 244 }
231 245
232 if (!sink_ops(sink)->alloc_buffer) 246 /* If we don't have any CPUs ready for tracing, abort */
247 cpu = cpumask_first(mask);
248 if (cpu >= nr_cpu_ids)
233 goto err; 249 goto err;
234 250
235 cpu = cpumask_first(mask); 251 /* Allocate the sink buffer for this session */
236 /* Get the AUX specific data from the sink buffer */
237 event_data->snk_config = 252 event_data->snk_config =
238 sink_ops(sink)->alloc_buffer(sink, cpu, pages, 253 sink_ops(sink)->alloc_buffer(sink, cpu, pages,
239 nr_pages, overwrite); 254 nr_pages, overwrite);
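Stripped of the diff interleaving, the new per-CPU loop in etm_setup_aux() is a best-effort pass: a CPU with no ETM or no route to the chosen sink is dropped from the event's mask, and the session aborts only if no CPU survives:

    for_each_cpu(cpu, mask) {
            struct list_head *path;
            struct coresight_device *csdev = per_cpu(csdev_src, cpu);

            if (!csdev) {                   /* no ETM on this CPU */
                    cpumask_clear_cpu(cpu, mask);
                    continue;
            }
            path = coresight_build_path(csdev, sink);
            if (IS_ERR(path)) {             /* no path to the sink */
                    cpumask_clear_cpu(cpu, mask);
                    continue;
            }
            *etm_event_cpu_path_ptr(event_data, cpu) = path;
    }
    if (cpumask_first(mask) >= nr_cpu_ids)  /* nothing left to trace */
            goto err;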
@@ -255,6 +270,7 @@ static void etm_event_start(struct perf_event *event, int flags)
255 struct etm_event_data *event_data; 270 struct etm_event_data *event_data;
256 struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); 271 struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
257 struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); 272 struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
273 struct list_head *path;
258 274
259 if (!csdev) 275 if (!csdev)
260 goto fail; 276 goto fail;
@@ -267,18 +283,14 @@ static void etm_event_start(struct perf_event *event, int flags)
267 if (!event_data) 283 if (!event_data)
268 goto fail; 284 goto fail;
269 285
286 path = etm_event_cpu_path(event_data, cpu);
270 /* We need a sink, no need to continue without one */ 287 /* We need a sink, no need to continue without one */
271 sink = coresight_get_sink(event_data->path[cpu]); 288 sink = coresight_get_sink(path);
272 if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer)) 289 if (WARN_ON_ONCE(!sink))
273 goto fail_end_stop;
274
275 /* Configure the sink */
276 if (sink_ops(sink)->set_buffer(sink, handle,
277 event_data->snk_config))
278 goto fail_end_stop; 290 goto fail_end_stop;
279 291
280 /* Nothing will happen without a path */ 292 /* Nothing will happen without a path */
281 if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF)) 293 if (coresight_enable_path(path, CS_MODE_PERF, handle))
282 goto fail_end_stop; 294 goto fail_end_stop;
283 295
284 /* Tell the perf core the event is alive */ 296 /* Tell the perf core the event is alive */
@@ -286,11 +298,13 @@ static void etm_event_start(struct perf_event *event, int flags)
286 298
287 /* Finally enable the tracer */ 299 /* Finally enable the tracer */
288 if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) 300 if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
289 goto fail_end_stop; 301 goto fail_disable_path;
290 302
291out: 303out:
292 return; 304 return;
293 305
306fail_disable_path:
307 coresight_disable_path(path);
294fail_end_stop: 308fail_end_stop:
295 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); 309 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
296 perf_aux_output_end(handle, 0); 310 perf_aux_output_end(handle, 0);
@@ -306,6 +320,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
306 struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); 320 struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
307 struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); 321 struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
308 struct etm_event_data *event_data = perf_get_aux(handle); 322 struct etm_event_data *event_data = perf_get_aux(handle);
323 struct list_head *path;
309 324
310 if (event->hw.state == PERF_HES_STOPPED) 325 if (event->hw.state == PERF_HES_STOPPED)
311 return; 326 return;
@@ -313,7 +328,11 @@ static void etm_event_stop(struct perf_event *event, int mode)
313 if (!csdev) 328 if (!csdev)
314 return; 329 return;
315 330
316 sink = coresight_get_sink(event_data->path[cpu]); 331 path = etm_event_cpu_path(event_data, cpu);
332 if (!path)
333 return;
334
335 sink = coresight_get_sink(path);
317 if (!sink) 336 if (!sink)
318 return; 337 return;
319 338
@@ -331,20 +350,13 @@ static void etm_event_stop(struct perf_event *event, int mode)
331 if (!sink_ops(sink)->update_buffer) 350 if (!sink_ops(sink)->update_buffer)
332 return; 351 return;
333 352
334 sink_ops(sink)->update_buffer(sink, handle, 353 size = sink_ops(sink)->update_buffer(sink, handle,
335 event_data->snk_config); 354 event_data->snk_config);
336
337 if (!sink_ops(sink)->reset_buffer)
338 return;
339
340 size = sink_ops(sink)->reset_buffer(sink, handle,
341 event_data->snk_config);
342
343 perf_aux_output_end(handle, size); 355 perf_aux_output_end(handle, size);
344 } 356 }
345 357
346 /* Disabling the path makes its elements available to other sessions */ 358
347 coresight_disable_path(event_data->path[cpu]); 359 coresight_disable_path(path);
348} 360}
349 361
350static int etm_event_add(struct perf_event *event, int mode) 362static int etm_event_add(struct perf_event *event, int mode)
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index 4197df4faf5e..da7d9336a15c 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -7,6 +7,7 @@
7#ifndef _CORESIGHT_ETM_PERF_H 7#ifndef _CORESIGHT_ETM_PERF_H
8#define _CORESIGHT_ETM_PERF_H 8#define _CORESIGHT_ETM_PERF_H
9 9
10#include <linux/percpu-defs.h>
10#include "coresight-priv.h" 11#include "coresight-priv.h"
11 12
12struct coresight_device; 13struct coresight_device;
@@ -42,14 +43,39 @@ struct etm_filters {
42 bool ssstatus; 43 bool ssstatus;
43}; 44};
44 45
46/**
47 * struct etm_event_data - Coresight specifics associated with an event
48 * @work: Handle to free allocated memory outside IRQ context.
49 * @mask: Holds the CPU(s) this event was set for.
50 * @snk_config: The sink configuration.
51 * @path: An array of paths, one per CPU.
52 */
53struct etm_event_data {
54 struct work_struct work;
55 cpumask_t mask;
56 void *snk_config;
57 struct list_head * __percpu *path;
58};
45 59
46#ifdef CONFIG_CORESIGHT 60#ifdef CONFIG_CORESIGHT
47int etm_perf_symlink(struct coresight_device *csdev, bool link); 61int etm_perf_symlink(struct coresight_device *csdev, bool link);
62static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
63{
64 struct etm_event_data *data = perf_get_aux(handle);
48 65
66 if (data)
67 return data->snk_config;
68 return NULL;
69}
49#else 70#else
50static inline int etm_perf_symlink(struct coresight_device *csdev, bool link) 71static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
51{ return -EINVAL; } 72{ return -EINVAL; }
52 73
74static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
75{
76 return NULL;
77}
78
53#endif /* CONFIG_CORESIGHT */ 79#endif /* CONFIG_CORESIGHT */
54 80
55#endif 81#endif
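etm_perf_sink_config() is what lets this series retire the set_buffer sink operation: the perf handle now travels down coresight_enable_path(), and a sink can recover its private buffer from the handle alone. A hypothetical sink-side caller; the function name is illustrative, tmc_set_etf_buffer() further down is the real consumer:

    static int example_set_buffer(struct coresight_device *csdev,
                                  struct perf_output_handle *handle)
    {
            struct cs_buffers *buf = etm_perf_sink_config(handle);

            if (!buf)       /* no AUX session bound to this handle */
                    return -EINVAL;

            /* program the sink's write pointer from handle->head here */
            return 0;
    }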
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 7c74263c333d..fd5c4cca7db5 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -355,11 +355,10 @@ static int etm_parse_event_config(struct etm_drvdata *drvdata,
355 return 0; 355 return 0;
356} 356}
357 357
358static void etm_enable_hw(void *info) 358static int etm_enable_hw(struct etm_drvdata *drvdata)
359{ 359{
360 int i; 360 int i, rc;
361 u32 etmcr; 361 u32 etmcr;
362 struct etm_drvdata *drvdata = info;
363 struct etm_config *config = &drvdata->config; 362 struct etm_config *config = &drvdata->config;
364 363
365 CS_UNLOCK(drvdata->base); 364 CS_UNLOCK(drvdata->base);
@@ -370,6 +369,9 @@ static void etm_enable_hw(void *info)
370 etm_set_pwrup(drvdata); 369 etm_set_pwrup(drvdata);
371 /* Make sure all registers are accessible */ 370 /* Make sure all registers are accessible */
372 etm_os_unlock(drvdata); 371 etm_os_unlock(drvdata);
372 rc = coresight_claim_device_unlocked(drvdata->base);
373 if (rc)
374 goto done;
373 375
374 etm_set_prog(drvdata); 376 etm_set_prog(drvdata);
375 377
@@ -418,9 +420,29 @@ static void etm_enable_hw(void *info)
418 etm_writel(drvdata, 0x0, ETMVMIDCVR); 420 etm_writel(drvdata, 0x0, ETMVMIDCVR);
419 421
420 etm_clr_prog(drvdata); 422 etm_clr_prog(drvdata);
423
424done:
425 if (rc)
426 etm_set_pwrdwn(drvdata);
421 CS_LOCK(drvdata->base); 427 CS_LOCK(drvdata->base);
422 428
423 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); 429 dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
430 drvdata->cpu, rc);
431 return rc;
432}
433
434struct etm_enable_arg {
435 struct etm_drvdata *drvdata;
436 int rc;
437};
438
439static void etm_enable_hw_smp_call(void *info)
440{
441 struct etm_enable_arg *arg = info;
442
443 if (WARN_ON(!arg))
444 return;
445 arg->rc = etm_enable_hw(arg->drvdata);
424} 446}
425 447
426static int etm_cpu_id(struct coresight_device *csdev) 448static int etm_cpu_id(struct coresight_device *csdev)
@@ -475,14 +497,13 @@ static int etm_enable_perf(struct coresight_device *csdev,
475 /* Configure the tracer based on the session's specifics */ 497 /* Configure the tracer based on the session's specifics */
476 etm_parse_event_config(drvdata, event); 498 etm_parse_event_config(drvdata, event);
477 /* And enable it */ 499 /* And enable it */
478 etm_enable_hw(drvdata); 500 return etm_enable_hw(drvdata);
479
480 return 0;
481} 501}
482 502
483static int etm_enable_sysfs(struct coresight_device *csdev) 503static int etm_enable_sysfs(struct coresight_device *csdev)
484{ 504{
485 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 505 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
506 struct etm_enable_arg arg = { 0 };
486 int ret; 507 int ret;
487 508
488 spin_lock(&drvdata->spinlock); 509 spin_lock(&drvdata->spinlock);
@@ -492,20 +513,21 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
492 * hw configuration will take place on the local CPU during bring up. 513 * hw configuration will take place on the local CPU during bring up.
493 */ 514 */
494 if (cpu_online(drvdata->cpu)) { 515 if (cpu_online(drvdata->cpu)) {
516 arg.drvdata = drvdata;
495 ret = smp_call_function_single(drvdata->cpu, 517 ret = smp_call_function_single(drvdata->cpu,
496 etm_enable_hw, drvdata, 1); 518 etm_enable_hw_smp_call, &arg, 1);
497 if (ret) 519 if (!ret)
498 goto err; 520 ret = arg.rc;
521 if (!ret)
522 drvdata->sticky_enable = true;
523 } else {
524 ret = -ENODEV;
499 } 525 }
500 526
501 drvdata->sticky_enable = true;
502 spin_unlock(&drvdata->spinlock); 527 spin_unlock(&drvdata->spinlock);
503 528
504 dev_info(drvdata->dev, "ETM tracing enabled\n"); 529 if (!ret)
505 return 0; 530 dev_dbg(drvdata->dev, "ETM tracing enabled\n");
506
507err:
508 spin_unlock(&drvdata->spinlock);
509 return ret; 531 return ret;
510} 532}
511 533
@@ -555,6 +577,8 @@ static void etm_disable_hw(void *info)
555 for (i = 0; i < drvdata->nr_cntr; i++) 577 for (i = 0; i < drvdata->nr_cntr; i++)
556 config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); 578 config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
557 579
580 coresight_disclaim_device_unlocked(drvdata->base);
581
558 etm_set_pwrdwn(drvdata); 582 etm_set_pwrdwn(drvdata);
559 CS_LOCK(drvdata->base); 583 CS_LOCK(drvdata->base);
560 584
@@ -604,7 +628,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
604 spin_unlock(&drvdata->spinlock); 628 spin_unlock(&drvdata->spinlock);
605 cpus_read_unlock(); 629 cpus_read_unlock();
606 630
607 dev_info(drvdata->dev, "ETM tracing disabled\n"); 631 dev_dbg(drvdata->dev, "ETM tracing disabled\n");
608} 632}
609 633
610static void etm_disable(struct coresight_device *csdev, 634static void etm_disable(struct coresight_device *csdev,
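smp_call_function_single() can only report whether the cross-call was delivered; the remote function itself returns void. The etm_enable_arg wrapper is the standard way to carry an error code back from the target CPU, and the caller has to check both failure sources, as the sysfs hunk above does:

    struct etm_enable_arg arg = { .drvdata = drvdata };

    ret = smp_call_function_single(drvdata->cpu,
                                   etm_enable_hw_smp_call, &arg, 1);
    if (!ret)               /* the call ran: use the remote result */
            ret = arg.rc;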
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 1d94ebec027b..53e2fb6e86f6 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -28,6 +28,7 @@
28#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
29#include <asm/sections.h> 29#include <asm/sections.h>
30#include <asm/local.h> 30#include <asm/local.h>
31#include <asm/virt.h>
31 32
32#include "coresight-etm4x.h" 33#include "coresight-etm4x.h"
33#include "coresight-etm-perf.h" 34#include "coresight-etm-perf.h"
@@ -77,16 +78,24 @@ static int etm4_trace_id(struct coresight_device *csdev)
77 return drvdata->trcid; 78 return drvdata->trcid;
78} 79}
79 80
80static void etm4_enable_hw(void *info) 81struct etm4_enable_arg {
82 struct etmv4_drvdata *drvdata;
83 int rc;
84};
85
86static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
81{ 87{
82 int i; 88 int i, rc;
83 struct etmv4_drvdata *drvdata = info;
84 struct etmv4_config *config = &drvdata->config; 89 struct etmv4_config *config = &drvdata->config;
85 90
86 CS_UNLOCK(drvdata->base); 91 CS_UNLOCK(drvdata->base);
87 92
88 etm4_os_unlock(drvdata); 93 etm4_os_unlock(drvdata);
89 94
95 rc = coresight_claim_device_unlocked(drvdata->base);
96 if (rc)
97 goto done;
98
90 /* Disable the trace unit before programming trace registers */ 99 /* Disable the trace unit before programming trace registers */
91 writel_relaxed(0, drvdata->base + TRCPRGCTLR); 100 writel_relaxed(0, drvdata->base + TRCPRGCTLR);
92 101
@@ -174,9 +183,21 @@ static void etm4_enable_hw(void *info)
174 dev_err(drvdata->dev, 183 dev_err(drvdata->dev,
175 "timeout while waiting for Idle Trace Status\n"); 184 "timeout while waiting for Idle Trace Status\n");
176 185
186done:
177 CS_LOCK(drvdata->base); 187 CS_LOCK(drvdata->base);
178 188
179 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); 189 dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
190 drvdata->cpu, rc);
191 return rc;
192}
193
194static void etm4_enable_hw_smp_call(void *info)
195{
196 struct etm4_enable_arg *arg = info;
197
198 if (WARN_ON(!arg))
199 return;
200 arg->rc = etm4_enable_hw(arg->drvdata);
180} 201}
181 202
182static int etm4_parse_event_config(struct etmv4_drvdata *drvdata, 203static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
@@ -242,7 +263,7 @@ static int etm4_enable_perf(struct coresight_device *csdev,
242 if (ret) 263 if (ret)
243 goto out; 264 goto out;
244 /* And enable it */ 265 /* And enable it */
245 etm4_enable_hw(drvdata); 266 ret = etm4_enable_hw(drvdata);
246 267
247out: 268out:
248 return ret; 269 return ret;
@@ -251,6 +272,7 @@ out:
251static int etm4_enable_sysfs(struct coresight_device *csdev) 272static int etm4_enable_sysfs(struct coresight_device *csdev)
252{ 273{
253 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 274 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
275 struct etm4_enable_arg arg = { 0 };
254 int ret; 276 int ret;
255 277
256 spin_lock(&drvdata->spinlock); 278 spin_lock(&drvdata->spinlock);
@@ -259,19 +281,17 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
259 * Executing etm4_enable_hw on the cpu whose ETM is being enabled 281 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
260 * ensures that register writes occur when cpu is powered. 282 * ensures that register writes occur when cpu is powered.
261 */ 283 */
284 arg.drvdata = drvdata;
262 ret = smp_call_function_single(drvdata->cpu, 285 ret = smp_call_function_single(drvdata->cpu,
263 etm4_enable_hw, drvdata, 1); 286 etm4_enable_hw_smp_call, &arg, 1);
264 if (ret) 287 if (!ret)
265 goto err; 288 ret = arg.rc;
266 289 if (!ret)
267 drvdata->sticky_enable = true; 290 drvdata->sticky_enable = true;
268 spin_unlock(&drvdata->spinlock); 291 spin_unlock(&drvdata->spinlock);
269 292
270 dev_info(drvdata->dev, "ETM tracing enabled\n"); 293 if (!ret)
271 return 0; 294 dev_dbg(drvdata->dev, "ETM tracing enabled\n");
272
273err:
274 spin_unlock(&drvdata->spinlock);
275 return ret; 295 return ret;
276} 296}
277 297
@@ -328,6 +348,8 @@ static void etm4_disable_hw(void *info)
328 isb(); 348 isb();
329 writel_relaxed(control, drvdata->base + TRCPRGCTLR); 349 writel_relaxed(control, drvdata->base + TRCPRGCTLR);
330 350
351 coresight_disclaim_device_unlocked(drvdata->base);
352
331 CS_LOCK(drvdata->base); 353 CS_LOCK(drvdata->base);
332 354
333 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); 355 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
@@ -380,7 +402,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
380 spin_unlock(&drvdata->spinlock); 402 spin_unlock(&drvdata->spinlock);
381 cpus_read_unlock(); 403 cpus_read_unlock();
382 404
383 dev_info(drvdata->dev, "ETM tracing disabled\n"); 405 dev_dbg(drvdata->dev, "ETM tracing disabled\n");
384} 406}
385 407
386static void etm4_disable(struct coresight_device *csdev, 408static void etm4_disable(struct coresight_device *csdev,
@@ -605,7 +627,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
605 config->vinst_ctrl |= BIT(0); 627 config->vinst_ctrl |= BIT(0);
606} 628}
607 629
608static u64 etm4_get_access_type(struct etmv4_config *config) 630static u64 etm4_get_ns_access_type(struct etmv4_config *config)
609{ 631{
610 u64 access_type = 0; 632 u64 access_type = 0;
611 633
@@ -616,17 +638,26 @@ static u64 etm4_get_access_type(struct etmv4_config *config)
616 * Bit[13] Exception level 1 - OS 638 * Bit[13] Exception level 1 - OS
617 * Bit[14] Exception level 2 - Hypervisor 639 * Bit[14] Exception level 2 - Hypervisor
618 * Bit[15] Never implemented 640 * Bit[15] Never implemented
619 *
620 * Always stay away from hypervisor mode.
621 */ 641 */
622 access_type = ETM_EXLEVEL_NS_HYP; 642 if (!is_kernel_in_hyp_mode()) {
623 643 /* Stay away from hypervisor mode for non-VHE */
624 if (config->mode & ETM_MODE_EXCL_KERN) 644 access_type = ETM_EXLEVEL_NS_HYP;
625 access_type |= ETM_EXLEVEL_NS_OS; 645 if (config->mode & ETM_MODE_EXCL_KERN)
646 access_type |= ETM_EXLEVEL_NS_OS;
647 } else if (config->mode & ETM_MODE_EXCL_KERN) {
648 access_type = ETM_EXLEVEL_NS_HYP;
649 }
626 650
627 if (config->mode & ETM_MODE_EXCL_USER) 651 if (config->mode & ETM_MODE_EXCL_USER)
628 access_type |= ETM_EXLEVEL_NS_APP; 652 access_type |= ETM_EXLEVEL_NS_APP;
629 653
654 return access_type;
655}
656
657static u64 etm4_get_access_type(struct etmv4_config *config)
658{
659 u64 access_type = etm4_get_ns_access_type(config);
660
630 /* 661 /*
631 * EXLEVEL_S, bits[11:8], don't trace anything happening 662 * EXLEVEL_S, bits[11:8], don't trace anything happening
632 * in secure state. 663 * in secure state.
@@ -880,20 +911,10 @@ void etm4_config_trace_mode(struct etmv4_config *config)
880 911
881 addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP]; 912 addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
882 /* clear default config */ 913 /* clear default config */
883 addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS); 914 addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
915 ETM_EXLEVEL_NS_HYP);
884 916
885 /* 917 addr_acc |= etm4_get_ns_access_type(config);
886 * EXLEVEL_NS, bits[15:12]
887 * The Exception levels are:
888 * Bit[12] Exception level 0 - Application
889 * Bit[13] Exception level 1 - OS
890 * Bit[14] Exception level 2 - Hypervisor
891 * Bit[15] Never implemented
892 */
893 if (mode & ETM_MODE_EXCL_KERN)
894 addr_acc |= ETM_EXLEVEL_NS_OS;
895 else
896 addr_acc |= ETM_EXLEVEL_NS_APP;
897 918
898 config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc; 919 config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
899 config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; 920 config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
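For a concrete reading of the exception-level bits (EL0 = bit 12, EL1 = bit 13, EL2 = bit 14, per the hunk's comment), here is what the two kernel configurations produce when the user excludes the kernel; the numeric values assume the ETM_EXLEVEL_NS_* macros map one-to-one onto those bits:

    /* non-VHE kernel (runs at EL1): always exclude EL2, plus EL1 */
    access_type = ETM_EXLEVEL_NS_HYP | ETM_EXLEVEL_NS_OS;   /* 0x6000 */

    /* VHE kernel (runs at EL2): excluding the kernel means EL2 only */
    access_type = ETM_EXLEVEL_NS_HYP;                       /* 0x4000 */

    /* ETM_MODE_EXCL_USER additionally sets ETM_EXLEVEL_NS_APP, bit 12 */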
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 448145a36675..927925151509 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -25,6 +25,7 @@
25#define FUNNEL_HOLDTIME_MASK 0xf00 25#define FUNNEL_HOLDTIME_MASK 0xf00
26#define FUNNEL_HOLDTIME_SHFT 0x8 26#define FUNNEL_HOLDTIME_SHFT 0x8
27#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT) 27#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
28#define FUNNEL_ENSx_MASK 0xff
28 29
29/** 30/**
30 * struct funnel_drvdata - specifics associated to a funnel component 31 * struct funnel_drvdata - specifics associated to a funnel component
@@ -42,31 +43,42 @@ struct funnel_drvdata {
42 unsigned long priority; 43 unsigned long priority;
43}; 44};
44 45
45static void funnel_enable_hw(struct funnel_drvdata *drvdata, int port) 46static int funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
46{ 47{
47 u32 functl; 48 u32 functl;
49 int rc = 0;
48 50
49 CS_UNLOCK(drvdata->base); 51 CS_UNLOCK(drvdata->base);
50 52
51 functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL); 53 functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
54 /* Claim the device only when we enable the first slave */
55 if (!(functl & FUNNEL_ENSx_MASK)) {
56 rc = coresight_claim_device_unlocked(drvdata->base);
57 if (rc)
58 goto done;
59 }
60
52 functl &= ~FUNNEL_HOLDTIME_MASK; 61 functl &= ~FUNNEL_HOLDTIME_MASK;
53 functl |= FUNNEL_HOLDTIME; 62 functl |= FUNNEL_HOLDTIME;
54 functl |= (1 << port); 63 functl |= (1 << port);
55 writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL); 64 writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
56 writel_relaxed(drvdata->priority, drvdata->base + FUNNEL_PRICTL); 65 writel_relaxed(drvdata->priority, drvdata->base + FUNNEL_PRICTL);
57 66done:
58 CS_LOCK(drvdata->base); 67 CS_LOCK(drvdata->base);
68 return rc;
59} 69}
60 70
61static int funnel_enable(struct coresight_device *csdev, int inport, 71static int funnel_enable(struct coresight_device *csdev, int inport,
62 int outport) 72 int outport)
63{ 73{
74 int rc;
64 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 75 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
65 76
66 funnel_enable_hw(drvdata, inport); 77 rc = funnel_enable_hw(drvdata, inport);
67 78
68 dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); 79 if (!rc)
69 return 0; 80 dev_dbg(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
81 return rc;
70} 82}
71 83
72static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport) 84static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
@@ -79,6 +91,10 @@ static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
79 functl &= ~(1 << inport); 91 functl &= ~(1 << inport);
80 writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL); 92 writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
81 93
94 /* Disclaim the device if none of the slaves are now active */
95 if (!(functl & FUNNEL_ENSx_MASK))
96 coresight_disclaim_device_unlocked(drvdata->base);
97
82 CS_LOCK(drvdata->base); 98 CS_LOCK(drvdata->base);
83} 99}
84 100
@@ -89,7 +105,7 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
89 105
90 funnel_disable_hw(drvdata, inport); 106 funnel_disable_hw(drvdata, inport);
91 107
92 dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); 108 dev_dbg(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
93} 109}
94 110
95static const struct coresight_ops_link funnel_link_ops = { 111static const struct coresight_ops_link funnel_link_ops = {
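A funnel is one device with up to eight input ports, so claiming it once per port would nest claims the protocol cannot count. Instead the FUNCTL enable bits double as the reference count: claim on the transition from no enabled ports to one, disclaim once the last port is cleared. The two guards, pulled out of the hunks above:

    /* enable path: claim only when enabling the first port */
    if (!(functl & FUNNEL_ENSx_MASK))
            rc = coresight_claim_device_unlocked(drvdata->base);

    /* disable path: disclaim only when no port remains enabled */
    functl &= ~(1 << inport);
    writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
    if (!(functl & FUNNEL_ENSx_MASK))
            coresight_disclaim_device_unlocked(drvdata->base);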
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 1a6cf3589866..579f34943bf1 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -25,6 +25,13 @@
25#define CORESIGHT_DEVID 0xfc8 25#define CORESIGHT_DEVID 0xfc8
26#define CORESIGHT_DEVTYPE 0xfcc 26#define CORESIGHT_DEVTYPE 0xfcc
27 27
28
29/*
30 * Coresight device CLAIM protocol.
31 * See PSCI - ARM DEN 0022D, Section: 6.8.1 Debug and Trace save and restore.
32 */
33#define CORESIGHT_CLAIM_SELF_HOSTED BIT(1)
34
28#define TIMEOUT_US 100 35#define TIMEOUT_US 100
29#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb) 36#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
30 37
@@ -137,7 +144,7 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
137} 144}
138 145
139void coresight_disable_path(struct list_head *path); 146void coresight_disable_path(struct list_head *path);
140int coresight_enable_path(struct list_head *path, u32 mode); 147int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
141struct coresight_device *coresight_get_sink(struct list_head *path); 148struct coresight_device *coresight_get_sink(struct list_head *path);
142struct coresight_device *coresight_get_enabled_sink(bool reset); 149struct coresight_device *coresight_get_enabled_sink(bool reset);
143struct list_head *coresight_build_path(struct coresight_device *csdev, 150struct list_head *coresight_build_path(struct coresight_device *csdev,
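The claim tag lives in the standard CoreSight CLAIMSET/CLAIMCLR management registers (0xfa0/0xfa4): writing 1 to a CLAIMSET bit sets a tag, writing 1 to the same CLAIMCLR bit clears it, and reading CLAIMCLR returns the tags currently set. Bit 0 is conventionally an external debugger's tag, bit 1 (CORESIGHT_CLAIM_SELF_HOSTED above) is the OS's. A hedged sketch of the handshake; the helper body is illustrative, not the series' exact implementation:

    #define CORESIGHT_CLAIMSET      0xfa0   /* write 1 to set a tag   */
    #define CORESIGHT_CLAIMCLR      0xfa4   /* write 1 to clear a tag */

    static int example_claim(void __iomem *base)
    {
            writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED,
                           base + CORESIGHT_CLAIMSET);
            /* reading CLAIMCLR returns the tags currently set */
            if (readl_relaxed(base + CORESIGHT_CLAIMCLR) !=
                CORESIGHT_CLAIM_SELF_HOSTED) {
                    /* an external agent holds a tag: back out */
                    writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED,
                                   base + CORESIGHT_CLAIMCLR);
                    return -EBUSY;
            }
            return 0;
    }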
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 8d2eaaab6c2f..feac98315471 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -35,7 +35,7 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
35{ 35{
36 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 36 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
37 37
38 dev_info(drvdata->dev, "REPLICATOR enabled\n"); 38 dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
39 return 0; 39 return 0;
40} 40}
41 41
@@ -44,7 +44,7 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
44{ 44{
45 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 45 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
46 46
47 dev_info(drvdata->dev, "REPLICATOR disabled\n"); 47 dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
48} 48}
49 49
50static const struct coresight_ops_link replicator_link_ops = { 50static const struct coresight_ops_link replicator_link_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index c46c70aec1d5..35d6f9709274 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -211,7 +211,7 @@ static int stm_enable(struct coresight_device *csdev,
211 stm_enable_hw(drvdata); 211 stm_enable_hw(drvdata);
212 spin_unlock(&drvdata->spinlock); 212 spin_unlock(&drvdata->spinlock);
213 213
214 dev_info(drvdata->dev, "STM tracing enabled\n"); 214 dev_dbg(drvdata->dev, "STM tracing enabled\n");
215 return 0; 215 return 0;
216} 216}
217 217
@@ -274,7 +274,7 @@ static void stm_disable(struct coresight_device *csdev,
274 pm_runtime_put(drvdata->dev); 274 pm_runtime_put(drvdata->dev);
275 275
276 local_set(&drvdata->mode, CS_MODE_DISABLED); 276 local_set(&drvdata->mode, CS_MODE_DISABLED);
277 dev_info(drvdata->dev, "STM tracing disabled\n"); 277 dev_dbg(drvdata->dev, "STM tracing disabled\n");
278 } 278 }
279} 279}
280 280
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 0549249f4b39..53fc83b72a49 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -10,8 +10,12 @@
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include "coresight-priv.h" 11#include "coresight-priv.h"
12#include "coresight-tmc.h" 12#include "coresight-tmc.h"
13#include "coresight-etm-perf.h"
13 14
14static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) 15static int tmc_set_etf_buffer(struct coresight_device *csdev,
16 struct perf_output_handle *handle);
17
18static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
15{ 19{
16 CS_UNLOCK(drvdata->base); 20 CS_UNLOCK(drvdata->base);
17 21
@@ -30,33 +34,41 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
30 CS_LOCK(drvdata->base); 34 CS_LOCK(drvdata->base);
31} 35}
32 36
37static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
38{
39 int rc = coresight_claim_device(drvdata->base);
40
41 if (rc)
42 return rc;
43
44 __tmc_etb_enable_hw(drvdata);
45 return 0;
46}
47
33static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) 48static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
34{ 49{
35 char *bufp; 50 char *bufp;
36 u32 read_data, lost; 51 u32 read_data, lost;
37 int i;
38 52
39 /* Check if the buffer wrapped around. */ 53 /* Check if the buffer wrapped around. */
40 lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL; 54 lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
41 bufp = drvdata->buf; 55 bufp = drvdata->buf;
42 drvdata->len = 0; 56 drvdata->len = 0;
43 while (1) { 57 while (1) {
44 for (i = 0; i < drvdata->memwidth; i++) { 58 read_data = readl_relaxed(drvdata->base + TMC_RRD);
45 read_data = readl_relaxed(drvdata->base + TMC_RRD); 59 if (read_data == 0xFFFFFFFF)
46 if (read_data == 0xFFFFFFFF) 60 break;
47 goto done; 61 memcpy(bufp, &read_data, 4);
48 memcpy(bufp, &read_data, 4); 62 bufp += 4;
49 bufp += 4; 63 drvdata->len += 4;
50 drvdata->len += 4;
51 }
52 } 64 }
53done: 65
54 if (lost) 66 if (lost)
55 coresight_insert_barrier_packet(drvdata->buf); 67 coresight_insert_barrier_packet(drvdata->buf);
56 return; 68 return;
57} 69}
58 70
59static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) 71static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
60{ 72{
61 CS_UNLOCK(drvdata->base); 73 CS_UNLOCK(drvdata->base);
62 74
@@ -72,7 +84,13 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
72 CS_LOCK(drvdata->base); 84 CS_LOCK(drvdata->base);
73} 85}
74 86
75static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata) 87static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
88{
89 coresight_disclaim_device(drvdata->base);
90 __tmc_etb_disable_hw(drvdata);
91}
92
93static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
76{ 94{
77 CS_UNLOCK(drvdata->base); 95 CS_UNLOCK(drvdata->base);
78 96
@@ -88,13 +106,24 @@ static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
88 CS_LOCK(drvdata->base); 106 CS_LOCK(drvdata->base);
89} 107}
90 108
109static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
110{
111 int rc = coresight_claim_device(drvdata->base);
112
113 if (rc)
114 return rc;
115
116 __tmc_etf_enable_hw(drvdata);
117 return 0;
118}
119
91static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) 120static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
92{ 121{
93 CS_UNLOCK(drvdata->base); 122 CS_UNLOCK(drvdata->base);
94 123
95 tmc_flush_and_stop(drvdata); 124 tmc_flush_and_stop(drvdata);
96 tmc_disable_hw(drvdata); 125 tmc_disable_hw(drvdata);
97 126 coresight_disclaim_device_unlocked(drvdata->base);
98 CS_LOCK(drvdata->base); 127 CS_LOCK(drvdata->base);
99} 128}
100 129
@@ -170,8 +199,12 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
170 drvdata->buf = buf; 199 drvdata->buf = buf;
171 } 200 }
172 201
173 drvdata->mode = CS_MODE_SYSFS; 202 ret = tmc_etb_enable_hw(drvdata);
174 tmc_etb_enable_hw(drvdata); 203 if (!ret)
204 drvdata->mode = CS_MODE_SYSFS;
205 else
206 /* Free up the buffer if we failed to enable */
207 used = false;
175out: 208out:
176 spin_unlock_irqrestore(&drvdata->spinlock, flags); 209 spin_unlock_irqrestore(&drvdata->spinlock, flags);
177 210
@@ -182,37 +215,40 @@ out:
182 return ret; 215 return ret;
183} 216}
184 217
185static int tmc_enable_etf_sink_perf(struct coresight_device *csdev) 218static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
186{ 219{
187 int ret = 0; 220 int ret = 0;
188 unsigned long flags; 221 unsigned long flags;
189 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 222 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
223 struct perf_output_handle *handle = data;
190 224
191 spin_lock_irqsave(&drvdata->spinlock, flags); 225 spin_lock_irqsave(&drvdata->spinlock, flags);
192 if (drvdata->reading) { 226 do {
193 ret = -EINVAL; 227 ret = -EINVAL;
194 goto out; 228 if (drvdata->reading)
195 } 229 break;
196 230 /*
197 /* 231 * In Perf mode there can be only one writer per sink. There
198 * In Perf mode there can be only one writer per sink. There 232 * is also no need to continue if the ETB/ETF is already
199 * is also no need to continue if the ETB/ETR is already operated 233 * operated from sysFS.
200 * from sysFS. 234 */
201 */ 235 if (drvdata->mode != CS_MODE_DISABLED)
202 if (drvdata->mode != CS_MODE_DISABLED) { 236 break;
203 ret = -EINVAL;
204 goto out;
205 }
206 237
207 drvdata->mode = CS_MODE_PERF; 238 ret = tmc_set_etf_buffer(csdev, handle);
208 tmc_etb_enable_hw(drvdata); 239 if (ret)
209out: 240 break;
241 ret = tmc_etb_enable_hw(drvdata);
242 if (!ret)
243 drvdata->mode = CS_MODE_PERF;
244 } while (0);
210 spin_unlock_irqrestore(&drvdata->spinlock, flags); 245 spin_unlock_irqrestore(&drvdata->spinlock, flags);
211 246
212 return ret; 247 return ret;
213} 248}
214 249
215static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode) 250static int tmc_enable_etf_sink(struct coresight_device *csdev,
251 u32 mode, void *data)
216{ 252{
217 int ret; 253 int ret;
218 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 254 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -222,7 +258,7 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
222 ret = tmc_enable_etf_sink_sysfs(csdev); 258 ret = tmc_enable_etf_sink_sysfs(csdev);
223 break; 259 break;
224 case CS_MODE_PERF: 260 case CS_MODE_PERF:
225 ret = tmc_enable_etf_sink_perf(csdev); 261 ret = tmc_enable_etf_sink_perf(csdev, data);
226 break; 262 break;
227 /* We shouldn't be here */ 263 /* We shouldn't be here */
228 default: 264 default:
@@ -233,7 +269,7 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
233 if (ret) 269 if (ret)
234 return ret; 270 return ret;
235 271
236 dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n"); 272 dev_dbg(drvdata->dev, "TMC-ETB/ETF enabled\n");
237 return 0; 273 return 0;
238} 274}
239 275
@@ -256,12 +292,13 @@ static void tmc_disable_etf_sink(struct coresight_device *csdev)
256 292
257 spin_unlock_irqrestore(&drvdata->spinlock, flags); 293 spin_unlock_irqrestore(&drvdata->spinlock, flags);
258 294
259 dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n"); 295 dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
260} 296}
261 297
262static int tmc_enable_etf_link(struct coresight_device *csdev, 298static int tmc_enable_etf_link(struct coresight_device *csdev,
263 int inport, int outport) 299 int inport, int outport)
264{ 300{
301 int ret;
265 unsigned long flags; 302 unsigned long flags;
266 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 303 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
267 304
@@ -271,12 +308,14 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
271 return -EBUSY; 308 return -EBUSY;
272 } 309 }
273 310
274 tmc_etf_enable_hw(drvdata); 311 ret = tmc_etf_enable_hw(drvdata);
275 drvdata->mode = CS_MODE_SYSFS; 312 if (!ret)
313 drvdata->mode = CS_MODE_SYSFS;
276 spin_unlock_irqrestore(&drvdata->spinlock, flags); 314 spin_unlock_irqrestore(&drvdata->spinlock, flags);
277 315
278 dev_info(drvdata->dev, "TMC-ETF enabled\n"); 316 if (!ret)
279 return 0; 317 dev_dbg(drvdata->dev, "TMC-ETF enabled\n");
318 return ret;
280} 319}
281 320
282static void tmc_disable_etf_link(struct coresight_device *csdev, 321static void tmc_disable_etf_link(struct coresight_device *csdev,
@@ -295,7 +334,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
295 drvdata->mode = CS_MODE_DISABLED; 334 drvdata->mode = CS_MODE_DISABLED;
296 spin_unlock_irqrestore(&drvdata->spinlock, flags); 335 spin_unlock_irqrestore(&drvdata->spinlock, flags);
297 336
298 dev_info(drvdata->dev, "TMC-ETF disabled\n"); 337 dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
299} 338}
300 339
301static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu, 340static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
@@ -328,12 +367,14 @@ static void tmc_free_etf_buffer(void *config)
328} 367}
329 368
330static int tmc_set_etf_buffer(struct coresight_device *csdev, 369static int tmc_set_etf_buffer(struct coresight_device *csdev,
331 struct perf_output_handle *handle, 370 struct perf_output_handle *handle)
332 void *sink_config)
333{ 371{
334 int ret = 0; 372 int ret = 0;
335 unsigned long head; 373 unsigned long head;
336 struct cs_buffers *buf = sink_config; 374 struct cs_buffers *buf = etm_perf_sink_config(handle);
375
376 if (!buf)
377 return -EINVAL;
337 378
338 /* wrap head around to the amount of space we have */ 379 /* wrap head around to the amount of space we have */
339 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); 380 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
@@ -349,36 +390,7 @@ static int tmc_set_etf_buffer(struct coresight_device *csdev,
349 return ret; 390 return ret;
350} 391}
351 392
352static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev, 393static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
353 struct perf_output_handle *handle,
354 void *sink_config)
355{
356 long size = 0;
357 struct cs_buffers *buf = sink_config;
358
359 if (buf) {
360 /*
361 * In snapshot mode ->data_size holds the new address of the
362 * ring buffer's head. The size itself is the whole address
363 * range since we want the latest information.
364 */
365 if (buf->snapshot)
366 handle->head = local_xchg(&buf->data_size,
367 buf->nr_pages << PAGE_SHIFT);
368 /*
369 * Tell the tracer PMU how much we got in this run and if
370 * something went wrong along the way. Nobody else can use
371 * this cs_buffers instance until we are done. As such
372 * resetting parameters here and squaring off with the ring
373 * buffer API in the tracer PMU is fine.
374 */
375 size = local_xchg(&buf->data_size, 0);
376 }
377
378 return size;
379}
380
381static void tmc_update_etf_buffer(struct coresight_device *csdev,
382 struct perf_output_handle *handle, 394 struct perf_output_handle *handle,
383 void *sink_config) 395 void *sink_config)
384{ 396{
@@ -387,17 +399,17 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
387 const u32 *barrier; 399 const u32 *barrier;
388 u32 *buf_ptr; 400 u32 *buf_ptr;
389 u64 read_ptr, write_ptr; 401 u64 read_ptr, write_ptr;
390 u32 status, to_read; 402 u32 status;
391 unsigned long offset; 403 unsigned long offset, to_read;
392 struct cs_buffers *buf = sink_config; 404 struct cs_buffers *buf = sink_config;
393 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 405 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
394 406
395 if (!buf) 407 if (!buf)
396 return; 408 return 0;
397 409
398 /* This shouldn't happen */ 410 /* This shouldn't happen */
399 if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF)) 411 if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
400 return; 412 return 0;
401 413
402 CS_UNLOCK(drvdata->base); 414 CS_UNLOCK(drvdata->base);
403 415
@@ -438,10 +450,10 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
438 case TMC_MEM_INTF_WIDTH_32BITS: 450 case TMC_MEM_INTF_WIDTH_32BITS:
439 case TMC_MEM_INTF_WIDTH_64BITS: 451 case TMC_MEM_INTF_WIDTH_64BITS:
440 case TMC_MEM_INTF_WIDTH_128BITS: 452 case TMC_MEM_INTF_WIDTH_128BITS:
441 mask = GENMASK(31, 5); 453 mask = GENMASK(31, 4);
442 break; 454 break;
443 case TMC_MEM_INTF_WIDTH_256BITS: 455 case TMC_MEM_INTF_WIDTH_256BITS:
444 mask = GENMASK(31, 6); 456 mask = GENMASK(31, 5);
445 break; 457 break;
446 } 458 }
447 459
@@ -486,18 +498,14 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
486 } 498 }
487 } 499 }
488 500
489 /* 501 /* In snapshot mode we have to update the head */
490 * In snapshot mode all we have to do is communicate to 502 if (buf->snapshot) {
491 * perf_aux_output_end() the address of the current head. In full 503 handle->head = (cur * PAGE_SIZE) + offset;
492 * trace mode the same function expects a size to move rb->aux_head 504 to_read = buf->nr_pages << PAGE_SHIFT;
493 * forward. 505 }
494 */
495 if (buf->snapshot)
496 local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
497 else
498 local_add(to_read, &buf->data_size);
499
500 CS_LOCK(drvdata->base); 506 CS_LOCK(drvdata->base);
507
508 return to_read;
501} 509}
502 510
503static const struct coresight_ops_sink tmc_etf_sink_ops = { 511static const struct coresight_ops_sink tmc_etf_sink_ops = {
@@ -505,8 +513,6 @@ static const struct coresight_ops_sink tmc_etf_sink_ops = {
505 .disable = tmc_disable_etf_sink, 513 .disable = tmc_disable_etf_sink,
506 .alloc_buffer = tmc_alloc_etf_buffer, 514 .alloc_buffer = tmc_alloc_etf_buffer,
507 .free_buffer = tmc_free_etf_buffer, 515 .free_buffer = tmc_free_etf_buffer,
508 .set_buffer = tmc_set_etf_buffer,
509 .reset_buffer = tmc_reset_etf_buffer,
510 .update_buffer = tmc_update_etf_buffer, 516 .update_buffer = tmc_update_etf_buffer,
511}; 517};
512 518
@@ -563,7 +569,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
563 569
564 /* Disable the TMC if need be */ 570 /* Disable the TMC if need be */
565 if (drvdata->mode == CS_MODE_SYSFS) 571 if (drvdata->mode == CS_MODE_SYSFS)
566 tmc_etb_disable_hw(drvdata); 572 __tmc_etb_disable_hw(drvdata);
567 573
568 drvdata->reading = true; 574 drvdata->reading = true;
569out: 575out:
@@ -603,7 +609,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
603 * can't be NULL. 609 * can't be NULL.
604 */ 610 */
605 memset(drvdata->buf, 0, drvdata->size); 611 memset(drvdata->buf, 0, drvdata->size);
606 tmc_etb_enable_hw(drvdata); 612 __tmc_etb_enable_hw(drvdata);
607 } else { 613 } else {
608 /* 614 /*
609 * The ETB/ETF is not tracing and the buffer was just read. 615 * The ETB/ETF is not tracing and the buffer was just read.
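The __tmc_etb_*_hw() split pays off in these last two hunks: pausing the ETB for a sysfs read must not drop the claim tag, because the sysfs session still owns the device while its buffer is read out. The read paths therefore call the bare register helpers, and only the real enable/disable paths claim and disclaim. In outline:

    /* read prepare: pause capture but keep the claim tag held */
    if (drvdata->mode == CS_MODE_SYSFS)
            __tmc_etb_disable_hw(drvdata);
    drvdata->reading = true;

    /* read unprepare, session still active: re-arm without re-claiming */
    __tmc_etb_enable_hw(drvdata);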
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 2eda5de304c2..f684283890d3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -10,6 +10,7 @@
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
12#include "coresight-catu.h" 12#include "coresight-catu.h"
13#include "coresight-etm-perf.h"
13#include "coresight-priv.h" 14#include "coresight-priv.h"
14#include "coresight-tmc.h" 15#include "coresight-tmc.h"
15 16
@@ -21,6 +22,28 @@ struct etr_flat_buf {
21}; 22};
22 23
23/* 24/*
25 * etr_perf_buffer - Perf buffer used for ETR
26 * @etr_buf - Actual buffer used by the ETR
27 * @snapshot - Perf session mode
28 * @head - handle->head at the beginning of the session.
29 * @nr_pages - Number of pages in the ring buffer.
30 * @pages - Array of pages in the ring buffer.
31 */
32struct etr_perf_buffer {
33 struct etr_buf *etr_buf;
34 bool snapshot;
35 unsigned long head;
36 int nr_pages;
37 void **pages;
38};
39
40/* Convert the perf index to an offset within the ETR buffer */
41#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
42
43/* Lower limit for ETR hardware buffer */
44#define TMC_ETR_PERF_MIN_BUF_SIZE SZ_1M
45
46/*
24 * The TMC ETR SG has a page size of 4K. The SG table contains pointers 47 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
25 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from 48 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
26 * 4K (i.e, 16KB or 64KB). This implies that a single OS page could 49 * 4K (i.e, 16KB or 64KB). This implies that a single OS page could
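PERF_IDX2OFF() folds perf's free-running AUX index (handle->head only ever grows) into an offset inside the ring buffer by reducing it modulo the buffer size. A quick worked example, assuming 4KB pages:

    /* nr_pages = 16  ->  buffer spans 16 << 12 = 65536 bytes.
     *
     *   PERF_IDX2OFF(200000, buf) = 200000 % 65536 = 3392
     *
     * i.e. the write position has lapped the 64KB buffer three
     * times and sits 3392 bytes past its start. */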
@@ -536,7 +559,7 @@ tmc_init_etr_sg_table(struct device *dev, int node,
536 sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages); 559 sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
537 if (IS_ERR(sg_table)) { 560 if (IS_ERR(sg_table)) {
538 kfree(etr_table); 561 kfree(etr_table);
539 return ERR_PTR(PTR_ERR(sg_table)); 562 return ERR_CAST(sg_table);
540 } 563 }
541 564
542 etr_table->sg_table = sg_table; 565 etr_table->sg_table = sg_table;
@@ -728,12 +751,14 @@ tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
728 return NULL; 751 return NULL;
729} 752}
730 753
731static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata) 754static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
755 struct etr_buf *etr_buf)
732{ 756{
733 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata); 757 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
734 758
735 if (catu && helper_ops(catu)->enable) 759 if (catu && helper_ops(catu)->enable)
736 helper_ops(catu)->enable(catu, drvdata->etr_buf); 760 return helper_ops(catu)->enable(catu, etr_buf);
761 return 0;
737} 762}
738 763
739static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata) 764static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
@@ -895,17 +920,11 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
895 tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset); 920 tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
896} 921}
897 922
898static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) 923static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
899{ 924{
900 u32 axictl, sts; 925 u32 axictl, sts;
901 struct etr_buf *etr_buf = drvdata->etr_buf; 926 struct etr_buf *etr_buf = drvdata->etr_buf;
902 927
903 /*
904 * If this ETR is connected to a CATU, enable it before we turn
905 * this on
906 */
907 tmc_etr_enable_catu(drvdata);
908
909 CS_UNLOCK(drvdata->base); 928 CS_UNLOCK(drvdata->base);
910 929
911 /* Wait for TMCSReady bit to be set */ 930 /* Wait for TMCSReady bit to be set */
@@ -924,11 +943,8 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
924 axictl |= TMC_AXICTL_ARCACHE_OS; 943 axictl |= TMC_AXICTL_ARCACHE_OS;
925 } 944 }
926 945
927 if (etr_buf->mode == ETR_MODE_ETR_SG) { 946 if (etr_buf->mode == ETR_MODE_ETR_SG)
928 if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
929 return;
930 axictl |= TMC_AXICTL_SCT_GAT_MODE; 947 axictl |= TMC_AXICTL_SCT_GAT_MODE;
931 }
932 948
933 writel_relaxed(axictl, drvdata->base + TMC_AXICTL); 949 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
934 tmc_write_dba(drvdata, etr_buf->hwaddr); 950 tmc_write_dba(drvdata, etr_buf->hwaddr);
@@ -954,19 +970,54 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
954 CS_LOCK(drvdata->base); 970 CS_LOCK(drvdata->base);
955} 971}
956 972
973static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
974 struct etr_buf *etr_buf)
975{
976 int rc;
977
978 /* Callers should provide an appropriate buffer for use */
979 if (WARN_ON(!etr_buf))
980 return -EINVAL;
981
982 if ((etr_buf->mode == ETR_MODE_ETR_SG) &&
983 WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
984 return -EINVAL;
985
986 if (WARN_ON(drvdata->etr_buf))
987 return -EBUSY;
988
989 /*
990 * If this ETR is connected to a CATU, enable it before we turn
991 * this on.
992 */
993 rc = tmc_etr_enable_catu(drvdata, etr_buf);
994 if (rc)
995 return rc;
996 rc = coresight_claim_device(drvdata->base);
997 if (!rc) {
998 drvdata->etr_buf = etr_buf;
999 __tmc_etr_enable_hw(drvdata);
1000 }
1001
1002 return rc;
1003}
1004
957/* 1005/*
958 * Return the available trace data in the buffer (starts at etr_buf->offset, 1006 * Return the available trace data in the buffer (starts at etr_buf->offset,
959 * limited by etr_buf->len) from @pos, with a maximum limit of @len, 1007 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
960 * also updating the @bufpp on where to find it. Since the trace data 1008 * also updating the @bufpp on where to find it. Since the trace data
961 * starts at anywhere in the buffer, depending on the RRP, we adjust the 1009 * starts at anywhere in the buffer, depending on the RRP, we adjust the
962 * @len returned to handle buffer wrapping around. 1010 * @len returned to handle buffer wrapping around.
1011 *
1012 * We are protected here by drvdata->reading != 0, which ensures the
1013 * sysfs_buf stays alive.
963 */ 1014 */
964ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata, 1015ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
965 loff_t pos, size_t len, char **bufpp) 1016 loff_t pos, size_t len, char **bufpp)
966{ 1017{
967 s64 offset; 1018 s64 offset;
968 ssize_t actual = len; 1019 ssize_t actual = len;
969 struct etr_buf *etr_buf = drvdata->etr_buf; 1020 struct etr_buf *etr_buf = drvdata->sysfs_buf;
970 1021
971 if (pos + actual > etr_buf->len) 1022 if (pos + actual > etr_buf->len)
972 actual = etr_buf->len - pos; 1023 actual = etr_buf->len - pos;
@@ -996,10 +1047,17 @@ tmc_etr_free_sysfs_buf(struct etr_buf *buf)
996 1047
997static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata) 1048static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
998{ 1049{
999 tmc_sync_etr_buf(drvdata); 1050 struct etr_buf *etr_buf = drvdata->etr_buf;
1051
1052 if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
1053 tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
1054 drvdata->sysfs_buf = NULL;
1055 } else {
1056 tmc_sync_etr_buf(drvdata);
1057 }
1000} 1058}
1001 1059
1002static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) 1060static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
1003{ 1061{
1004 CS_UNLOCK(drvdata->base); 1062 CS_UNLOCK(drvdata->base);
1005 1063
@@ -1015,8 +1073,16 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
1015 1073
1016 CS_LOCK(drvdata->base); 1074 CS_LOCK(drvdata->base);
1017 1075
1076}
1077
1078static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
1079{
1080 __tmc_etr_disable_hw(drvdata);
1018 /* Disable CATU device if this ETR is connected to one */ 1081 /* Disable CATU device if this ETR is connected to one */
1019 tmc_etr_disable_catu(drvdata); 1082 tmc_etr_disable_catu(drvdata);
1083 coresight_disclaim_device(drvdata->base);
1084 /* Reset the ETR buf used by hardware */
1085 drvdata->etr_buf = NULL;
1020} 1086}
1021 1087
1022static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) 1088static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
@@ -1024,7 +1090,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
1024 int ret = 0; 1090 int ret = 0;
1025 unsigned long flags; 1091 unsigned long flags;
1026 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 1092 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1027 struct etr_buf *new_buf = NULL, *free_buf = NULL; 1093 struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;
1028 1094
1029 /* 1095 /*
1030 * If we are enabling the ETR from disabled state, we need to make 1096 * If we are enabling the ETR from disabled state, we need to make
@@ -1035,7 +1101,8 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
1035 * with the lock released. 1101 * with the lock released.
1036 */ 1102 */
1037 spin_lock_irqsave(&drvdata->spinlock, flags); 1103 spin_lock_irqsave(&drvdata->spinlock, flags);
1038 if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) { 1104 sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
1105 if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
1039 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1106 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1040 1107
1041 /* Allocate memory with the locks released */ 1108 /* Allocate memory with the locks released */
@@ -1064,14 +1131,15 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
1064 * If we don't have a buffer or it doesn't match the requested size, 1131 * If we don't have a buffer or it doesn't match the requested size,
1065 * use the buffer allocated above. Otherwise reuse the existing buffer. 1132 * use the buffer allocated above. Otherwise reuse the existing buffer.
1066 */ 1133 */
1067 if (!drvdata->etr_buf || 1134 sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
1068 (new_buf && drvdata->etr_buf->size != new_buf->size)) { 1135 if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
1069 free_buf = drvdata->etr_buf; 1136 free_buf = sysfs_buf;
1070 drvdata->etr_buf = new_buf; 1137 drvdata->sysfs_buf = new_buf;
1071 } 1138 }
1072 1139
1073 drvdata->mode = CS_MODE_SYSFS; 1140 ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
1074 tmc_etr_enable_hw(drvdata); 1141 if (!ret)
1142 drvdata->mode = CS_MODE_SYSFS;
1075out: 1143out:
1076 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1144 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1077 1145
@@ -1080,24 +1148,244 @@ out:
1080 tmc_etr_free_sysfs_buf(free_buf); 1148 tmc_etr_free_sysfs_buf(free_buf);
1081 1149
1082 if (!ret) 1150 if (!ret)
1083 dev_info(drvdata->dev, "TMC-ETR enabled\n"); 1151 dev_dbg(drvdata->dev, "TMC-ETR enabled\n");
1084 1152
1085 return ret; 1153 return ret;
1086} 1154}
1087 1155
1088static int tmc_enable_etr_sink_perf(struct coresight_device *csdev) 1156/*
1157 * tmc_etr_setup_perf_buf: Allocate ETR buffer for use by perf.
 1158 * The size of the hardware buffer depends on the size configured
 1159 * via sysfs and on the perf ring buffer size. We prefer to allocate
 1160 * the largest possible size, scaling it down by half until it
 1161 * reaches the minimum limit (1M), below which we give up.
1162 */
1163static struct etr_perf_buffer *
1164tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
1165 void **pages, bool snapshot)
1089{ 1166{
1090 /* We don't support perf mode yet ! */ 1167 struct etr_buf *etr_buf;
1091 return -EINVAL; 1168 struct etr_perf_buffer *etr_perf;
1169 unsigned long size;
1170
1171 etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
1172 if (!etr_perf)
1173 return ERR_PTR(-ENOMEM);
1174
1175 /*
1176 * Try to match the perf ring buffer size if it is larger
1177 * than the size requested via sysfs.
1178 */
1179 if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
1180 etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
1181 0, node, NULL);
1182 if (!IS_ERR(etr_buf))
1183 goto done;
1184 }
1185
1186 /*
 1187 * Otherwise, switch to the configured size for this ETR
1188 * and scale down until we hit the minimum limit.
1189 */
1190 size = drvdata->size;
1191 do {
1192 etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
1193 if (!IS_ERR(etr_buf))
1194 goto done;
1195 size /= 2;
1196 } while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);
1197
1198 kfree(etr_perf);
1199 return ERR_PTR(-ENOMEM);
1200
1201done:
1202 etr_perf->etr_buf = etr_buf;
1203 return etr_perf;
1092} 1204}
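
For clarity, the sizing policy above can be modelled in isolation. The sketch below is a user-space illustration only; try_alloc() is a hypothetical stand-in for tmc_alloc_etr_buf() that succeeds below an arbitrary 2M ceiling:

	#include <errno.h>
	#include <stdbool.h>

	/* Hypothetical stand-in for tmc_alloc_etr_buf(): pretend only
	 * allocations of up to 2M can succeed on this system. */
	static bool try_alloc(long size)
	{
		return size <= (2L << 20);
	}

	/* Model of tmc_etr_setup_perf_buf()'s sizing decisions. */
	static long pick_buf_size(long ring_size, long sysfs_size, long min_size)
	{
		long size;

		/* Prefer matching the perf ring buffer when it is larger. */
		if (ring_size > sysfs_size && try_alloc(ring_size))
			return ring_size;

		/* Otherwise halve the configured size down to the floor. */
		for (size = sysfs_size; size >= min_size; size /= 2)
			if (try_alloc(size))
				return size;

		return -ENOMEM;
	}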
1093 1205
1094static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode) 1206
1207static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
1208 int cpu, void **pages, int nr_pages,
1209 bool snapshot)
1210{
1211 struct etr_perf_buffer *etr_perf;
1212 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1213
1214 if (cpu == -1)
1215 cpu = smp_processor_id();
1216
1217 etr_perf = tmc_etr_setup_perf_buf(drvdata, cpu_to_node(cpu),
1218 nr_pages, pages, snapshot);
1219 if (IS_ERR(etr_perf)) {
1220 dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
1221 return NULL;
1222 }
1223
1224 etr_perf->snapshot = snapshot;
1225 etr_perf->nr_pages = nr_pages;
1226 etr_perf->pages = pages;
1227
1228 return etr_perf;
1229}
1230
1231static void tmc_free_etr_buffer(void *config)
1232{
1233 struct etr_perf_buffer *etr_perf = config;
1234
1235 if (etr_perf->etr_buf)
1236 tmc_free_etr_buf(etr_perf->etr_buf);
1237 kfree(etr_perf);
1238}
1239
1240/*
1241 * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
1242 * buffer to the perf ring buffer.
1243 */
1244static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
1245{
1246 long bytes, to_copy;
1247 long pg_idx, pg_offset, src_offset;
 1248 unsigned long head;
1249 char **dst_pages, *src_buf;
1250 struct etr_buf *etr_buf = etr_perf->etr_buf;
1251
1252 head = etr_perf->head;
1253 pg_idx = head >> PAGE_SHIFT;
1254 pg_offset = head & (PAGE_SIZE - 1);
1255 dst_pages = (char **)etr_perf->pages;
1256 src_offset = etr_buf->offset;
1257 to_copy = etr_buf->len;
1258
1259 while (to_copy > 0) {
 1260 /*
 1261 * In one iteration we can copy, at most, the
 1262 * minimum of:
 1263 * 1) what is left to copy from the source buffer,
 1264 * 2) what is available in the source buffer before
 1265 * it wraps around,
 1266 * 3) what is left in the current destination page.
 1267 */
1268 bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
1269 &src_buf);
1270 if (WARN_ON_ONCE(bytes <= 0))
1271 break;
1272 bytes = min(bytes, (long)(PAGE_SIZE - pg_offset));
1273
1274 memcpy(dst_pages[pg_idx] + pg_offset, src_buf, bytes);
1275
1276 to_copy -= bytes;
1277
1278 /* Move destination pointers */
1279 pg_offset += bytes;
1280 if (pg_offset == PAGE_SIZE) {
1281 pg_offset = 0;
1282 if (++pg_idx == etr_perf->nr_pages)
1283 pg_idx = 0;
1284 }
1285
1286 /* Move source pointers */
1287 src_offset += bytes;
1288 if (src_offset >= etr_buf->size)
1289 src_offset -= etr_buf->size;
1290 }
1291}
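
The loop above can be exercised stand-alone. A minimal user-space model of the same wrap-aware copy, assuming 4K pages and a pre-filled circular source buffer (all names here are illustrative only):

	#include <string.h>

	#define PAGE_SZ 4096

	/* Drain `len` bytes of a circular source buffer into an array of
	 * PAGE_SZ-sized pages, wrapping on both sides. */
	static void sync_circular(const char *src, long src_size, long src_off,
				  long len, char **pages, int nr_pages, long head)
	{
		long pg_idx = head / PAGE_SZ;
		long pg_off = head % PAGE_SZ;

		while (len > 0) {
			/* Contiguous bytes before the source wraps... */
			long bytes = src_size - src_off;

			if (bytes > len)
				bytes = len;
			/* ...capped by the room left in the destination page. */
			if (bytes > PAGE_SZ - pg_off)
				bytes = PAGE_SZ - pg_off;

			memcpy(pages[pg_idx] + pg_off, src + src_off, bytes);
			len -= bytes;

			/* Advance destination, wrapping over the page array. */
			pg_off += bytes;
			if (pg_off == PAGE_SZ) {
				pg_off = 0;
				if (++pg_idx == nr_pages)
					pg_idx = 0;
			}

			/* Advance source, wrapping over the circular buffer. */
			src_off += bytes;
			if (src_off >= src_size)
				src_off = 0;
		}
	}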
1292
1293/*
1294 * tmc_update_etr_buffer : Update the perf ring buffer with the
1295 * available trace data. We use software double buffering at the moment.
1296 *
1297 * TODO: Add support for reusing the perf ring buffer.
1298 */
1299static unsigned long
1300tmc_update_etr_buffer(struct coresight_device *csdev,
1301 struct perf_output_handle *handle,
1302 void *config)
1303{
1304 bool lost = false;
1305 unsigned long flags, size = 0;
1306 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1307 struct etr_perf_buffer *etr_perf = config;
1308 struct etr_buf *etr_buf = etr_perf->etr_buf;
1309
1310 spin_lock_irqsave(&drvdata->spinlock, flags);
1311 if (WARN_ON(drvdata->perf_data != etr_perf)) {
1312 lost = true;
1313 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1314 goto out;
1315 }
1316
1317 CS_UNLOCK(drvdata->base);
1318
1319 tmc_flush_and_stop(drvdata);
1320 tmc_sync_etr_buf(drvdata);
1321
1322 CS_LOCK(drvdata->base);
1323 /* Reset perf specific data */
1324 drvdata->perf_data = NULL;
1325 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1326
1327 size = etr_buf->len;
1328 tmc_etr_sync_perf_buffer(etr_perf);
1329
1330 /*
1331 * Update handle->head in snapshot mode. Also update the size to the
1332 * hardware buffer size if there was an overflow.
1333 */
1334 if (etr_perf->snapshot) {
1335 handle->head += size;
1336 if (etr_buf->full)
1337 size = etr_buf->size;
1338 }
1339
1340 lost |= etr_buf->full;
1341out:
1342 if (lost)
1343 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
1344 return size;
1345}
1346
1347static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
1348{
1349 int rc = 0;
1350 unsigned long flags;
1351 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1352 struct perf_output_handle *handle = data;
1353 struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
1354
1355 spin_lock_irqsave(&drvdata->spinlock, flags);
1356 /*
1357 * There can be only one writer per sink in perf mode. If the sink
1358 * is already open in SYSFS mode, we can't use it.
1359 */
1360 if (drvdata->mode != CS_MODE_DISABLED || WARN_ON(drvdata->perf_data)) {
1361 rc = -EBUSY;
1362 goto unlock_out;
1363 }
1364
1365 if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) {
1366 rc = -EINVAL;
1367 goto unlock_out;
1368 }
1369
1370 etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
1371 drvdata->perf_data = etr_perf;
1372 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
1373 if (!rc)
1374 drvdata->mode = CS_MODE_PERF;
1375
1376unlock_out:
1377 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1378 return rc;
1379}
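
For reference, PERF_IDX2OFF() above folds perf's free-running handle->head byte counter into an offset inside the AUX buffer. From memory of coresight-etm-perf.h (an assumption worth double-checking, not quoted from this patch):

	/* Assumed definition, mirroring coresight-etm-perf.h: */
	#define PERF_IDX2OFF(idx, buf)	((idx) % ((buf)->nr_pages << PAGE_SHIFT))

	/* e.g. a 16-page (64K) buffer maps head 0x12345 to offset 0x2345. */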
1380
1381static int tmc_enable_etr_sink(struct coresight_device *csdev,
1382 u32 mode, void *data)
1095{ 1383{
1096 switch (mode) { 1384 switch (mode) {
1097 case CS_MODE_SYSFS: 1385 case CS_MODE_SYSFS:
1098 return tmc_enable_etr_sink_sysfs(csdev); 1386 return tmc_enable_etr_sink_sysfs(csdev);
1099 case CS_MODE_PERF: 1387 case CS_MODE_PERF:
1100 return tmc_enable_etr_sink_perf(csdev); 1388 return tmc_enable_etr_sink_perf(csdev, data);
1101 } 1389 }
1102 1390
1103 /* We shouldn't be here */ 1391 /* We shouldn't be here */
@@ -1123,12 +1411,15 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
1123 1411
1124 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1412 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1125 1413
1126 dev_info(drvdata->dev, "TMC-ETR disabled\n"); 1414 dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
1127} 1415}
1128 1416
1129static const struct coresight_ops_sink tmc_etr_sink_ops = { 1417static const struct coresight_ops_sink tmc_etr_sink_ops = {
1130 .enable = tmc_enable_etr_sink, 1418 .enable = tmc_enable_etr_sink,
1131 .disable = tmc_disable_etr_sink, 1419 .disable = tmc_disable_etr_sink,
1420 .alloc_buffer = tmc_alloc_etr_buffer,
1421 .update_buffer = tmc_update_etr_buffer,
1422 .free_buffer = tmc_free_etr_buffer,
1132}; 1423};
1133 1424
1134const struct coresight_ops tmc_etr_cs_ops = { 1425const struct coresight_ops tmc_etr_cs_ops = {
@@ -1150,21 +1441,19 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
1150 goto out; 1441 goto out;
1151 } 1442 }
1152 1443
1153 /* Don't interfere if operated from Perf */ 1444 /*
1154 if (drvdata->mode == CS_MODE_PERF) { 1445 * We can safely allow reads even if the ETR is operating in PERF mode,
 1155 ret = -EINVAL; 1446 * since the sysfs session is captured in mode-specific data.
 1156 goto out; 1447 * If drvdata::sysfs_buf is NULL, the trace data has been read already.
1157 } 1448 */
1158 1449 if (!drvdata->sysfs_buf) {
1159 /* If drvdata::etr_buf is NULL the trace data has been read already */
1160 if (drvdata->etr_buf == NULL) {
1161 ret = -EINVAL; 1450 ret = -EINVAL;
1162 goto out; 1451 goto out;
1163 } 1452 }
1164 1453
1165 /* Disable the TMC if need be */ 1454 /* Disable the TMC if we are trying to read from a running session. */
1166 if (drvdata->mode == CS_MODE_SYSFS) 1455 if (drvdata->mode == CS_MODE_SYSFS)
1167 tmc_etr_disable_hw(drvdata); 1456 __tmc_etr_disable_hw(drvdata);
1168 1457
1169 drvdata->reading = true; 1458 drvdata->reading = true;
1170out: 1459out:
@@ -1176,7 +1465,7 @@ out:
1176int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) 1465int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
1177{ 1466{
1178 unsigned long flags; 1467 unsigned long flags;
1179 struct etr_buf *etr_buf = NULL; 1468 struct etr_buf *sysfs_buf = NULL;
1180 1469
 1181 /* config types are set at boot time and never change */ 1470
1182 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) 1471 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -1191,22 +1480,22 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 1191 * buffer. Since the tracer is still enabled, drvdata::buf can't 1480
1192 * be NULL. 1481 * be NULL.
1193 */ 1482 */
1194 tmc_etr_enable_hw(drvdata); 1483 __tmc_etr_enable_hw(drvdata);
1195 } else { 1484 } else {
1196 /* 1485 /*
1197 * The ETR is not tracing and the buffer was just read. 1486 * The ETR is not tracing and the buffer was just read.
1198 * As such prepare to free the trace buffer. 1487 * As such prepare to free the trace buffer.
1199 */ 1488 */
1200 etr_buf = drvdata->etr_buf; 1489 sysfs_buf = drvdata->sysfs_buf;
1201 drvdata->etr_buf = NULL; 1490 drvdata->sysfs_buf = NULL;
1202 } 1491 }
1203 1492
1204 drvdata->reading = false; 1493 drvdata->reading = false;
1205 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1494 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1206 1495
 1207 /* Free allocated memory outside of the spinlock */ 1496
1208 if (etr_buf) 1497 if (sysfs_buf)
1209 tmc_free_etr_buf(etr_buf); 1498 tmc_etr_free_sysfs_buf(sysfs_buf);
1210 1499
1211 return 0; 1500 return 0;
1212} 1501}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 1b817ec1192c..ea249f0bcd73 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -81,7 +81,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
81 } 81 }
82 82
83 if (!ret) 83 if (!ret)
84 dev_info(drvdata->dev, "TMC read start\n"); 84 dev_dbg(drvdata->dev, "TMC read start\n");
85 85
86 return ret; 86 return ret;
87} 87}
@@ -103,7 +103,7 @@ static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
103 } 103 }
104 104
105 if (!ret) 105 if (!ret)
106 dev_info(drvdata->dev, "TMC read end\n"); 106 dev_dbg(drvdata->dev, "TMC read end\n");
107 107
108 return ret; 108 return ret;
109} 109}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 7027bd60c4cc..487c53701e9c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -170,6 +170,8 @@ struct etr_buf {
170 * @trigger_cntr: amount of words to store after a trigger. 170 * @trigger_cntr: amount of words to store after a trigger.
171 * @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the 171 * @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the
172 * device configuration register (DEVID) 172 * device configuration register (DEVID)
 173 * @sysfs_buf: SYSFS buffer for ETR.
 174 * @perf_data: PERF buffer for ETR.
173 */ 175 */
174struct tmc_drvdata { 176struct tmc_drvdata {
175 void __iomem *base; 177 void __iomem *base;
@@ -189,6 +191,8 @@ struct tmc_drvdata {
189 enum tmc_mem_intf_width memwidth; 191 enum tmc_mem_intf_width memwidth;
190 u32 trigger_cntr; 192 u32 trigger_cntr;
191 u32 etr_caps; 193 u32 etr_caps;
194 struct etr_buf *sysfs_buf;
195 void *perf_data;
192}; 196};
193 197
194struct etr_buf_operations { 198struct etr_buf_operations {
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 459ef930d98c..b2f72a1fa402 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -68,13 +68,13 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
68 CS_LOCK(drvdata->base); 68 CS_LOCK(drvdata->base);
69} 69}
70 70
71static int tpiu_enable(struct coresight_device *csdev, u32 mode) 71static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
72{ 72{
73 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 73 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
74 74
75 tpiu_enable_hw(drvdata); 75 tpiu_enable_hw(drvdata);
76 76
77 dev_info(drvdata->dev, "TPIU enabled\n"); 77 dev_dbg(drvdata->dev, "TPIU enabled\n");
78 return 0; 78 return 0;
79} 79}
80 80
@@ -100,7 +100,7 @@ static void tpiu_disable(struct coresight_device *csdev)
100 100
101 tpiu_disable_hw(drvdata); 101 tpiu_disable_hw(drvdata);
102 102
103 dev_info(drvdata->dev, "TPIU disabled\n"); 103 dev_dbg(drvdata->dev, "TPIU disabled\n");
104} 104}
105 105
106static const struct coresight_ops_sink tpiu_sink_ops = { 106static const struct coresight_ops_sink tpiu_sink_ops = {
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 3e07fd335f8c..2b0df1a0a8df 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -128,16 +128,105 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
128 return -ENODEV; 128 return -ENODEV;
129} 129}
130 130
131static int coresight_enable_sink(struct coresight_device *csdev, u32 mode) 131static inline u32 coresight_read_claim_tags(void __iomem *base)
132{
133 return readl_relaxed(base + CORESIGHT_CLAIMCLR);
134}
135
136static inline bool coresight_is_claimed_self_hosted(void __iomem *base)
137{
138 return coresight_read_claim_tags(base) == CORESIGHT_CLAIM_SELF_HOSTED;
139}
140
141static inline bool coresight_is_claimed_any(void __iomem *base)
142{
143 return coresight_read_claim_tags(base) != 0;
144}
145
146static inline void coresight_set_claim_tags(void __iomem *base)
147{
148 writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMSET);
149 isb();
150}
151
152static inline void coresight_clear_claim_tags(void __iomem *base)
153{
154 writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMCLR);
155 isb();
156}
157
158/*
159 * coresight_claim_device_unlocked : Claim the device for self-hosted usage
160 * to prevent an external tool from touching this device. As per PSCI
161 * standards, section "Preserving the execution context" => "Debug and Trace
162 * save and Restore", DBGCLAIM[1] is reserved for Self-hosted debug/trace and
163 * DBGCLAIM[0] is reserved for external tools.
164 *
165 * Called with CS_UNLOCKed for the component.
166 * Returns : 0 on success
167 */
168int coresight_claim_device_unlocked(void __iomem *base)
169{
170 if (coresight_is_claimed_any(base))
171 return -EBUSY;
172
173 coresight_set_claim_tags(base);
174 if (coresight_is_claimed_self_hosted(base))
175 return 0;
176 /* There was a race setting the tags, clean up and fail */
177 coresight_clear_claim_tags(base);
178 return -EBUSY;
179}
180
181int coresight_claim_device(void __iomem *base)
182{
183 int rc;
184
185 CS_UNLOCK(base);
186 rc = coresight_claim_device_unlocked(base);
187 CS_LOCK(base);
188
189 return rc;
190}
191
192/*
193 * coresight_disclaim_device_unlocked : Clear the claim tags for the device.
194 * Called with CS_UNLOCKed for the component.
195 */
196void coresight_disclaim_device_unlocked(void __iomem *base)
197{
198
199 if (coresight_is_claimed_self_hosted(base))
200 coresight_clear_claim_tags(base);
201 else
202 /*
 203 * The external agent may not have honoured our claim
 204 * and may have manipulated it, or something else has
 205 * gone seriously wrong in our driver.
206 */
207 WARN_ON_ONCE(1);
208}
209
210void coresight_disclaim_device(void __iomem *base)
211{
212 CS_UNLOCK(base);
213 coresight_disclaim_device_unlocked(base);
214 CS_LOCK(base);
215}
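
A sketch of how a component driver would be expected to pair these helpers around its programming sequence; my_hw_on()/my_hw_off() are hypothetical, and this is an illustration rather than the driver's actual code:

	static int my_component_enable(void __iomem *base)
	{
		int rc;

		CS_UNLOCK(base);
		/* Fail if an external debugger already owns the device. */
		rc = coresight_claim_device_unlocked(base);
		if (!rc)
			my_hw_on(base);
		CS_LOCK(base);

		return rc;
	}

	static void my_component_disable(void __iomem *base)
	{
		CS_UNLOCK(base);
		my_hw_off(base);
		coresight_disclaim_device_unlocked(base);
		CS_LOCK(base);
	}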
216
217static int coresight_enable_sink(struct coresight_device *csdev,
218 u32 mode, void *data)
132{ 219{
133 int ret; 220 int ret;
134 221
135 if (!csdev->enable) { 222 /*
136 if (sink_ops(csdev)->enable) { 223 * We need to make sure the "new" session is compatible with the
137 ret = sink_ops(csdev)->enable(csdev, mode); 224 * existing "mode" of operation.
138 if (ret) 225 */
139 return ret; 226 if (sink_ops(csdev)->enable) {
140 } 227 ret = sink_ops(csdev)->enable(csdev, mode, data);
228 if (ret)
229 return ret;
141 csdev->enable = true; 230 csdev->enable = true;
142 } 231 }
143 232
@@ -184,8 +273,10 @@ static int coresight_enable_link(struct coresight_device *csdev,
184 if (atomic_inc_return(&csdev->refcnt[refport]) == 1) { 273 if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
185 if (link_ops(csdev)->enable) { 274 if (link_ops(csdev)->enable) {
186 ret = link_ops(csdev)->enable(csdev, inport, outport); 275 ret = link_ops(csdev)->enable(csdev, inport, outport);
187 if (ret) 276 if (ret) {
277 atomic_dec(&csdev->refcnt[refport]);
188 return ret; 278 return ret;
279 }
189 } 280 }
190 } 281 }
191 282
@@ -274,13 +365,21 @@ static bool coresight_disable_source(struct coresight_device *csdev)
274 return !csdev->enable; 365 return !csdev->enable;
275} 366}
276 367
277void coresight_disable_path(struct list_head *path) 368/*
369 * coresight_disable_path_from : Disable components in the given path beyond
 370 * @nd in the list. If @nd is NULL, all the components except the SOURCE are
371 * disabled.
372 */
373static void coresight_disable_path_from(struct list_head *path,
374 struct coresight_node *nd)
278{ 375{
279 u32 type; 376 u32 type;
280 struct coresight_node *nd;
281 struct coresight_device *csdev, *parent, *child; 377 struct coresight_device *csdev, *parent, *child;
282 378
283 list_for_each_entry(nd, path, link) { 379 if (!nd)
380 nd = list_first_entry(path, struct coresight_node, link);
381
382 list_for_each_entry_continue(nd, path, link) {
284 csdev = nd->csdev; 383 csdev = nd->csdev;
285 type = csdev->type; 384 type = csdev->type;
286 385
@@ -300,7 +399,12 @@ void coresight_disable_path(struct list_head *path)
300 coresight_disable_sink(csdev); 399 coresight_disable_sink(csdev);
301 break; 400 break;
302 case CORESIGHT_DEV_TYPE_SOURCE: 401 case CORESIGHT_DEV_TYPE_SOURCE:
303 /* sources are disabled from either sysFS or Perf */ 402 /*
403 * We skip the first node in the path assuming that it
404 * is the source. So we don't expect a source device in
405 * the middle of a path.
406 */
407 WARN_ON(1);
304 break; 408 break;
305 case CORESIGHT_DEV_TYPE_LINK: 409 case CORESIGHT_DEV_TYPE_LINK:
306 parent = list_prev_entry(nd, link)->csdev; 410 parent = list_prev_entry(nd, link)->csdev;
@@ -313,7 +417,12 @@ void coresight_disable_path(struct list_head *path)
313 } 417 }
314} 418}
315 419
316int coresight_enable_path(struct list_head *path, u32 mode) 420void coresight_disable_path(struct list_head *path)
421{
422 coresight_disable_path_from(path, NULL);
423}
424
425int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data)
317{ 426{
318 427
319 int ret = 0; 428 int ret = 0;
@@ -338,9 +447,15 @@ int coresight_enable_path(struct list_head *path, u32 mode)
338 447
339 switch (type) { 448 switch (type) {
340 case CORESIGHT_DEV_TYPE_SINK: 449 case CORESIGHT_DEV_TYPE_SINK:
341 ret = coresight_enable_sink(csdev, mode); 450 ret = coresight_enable_sink(csdev, mode, sink_data);
451 /*
452 * Sink is the first component turned on. If we
453 * failed to enable the sink, there are no components
454 * that need disabling. Disabling the path here
455 * would mean we could disrupt an existing session.
456 */
342 if (ret) 457 if (ret)
343 goto err; 458 goto out;
344 break; 459 break;
345 case CORESIGHT_DEV_TYPE_SOURCE: 460 case CORESIGHT_DEV_TYPE_SOURCE:
346 /* sources are enabled from either sysFS or Perf */ 461 /* sources are enabled from either sysFS or Perf */
@@ -360,7 +475,7 @@ int coresight_enable_path(struct list_head *path, u32 mode)
360out: 475out:
361 return ret; 476 return ret;
362err: 477err:
363 coresight_disable_path(path); 478 coresight_disable_path_from(path, nd);
364 goto out; 479 goto out;
365} 480}
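
The error handling above enables the sink first and, on a later failure, unwinds only the nodes that were actually enabled. The same rule in a generic, self-contained form; struct component, component_on() and component_off() are hypothetical:

	struct component;
	int component_on(struct component *c);
	void component_off(struct component *c);

	/* Enable a chain of components; on failure, roll back only the
	 * ones already enabled, in reverse order. */
	static int enable_chain(struct component **comp, int n)
	{
		int i, ret = 0;

		for (i = 0; i < n; i++) {
			ret = component_on(comp[i]);
			if (ret)
				break;
		}

		/* The component that failed stays off; unwind the rest. */
		while (ret && --i >= 0)
			component_off(comp[i]);

		return ret;
	}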
366 481
@@ -635,7 +750,7 @@ int coresight_enable(struct coresight_device *csdev)
635 goto out; 750 goto out;
636 } 751 }
637 752
638 ret = coresight_enable_path(path, CS_MODE_SYSFS); 753 ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
639 if (ret) 754 if (ret)
640 goto err_path; 755 goto err_path;
641 756
@@ -995,18 +1110,16 @@ postcore_initcall(coresight_init);
995 1110
996struct coresight_device *coresight_register(struct coresight_desc *desc) 1111struct coresight_device *coresight_register(struct coresight_desc *desc)
997{ 1112{
998 int i;
999 int ret; 1113 int ret;
1000 int link_subtype; 1114 int link_subtype;
1001 int nr_refcnts = 1; 1115 int nr_refcnts = 1;
1002 atomic_t *refcnts = NULL; 1116 atomic_t *refcnts = NULL;
1003 struct coresight_device *csdev; 1117 struct coresight_device *csdev;
1004 struct coresight_connection *conns = NULL;
1005 1118
1006 csdev = kzalloc(sizeof(*csdev), GFP_KERNEL); 1119 csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
1007 if (!csdev) { 1120 if (!csdev) {
1008 ret = -ENOMEM; 1121 ret = -ENOMEM;
1009 goto err_kzalloc_csdev; 1122 goto err_out;
1010 } 1123 }
1011 1124
1012 if (desc->type == CORESIGHT_DEV_TYPE_LINK || 1125 if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
@@ -1022,7 +1135,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
1022 refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL); 1135 refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
1023 if (!refcnts) { 1136 if (!refcnts) {
1024 ret = -ENOMEM; 1137 ret = -ENOMEM;
1025 goto err_kzalloc_refcnts; 1138 goto err_free_csdev;
1026 } 1139 }
1027 1140
1028 csdev->refcnt = refcnts; 1141 csdev->refcnt = refcnts;
@@ -1030,22 +1143,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
1030 csdev->nr_inport = desc->pdata->nr_inport; 1143 csdev->nr_inport = desc->pdata->nr_inport;
1031 csdev->nr_outport = desc->pdata->nr_outport; 1144 csdev->nr_outport = desc->pdata->nr_outport;
1032 1145
1033 /* Initialise connections if there is at least one outport */ 1146 csdev->conns = desc->pdata->conns;
1034 if (csdev->nr_outport) {
1035 conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL);
1036 if (!conns) {
1037 ret = -ENOMEM;
1038 goto err_kzalloc_conns;
1039 }
1040
1041 for (i = 0; i < csdev->nr_outport; i++) {
1042 conns[i].outport = desc->pdata->outports[i];
1043 conns[i].child_name = desc->pdata->child_names[i];
1044 conns[i].child_port = desc->pdata->child_ports[i];
1045 }
1046 }
1047
1048 csdev->conns = conns;
1049 1147
1050 csdev->type = desc->type; 1148 csdev->type = desc->type;
1051 csdev->subtype = desc->subtype; 1149 csdev->subtype = desc->subtype;
@@ -1062,7 +1160,11 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
1062 ret = device_register(&csdev->dev); 1160 ret = device_register(&csdev->dev);
1063 if (ret) { 1161 if (ret) {
1064 put_device(&csdev->dev); 1162 put_device(&csdev->dev);
1065 goto err_kzalloc_csdev; 1163 /*
 1164 * All resources are freed explicitly via
1165 * coresight_device_release(), triggered from put_device().
1166 */
1167 goto err_out;
1066 } 1168 }
1067 1169
1068 mutex_lock(&coresight_mutex); 1170 mutex_lock(&coresight_mutex);
@@ -1074,11 +1176,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
1074 1176
1075 return csdev; 1177 return csdev;
1076 1178
1077err_kzalloc_conns: 1179err_free_csdev:
1078 kfree(refcnts);
1079err_kzalloc_refcnts:
1080 kfree(csdev); 1180 kfree(csdev);
1081err_kzalloc_csdev: 1181err_out:
1082 return ERR_PTR(ret); 1182 return ERR_PTR(ret);
1083} 1183}
1084EXPORT_SYMBOL_GPL(coresight_register); 1184EXPORT_SYMBOL_GPL(coresight_register);
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 6880bee195c8..89092f83567e 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -45,8 +45,13 @@ of_coresight_get_endpoint_device(struct device_node *endpoint)
45 endpoint, of_dev_node_match); 45 endpoint, of_dev_node_match);
46} 46}
47 47
48static void of_coresight_get_ports(const struct device_node *node, 48static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
49 int *nr_inport, int *nr_outport) 49{
50 return of_property_read_bool(ep, "slave-mode");
51}
52
53static void of_coresight_get_ports_legacy(const struct device_node *node,
54 int *nr_inport, int *nr_outport)
50{ 55{
51 struct device_node *ep = NULL; 56 struct device_node *ep = NULL;
52 int in = 0, out = 0; 57 int in = 0, out = 0;
@@ -56,7 +61,7 @@ static void of_coresight_get_ports(const struct device_node *node,
56 if (!ep) 61 if (!ep)
57 break; 62 break;
58 63
59 if (of_property_read_bool(ep, "slave-mode")) 64 if (of_coresight_legacy_ep_is_input(ep))
60 in++; 65 in++;
61 else 66 else
62 out++; 67 out++;
@@ -67,32 +72,77 @@ static void of_coresight_get_ports(const struct device_node *node,
67 *nr_outport = out; 72 *nr_outport = out;
68} 73}
69 74
75static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
76{
77 struct device_node *parent = of_graph_get_port_parent(ep);
78
79 /*
 80 * Skip one level up to the real device node if we
81 * are using the new bindings.
82 */
83 if (!of_node_cmp(parent->name, "in-ports") ||
84 !of_node_cmp(parent->name, "out-ports"))
85 parent = of_get_next_parent(parent);
86
87 return parent;
88}
89
90static inline struct device_node *
91of_coresight_get_input_ports_node(const struct device_node *node)
92{
93 return of_get_child_by_name(node, "in-ports");
94}
95
96static inline struct device_node *
97of_coresight_get_output_ports_node(const struct device_node *node)
98{
99 return of_get_child_by_name(node, "out-ports");
100}
101
102static inline int
103of_coresight_count_ports(struct device_node *port_parent)
104{
105 int i = 0;
106 struct device_node *ep = NULL;
107
108 while ((ep = of_graph_get_next_endpoint(port_parent, ep)))
109 i++;
110 return i;
111}
112
113static void of_coresight_get_ports(const struct device_node *node,
114 int *nr_inport, int *nr_outport)
115{
116 struct device_node *input_ports = NULL, *output_ports = NULL;
117
118 input_ports = of_coresight_get_input_ports_node(node);
119 output_ports = of_coresight_get_output_ports_node(node);
120
121 if (input_ports || output_ports) {
122 if (input_ports) {
123 *nr_inport = of_coresight_count_ports(input_ports);
124 of_node_put(input_ports);
125 }
126 if (output_ports) {
127 *nr_outport = of_coresight_count_ports(output_ports);
128 of_node_put(output_ports);
129 }
130 } else {
131 /* Fall back to legacy DT bindings parsing */
132 of_coresight_get_ports_legacy(node, nr_inport, nr_outport);
133 }
134}
135
70static int of_coresight_alloc_memory(struct device *dev, 136static int of_coresight_alloc_memory(struct device *dev,
71 struct coresight_platform_data *pdata) 137 struct coresight_platform_data *pdata)
72{ 138{
73 /* List of output port on this component */ 139 if (pdata->nr_outport) {
74 pdata->outports = devm_kcalloc(dev, 140 pdata->conns = devm_kzalloc(dev, pdata->nr_outport *
75 pdata->nr_outport, 141 sizeof(*pdata->conns),
76 sizeof(*pdata->outports), 142 GFP_KERNEL);
77 GFP_KERNEL); 143 if (!pdata->conns)
78 if (!pdata->outports) 144 return -ENOMEM;
79 return -ENOMEM; 145 }
80
81 /* Children connected to this component via @outports */
82 pdata->child_names = devm_kcalloc(dev,
83 pdata->nr_outport,
84 sizeof(*pdata->child_names),
85 GFP_KERNEL);
86 if (!pdata->child_names)
87 return -ENOMEM;
88
89 /* Port number on the child this component is connected to */
90 pdata->child_ports = devm_kcalloc(dev,
91 pdata->nr_outport,
92 sizeof(*pdata->child_ports),
93 GFP_KERNEL);
94 if (!pdata->child_ports)
95 return -ENOMEM;
96 146
97 return 0; 147 return 0;
98} 148}
@@ -114,17 +164,78 @@ int of_coresight_get_cpu(const struct device_node *node)
114} 164}
115EXPORT_SYMBOL_GPL(of_coresight_get_cpu); 165EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
116 166
167/*
168 * of_coresight_parse_endpoint : Parse the given output endpoint @ep
169 * and fill the connection information in @conn
170 *
171 * Parses the local port, remote device name and the remote port.
172 *
173 * Returns :
174 * 1 - If the parsing is successful and a connection record
175 * was created for an output connection.
176 * 0 - If the parsing completed without any fatal errors.
177 * -Errno - Fatal error, abort the scanning.
178 */
179static int of_coresight_parse_endpoint(struct device *dev,
180 struct device_node *ep,
181 struct coresight_connection *conn)
182{
183 int ret = 0;
184 struct of_endpoint endpoint, rendpoint;
185 struct device_node *rparent = NULL;
186 struct device_node *rep = NULL;
187 struct device *rdev = NULL;
188
189 do {
190 /* Parse the local port details */
191 if (of_graph_parse_endpoint(ep, &endpoint))
192 break;
193 /*
194 * Get a handle on the remote endpoint and the device it is
195 * attached to.
196 */
197 rep = of_graph_get_remote_endpoint(ep);
198 if (!rep)
199 break;
200 rparent = of_coresight_get_port_parent(rep);
201 if (!rparent)
202 break;
203 if (of_graph_parse_endpoint(rep, &rendpoint))
204 break;
205
206 /* If the remote device is not available, defer probing */
207 rdev = of_coresight_get_endpoint_device(rparent);
208 if (!rdev) {
209 ret = -EPROBE_DEFER;
210 break;
211 }
212
213 conn->outport = endpoint.port;
214 conn->child_name = devm_kstrdup(dev,
215 dev_name(rdev),
216 GFP_KERNEL);
217 conn->child_port = rendpoint.port;
218 /* Connection record updated */
219 ret = 1;
220 } while (0);
221
222 of_node_put(rparent);
223 of_node_put(rep);
224 put_device(rdev);
225
226 return ret;
227}
228
117struct coresight_platform_data * 229struct coresight_platform_data *
118of_get_coresight_platform_data(struct device *dev, 230of_get_coresight_platform_data(struct device *dev,
119 const struct device_node *node) 231 const struct device_node *node)
120{ 232{
121 int i = 0, ret = 0; 233 int ret = 0;
122 struct coresight_platform_data *pdata; 234 struct coresight_platform_data *pdata;
123 struct of_endpoint endpoint, rendpoint; 235 struct coresight_connection *conn;
124 struct device *rdev;
125 struct device_node *ep = NULL; 236 struct device_node *ep = NULL;
126 struct device_node *rparent = NULL; 237 const struct device_node *parent = NULL;
127 struct device_node *rport = NULL; 238 bool legacy_binding = false;
128 239
129 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 240 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
130 if (!pdata) 241 if (!pdata)
@@ -132,63 +243,54 @@ of_get_coresight_platform_data(struct device *dev,
132 243
133 /* Use device name as sysfs handle */ 244 /* Use device name as sysfs handle */
134 pdata->name = dev_name(dev); 245 pdata->name = dev_name(dev);
246 pdata->cpu = of_coresight_get_cpu(node);
135 247
136 /* Get the number of input and output port for this component */ 248 /* Get the number of input and output port for this component */
137 of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport); 249 of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
138 250
139 if (pdata->nr_outport) { 251 /* If there are no output connections, we are done */
140 ret = of_coresight_alloc_memory(dev, pdata); 252 if (!pdata->nr_outport)
141 if (ret) 253 return pdata;
142 return ERR_PTR(ret);
143
144 /* Iterate through each port to discover topology */
145 do {
146 /* Get a handle on a port */
147 ep = of_graph_get_next_endpoint(node, ep);
148 if (!ep)
149 break;
150
151 /*
 152 * No need to deal with input ports, as the processing
 153 * for output ports will deal with them.
154 */
155 if (of_find_property(ep, "slave-mode", NULL))
156 continue;
157
158 /* Get a handle on the local endpoint */
159 ret = of_graph_parse_endpoint(ep, &endpoint);
160
161 if (ret)
162 continue;
163
164 /* The local out port number */
165 pdata->outports[i] = endpoint.port;
166
167 /*
168 * Get a handle on the remote port and parent
169 * attached to it.
170 */
171 rparent = of_graph_get_remote_port_parent(ep);
172 rport = of_graph_get_remote_port(ep);
173
174 if (!rparent || !rport)
175 continue;
176 254
177 if (of_graph_parse_endpoint(rport, &rendpoint)) 255 ret = of_coresight_alloc_memory(dev, pdata);
178 continue; 256 if (ret)
257 return ERR_PTR(ret);
179 258
180 rdev = of_coresight_get_endpoint_device(rparent); 259 parent = of_coresight_get_output_ports_node(node);
181 if (!rdev) 260 /*
182 return ERR_PTR(-EPROBE_DEFER); 261 * If the DT uses obsoleted bindings, the ports are listed
183 262 * under the device and we need to filter out the input
184 pdata->child_names[i] = dev_name(rdev); 263 * ports.
185 pdata->child_ports[i] = rendpoint.id; 264 */
186 265 if (!parent) {
187 i++; 266 legacy_binding = true;
188 } while (ep); 267 parent = node;
268 dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
189 } 269 }
190 270
191 pdata->cpu = of_coresight_get_cpu(node); 271 conn = pdata->conns;
272
273 /* Iterate through each output port to discover topology */
274 while ((ep = of_graph_get_next_endpoint(parent, ep))) {
275 /*
276 * Legacy binding mixes input/output ports under the
277 * same parent. So, skip the input ports if we are dealing
 278 * with legacy binding, as they are processed with their
279 * connected output ports.
280 */
281 if (legacy_binding && of_coresight_legacy_ep_is_input(ep))
282 continue;
283
284 ret = of_coresight_parse_endpoint(dev, ep, conn);
285 switch (ret) {
286 case 1:
287 conn++; /* Fall through */
288 case 0:
289 break;
290 default:
291 return ERR_PTR(ret);
292 }
293 }
192 294
193 return pdata; 295 return pdata;
194} 296}
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 723e2d90083d..752dd66742bf 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -11,6 +11,35 @@ config STM
11 11
12if STM 12if STM
13 13
14config STM_PROTO_BASIC
15 tristate "Basic STM framing protocol driver"
16 default CONFIG_STM
17 help
18 This is a simple framing protocol for sending data over STM
19 devices. This was the protocol that the STM framework used
20 exclusively until the MIPI SyS-T support was added. Use this
21 driver for compatibility with your existing STM setup.
22
23 The receiving side only needs to be able to decode the MIPI
24 STP protocol in order to extract the data.
25
26 If you want to be able to use the basic protocol or want the
27 backwards compatibility for your existing setup, say Y.
28
29config STM_PROTO_SYS_T
30 tristate "MIPI SyS-T STM framing protocol driver"
31 default CONFIG_STM
32 help
 33 This is an implementation of the MIPI SyS-T protocol to be used
 34 over the STP transport. In addition to the data payload, it
 35 carries metadata for time correlation, better means of trace
 36 source identification, etc.
37
38 The receiving side must be able to decode this protocol in
39 addition to the MIPI STP, in order to extract the data.
40
41 If you don't know what this is, say N.
42
14config STM_DUMMY 43config STM_DUMMY
15 tristate "Dummy STM driver" 44 tristate "Dummy STM driver"
16 help 45 help
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index effc19e5190f..1692fcd29277 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -3,6 +3,12 @@ obj-$(CONFIG_STM) += stm_core.o
3 3
4stm_core-y := core.o policy.o 4stm_core-y := core.o policy.o
5 5
6obj-$(CONFIG_STM_PROTO_BASIC) += stm_p_basic.o
7obj-$(CONFIG_STM_PROTO_SYS_T) += stm_p_sys-t.o
8
9stm_p_basic-y := p_basic.o
10stm_p_sys-t-y := p_sys-t.o
11
6obj-$(CONFIG_STM_DUMMY) += dummy_stm.o 12obj-$(CONFIG_STM_DUMMY) += dummy_stm.o
7 13
8obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o 14obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 10bcb5d73f90..93ce3aa740a9 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -293,15 +293,15 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
293 if (width > stm->data->sw_nchannels) 293 if (width > stm->data->sw_nchannels)
294 return -EINVAL; 294 return -EINVAL;
295 295
296 if (policy_node) { 296 /* We no longer accept policy_node==NULL here */
297 stp_policy_node_get_ranges(policy_node, 297 if (WARN_ON_ONCE(!policy_node))
298 &midx, &mend, &cidx, &cend); 298 return -EINVAL;
299 } else { 299
300 midx = stm->data->sw_start; 300 /*
 301 cidx = 0; 301 * Also, the caller holds a reference to policy_node, so it won't
302 mend = stm->data->sw_end; 302 * disappear on us.
303 cend = stm->data->sw_nchannels - 1; 303 */
304 } 304 stp_policy_node_get_ranges(policy_node, &midx, &mend, &cidx, &cend);
305 305
306 spin_lock(&stm->mc_lock); 306 spin_lock(&stm->mc_lock);
307 spin_lock(&output->lock); 307 spin_lock(&output->lock);
@@ -316,11 +316,26 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
316 output->master = midx; 316 output->master = midx;
317 output->channel = cidx; 317 output->channel = cidx;
318 output->nr_chans = width; 318 output->nr_chans = width;
319 if (stm->pdrv->output_open) {
320 void *priv = stp_policy_node_priv(policy_node);
321
322 if (WARN_ON_ONCE(!priv))
323 goto unlock;
324
325 /* configfs subsys mutex is held by the caller */
326 ret = stm->pdrv->output_open(priv, output);
327 if (ret)
328 goto unlock;
329 }
330
319 stm_output_claim(stm, output); 331 stm_output_claim(stm, output);
320 dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width); 332 dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width);
321 333
322 ret = 0; 334 ret = 0;
323unlock: 335unlock:
336 if (ret)
337 output->nr_chans = 0;
338
324 spin_unlock(&output->lock); 339 spin_unlock(&output->lock);
325 spin_unlock(&stm->mc_lock); 340 spin_unlock(&stm->mc_lock);
326 341
@@ -333,6 +348,8 @@ static void stm_output_free(struct stm_device *stm, struct stm_output *output)
333 spin_lock(&output->lock); 348 spin_lock(&output->lock);
334 if (output->nr_chans) 349 if (output->nr_chans)
335 stm_output_disclaim(stm, output); 350 stm_output_disclaim(stm, output);
351 if (stm->pdrv && stm->pdrv->output_close)
352 stm->pdrv->output_close(output);
336 spin_unlock(&output->lock); 353 spin_unlock(&output->lock);
337 spin_unlock(&stm->mc_lock); 354 spin_unlock(&stm->mc_lock);
338} 355}
@@ -349,6 +366,127 @@ static int major_match(struct device *dev, const void *data)
349 return MAJOR(dev->devt) == major; 366 return MAJOR(dev->devt) == major;
350} 367}
351 368
369/*
370 * Framing protocol management
371 * Modules can implement STM protocol drivers and (un-)register them
372 * with the STM class framework.
373 */
374static struct list_head stm_pdrv_head;
375static struct mutex stm_pdrv_mutex;
376
377struct stm_pdrv_entry {
378 struct list_head entry;
379 const struct stm_protocol_driver *pdrv;
380 const struct config_item_type *node_type;
381};
382
383static const struct stm_pdrv_entry *
384__stm_lookup_protocol(const char *name)
385{
386 struct stm_pdrv_entry *pe;
387
388 /*
389 * If no name is given (NULL or ""), fall back to "p_basic".
390 */
391 if (!name || !*name)
392 name = "p_basic";
393
394 list_for_each_entry(pe, &stm_pdrv_head, entry) {
395 if (!strcmp(name, pe->pdrv->name))
396 return pe;
397 }
398
399 return NULL;
400}
401
402int stm_register_protocol(const struct stm_protocol_driver *pdrv)
403{
404 struct stm_pdrv_entry *pe = NULL;
405 int ret = -ENOMEM;
406
407 mutex_lock(&stm_pdrv_mutex);
408
409 if (__stm_lookup_protocol(pdrv->name)) {
410 ret = -EEXIST;
411 goto unlock;
412 }
413
414 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
415 if (!pe)
416 goto unlock;
417
418 if (pdrv->policy_attr) {
419 pe->node_type = get_policy_node_type(pdrv->policy_attr);
420 if (!pe->node_type)
421 goto unlock;
422 }
423
424 list_add_tail(&pe->entry, &stm_pdrv_head);
425 pe->pdrv = pdrv;
426
427 ret = 0;
428unlock:
429 mutex_unlock(&stm_pdrv_mutex);
430
431 if (ret)
432 kfree(pe);
433
434 return ret;
435}
436EXPORT_SYMBOL_GPL(stm_register_protocol);
437
438void stm_unregister_protocol(const struct stm_protocol_driver *pdrv)
439{
440 struct stm_pdrv_entry *pe, *iter;
441
442 mutex_lock(&stm_pdrv_mutex);
443
444 list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) {
445 if (pe->pdrv == pdrv) {
446 list_del(&pe->entry);
447
448 if (pe->node_type) {
449 kfree(pe->node_type->ct_attrs);
450 kfree(pe->node_type);
451 }
452 kfree(pe);
453 break;
454 }
455 }
456
457 mutex_unlock(&stm_pdrv_mutex);
458}
459EXPORT_SYMBOL_GPL(stm_unregister_protocol);
460
461static bool stm_get_protocol(const struct stm_protocol_driver *pdrv)
462{
463 return try_module_get(pdrv->owner);
464}
465
466void stm_put_protocol(const struct stm_protocol_driver *pdrv)
467{
468 module_put(pdrv->owner);
469}
470
471int stm_lookup_protocol(const char *name,
472 const struct stm_protocol_driver **pdrv,
473 const struct config_item_type **node_type)
474{
475 const struct stm_pdrv_entry *pe;
476
477 mutex_lock(&stm_pdrv_mutex);
478
479 pe = __stm_lookup_protocol(name);
480 if (pe && pe->pdrv && stm_get_protocol(pe->pdrv)) {
481 *pdrv = pe->pdrv;
482 *node_type = pe->node_type;
483 }
484
485 mutex_unlock(&stm_pdrv_mutex);
486
487 return pe ? 0 : -ENOENT;
488}
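
A successful stm_lookup_protocol() pins the protocol module via try_module_get(), to be dropped later with stm_put_protocol(). A sketch of the expected caller pattern; the attach/detach names are hypothetical, and serialization follows the policy_mutex comment further down:

	static int attach_protocol(struct stm_device *stm, const char *name)
	{
		const struct stm_protocol_driver *pdrv;
		const struct config_item_type *type;
		int err;

		err = stm_lookup_protocol(name, &pdrv, &type);
		if (err)
			return err;	/* -ENOENT: no such protocol */

		stm->pdrv = pdrv;	/* holds a module reference now */
		return 0;
	}

	static void detach_protocol(struct stm_device *stm)
	{
		stm_put_protocol(stm->pdrv);	/* drop the module reference */
		stm->pdrv = NULL;
	}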
489
352static int stm_char_open(struct inode *inode, struct file *file) 490static int stm_char_open(struct inode *inode, struct file *file)
353{ 491{
354 struct stm_file *stmf; 492 struct stm_file *stmf;
@@ -405,42 +543,81 @@ static int stm_char_release(struct inode *inode, struct file *file)
405 return 0; 543 return 0;
406} 544}
407 545
408static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width) 546static int
547stm_assign_first_policy(struct stm_device *stm, struct stm_output *output,
548 char **ids, unsigned int width)
409{ 549{
410 struct stm_device *stm = stmf->stm; 550 struct stp_policy_node *pn;
411 int ret; 551 int err, n;
412 552
413 stmf->policy_node = stp_policy_node_lookup(stm, id); 553 /*
554 * On success, stp_policy_node_lookup() will return holding the
555 * configfs subsystem mutex, which is then released in
556 * stp_policy_node_put(). This allows the pdrv->output_open() in
557 * stm_output_assign() to serialize against the attribute accessors.
558 */
559 for (n = 0, pn = NULL; ids[n] && !pn; n++)
560 pn = stp_policy_node_lookup(stm, ids[n]);
414 561
415 ret = stm_output_assign(stm, width, stmf->policy_node, &stmf->output); 562 if (!pn)
563 return -EINVAL;
416 564
417 if (stmf->policy_node) 565 err = stm_output_assign(stm, width, pn, output);
418 stp_policy_node_put(stmf->policy_node);
419 566
420 return ret; 567 stp_policy_node_put(pn);
568
569 return err;
421} 570}
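
Callers pass a NULL-terminated list of candidate policy node names, tried in order until one resolves. For instance, with illustrative values:

	/* Try a writer-specific policy node first, then fall back to
	 * "default"; the list must be NULL-terminated. */
	char *ids[] = { "mytool", "default", NULL };
	int err = stm_assign_first_policy(stm, &stmf->output, ids, 1);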
422 571
423static ssize_t notrace stm_write(struct stm_data *data, unsigned int master, 572/**
424 unsigned int channel, const char *buf, size_t count) 573 * stm_data_write() - send the given payload as data packets
574 * @data: stm driver's data
575 * @m: STP master
576 * @c: STP channel
577 * @ts_first: timestamp the first packet
578 * @buf: data payload buffer
579 * @count: data payload size
580 */
581ssize_t notrace stm_data_write(struct stm_data *data, unsigned int m,
582 unsigned int c, bool ts_first, const void *buf,
583 size_t count)
425{ 584{
426 unsigned int flags = STP_PACKET_TIMESTAMPED; 585 unsigned int flags = ts_first ? STP_PACKET_TIMESTAMPED : 0;
427 const unsigned char *p = buf, nil = 0;
428 size_t pos;
429 ssize_t sz; 586 ssize_t sz;
587 size_t pos;
430 588
431 for (pos = 0, p = buf; count > pos; pos += sz, p += sz) { 589 for (pos = 0, sz = 0; pos < count; pos += sz) {
432 sz = min_t(unsigned int, count - pos, 8); 590 sz = min_t(unsigned int, count - pos, 8);
433 sz = data->packet(data, master, channel, STP_PACKET_DATA, flags, 591 sz = data->packet(data, m, c, STP_PACKET_DATA, flags, sz,
434 sz, p); 592 &((u8 *)buf)[pos]);
435 flags = 0; 593 if (sz <= 0)
436
437 if (sz < 0)
438 break; 594 break;
595
596 if (ts_first) {
597 flags = 0;
598 ts_first = false;
599 }
439 } 600 }
440 601
441 data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); 602 return sz < 0 ? sz : pos;
603}
604EXPORT_SYMBOL_GPL(stm_data_write);
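
stm_data_write() emits the payload as STP data packets of at most 8 bytes each, timestamping only the first one when @ts_first is set; a 20-byte write, for example, becomes packets of 8, 8 and 4 bytes. A minimal model of the chunking, with emit() as a hypothetical sink:

	#include <stdbool.h>
	#include <stddef.h>

	void emit(size_t sz, bool timestamped);	/* hypothetical sink */

	/* At most 8 bytes per STP data packet; only the first packet
	 * may carry a timestamp. */
	static void model_data_write(size_t count, bool ts_first)
	{
		size_t pos, sz;

		for (pos = 0; pos < count; pos += sz) {
			sz = count - pos < 8 ? count - pos : 8;
			emit(sz, ts_first && pos == 0);
		}
	}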
605
606static ssize_t notrace
607stm_write(struct stm_device *stm, struct stm_output *output,
608 unsigned int chan, const char *buf, size_t count)
609{
610 int err;
611
612 /* stm->pdrv is serialized against policy_mutex */
613 if (!stm->pdrv)
614 return -ENODEV;
615
616 err = stm->pdrv->write(stm->data, output, chan, buf, count);
617 if (err < 0)
618 return err;
442 619
443 return pos; 620 return err;
444} 621}
445 622
446static ssize_t stm_char_write(struct file *file, const char __user *buf, 623static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -455,16 +632,21 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
455 count = PAGE_SIZE - 1; 632 count = PAGE_SIZE - 1;
456 633
457 /* 634 /*
458 * if no m/c have been assigned to this writer up to this 635 * If no m/c have been assigned to this writer up to this
459 * point, use "default" policy entry 636 * point, try to use the task name and "default" policy entries.
460 */ 637 */
461 if (!stmf->output.nr_chans) { 638 if (!stmf->output.nr_chans) {
462 err = stm_file_assign(stmf, "default", 1); 639 char comm[sizeof(current->comm)];
640 char *ids[] = { comm, "default", NULL };
641
642 get_task_comm(comm, current);
643
644 err = stm_assign_first_policy(stmf->stm, &stmf->output, ids, 1);
463 /* 645 /*
464 * EBUSY means that somebody else just assigned this 646 * EBUSY means that somebody else just assigned this
465 * output, which is just fine for write() 647 * output, which is just fine for write()
466 */ 648 */
467 if (err && err != -EBUSY) 649 if (err)
468 return err; 650 return err;
469 } 651 }
470 652
@@ -480,8 +662,7 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
480 662
481 pm_runtime_get_sync(&stm->dev); 663 pm_runtime_get_sync(&stm->dev);
482 664
483 count = stm_write(stm->data, stmf->output.master, stmf->output.channel, 665 count = stm_write(stm, &stmf->output, 0, kbuf, count);
484 kbuf, count);
485 666
486 pm_runtime_mark_last_busy(&stm->dev); 667 pm_runtime_mark_last_busy(&stm->dev);
487 pm_runtime_put_autosuspend(&stm->dev); 668 pm_runtime_put_autosuspend(&stm->dev);
@@ -550,6 +731,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
550{ 731{
551 struct stm_device *stm = stmf->stm; 732 struct stm_device *stm = stmf->stm;
552 struct stp_policy_id *id; 733 struct stp_policy_id *id;
734 char *ids[] = { NULL, NULL };
553 int ret = -EINVAL; 735 int ret = -EINVAL;
554 u32 size; 736 u32 size;
555 737
@@ -582,7 +764,9 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
582 id->width > PAGE_SIZE / stm->data->sw_mmiosz) 764 id->width > PAGE_SIZE / stm->data->sw_mmiosz)
583 goto err_free; 765 goto err_free;
584 766
585 ret = stm_file_assign(stmf, id->id, id->width); 767 ids[0] = id->id;
768 ret = stm_assign_first_policy(stmf->stm, &stmf->output, ids,
769 id->width);
586 if (ret) 770 if (ret)
587 goto err_free; 771 goto err_free;
588 772
@@ -818,8 +1002,8 @@ EXPORT_SYMBOL_GPL(stm_unregister_device);
818static int stm_source_link_add(struct stm_source_device *src, 1002static int stm_source_link_add(struct stm_source_device *src,
819 struct stm_device *stm) 1003 struct stm_device *stm)
820{ 1004{
821 char *id; 1005 char *ids[] = { NULL, "default", NULL };
822 int err; 1006 int err = -ENOMEM;
823 1007
824 mutex_lock(&stm->link_mutex); 1008 mutex_lock(&stm->link_mutex);
825 spin_lock(&stm->link_lock); 1009 spin_lock(&stm->link_lock);
@@ -833,19 +1017,13 @@ static int stm_source_link_add(struct stm_source_device *src,
833 spin_unlock(&stm->link_lock); 1017 spin_unlock(&stm->link_lock);
834 mutex_unlock(&stm->link_mutex); 1018 mutex_unlock(&stm->link_mutex);
835 1019
836 id = kstrdup(src->data->name, GFP_KERNEL); 1020 ids[0] = kstrdup(src->data->name, GFP_KERNEL);
837 if (id) { 1021 if (!ids[0])
838 src->policy_node = 1022 goto fail_detach;
839 stp_policy_node_lookup(stm, id);
840
841 kfree(id);
842 }
843
844 err = stm_output_assign(stm, src->data->nr_chans,
845 src->policy_node, &src->output);
846 1023
847 if (src->policy_node) 1024 err = stm_assign_first_policy(stm, &src->output, ids,
848 stp_policy_node_put(src->policy_node); 1025 src->data->nr_chans);
1026 kfree(ids[0]);
849 1027
850 if (err) 1028 if (err)
851 goto fail_detach; 1029 goto fail_detach;
@@ -1134,9 +1312,7 @@ int notrace stm_source_write(struct stm_source_data *data,
1134 1312
1135 stm = srcu_dereference(src->link, &stm_source_srcu); 1313 stm = srcu_dereference(src->link, &stm_source_srcu);
1136 if (stm) 1314 if (stm)
1137 count = stm_write(stm->data, src->output.master, 1315 count = stm_write(stm, &src->output, chan, buf, count);
1138 src->output.channel + chan,
1139 buf, count);
1140 else 1316 else
1141 count = -ENODEV; 1317 count = -ENODEV;
1142 1318
@@ -1163,7 +1339,15 @@ static int __init stm_core_init(void)
1163 goto err_src; 1339 goto err_src;
1164 1340
1165 init_srcu_struct(&stm_source_srcu); 1341 init_srcu_struct(&stm_source_srcu);
1342 INIT_LIST_HEAD(&stm_pdrv_head);
1343 mutex_init(&stm_pdrv_mutex);
1166 1344
1345 /*
 1346 * To avoid confusing existing users with a requirement
 1347 * to load yet another module, load the basic protocol here.
1348 */
1349 if (IS_ENABLED(CONFIG_STM_PROTO_BASIC))
1350 (void)request_module_nowait("stm_p_basic");
1167 stm_core_up++; 1351 stm_core_up++;
1168 1352
1169 return 0; 1353 return 0;
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 7db42395e131..3e7df1c0477f 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -76,7 +76,7 @@ static int stm_heartbeat_init(void)
76 goto fail_unregister; 76 goto fail_unregister;
77 77
78 stm_heartbeat[i].data.nr_chans = 1; 78 stm_heartbeat[i].data.nr_chans = 1;
79 stm_heartbeat[i].data.link = stm_heartbeat_link; 79 stm_heartbeat[i].data.link = stm_heartbeat_link;
80 stm_heartbeat[i].data.unlink = stm_heartbeat_unlink; 80 stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
81 hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC, 81 hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
82 HRTIMER_MODE_ABS); 82 HRTIMER_MODE_ABS);
diff --git a/drivers/hwtracing/stm/p_basic.c b/drivers/hwtracing/stm/p_basic.c
new file mode 100644
index 000000000000..8980a6a5fd6c
--- /dev/null
+++ b/drivers/hwtracing/stm/p_basic.c
@@ -0,0 +1,48 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Basic framing protocol for STM devices.
4 * Copyright (c) 2018, Intel Corporation.
5 */
6
7#include <linux/module.h>
8#include <linux/device.h>
9#include <linux/stm.h>
10#include "stm.h"
11
12static ssize_t basic_write(struct stm_data *data, struct stm_output *output,
13 unsigned int chan, const char *buf, size_t count)
14{
15 unsigned int c = output->channel + chan;
16 unsigned int m = output->master;
17 const unsigned char nil = 0;
18 ssize_t sz;
19
20 sz = stm_data_write(data, m, c, true, buf, count);
21 if (sz > 0)
22 data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
23
24 return sz;
25}
26
27static const struct stm_protocol_driver basic_pdrv = {
28 .owner = THIS_MODULE,
29 .name = "p_basic",
30 .write = basic_write,
31};
32
33static int basic_stm_init(void)
34{
35 return stm_register_protocol(&basic_pdrv);
36}
37
38static void basic_stm_exit(void)
39{
40 stm_unregister_protocol(&basic_pdrv);
41}
42
43module_init(basic_stm_init);
44module_exit(basic_stm_exit);
45
46MODULE_LICENSE("GPL v2");
47MODULE_DESCRIPTION("Basic STM framing protocol driver");
48MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/stm/p_sys-t.c b/drivers/hwtracing/stm/p_sys-t.c
new file mode 100644
index 000000000000..b178a5495b67
--- /dev/null
+++ b/drivers/hwtracing/stm/p_sys-t.c
@@ -0,0 +1,382 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * MIPI SyS-T framing protocol for STM devices.
4 * Copyright (c) 2018, Intel Corporation.
5 */
6
7#include <linux/configfs.h>
8#include <linux/module.h>
9#include <linux/device.h>
10#include <linux/slab.h>
11#include <linux/uuid.h>
12#include <linux/stm.h>
13#include "stm.h"
14
15enum sys_t_message_type {
16 MIPI_SYST_TYPE_BUILD = 0,
17 MIPI_SYST_TYPE_SHORT32,
18 MIPI_SYST_TYPE_STRING,
19 MIPI_SYST_TYPE_CATALOG,
20 MIPI_SYST_TYPE_RAW = 6,
21 MIPI_SYST_TYPE_SHORT64,
22 MIPI_SYST_TYPE_CLOCK,
23};
24
25enum sys_t_message_severity {
26 MIPI_SYST_SEVERITY_MAX = 0,
27 MIPI_SYST_SEVERITY_FATAL,
28 MIPI_SYST_SEVERITY_ERROR,
29 MIPI_SYST_SEVERITY_WARNING,
30 MIPI_SYST_SEVERITY_INFO,
31 MIPI_SYST_SEVERITY_USER1,
32 MIPI_SYST_SEVERITY_USER2,
33 MIPI_SYST_SEVERITY_DEBUG,
34};
35
36enum sys_t_message_build_subtype {
37 MIPI_SYST_BUILD_ID_COMPACT32 = 0,
38 MIPI_SYST_BUILD_ID_COMPACT64,
39 MIPI_SYST_BUILD_ID_LONG,
40};
41
42enum sys_t_message_clock_subtype {
43 MIPI_SYST_CLOCK_TRANSPORT_SYNC = 1,
44};
45
46enum sys_t_message_string_subtype {
47 MIPI_SYST_STRING_GENERIC = 1,
48 MIPI_SYST_STRING_FUNCTIONENTER,
49 MIPI_SYST_STRING_FUNCTIONEXIT,
50 MIPI_SYST_STRING_INVALIDPARAM = 5,
51 MIPI_SYST_STRING_ASSERT = 7,
52 MIPI_SYST_STRING_PRINTF_32 = 11,
53 MIPI_SYST_STRING_PRINTF_64 = 12,
54};
55
56#define MIPI_SYST_TYPE(t) ((u32)(MIPI_SYST_TYPE_ ## t))
57#define MIPI_SYST_SEVERITY(s) ((u32)(MIPI_SYST_SEVERITY_ ## s) << 4)
58#define MIPI_SYST_OPT_LOC BIT(8)
59#define MIPI_SYST_OPT_LEN BIT(9)
60#define MIPI_SYST_OPT_CHK BIT(10)
61#define MIPI_SYST_OPT_TS BIT(11)
62#define MIPI_SYST_UNIT(u) ((u32)(u) << 12)
63#define MIPI_SYST_ORIGIN(o) ((u32)(o) << 16)
64#define MIPI_SYST_OPT_GUID BIT(23)
65#define MIPI_SYST_SUBTYPE(s) ((u32)(MIPI_SYST_ ## s) << 24)
66#define MIPI_SYST_UNITLARGE(u) (MIPI_SYST_UNIT(u & 0xf) | \
67 MIPI_SYST_ORIGIN(u >> 4))
68#define MIPI_SYST_TYPES(t, s) (MIPI_SYST_TYPE(t) | \
69 MIPI_SYST_SUBTYPE(t ## _ ## s))
70
71#define DATA_HEADER (MIPI_SYST_TYPES(STRING, GENERIC) | \
72 MIPI_SYST_SEVERITY(INFO) | \
73 MIPI_SYST_OPT_GUID)
74
75#define CLOCK_SYNC_HEADER (MIPI_SYST_TYPES(CLOCK, TRANSPORT_SYNC) | \
76 MIPI_SYST_SEVERITY(MAX))
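
As a worked example of how these macros compose a 32-bit SyS-T header (an editorial aside, not part of the patch): DATA_HEADER expands to type STRING (2) in bits 3:0, severity INFO (4 << 4 = 0x40), OPT_GUID (bit 23) and subtype GENERIC (1 << 24), i.e. 0x01800042 in total. A C11 compile-time check of that layout would be:

	_Static_assert(DATA_HEADER == 0x01800042,
		       "STRING/GENERIC + INFO + OPT_GUID header layout");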
77
78struct sys_t_policy_node {
79 uuid_t uuid;
80 bool do_len;
81 unsigned long ts_interval;
82 unsigned long clocksync_interval;
83};
84
85struct sys_t_output {
86 struct sys_t_policy_node node;
87 unsigned long ts_jiffies;
88 unsigned long clocksync_jiffies;
89};
90
91static void sys_t_policy_node_init(void *priv)
92{
93 struct sys_t_policy_node *pn = priv;
94
95 generate_random_uuid(pn->uuid.b);
96}
97
98static int sys_t_output_open(void *priv, struct stm_output *output)
99{
100 struct sys_t_policy_node *pn = priv;
101 struct sys_t_output *opriv;
102
103 opriv = kzalloc(sizeof(*opriv), GFP_ATOMIC);
104 if (!opriv)
105 return -ENOMEM;
106
107 memcpy(&opriv->node, pn, sizeof(opriv->node));
108 output->pdrv_private = opriv;
109
110 return 0;
111}
112
113static void sys_t_output_close(struct stm_output *output)
114{
115 kfree(output->pdrv_private);
116}
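
Worth noting: sys_t_output_open() allocates with GFP_ATOMIC because, per the stm_output_assign() hunk earlier in this patch, pdrv->output_open() is invoked with the stm->mc_lock and output->lock spinlocks held, so the allocation must not sleep.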
117
118static ssize_t sys_t_policy_uuid_show(struct config_item *item,
119 char *page)
120{
121 struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
122
123 return sprintf(page, "%pU\n", &pn->uuid);
124}
125
126static ssize_t
127sys_t_policy_uuid_store(struct config_item *item, const char *page,
128 size_t count)
129{
130 struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
131 struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
132 int ret;
133
134 mutex_lock(mutexp);
135 ret = uuid_parse(page, &pn->uuid);
136 mutex_unlock(mutexp);
137
138 return ret < 0 ? ret : count;
139}
140
141CONFIGFS_ATTR(sys_t_policy_, uuid);
142
143static ssize_t sys_t_policy_do_len_show(struct config_item *item,
144 char *page)
145{
146 struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
147
148 return sprintf(page, "%d\n", pn->do_len);
149}
150
151static ssize_t
152sys_t_policy_do_len_store(struct config_item *item, const char *page,
153 size_t count)
154{
155 struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
156 struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
157 int ret;
158
159 mutex_lock(mutexp);
160 ret = kstrtobool(page, &pn->do_len);
161 mutex_unlock(mutexp);
162
163 return ret ? ret : count;
164}
165
166CONFIGFS_ATTR(sys_t_policy_, do_len);
167
168static ssize_t sys_t_policy_ts_interval_show(struct config_item *item,
169 char *page)
170{
171 struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
172
173 return sprintf(page, "%u\n", jiffies_to_msecs(pn->ts_interval));
174}
175
176static ssize_t
177sys_t_policy_ts_interval_store(struct config_item *item, const char *page,
178 size_t count)
179{
180 struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
181 struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
182 unsigned int ms;
183 int ret;
184
185 mutex_lock(mutexp);
186 ret = kstrtouint(page, 10, &ms);
187 mutex_unlock(mutexp);
188
189 if (!ret) {
190 pn->ts_interval = msecs_to_jiffies(ms);
191 return count;
192 }
193
194 return ret;
195}
196
197CONFIGFS_ATTR(sys_t_policy_, ts_interval);
198
199static ssize_t sys_t_policy_clocksync_interval_show(struct config_item *item,
200 char *page)
201{
202 struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
203
204 return sprintf(page, "%u\n", jiffies_to_msecs(pn->clocksync_interval));
205}
206
207static ssize_t
208sys_t_policy_clocksync_interval_store(struct config_item *item,
209 const char *page, size_t count)
210{
211 struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
212 struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
213 unsigned int ms;
214 int ret;
215
216 mutex_lock(mutexp);
217 ret = kstrtouint(page, 10, &ms);
218 mutex_unlock(mutexp);
219
220 if (!ret) {
221 pn->clocksync_interval = msecs_to_jiffies(ms);
222 return count;
223 }
224
225 return ret;
226}
227
228CONFIGFS_ATTR(sys_t_policy_, clocksync_interval);
229
230static struct configfs_attribute *sys_t_policy_attrs[] = {
231 &sys_t_policy_attr_uuid,
232 &sys_t_policy_attr_do_len,
233 &sys_t_policy_attr_ts_interval,
234 &sys_t_policy_attr_clocksync_interval,
235 NULL,
236};
237
238static inline bool sys_t_need_ts(struct sys_t_output *op)
239{
240 if (op->node.ts_interval &&
241 time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) {
242 op->ts_jiffies = jiffies;
243
244 return true;
245 }
246
247 return false;
248}
249
250static bool sys_t_need_clock_sync(struct sys_t_output *op)
251{
252 if (op->node.clocksync_interval &&
253 time_after(jiffies,
254 op->clocksync_jiffies + op->node.clocksync_interval)) {
255 op->clocksync_jiffies = jiffies;
256
257 return true;
258 }
259
260 return false;
261}
262
263static ssize_t
264sys_t_clock_sync(struct stm_data *data, unsigned int m, unsigned int c)
265{
266 u32 header = CLOCK_SYNC_HEADER;
267 const unsigned char nil = 0;
268 u64 payload[2]; /* Clock value and frequency */
269 ssize_t sz;
270
271 sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED,
272 4, (u8 *)&header);
273 if (sz <= 0)
274 return sz;
275
276 payload[0] = ktime_get_real_ns();
277 payload[1] = NSEC_PER_SEC;
278 sz = stm_data_write(data, m, c, false, &payload, sizeof(payload));
279 if (sz <= 0)
280 return sz;
281
282 data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
283
284 return sizeof(header) + sizeof(payload);
285}
286
287static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
288 unsigned int chan, const char *buf, size_t count)
289{
290 struct sys_t_output *op = output->pdrv_private;
291 unsigned int c = output->channel + chan;
292 unsigned int m = output->master;
293 const unsigned char nil = 0;
294 u32 header = DATA_HEADER;
295 ssize_t sz;
296
297 /* We require an existing policy node to proceed */
298 if (!op)
299 return -EINVAL;
300
301 if (sys_t_need_clock_sync(op)) {
302 sz = sys_t_clock_sync(data, m, c);
303 if (sz <= 0)
304 return sz;
305 }
306
307 if (op->node.do_len)
308 header |= MIPI_SYST_OPT_LEN;
309 if (sys_t_need_ts(op))
310 header |= MIPI_SYST_OPT_TS;
311
312 /*
313 * STP framing rules for SyS-T frames:
314 * * the first packet of the SyS-T frame is timestamped;
315 * * the last packet is a FLAG.
316 */
317 /* Message layout: HEADER / GUID / [LENGTH /][TIMESTAMP /] DATA */
318 /* HEADER */
319 sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED,
320 4, (u8 *)&header);
321 if (sz <= 0)
322 return sz;
323
324 /* GUID */
325 sz = stm_data_write(data, m, c, false, op->node.uuid.b, UUID_SIZE);
326 if (sz <= 0)
327 return sz;
328
329 /* [LENGTH] */
330 if (op->node.do_len) {
331 u16 length = count;
332
333 sz = data->packet(data, m, c, STP_PACKET_DATA, 0, 2,
334 (u8 *)&length);
335 if (sz <= 0)
336 return sz;
337 }
338
339 /* [TIMESTAMP] */
340 if (header & MIPI_SYST_OPT_TS) {
341 u64 ts = ktime_get_real_ns();
342
343 sz = stm_data_write(data, m, c, false, &ts, sizeof(ts));
344 if (sz <= 0)
345 return sz;
346 }
347
348 /* DATA */
349 sz = stm_data_write(data, m, c, false, buf, count);
350 if (sz > 0)
351 data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
352
353 return sz;
354}
355
356static const struct stm_protocol_driver sys_t_pdrv = {
357 .owner = THIS_MODULE,
358 .name = "p_sys-t",
359 .priv_sz = sizeof(struct sys_t_policy_node),
360 .write = sys_t_write,
361 .policy_attr = sys_t_policy_attrs,
362 .policy_node_init = sys_t_policy_node_init,
363 .output_open = sys_t_output_open,
364 .output_close = sys_t_output_close,
365};
366
367static int sys_t_stm_init(void)
368{
369 return stm_register_protocol(&sys_t_pdrv);
370}
371
372static void sys_t_stm_exit(void)
373{
374 stm_unregister_protocol(&sys_t_pdrv);
375}
376
377module_init(sys_t_stm_init);
378module_exit(sys_t_stm_exit);
379
380MODULE_LICENSE("GPL v2");
381MODULE_DESCRIPTION("MIPI SyS-T STM framing protocol driver");
382MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 3fd07e275b34..0910ec807187 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -33,8 +33,18 @@ struct stp_policy_node {
33 unsigned int last_master; 33 unsigned int last_master;
34 unsigned int first_channel; 34 unsigned int first_channel;
35 unsigned int last_channel; 35 unsigned int last_channel;
36 /* this is the one that's exposed to the attributes */
37 unsigned char priv[0];
36}; 38};
37 39
40void *stp_policy_node_priv(struct stp_policy_node *pn)
41{
42 if (!pn)
43 return NULL;
44
45 return pn->priv;
46}
47
38static struct configfs_subsystem stp_policy_subsys; 48static struct configfs_subsystem stp_policy_subsys;
39 49
40void stp_policy_node_get_ranges(struct stp_policy_node *policy_node, 50void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
@@ -68,6 +78,14 @@ to_stp_policy_node(struct config_item *item)
68 NULL; 78 NULL;
69} 79}
70 80
81void *to_pdrv_policy_node(struct config_item *item)
82{
83 struct stp_policy_node *node = to_stp_policy_node(item);
84
85 return stp_policy_node_priv(node);
86}
87EXPORT_SYMBOL_GPL(to_pdrv_policy_node);
88
71static ssize_t 89static ssize_t
72stp_policy_node_masters_show(struct config_item *item, char *page) 90stp_policy_node_masters_show(struct config_item *item, char *page)
73{ 91{
@@ -163,7 +181,9 @@ unlock:
163 181
164static void stp_policy_node_release(struct config_item *item) 182static void stp_policy_node_release(struct config_item *item)
165{ 183{
166 kfree(to_stp_policy_node(item)); 184 struct stp_policy_node *node = to_stp_policy_node(item);
185
186 kfree(node);
167} 187}
168 188
169static struct configfs_item_operations stp_policy_node_item_ops = { 189static struct configfs_item_operations stp_policy_node_item_ops = {
@@ -182,10 +202,34 @@ static struct configfs_attribute *stp_policy_node_attrs[] = {
182static const struct config_item_type stp_policy_type; 202static const struct config_item_type stp_policy_type;
183static const struct config_item_type stp_policy_node_type; 203static const struct config_item_type stp_policy_node_type;
184 204
205const struct config_item_type *
206get_policy_node_type(struct configfs_attribute **attrs)
207{
208 struct config_item_type *type;
209 struct configfs_attribute **merged;
210
211 type = kmemdup(&stp_policy_node_type, sizeof(stp_policy_node_type),
212 GFP_KERNEL);
213 if (!type)
214 return NULL;
215
216 merged = memcat_p(stp_policy_node_attrs, attrs);
217 if (!merged) {
218 kfree(type);
219 return NULL;
220 }
221
222 type->ct_attrs = merged;
223
224 return type;
225}
226
185static struct config_group * 227static struct config_group *
186stp_policy_node_make(struct config_group *group, const char *name) 228stp_policy_node_make(struct config_group *group, const char *name)
187{ 229{
230 const struct config_item_type *type = &stp_policy_node_type;
188 struct stp_policy_node *policy_node, *parent_node; 231 struct stp_policy_node *policy_node, *parent_node;
232 const struct stm_protocol_driver *pdrv;
189 struct stp_policy *policy; 233 struct stp_policy *policy;
190 234
191 if (group->cg_item.ci_type == &stp_policy_type) { 235 if (group->cg_item.ci_type == &stp_policy_type) {
@@ -199,12 +243,20 @@ stp_policy_node_make(struct config_group *group, const char *name)
199 if (!policy->stm) 243 if (!policy->stm)
200 return ERR_PTR(-ENODEV); 244 return ERR_PTR(-ENODEV);
201 245
202 policy_node = kzalloc(sizeof(struct stp_policy_node), GFP_KERNEL); 246 pdrv = policy->stm->pdrv;
247 policy_node =
248 kzalloc(offsetof(struct stp_policy_node, priv[pdrv->priv_sz]),
249 GFP_KERNEL);
203 if (!policy_node) 250 if (!policy_node)
204 return ERR_PTR(-ENOMEM); 251 return ERR_PTR(-ENOMEM);
205 252
206 config_group_init_type_name(&policy_node->group, name, 253 if (pdrv->policy_node_init)
207 &stp_policy_node_type); 254 pdrv->policy_node_init((void *)policy_node->priv);
255
256 if (policy->stm->pdrv_node_type)
257 type = policy->stm->pdrv_node_type;
258
259 config_group_init_type_name(&policy_node->group, name, type);
208 260
209 policy_node->policy = policy; 261 policy_node->policy = policy;
210 262
@@ -254,8 +306,25 @@ static ssize_t stp_policy_device_show(struct config_item *item,
254 306
255CONFIGFS_ATTR_RO(stp_policy_, device); 307CONFIGFS_ATTR_RO(stp_policy_, device);
256 308
309static ssize_t stp_policy_protocol_show(struct config_item *item,
310 char *page)
311{
312 struct stp_policy *policy = to_stp_policy(item);
313 ssize_t count;
314
315 count = sprintf(page, "%s\n",
316 (policy && policy->stm) ?
317 policy->stm->pdrv->name :
318 "<none>");
319
320 return count;
321}
322
323CONFIGFS_ATTR_RO(stp_policy_, protocol);
324
257static struct configfs_attribute *stp_policy_attrs[] = { 325static struct configfs_attribute *stp_policy_attrs[] = {
258 &stp_policy_attr_device, 326 &stp_policy_attr_device,
327 &stp_policy_attr_protocol,
259 NULL, 328 NULL,
260}; 329};
261 330
@@ -276,6 +345,7 @@ void stp_policy_unbind(struct stp_policy *policy)
276 stm->policy = NULL; 345 stm->policy = NULL;
277 policy->stm = NULL; 346 policy->stm = NULL;
278 347
348 stm_put_protocol(stm->pdrv);
279 stm_put_device(stm); 349 stm_put_device(stm);
280} 350}
281 351
@@ -311,11 +381,14 @@ static const struct config_item_type stp_policy_type = {
311}; 381};
312 382
313static struct config_group * 383static struct config_group *
314stp_policies_make(struct config_group *group, const char *name) 384stp_policy_make(struct config_group *group, const char *name)
315{ 385{
386 const struct config_item_type *pdrv_node_type;
387 const struct stm_protocol_driver *pdrv;
388 char *devname, *proto, *p;
316 struct config_group *ret; 389 struct config_group *ret;
317 struct stm_device *stm; 390 struct stm_device *stm;
318 char *devname, *p; 391 int err;
319 392
320 devname = kasprintf(GFP_KERNEL, "%s", name); 393 devname = kasprintf(GFP_KERNEL, "%s", name);
321 if (!devname) 394 if (!devname)
@@ -326,6 +399,7 @@ stp_policies_make(struct config_group *group, const char *name)
326 * <device_name> is the name of an existing stm device; may 399 * <device_name> is the name of an existing stm device; may
327 * contain dots; 400 * contain dots;
328 * <policy_name> is an arbitrary string; may not contain dots 401 * <policy_name> is an arbitrary string; may not contain dots
402 * <device_name>:<protocol_name>.<policy_name>
329 */ 403 */
330 p = strrchr(devname, '.'); 404 p = strrchr(devname, '.');
331 if (!p) { 405 if (!p) {
@@ -335,11 +409,28 @@ stp_policies_make(struct config_group *group, const char *name)
335 409
336 *p = '\0'; 410 *p = '\0';
337 411
412 /*
413 * look for ":<protocol_name>":
414 * + no protocol suffix: fall back to whatever is available;
415 * + unknown protocol: fail the whole thing
416 */
417 proto = strrchr(devname, ':');
418 if (proto)
419 *proto++ = '\0';
420
338 stm = stm_find_device(devname); 421 stm = stm_find_device(devname);
422 if (!stm) {
423 kfree(devname);
424 return ERR_PTR(-ENODEV);
425 }
426
427 err = stm_lookup_protocol(proto, &pdrv, &pdrv_node_type);
339 kfree(devname); 428 kfree(devname);
340 429
341 if (!stm) 430 if (err) {
431 stm_put_device(stm);
342 return ERR_PTR(-ENODEV); 432 return ERR_PTR(-ENODEV);
433 }
343 434
344 mutex_lock(&stm->policy_mutex); 435 mutex_lock(&stm->policy_mutex);
345 if (stm->policy) { 436 if (stm->policy) {
@@ -349,31 +440,37 @@ stp_policies_make(struct config_group *group, const char *name)
349 440
350 stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL); 441 stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL);
351 if (!stm->policy) { 442 if (!stm->policy) {
352 ret = ERR_PTR(-ENOMEM); 443 mutex_unlock(&stm->policy_mutex);
353 goto unlock_policy; 444 stm_put_protocol(pdrv);
445 stm_put_device(stm);
446 return ERR_PTR(-ENOMEM);
354 } 447 }
355 448
356 config_group_init_type_name(&stm->policy->group, name, 449 config_group_init_type_name(&stm->policy->group, name,
357 &stp_policy_type); 450 &stp_policy_type);
358 stm->policy->stm = stm;
359 451
452 stm->pdrv = pdrv;
453 stm->pdrv_node_type = pdrv_node_type;
454 stm->policy->stm = stm;
360 ret = &stm->policy->group; 455 ret = &stm->policy->group;
361 456
362unlock_policy: 457unlock_policy:
363 mutex_unlock(&stm->policy_mutex); 458 mutex_unlock(&stm->policy_mutex);
364 459
365 if (IS_ERR(ret)) 460 if (IS_ERR(ret)) {
461 stm_put_protocol(stm->pdrv);
366 stm_put_device(stm); 462 stm_put_device(stm);
463 }
367 464
368 return ret; 465 return ret;
369} 466}
370 467
371static struct configfs_group_operations stp_policies_group_ops = { 468static struct configfs_group_operations stp_policy_root_group_ops = {
372 .make_group = stp_policies_make, 469 .make_group = stp_policy_make,
373}; 470};
374 471
375static const struct config_item_type stp_policies_type = { 472static const struct config_item_type stp_policy_root_type = {
376 .ct_group_ops = &stp_policies_group_ops, 473 .ct_group_ops = &stp_policy_root_group_ops,
377 .ct_owner = THIS_MODULE, 474 .ct_owner = THIS_MODULE,
378}; 475};
379 476
@@ -381,7 +478,7 @@ static struct configfs_subsystem stp_policy_subsys = {
381 .su_group = { 478 .su_group = {
382 .cg_item = { 479 .cg_item = {
383 .ci_namebuf = "stp-policy", 480 .ci_namebuf = "stp-policy",
384 .ci_type = &stp_policies_type, 481 .ci_type = &stp_policy_root_type,
385 }, 482 },
386 }, 483 },
387}; 484};
@@ -392,7 +489,7 @@ static struct configfs_subsystem stp_policy_subsys = {
392static struct stp_policy_node * 489static struct stp_policy_node *
393__stp_policy_node_lookup(struct stp_policy *policy, char *s) 490__stp_policy_node_lookup(struct stp_policy *policy, char *s)
394{ 491{
395 struct stp_policy_node *policy_node, *ret; 492 struct stp_policy_node *policy_node, *ret = NULL;
396 struct list_head *head = &policy->group.cg_children; 493 struct list_head *head = &policy->group.cg_children;
397 struct config_item *item; 494 struct config_item *item;
398 char *start, *end = s; 495 char *start, *end = s;
@@ -400,10 +497,6 @@ __stp_policy_node_lookup(struct stp_policy *policy, char *s)
400 if (list_empty(head)) 497 if (list_empty(head))
401 return NULL; 498 return NULL;
402 499
403 /* return the first entry if everything else fails */
404 item = list_entry(head->next, struct config_item, ci_entry);
405 ret = to_stp_policy_node(item);
406
407next: 500next:
408 for (;;) { 501 for (;;) {
409 start = strsep(&end, "/"); 502 start = strsep(&end, "/");
@@ -449,25 +542,25 @@ stp_policy_node_lookup(struct stm_device *stm, char *s)
449 542
450 if (policy_node) 543 if (policy_node)
451 config_item_get(&policy_node->group.cg_item); 544 config_item_get(&policy_node->group.cg_item);
452 mutex_unlock(&stp_policy_subsys.su_mutex); 545 else
546 mutex_unlock(&stp_policy_subsys.su_mutex);
453 547
454 return policy_node; 548 return policy_node;
455} 549}
456 550
457void stp_policy_node_put(struct stp_policy_node *policy_node) 551void stp_policy_node_put(struct stp_policy_node *policy_node)
458{ 552{
553 lockdep_assert_held(&stp_policy_subsys.su_mutex);
554
555 mutex_unlock(&stp_policy_subsys.su_mutex);
459 config_item_put(&policy_node->group.cg_item); 556 config_item_put(&policy_node->group.cg_item);
460} 557}
461 558
462int __init stp_configfs_init(void) 559int __init stp_configfs_init(void)
463{ 560{
464 int err;
465
466 config_group_init(&stp_policy_subsys.su_group); 561 config_group_init(&stp_policy_subsys.su_group);
467 mutex_init(&stp_policy_subsys.su_mutex); 562 mutex_init(&stp_policy_subsys.su_mutex);
468 err = configfs_register_subsystem(&stp_policy_subsys); 563 return configfs_register_subsystem(&stp_policy_subsys);
469
470 return err;
471} 564}
472 565
473void __exit stp_configfs_exit(void) 566void __exit stp_configfs_exit(void)
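
The new name parsing above accepts an optional ":<protocol_name>" suffix on the policy directory. A minimal userspace sketch of creating such a policy through configfs (the device name "dummy_stm.0", policy name "example" and node name "default" are made-up illustrations; configfs is assumed mounted at /sys/kernel/config):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

#define POL "/sys/kernel/config/stp-policy/dummy_stm.0:p_sys-t.example"

int main(void)
{
	/* "<device>:<protocol>.<policy>", as parsed by stp_policy_make() */
	if (mkdir(POL, 0755)) {
		perror("mkdir policy");
		return 1;
	}
	/*
	 * Nodes under the policy carry the protocol driver's extra
	 * attributes (uuid, do_len, ts_interval, clocksync_interval
	 * in the p_sys-t case) next to masters/channels.
	 */
	if (mkdir(POL "/default", 0755)) {
		perror("mkdir node");
		return 1;
	}
	return 0;
}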
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
index 923571adc6f4..3569439d53bb 100644
--- a/drivers/hwtracing/stm/stm.h
+++ b/drivers/hwtracing/stm/stm.h
@@ -10,20 +10,17 @@
10#ifndef _STM_STM_H_ 10#ifndef _STM_STM_H_
11#define _STM_STM_H_ 11#define _STM_STM_H_
12 12
13#include <linux/configfs.h>
14
13struct stp_policy; 15struct stp_policy;
14struct stp_policy_node; 16struct stp_policy_node;
17struct stm_protocol_driver;
15 18
16struct stp_policy_node *
17stp_policy_node_lookup(struct stm_device *stm, char *s);
18void stp_policy_node_put(struct stp_policy_node *policy_node);
19void stp_policy_unbind(struct stp_policy *policy);
20
21void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
22 unsigned int *mstart, unsigned int *mend,
23 unsigned int *cstart, unsigned int *cend);
24int stp_configfs_init(void); 19int stp_configfs_init(void);
25void stp_configfs_exit(void); 20void stp_configfs_exit(void);
26 21
22void *stp_policy_node_priv(struct stp_policy_node *pn);
23
27struct stp_master { 24struct stp_master {
28 unsigned int nr_free; 25 unsigned int nr_free;
29 unsigned long chan_map[0]; 26 unsigned long chan_map[0];
@@ -40,6 +37,9 @@ struct stm_device {
40 struct mutex link_mutex; 37 struct mutex link_mutex;
41 spinlock_t link_lock; 38 spinlock_t link_lock;
42 struct list_head link_list; 39 struct list_head link_list;
40 /* framing protocol in use */
41 const struct stm_protocol_driver *pdrv;
42 const struct config_item_type *pdrv_node_type;
43 /* master allocation */ 43 /* master allocation */
44 spinlock_t mc_lock; 44 spinlock_t mc_lock;
45 struct stp_master *masters[0]; 45 struct stp_master *masters[0];
@@ -48,16 +48,28 @@ struct stm_device {
48#define to_stm_device(_d) \ 48#define to_stm_device(_d) \
49 container_of((_d), struct stm_device, dev) 49 container_of((_d), struct stm_device, dev)
50 50
51struct stp_policy_node *
52stp_policy_node_lookup(struct stm_device *stm, char *s);
53void stp_policy_node_put(struct stp_policy_node *policy_node);
54void stp_policy_unbind(struct stp_policy *policy);
55
56void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
57 unsigned int *mstart, unsigned int *mend,
58 unsigned int *cstart, unsigned int *cend);
59
60const struct config_item_type *
61get_policy_node_type(struct configfs_attribute **attrs);
62
51struct stm_output { 63struct stm_output {
52 spinlock_t lock; 64 spinlock_t lock;
53 unsigned int master; 65 unsigned int master;
54 unsigned int channel; 66 unsigned int channel;
55 unsigned int nr_chans; 67 unsigned int nr_chans;
68 void *pdrv_private;
56}; 69};
57 70
58struct stm_file { 71struct stm_file {
59 struct stm_device *stm; 72 struct stm_device *stm;
60 struct stp_policy_node *policy_node;
61 struct stm_output output; 73 struct stm_output output;
62}; 74};
63 75
@@ -71,11 +83,35 @@ struct stm_source_device {
71 struct stm_device __rcu *link; 83 struct stm_device __rcu *link;
72 struct list_head link_entry; 84 struct list_head link_entry;
73 /* one output per stm_source device */ 85 /* one output per stm_source device */
74 struct stp_policy_node *policy_node;
75 struct stm_output output; 86 struct stm_output output;
76}; 87};
77 88
78#define to_stm_source_device(_d) \ 89#define to_stm_source_device(_d) \
79 container_of((_d), struct stm_source_device, dev) 90 container_of((_d), struct stm_source_device, dev)
80 91
92void *to_pdrv_policy_node(struct config_item *item);
93
94struct stm_protocol_driver {
95 struct module *owner;
96 const char *name;
97 ssize_t (*write)(struct stm_data *data,
98 struct stm_output *output, unsigned int chan,
99 const char *buf, size_t count);
100 void (*policy_node_init)(void *arg);
101 int (*output_open)(void *priv, struct stm_output *output);
102 void (*output_close)(struct stm_output *output);
103 ssize_t priv_sz;
104 struct configfs_attribute **policy_attr;
105};
106
107int stm_register_protocol(const struct stm_protocol_driver *pdrv);
108void stm_unregister_protocol(const struct stm_protocol_driver *pdrv);
109int stm_lookup_protocol(const char *name,
110 const struct stm_protocol_driver **pdrv,
111 const struct config_item_type **type);
112void stm_put_protocol(const struct stm_protocol_driver *pdrv);
113ssize_t stm_data_write(struct stm_data *data, unsigned int m,
114 unsigned int c, bool ts_first, const void *buf,
115 size_t count);
116
81#endif /* _STM_STM_H_ */ 117#endif /* _STM_STM_H_ */
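
For orientation, a complete protocol driver against this interface can be as small as the following pass-through sketch. The "p_plain" name is hypothetical, it assumes the file is built in drivers/hwtracing/stm/ so that "stm.h" resolves, and it is not a driver added by this patch:

// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/stm.h>
#include "stm.h"

static ssize_t plain_write(struct stm_data *data, struct stm_output *output,
			   unsigned int chan, const char *buf, size_t count)
{
	unsigned int c = output->channel + chan;
	unsigned int m = output->master;
	const unsigned char nil = 0;
	ssize_t sz;

	/* timestamp the first packet, close the frame with a FLAG */
	sz = stm_data_write(data, m, c, true, buf, count);
	if (sz > 0)
		data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);

	return sz;
}

static const struct stm_protocol_driver plain_pdrv = {
	.owner	= THIS_MODULE,
	.name	= "p_plain",
	.write	= plain_write,
};

static int __init plain_stm_init(void)
{
	return stm_register_protocol(&plain_pdrv);
}
module_init(plain_stm_init);

static void __exit plain_stm_exit(void)
{
	stm_unregister_protocol(&plain_pdrv);
}
module_exit(plain_stm_exit);

MODULE_LICENSE("GPL v2");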
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 4f832002d116..1827c69959fb 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -114,6 +114,6 @@ static struct i2c_driver ad_dpot_i2c_driver = {
114 114
115module_i2c_driver(ad_dpot_i2c_driver); 115module_i2c_driver(ad_dpot_i2c_driver);
116 116
117MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 117MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
118MODULE_DESCRIPTION("digital potentiometer I2C bus driver"); 118MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
119MODULE_LICENSE("GPL"); 119MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index 39a7f517ee7e..0383ec153725 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -140,7 +140,7 @@ static struct spi_driver ad_dpot_spi_driver = {
140 140
141module_spi_driver(ad_dpot_spi_driver); 141module_spi_driver(ad_dpot_spi_driver);
142 142
143MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 143MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
144MODULE_DESCRIPTION("digital potentiometer SPI bus driver"); 144MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
145MODULE_LICENSE("GPL"); 145MODULE_LICENSE("GPL");
146MODULE_ALIAS("spi:ad_dpot"); 146MODULE_ALIAS("spi:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index bc591b7168db..a0afadefcc49 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * ad525x_dpot: Driver for the Analog Devices digital potentiometers 2 * ad525x_dpot: Driver for the Analog Devices digital potentiometers
3 * Copyright (c) 2009-2010 Analog Devices, Inc. 3 * Copyright (c) 2009-2010 Analog Devices, Inc.
4 * Author: Michael Hennerich <hennerich@blackfin.uclinux.org> 4 * Author: Michael Hennerich <michael.hennerich@analog.com>
5 * 5 *
6 * DEVID #Wipers #Positions Resistor Options (kOhm) 6 * DEVID #Wipers #Positions Resistor Options (kOhm)
7 * AD5258 1 64 1, 10, 50, 100 7 * AD5258 1 64 1, 10, 50, 100
@@ -64,7 +64,7 @@
64 * Author: Chris Verges <chrisv@cyberswitching.com> 64 * Author: Chris Verges <chrisv@cyberswitching.com>
65 * 65 *
66 * derived from ad5252.c 66 * derived from ad5252.c
67 * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org> 67 * Copyright (c) 2006-2011 Michael Hennerich <michael.hennerich@analog.com>
68 * 68 *
69 * Licensed under the GPL-2 or later. 69 * Licensed under the GPL-2 or later.
70 */ 70 */
@@ -760,6 +760,6 @@ EXPORT_SYMBOL(ad_dpot_remove);
760 760
761 761
762MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, " 762MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
763 "Michael Hennerich <hennerich@blackfin.uclinux.org>"); 763 "Michael Hennerich <michael.hennerich@analog.com>");
764MODULE_DESCRIPTION("Digital potentiometer driver"); 764MODULE_DESCRIPTION("Digital potentiometer driver");
765MODULE_LICENSE("GPL"); 765MODULE_LICENSE("GPL");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index ed9412d750b7..24876c615c3c 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -188,7 +188,6 @@ struct apds990x_chip {
188#define APDS_LUX_DEFAULT_RATE 200 188#define APDS_LUX_DEFAULT_RATE 200
189 189
190static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */ 190static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */
191static const u8 ir_currents[] = {100, 50, 25, 12}; /* IRled currents in mA */
192 191
193/* Following two tables must match i.e 10Hz rate means 1 as persistence value */ 192/* Following two tables must match i.e 10Hz rate means 1 as persistence value */
194static const u16 arates_hz[] = {10, 5, 2, 1}; 193static const u16 arates_hz[] = {10, 5, 2, 1};
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 9c62bf064f77..17e81ce9925b 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -180,9 +180,6 @@ static const char reg_vleds[] = "Vleds";
180static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2}; 180static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
181static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500}; 181static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
182 182
183/* Supported IR-led currents in mA */
184static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
185
186/* 183/*
187 * Supported stand alone rates in ms from chip data sheet 184 * Supported stand alone rates in ms from chip data sheet
188 * {100, 200, 500, 1000, 2000}; 185 * {100, 200, 500, 1000, 2000};
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 43917898fb9a..4d6836f19489 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -92,8 +92,8 @@ static int update_property(struct device_node *dn, const char *name,
92 92
93 val = (u32 *)new_prop->value; 93 val = (u32 *)new_prop->value;
94 rc = cxl_update_properties(dn, new_prop); 94 rc = cxl_update_properties(dn, new_prop);
95 pr_devel("%s: update property (%s, length: %i, value: %#x)\n", 95 pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
96 dn->name, name, vd, be32_to_cpu(*val)); 96 dn, name, vd, be32_to_cpu(*val));
97 97
98 if (rc) { 98 if (rc) {
99 kfree(new_prop->name); 99 kfree(new_prop->name);
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3bc0c15d4d85..5d28d9e454f5 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1018,8 +1018,6 @@ err1:
1018 1018
1019void cxl_guest_remove_afu(struct cxl_afu *afu) 1019void cxl_guest_remove_afu(struct cxl_afu *afu)
1020{ 1020{
1021 pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);
1022
1023 if (!afu) 1021 if (!afu)
1024 return; 1022 return;
1025 1023
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
index 8a5adc0d2e88..3ebe5d75ad6a 100644
--- a/drivers/misc/echo/echo.c
+++ b/drivers/misc/echo/echo.c
@@ -381,7 +381,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
381 */ 381 */
382 ec->factor = 0; 382 ec->factor = 0;
383 ec->shift = 0; 383 ec->shift = 0;
384 if ((ec->nonupdate_dwell == 0)) { 384 if (!ec->nonupdate_dwell) {
385 int p, logp, shift; 385 int p, logp, shift;
386 386
387 /* Determine: 387 /* Determine:
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 68a1ac929917..fe7a1d27a017 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -111,4 +111,15 @@ config EEPROM_IDT_89HPESX
111 This driver can also be built as a module. If so, the module 111 This driver can also be built as a module. If so, the module
112 will be called idt_89hpesx. 112 will be called idt_89hpesx.
113 113
114config EEPROM_EE1004
115 tristate "SPD EEPROMs on DDR4 memory modules"
116 depends on I2C && SYSFS
117 help
118 Enable this driver to get read support for SPD EEPROMs following
119 the JEDEC EE1004 standard. These are typically found on DDR4
120 SDRAM memory modules.
121
122 This driver can also be built as a module. If so, the module
123 will be called ee1004.
124
114endmenu 125endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 2aab60ef3e3e..a9b4b6579b75 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
7obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o 7obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
8obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o 8obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
9obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o 9obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
10obj-$(CONFIG_EEPROM_EE1004) += ee1004.o
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 840afb398f9e..99de6939cd5a 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -366,7 +366,7 @@ static int at25_probe(struct spi_device *spi)
366 at25->nvmem_config.word_size = 1; 366 at25->nvmem_config.word_size = 1;
367 at25->nvmem_config.size = chip.byte_len; 367 at25->nvmem_config.size = chip.byte_len;
368 368
369 at25->nvmem = nvmem_register(&at25->nvmem_config); 369 at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
370 if (IS_ERR(at25->nvmem)) 370 if (IS_ERR(at25->nvmem))
371 return PTR_ERR(at25->nvmem); 371 return PTR_ERR(at25->nvmem);
372 372
@@ -379,16 +379,6 @@ static int at25_probe(struct spi_device *spi)
379 return 0; 379 return 0;
380} 380}
381 381
382static int at25_remove(struct spi_device *spi)
383{
384 struct at25_data *at25;
385
386 at25 = spi_get_drvdata(spi);
387 nvmem_unregister(at25->nvmem);
388
389 return 0;
390}
391
392/*-------------------------------------------------------------------------*/ 382/*-------------------------------------------------------------------------*/
393 383
394static const struct of_device_id at25_of_match[] = { 384static const struct of_device_id at25_of_match[] = {
@@ -403,7 +393,6 @@ static struct spi_driver at25_driver = {
403 .of_match_table = at25_of_match, 393 .of_match_table = at25_of_match,
404 }, 394 },
405 .probe = at25_probe, 395 .probe = at25_probe,
406 .remove = at25_remove,
407}; 396};
408 397
409module_spi_driver(at25_driver); 398module_spi_driver(at25_driver);
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
new file mode 100644
index 000000000000..276c1690ea1b
--- /dev/null
+++ b/drivers/misc/eeprom/ee1004.c
@@ -0,0 +1,281 @@
1/*
2 * ee1004 - driver for DDR4 SPD EEPROMs
3 *
4 * Copyright (C) 2017 Jean Delvare
5 *
6 * Based on the at24 driver:
7 * Copyright (C) 2005-2007 David Brownell
8 * Copyright (C) 2008 Wolfram Sang, Pengutronix
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/i2c.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/mod_devicetable.h>
20#include <linux/module.h>
21#include <linux/mutex.h>
22
23/*
24 * DDR4 memory modules use special EEPROMs following the JEDEC EE1004
25 * specification. These are 512-byte EEPROMs using a single I2C address
26 * in the 0x50-0x57 range for data. One of two 256-byte pages is selected
27 * by writing a command to I2C address 0x36 or 0x37 on the same I2C bus.
28 *
29 * Therefore we need to request these 2 additional addresses, and serialize
30 * access to all such EEPROMs with a single mutex.
31 *
32 * We assume it is safe to read up to 32 bytes at once from these EEPROMs.
33 * We use SMBus access even if I2C is available: these EEPROMs are small
34 * enough, and reads from them infrequent enough, that we favor simplicity
35 * over performance.
36 */
37
38#define EE1004_ADDR_SET_PAGE 0x36
39#define EE1004_EEPROM_SIZE 512
40#define EE1004_PAGE_SIZE 256
41#define EE1004_PAGE_SHIFT 8
42
43/*
44 * Mutex protects ee1004_set_page and ee1004_dev_count, and must be held
45 * from page selection to end of read.
46 */
47static DEFINE_MUTEX(ee1004_bus_lock);
48static struct i2c_client *ee1004_set_page[2];
49static unsigned int ee1004_dev_count;
50static int ee1004_current_page;
51
52static const struct i2c_device_id ee1004_ids[] = {
53 { "ee1004", 0 },
54 { }
55};
56MODULE_DEVICE_TABLE(i2c, ee1004_ids);
57
58/*-------------------------------------------------------------------------*/
59
60static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
61 unsigned int offset, size_t count)
62{
63 int status;
64
65 if (count > I2C_SMBUS_BLOCK_MAX)
66 count = I2C_SMBUS_BLOCK_MAX;
67 /* Can't cross page boundaries */
68 if (unlikely(offset + count > EE1004_PAGE_SIZE))
69 count = EE1004_PAGE_SIZE - offset;
70
71 status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
72 count, buf);
73 dev_dbg(&client->dev, "read %zu@%d --> %d\n", count, offset, status);
74
75 return status;
76}
77
78static ssize_t ee1004_read(struct file *filp, struct kobject *kobj,
79 struct bin_attribute *bin_attr,
80 char *buf, loff_t off, size_t count)
81{
82 struct device *dev = kobj_to_dev(kobj);
83 struct i2c_client *client = to_i2c_client(dev);
84 size_t requested = count;
85 int page;
86
87 if (unlikely(!count))
88 return count;
89
90 page = off >> EE1004_PAGE_SHIFT;
91 if (unlikely(page > 1))
92 return 0;
93 off &= (1 << EE1004_PAGE_SHIFT) - 1;
94
95 /*
96 * Read data from the chip, serializing against accesses to other
97 * EE1004 SPD EEPROMs on the same adapter, which share the page state.
98 */
99 mutex_lock(&ee1004_bus_lock);
100
101 while (count) {
102 int status;
103
104 /* Select page */
105 if (page != ee1004_current_page) {
106 /* Data is ignored */
107 status = i2c_smbus_write_byte(ee1004_set_page[page],
108 0x00);
109 if (status < 0) {
110 dev_err(dev, "Failed to select page %d (%d)\n",
111 page, status);
112 mutex_unlock(&ee1004_bus_lock);
113 return status;
114 }
115 dev_dbg(dev, "Selected page %d\n", page);
116 ee1004_current_page = page;
117 }
118
119 status = ee1004_eeprom_read(client, buf, off, count);
120 if (status < 0) {
121 mutex_unlock(&ee1004_bus_lock);
122 return status;
123 }
124 buf += status;
125 off += status;
126 count -= status;
127
128 if (off == EE1004_PAGE_SIZE) {
129 page++;
130 off = 0;
131 }
132 }
133
134 mutex_unlock(&ee1004_bus_lock);
135
136 return requested;
137}
138
139static const struct bin_attribute eeprom_attr = {
140 .attr = {
141 .name = "eeprom",
142 .mode = 0444,
143 },
144 .size = EE1004_EEPROM_SIZE,
145 .read = ee1004_read,
146};
147
148static int ee1004_probe(struct i2c_client *client,
149 const struct i2c_device_id *id)
150{
151 int err, cnr = 0;
152 const char *slow = NULL;
153
154 /* Make sure we can operate on this adapter */
155 if (!i2c_check_functionality(client->adapter,
156 I2C_FUNC_SMBUS_READ_BYTE |
157 I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
158 if (i2c_check_functionality(client->adapter,
159 I2C_FUNC_SMBUS_READ_BYTE |
160 I2C_FUNC_SMBUS_READ_WORD_DATA))
161 slow = "word";
162 else if (i2c_check_functionality(client->adapter,
163 I2C_FUNC_SMBUS_READ_BYTE |
164 I2C_FUNC_SMBUS_READ_BYTE_DATA))
165 slow = "byte";
166 else
167 return -EPFNOSUPPORT;
168 }
169
170 /* Use 2 dummy devices for page select command */
171 mutex_lock(&ee1004_bus_lock);
172 if (++ee1004_dev_count == 1) {
173 for (cnr = 0; cnr < 2; cnr++) {
174 ee1004_set_page[cnr] = i2c_new_dummy(client->adapter,
175 EE1004_ADDR_SET_PAGE + cnr);
176 if (!ee1004_set_page[cnr]) {
177 dev_err(&client->dev,
178 "address 0x%02x unavailable\n",
179 EE1004_ADDR_SET_PAGE + cnr);
180 err = -EADDRINUSE;
181 goto err_clients;
182 }
183 }
184 } else if (i2c_adapter_id(client->adapter) !=
185 i2c_adapter_id(ee1004_set_page[0]->adapter)) {
186 dev_err(&client->dev,
187 "Driver only supports devices on a single I2C bus\n");
188 err = -EOPNOTSUPP;
189 goto err_clients;
190 }
191
192 /* Remember current page to avoid unneeded page select */
193 err = i2c_smbus_read_byte(ee1004_set_page[0]);
194 if (err == -ENXIO) {
195 /* Nack means page 1 is selected */
196 ee1004_current_page = 1;
197 } else if (err < 0) {
198 /* Anything else is a real error, bail out */
199 goto err_clients;
200 } else {
201 /* Ack means page 0 is selected, returned value meaningless */
202 ee1004_current_page = 0;
203 }
204 dev_dbg(&client->dev, "Currently selected page: %d\n",
205 ee1004_current_page);
206 mutex_unlock(&ee1004_bus_lock);
207
208 /* Create the sysfs eeprom file */
209 err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
210 if (err)
211 goto err_clients_lock;
212
213 dev_info(&client->dev,
214 "%u byte EE1004-compliant SPD EEPROM, read-only\n",
215 EE1004_EEPROM_SIZE);
216 if (slow)
217 dev_notice(&client->dev,
218 "Falling back to %s reads, performance will suffer\n",
219 slow);
220
221 return 0;
222
223 err_clients_lock:
224 mutex_lock(&ee1004_bus_lock);
225 err_clients:
226 if (--ee1004_dev_count == 0) {
227 for (cnr--; cnr >= 0; cnr--) {
228 i2c_unregister_device(ee1004_set_page[cnr]);
229 ee1004_set_page[cnr] = NULL;
230 }
231 }
232 mutex_unlock(&ee1004_bus_lock);
233
234 return err;
235}
236
237static int ee1004_remove(struct i2c_client *client)
238{
239 int i;
240
241 sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
242
243 /* Remove page select clients if this is the last device */
244 mutex_lock(&ee1004_bus_lock);
245 if (--ee1004_dev_count == 0) {
246 for (i = 0; i < 2; i++) {
247 i2c_unregister_device(ee1004_set_page[i]);
248 ee1004_set_page[i] = NULL;
249 }
250 }
251 mutex_unlock(&ee1004_bus_lock);
252
253 return 0;
254}
255
256/*-------------------------------------------------------------------------*/
257
258static struct i2c_driver ee1004_driver = {
259 .driver = {
260 .name = "ee1004",
261 },
262 .probe = ee1004_probe,
263 .remove = ee1004_remove,
264 .id_table = ee1004_ids,
265};
266
267static int __init ee1004_init(void)
268{
269 return i2c_add_driver(&ee1004_driver);
270}
271module_init(ee1004_init);
272
273static void __exit ee1004_exit(void)
274{
275 i2c_del_driver(&ee1004_driver);
276}
277module_exit(ee1004_exit);
278
279MODULE_DESCRIPTION("Driver for EE1004-compliant DDR4 SPD EEPROMs");
280MODULE_AUTHOR("Jean Delvare");
281MODULE_LICENSE("GPL");
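
Once the driver is bound (for example after `echo ee1004 0x50 > /sys/bus/i2c/devices/i2c-0/new_device`), the SPD contents appear as a read-only sysfs file. An illustrative userspace reader follows; the bus number and address in the path are assumptions:

#include <stdio.h>

int main(void)
{
	/* path assumes the EEPROM sits at address 0x50 on I2C bus 0 */
	FILE *f = fopen("/sys/bus/i2c/devices/0-0050/eeprom", "rb");
	unsigned char spd[512];
	size_t n;

	if (!f) {
		perror("fopen");
		return 1;
	}
	n = fread(spd, 1, sizeof(spd), f);
	fclose(f);

	/* byte 2 of the SPD is the DRAM device type (0x0C for DDR4) */
	if (n > 2)
		printf("read %zu bytes, device type 0x%02x\n", n, spd[2]);
	return 0;
}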
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 38766968bfa2..c6dd9ad9bf7b 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -439,7 +439,7 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
439 return -ENODEV; 439 return -ENODEV;
440 } 440 }
441 441
442 edev = kzalloc(sizeof(*edev), GFP_KERNEL); 442 edev = devm_kzalloc(&spi->dev, sizeof(*edev), GFP_KERNEL);
443 if (!edev) 443 if (!edev)
444 return -ENOMEM; 444 return -ENOMEM;
445 445
@@ -449,8 +449,7 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
449 edev->addrlen = 6; 449 edev->addrlen = 6;
450 else { 450 else {
451 dev_err(&spi->dev, "unspecified address type\n"); 451 dev_err(&spi->dev, "unspecified address type\n");
452 err = -EINVAL; 452 return -EINVAL;
453 goto fail;
454 } 453 }
455 454
456 mutex_init(&edev->lock); 455 mutex_init(&edev->lock);
@@ -473,11 +472,9 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
473 edev->nvmem_config.word_size = 1; 472 edev->nvmem_config.word_size = 1;
474 edev->nvmem_config.size = edev->size; 473 edev->nvmem_config.size = edev->size;
475 474
476 edev->nvmem = nvmem_register(&edev->nvmem_config); 475 edev->nvmem = devm_nvmem_register(&spi->dev, &edev->nvmem_config);
477 if (IS_ERR(edev->nvmem)) { 476 if (IS_ERR(edev->nvmem))
478 err = PTR_ERR(edev->nvmem); 477 return PTR_ERR(edev->nvmem);
479 goto fail;
480 }
481 478
482 dev_info(&spi->dev, "%d-bit eeprom %s\n", 479 dev_info(&spi->dev, "%d-bit eeprom %s\n",
483 (pd->flags & EE_ADDR8) ? 8 : 16, 480 (pd->flags & EE_ADDR8) ? 8 : 16,
@@ -490,21 +487,15 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
490 487
491 spi_set_drvdata(spi, edev); 488 spi_set_drvdata(spi, edev);
492 return 0; 489 return 0;
493fail:
494 kfree(edev);
495 return err;
496} 490}
497 491
498static int eeprom_93xx46_remove(struct spi_device *spi) 492static int eeprom_93xx46_remove(struct spi_device *spi)
499{ 493{
500 struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi); 494 struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
501 495
502 nvmem_unregister(edev->nvmem);
503
504 if (!(edev->pdata->flags & EE_READONLY)) 496 if (!(edev->pdata->flags & EE_READONLY))
505 device_remove_file(&spi->dev, &dev_attr_erase); 497 device_remove_file(&spi->dev, &dev_attr_erase);
506 498
507 kfree(edev);
508 return 0; 499 return 0;
509} 500}
510 501
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index c7cd3675bcd1..d137d0fab9bf 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -24,7 +24,6 @@
24 * controlled from here. 24 * controlled from here.
25 */ 25 */
26 26
27#include <linux/module.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/pci.h> 28#include <linux/pci.h>
30#include <linux/err.h> 29#include <linux/err.h>
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 656449cb4476..9a65bd9d6152 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -27,7 +27,6 @@
27 */ 27 */
28 28
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/module.h>
31#include <linux/sched.h> 30#include <linux/sched.h>
32#include <linux/wait.h> 31#include <linux/wait.h>
33#include <linux/pci.h> 32#include <linux/pci.h>
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8679e0bd8ec2..3fcb9a2fe1c9 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -23,14 +23,12 @@
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/dma-mapping.h>
27#include <linux/sched.h> 26#include <linux/sched.h>
28#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
29#include <linux/page-flags.h> 28#include <linux/page-flags.h>
30#include <linux/scatterlist.h> 29#include <linux/scatterlist.h>
31#include <linux/hugetlb.h> 30#include <linux/hugetlb.h>
32#include <linux/iommu.h> 31#include <linux/iommu.h>
33#include <linux/delay.h>
34#include <linux/pci.h> 32#include <linux/pci.h>
35#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
36#include <linux/ctype.h> 34#include <linux/ctype.h>
@@ -298,7 +296,7 @@ static int genwqe_sgl_size(int num_pages)
298int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, 296int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
299 void __user *user_addr, size_t user_size, int write) 297 void __user *user_addr, size_t user_size, int write)
300{ 298{
301 int rc; 299 int ret = -ENOMEM;
302 struct pci_dev *pci_dev = cd->pci_dev; 300 struct pci_dev *pci_dev = cd->pci_dev;
303 301
304 sgl->fpage_offs = offset_in_page((unsigned long)user_addr); 302 sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
@@ -318,7 +316,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
318 if (get_order(sgl->sgl_size) > MAX_ORDER) { 316 if (get_order(sgl->sgl_size) > MAX_ORDER) {
319 dev_err(&pci_dev->dev, 317 dev_err(&pci_dev->dev,
320 "[%s] err: too much memory requested!\n", __func__); 318 "[%s] err: too much memory requested!\n", __func__);
321 return -ENOMEM; 319 return ret;
322 } 320 }
323 321
324 sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size, 322 sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
@@ -326,7 +324,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
326 if (sgl->sgl == NULL) { 324 if (sgl->sgl == NULL) {
327 dev_err(&pci_dev->dev, 325 dev_err(&pci_dev->dev,
328 "[%s] err: no memory available!\n", __func__); 326 "[%s] err: no memory available!\n", __func__);
329 return -ENOMEM; 327 return ret;
330 } 328 }
331 329
332 /* Only use buffering on incomplete pages */ 330 /* Only use buffering on incomplete pages */
@@ -339,7 +337,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
339 /* Sync with user memory */ 337 /* Sync with user memory */
340 if (copy_from_user(sgl->fpage + sgl->fpage_offs, 338 if (copy_from_user(sgl->fpage + sgl->fpage_offs,
341 user_addr, sgl->fpage_size)) { 339 user_addr, sgl->fpage_size)) {
342 rc = -EFAULT; 340 ret = -EFAULT;
343 goto err_out; 341 goto err_out;
344 } 342 }
345 } 343 }
@@ -352,7 +350,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
352 /* Sync with user memory */ 350 /* Sync with user memory */
353 if (copy_from_user(sgl->lpage, user_addr + user_size - 351 if (copy_from_user(sgl->lpage, user_addr + user_size -
354 sgl->lpage_size, sgl->lpage_size)) { 352 sgl->lpage_size, sgl->lpage_size)) {
355 rc = -EFAULT; 353 ret = -EFAULT;
356 goto err_out2; 354 goto err_out2;
357 } 355 }
358 } 356 }
@@ -374,7 +372,8 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
374 sgl->sgl = NULL; 372 sgl->sgl = NULL;
375 sgl->sgl_dma_addr = 0; 373 sgl->sgl_dma_addr = 0;
376 sgl->sgl_size = 0; 374 sgl->sgl_size = 0;
377 return -ENOMEM; 375
376 return ret;
378} 377}
379 378
380int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, 379int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 6193270e7b3d..de20bdaa148d 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -985,6 +985,12 @@ static void kgdbts_run_tests(void)
985 int nmi_sleep = 0; 985 int nmi_sleep = 0;
986 int i; 986 int i;
987 987
988 verbose = 0;
989 if (strstr(config, "V1"))
990 verbose = 1;
991 if (strstr(config, "V2"))
992 verbose = 2;
993
988 ptr = strchr(config, 'F'); 994 ptr = strchr(config, 'F');
989 if (ptr) 995 if (ptr)
990 fork_test = simple_strtol(ptr + 1, NULL, 10); 996 fork_test = simple_strtol(ptr + 1, NULL, 10);
@@ -1068,13 +1074,6 @@ static int kgdbts_option_setup(char *opt)
1068 return -ENOSPC; 1074 return -ENOSPC;
1069 } 1075 }
1070 strcpy(config, opt); 1076 strcpy(config, opt);
1071
1072 verbose = 0;
1073 if (strstr(config, "V1"))
1074 verbose = 1;
1075 if (strstr(config, "V2"))
1076 verbose = 2;
1077
1078 return 0; 1077 return 0;
1079} 1078}
1080 1079
@@ -1086,9 +1085,6 @@ static int configure_kgdbts(void)
1086 1085
1087 if (!strlen(config) || isspace(config[0])) 1086 if (!strlen(config) || isspace(config[0]))
1088 goto noconfig; 1087 goto noconfig;
1089 err = kgdbts_option_setup(config);
1090 if (err)
1091 goto noconfig;
1092 1088
1093 final_ack = 0; 1089 final_ack = 0;
1094 run_plant_and_detach_test(1); 1090 run_plant_and_detach_test(1);
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 389475b25bb7..d5a0e7f1813b 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -18,7 +18,7 @@
18 * hardened usercopy checks by added "unconst" to all the const copies, 18 * hardened usercopy checks by added "unconst" to all the const copies,
19 * and making sure "cache_size" isn't optimized into a const. 19 * and making sure "cache_size" isn't optimized into a const.
20 */ 20 */
21static volatile size_t unconst = 0; 21static volatile size_t unconst;
22static volatile size_t cache_size = 1024; 22static volatile size_t cache_size = 1024;
23static struct kmem_cache *whitelist_cache; 23static struct kmem_cache *whitelist_cache;
24 24
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index a6f41f96f2a1..80215c312f0e 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -17,7 +17,6 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/device.h> 20#include <linux/device.h>
22#include <linux/slab.h> 21#include <linux/slab.h>
23#include <linux/uuid.h> 22#include <linux/uuid.h>
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 4d77a6ae183a..87281b3695e6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -599,10 +599,10 @@ static __poll_t mei_poll(struct file *file, poll_table *wait)
599 mei_cl_read_start(cl, mei_cl_mtu(cl), file); 599 mei_cl_read_start(cl, mei_cl_mtu(cl), file);
600 } 600 }
601 601
602 if (req_events & (POLLOUT | POLLWRNORM)) { 602 if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
603 poll_wait(file, &cl->tx_wait, wait); 603 poll_wait(file, &cl->tx_wait, wait);
604 if (cl->tx_cb_queued < dev->tx_queue_limit) 604 if (cl->tx_cb_queued < dev->tx_queue_limit)
605 mask |= POLLOUT | POLLWRNORM; 605 mask |= EPOLLOUT | EPOLLWRNORM;
606 } 606 }
607 607
608out: 608out:
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 6369aeaa7056..18b8ed57c4ac 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -1035,8 +1035,6 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
1035 } 1035 }
1036 dma_async_issue_pending(chan); 1036 dma_async_issue_pending(chan);
1037 } 1037 }
1038 if (ret < 0)
1039 goto err;
1040 offset += loop_len; 1038 offset += loop_len;
1041 temp += loop_len; 1039 temp += loop_len;
1042 temp_phys += loop_len; 1040 temp_phys += loop_len;
@@ -1553,9 +1551,8 @@ static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
1553 int src_cache_off, dst_cache_off; 1551 int src_cache_off, dst_cache_off;
1554 s64 src_offset = work->src_offset, dst_offset = work->dst_offset; 1552 s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1555 u8 *temp = NULL; 1553 u8 *temp = NULL;
1556 bool src_local = true, dst_local = false; 1554 bool src_local = true;
1557 struct scif_dma_comp_cb *comp_cb; 1555 struct scif_dma_comp_cb *comp_cb;
1558 dma_addr_t src_dma_addr, dst_dma_addr;
1559 int err; 1556 int err;
1560 1557
1561 if (is_dma_copy_aligned(chan->device, 1, 1, 1)) 1558 if (is_dma_copy_aligned(chan->device, 1, 1, 1))
@@ -1569,12 +1566,8 @@ static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
1569 1566
1570 if (work->loopback) 1567 if (work->loopback)
1571 return scif_rma_list_cpu_copy(work); 1568 return scif_rma_list_cpu_copy(work);
1572 src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset);
1573 dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset);
1574 src_local = work->src_window->type == SCIF_WINDOW_SELF; 1569 src_local = work->src_window->type == SCIF_WINDOW_SELF;
1575 dst_local = work->dst_window->type == SCIF_WINDOW_SELF;
1576 1570
1577 dst_local = dst_local;
1578 /* Allocate dma_completion cb */ 1571 /* Allocate dma_completion cb */
1579 comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL); 1572 comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
1580 if (!comp_cb) 1573 if (!comp_cb)
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index cac3bcc308a7..7bb929f05d85 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
272dma_fail: 272dma_fail:
273 if (!x100) 273 if (!x100)
274 dma_pool_free(ep->remote_dev->signal_pool, status, 274 dma_pool_free(ep->remote_dev->signal_pool, status,
275 status->src_dma_addr); 275 src - offsetof(struct scif_status, val));
276alloc_fail: 276alloc_fail:
277 return err; 277 return err;
278} 278}
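
The replacement frees the pool entry using the dma address of the start of the allocation, computed by stepping back from src (which by this point addresses the val member) by offsetof(struct scif_status, val). The pointer arithmetic in isolation, with a stand-in layout rather than the real scif_status:

#include <stddef.h>
#include <stdio.h>

struct status_like {		/* stand-in, not the real scif_status */
	unsigned long src_dma_addr;
	unsigned long val;
};

int main(void)
{
	struct status_like s;
	char *val_addr = (char *)&s.val;
	/* mirrors: src - offsetof(struct scif_status, val) */
	struct status_like *base =
		(void *)(val_addr - offsetof(struct status_like, val));

	printf("%s\n", base == &s ? "recovered base" : "bug");
	return 0;
}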
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 030769018461..4b23d586fc3f 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -634,7 +634,7 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
634 break; 634 break;
635 case CBSS_PAGE_OVERFLOW: 635 case CBSS_PAGE_OVERFLOW:
636 STAT(mesq_noop_page_overflow); 636 STAT(mesq_noop_page_overflow);
637 /* fallthru */ 637 /* fall through */
638 default: 638 default:
639 BUG(); 639 BUG();
640 } 640 }
@@ -792,7 +792,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
792 break; 792 break;
793 case CBSS_PAGE_OVERFLOW: 793 case CBSS_PAGE_OVERFLOW:
794 STAT(mesq_page_overflow); 794 STAT(mesq_page_overflow);
795 /* fallthru */ 795 /* fall through */
796 default: 796 default:
797 BUG(); 797 BUG();
798 } 798 }
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 05a890ce2ab8..8e6607fc8a67 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -28,7 +28,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
28{ 28{
29 enum xp_retval ret; 29 enum xp_retval ret;
30 30
31 DBUG_ON(!spin_is_locked(&ch->lock)); 31 lockdep_assert_held(&ch->lock);
32 32
33 if (!(ch->flags & XPC_C_OPENREQUEST) || 33 if (!(ch->flags & XPC_C_OPENREQUEST) ||
34 !(ch->flags & XPC_C_ROPENREQUEST)) { 34 !(ch->flags & XPC_C_ROPENREQUEST)) {
@@ -82,7 +82,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
82 struct xpc_partition *part = &xpc_partitions[ch->partid]; 82 struct xpc_partition *part = &xpc_partitions[ch->partid];
83 u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED); 83 u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
84 84
85 DBUG_ON(!spin_is_locked(&ch->lock)); 85 lockdep_assert_held(&ch->lock);
86 86
87 if (!(ch->flags & XPC_C_DISCONNECTING)) 87 if (!(ch->flags & XPC_C_DISCONNECTING))
88 return; 88 return;
@@ -755,7 +755,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
755{ 755{
756 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); 756 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
757 757
758 DBUG_ON(!spin_is_locked(&ch->lock)); 758 lockdep_assert_held(&ch->lock);
759 759
760 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) 760 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
761 return; 761 return;
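
The same spin_is_locked() to lockdep_assert_held() conversion repeats in xpc_sn2.c and xpc_uv.c below. The pattern in isolation, as a kernel C sketch: lockdep_assert_held() checks that the current task holds the lock when CONFIG_LOCKDEP is enabled, whereas spin_is_locked() only reports that someone holds it and evaluates to false on uniprocessor builds, where DBUG_ON(!spin_is_locked()) would always fire:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ch_lock);

static void ch_helper(void)
{
	/* no-op without CONFIG_LOCKDEP; splats if the caller lacks the lock */
	lockdep_assert_held(&ch_lock);

	/* ... work that relies on ch_lock being held ... */
}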
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 0c3ef6f1df54..3eba1c420cc0 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -98,8 +98,7 @@ xpc_get_rsvd_page_pa(int nasid)
98 len = L1_CACHE_ALIGN(len); 98 len = L1_CACHE_ALIGN(len);
99 99
100 if (len > buf_len) { 100 if (len > buf_len) {
101 if (buf_base != NULL) 101 kfree(buf_base);
102 kfree(buf_base);
103 buf_len = L1_CACHE_ALIGN(len); 102 buf_len = L1_CACHE_ALIGN(len);
104 buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL, 103 buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
105 &buf_base); 104 &buf_base);
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 5a12d2a54049..0ae69b9390ce 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -1671,7 +1671,7 @@ xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
1671{ 1671{
1672 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; 1672 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1673 1673
1674 DBUG_ON(!spin_is_locked(&ch->lock)); 1674 lockdep_assert_held(&ch->lock);
1675 1675
1676 ch_sn2->remote_msgqueue_pa = 0; 1676 ch_sn2->remote_msgqueue_pa = 0;
1677 1677
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 340b44d9e8cf..0441abe87880 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -1183,7 +1183,7 @@ xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
1183{ 1183{
1184 struct xpc_channel_uv *ch_uv = &ch->sn.uv; 1184 struct xpc_channel_uv *ch_uv = &ch->sn.uv;
1185 1185
1186 DBUG_ON(!spin_is_locked(&ch->lock)); 1186 lockdep_assert_held(&ch->lock);
1187 1187
1188 kfree(ch_uv->cached_notify_gru_mq_desc); 1188 kfree(ch_uv->cached_notify_gru_mq_desc);
1189 ch_uv->cached_notify_gru_mq_desc = NULL; 1189 ch_uv->cached_notify_gru_mq_desc = NULL;
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 74b183baf044..80d8cbe8c01a 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -323,10 +323,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
323 cur_start = block->start + block->size; 323 cur_start = block->start + block->size;
324 } 324 }
325 325
326 err_chunks: 326err_chunks:
327 if (child) 327 of_node_put(child);
328 of_node_put(child);
329
330 kfree(rblocks); 328 kfree(rblocks);
331 329
332 return ret; 330 return ret;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2543ef1ece17..9b0b3fa4f836 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -25,6 +25,9 @@
25#include <linux/workqueue.h> 25#include <linux/workqueue.h>
26#include <linux/debugfs.h> 26#include <linux/debugfs.h>
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28#include <linux/rwsem.h>
29#include <linux/slab.h>
30#include <linux/spinlock.h>
28#include <linux/vmw_vmci_defs.h> 31#include <linux/vmw_vmci_defs.h>
29#include <linux/vmw_vmci_api.h> 32#include <linux/vmw_vmci_api.h>
30#include <asm/hypervisor.h> 33#include <asm/hypervisor.h>
@@ -37,20 +40,20 @@ MODULE_ALIAS("vmware_vmmemctl");
37MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
38 41
39/* 42/*
40 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't 43 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't allow wait
41 * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use 44 * (__GFP_RECLAIM) for huge page allocations. Use __GFP_NOWARN, to suppress page
42 * __GFP_NOWARN, to suppress page allocation failure warnings. 45 * allocation failure warnings. Disallow access to emergency low-memory pools.
43 */ 46 */
44#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN) 47#define VMW_HUGE_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
48 __GFP_NOMEMALLOC)
45 49
46/* 50/*
47 * Use GFP_HIGHUSER when executing in a separate kernel thread 51 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We allow lightweight
48 * context and allocation can sleep. This is less stressful to 52 * reclamation (__GFP_NORETRY). Use __GFP_NOWARN, to suppress page allocation
49 * the guest memory system, since it allows the thread to block 53 * failure warnings. Disallow access to emergency low-memory pools.
50 * while memory is reclaimed, and won't take pages from emergency
51 * low-memory pools.
52 */ 54 */
53#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER) 55#define VMW_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
56 __GFP_NOMEMALLOC|__GFP_NORETRY)
54 57
55/* Maximum number of refused pages we accumulate during inflation cycle */ 58/* Maximum number of refused pages we accumulate during inflation cycle */
56#define VMW_BALLOON_MAX_REFUSED 16 59#define VMW_BALLOON_MAX_REFUSED 16
@@ -77,225 +80,420 @@ enum vmwballoon_capabilities {
77 | VMW_BALLOON_BATCHED_2M_CMDS \ 80 | VMW_BALLOON_BATCHED_2M_CMDS \
78 | VMW_BALLOON_SIGNALLED_WAKEUP_CMD) 81 | VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
79 82
80#define VMW_BALLOON_2M_SHIFT (9) 83#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
81#define VMW_BALLOON_NUM_PAGE_SIZES (2)
82 84
83/* 85enum vmballoon_page_size_type {
84 * Backdoor commands availability: 86 VMW_BALLOON_4K_PAGE,
85 * 87 VMW_BALLOON_2M_PAGE,
86 * START, GET_TARGET and GUEST_ID are always available, 88 VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
87 * 89};
88 * VMW_BALLOON_BASIC_CMDS:
89 * LOCK and UNLOCK commands,
90 * VMW_BALLOON_BATCHED_CMDS:
91 * BATCHED_LOCK and BATCHED_UNLOCK commands.
92 * VMW BALLOON_BATCHED_2M_CMDS:
93 * BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
94 * VMW VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
95 * VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
96 */
97#define VMW_BALLOON_CMD_START 0
98#define VMW_BALLOON_CMD_GET_TARGET 1
99#define VMW_BALLOON_CMD_LOCK 2
100#define VMW_BALLOON_CMD_UNLOCK 3
101#define VMW_BALLOON_CMD_GUEST_ID 4
102#define VMW_BALLOON_CMD_BATCHED_LOCK 6
103#define VMW_BALLOON_CMD_BATCHED_UNLOCK 7
104#define VMW_BALLOON_CMD_BATCHED_2M_LOCK 8
105#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK 9
106#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET 10
107
108
109/* error codes */
110#define VMW_BALLOON_SUCCESS 0
111#define VMW_BALLOON_FAILURE -1
112#define VMW_BALLOON_ERROR_CMD_INVALID 1
113#define VMW_BALLOON_ERROR_PPN_INVALID 2
114#define VMW_BALLOON_ERROR_PPN_LOCKED 3
115#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
116#define VMW_BALLOON_ERROR_PPN_PINNED 5
117#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
118#define VMW_BALLOON_ERROR_RESET 7
119#define VMW_BALLOON_ERROR_BUSY 8
120 90
121#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000) 91#define VMW_BALLOON_NUM_PAGE_SIZES (VMW_BALLOON_LAST_SIZE + 1)
122 92
123/* Batch page description */ 93static const char * const vmballoon_page_size_names[] = {
94 [VMW_BALLOON_4K_PAGE] = "4k",
95 [VMW_BALLOON_2M_PAGE] = "2M"
96};
124 97
125/* 98enum vmballoon_op {
126 * Layout of a page in the batch page: 99 VMW_BALLOON_INFLATE,
100 VMW_BALLOON_DEFLATE
101};
102
103enum vmballoon_op_stat_type {
104 VMW_BALLOON_OP_STAT,
105 VMW_BALLOON_OP_FAIL_STAT
106};
107
108#define VMW_BALLOON_OP_STAT_TYPES (VMW_BALLOON_OP_FAIL_STAT + 1)
109
110/**
111 * enum vmballoon_cmd_type - backdoor commands.
127 * 112 *
 128 * +-------------+----------+--------+ 113 * Availability of the commands is as follows:
129 * | | | |
130 * | Page number | Reserved | Status |
131 * | | | |
132 * +-------------+----------+--------+
133 * 64 PAGE_SHIFT 6 0
134 * 114 *
135 * The reserved field should be set to 0. 115 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
116 * %VMW_BALLOON_CMD_GUEST_ID are always available.
117 *
118 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
119 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
120 *
121 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
 122 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
123 * are available.
124 *
125 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
126 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
127 * are supported.
128 *
 129 * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
 130 * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is available.
131 *
132 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
133 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
134 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
135 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
136 * to be deflated from the balloon.
137 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
138 * runs in the VM.
139 * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
140 * ballooned pages (up to 512).
141 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
142 * pages that are about to be deflated from the
143 * balloon (up to 512).
144 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
145 * for 2MB pages.
146 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
147 * @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
148 * pages.
149 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
150 * that would be invoked when the balloon
151 * size changes.
152 * @VMW_BALLOON_CMD_LAST: Value of the last command.
136 */ 153 */
137#define VMW_BALLOON_BATCH_MAX_PAGES (PAGE_SIZE / sizeof(u64)) 154enum vmballoon_cmd_type {
138#define VMW_BALLOON_BATCH_STATUS_MASK ((1UL << 5) - 1) 155 VMW_BALLOON_CMD_START,
139#define VMW_BALLOON_BATCH_PAGE_MASK (~((1UL << PAGE_SHIFT) - 1)) 156 VMW_BALLOON_CMD_GET_TARGET,
140 157 VMW_BALLOON_CMD_LOCK,
141struct vmballoon_batch_page { 158 VMW_BALLOON_CMD_UNLOCK,
142 u64 pages[VMW_BALLOON_BATCH_MAX_PAGES]; 159 VMW_BALLOON_CMD_GUEST_ID,
160 /* No command 5 */
161 VMW_BALLOON_CMD_BATCHED_LOCK = 6,
162 VMW_BALLOON_CMD_BATCHED_UNLOCK,
163 VMW_BALLOON_CMD_BATCHED_2M_LOCK,
164 VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
165 VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
166 VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
143}; 167};
144 168
145static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx) 169#define VMW_BALLOON_CMD_NUM (VMW_BALLOON_CMD_LAST + 1)
146{ 170
147 return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK; 171enum vmballoon_error_codes {
148} 172 VMW_BALLOON_SUCCESS,
173 VMW_BALLOON_ERROR_CMD_INVALID,
174 VMW_BALLOON_ERROR_PPN_INVALID,
175 VMW_BALLOON_ERROR_PPN_LOCKED,
176 VMW_BALLOON_ERROR_PPN_UNLOCKED,
177 VMW_BALLOON_ERROR_PPN_PINNED,
178 VMW_BALLOON_ERROR_PPN_NOTNEEDED,
179 VMW_BALLOON_ERROR_RESET,
180 VMW_BALLOON_ERROR_BUSY
181};
149 182
150static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch, 183#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
151 int idx)
152{
153 return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
154}
155 184
156static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx, 185#define VMW_BALLOON_CMD_WITH_TARGET_MASK \
157 u64 pa) 186 ((1UL << VMW_BALLOON_CMD_GET_TARGET) | \
158{ 187 (1UL << VMW_BALLOON_CMD_LOCK) | \
159 batch->pages[idx] = pa; 188 (1UL << VMW_BALLOON_CMD_UNLOCK) | \
160} 189 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \
190 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \
191 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \
192 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
193
194static const char * const vmballoon_cmd_names[] = {
195 [VMW_BALLOON_CMD_START] = "start",
196 [VMW_BALLOON_CMD_GET_TARGET] = "target",
197 [VMW_BALLOON_CMD_LOCK] = "lock",
198 [VMW_BALLOON_CMD_UNLOCK] = "unlock",
199 [VMW_BALLOON_CMD_GUEST_ID] = "guestType",
200 [VMW_BALLOON_CMD_BATCHED_LOCK] = "batchLock",
201 [VMW_BALLOON_CMD_BATCHED_UNLOCK] = "batchUnlock",
202 [VMW_BALLOON_CMD_BATCHED_2M_LOCK] = "2m-lock",
203 [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK] = "2m-unlock",
204 [VMW_BALLOON_CMD_VMCI_DOORBELL_SET] = "doorbellSet"
205};
161 206
207enum vmballoon_stat_page {
208 VMW_BALLOON_PAGE_STAT_ALLOC,
209 VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
210 VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
211 VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
212 VMW_BALLOON_PAGE_STAT_FREE,
213 VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
214};
162 215
163#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result) \ 216#define VMW_BALLOON_PAGE_STAT_NUM (VMW_BALLOON_PAGE_STAT_LAST + 1)
164({ \
165 unsigned long __status, __dummy1, __dummy2, __dummy3; \
166 __asm__ __volatile__ ("inl %%dx" : \
167 "=a"(__status), \
168 "=c"(__dummy1), \
169 "=d"(__dummy2), \
170 "=b"(result), \
171 "=S" (__dummy3) : \
172 "0"(VMW_BALLOON_HV_MAGIC), \
173 "1"(VMW_BALLOON_CMD_##cmd), \
174 "2"(VMW_BALLOON_HV_PORT), \
175 "3"(arg1), \
176 "4" (arg2) : \
177 "memory"); \
178 if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START) \
179 result = __dummy1; \
180 result &= -1UL; \
181 __status & -1UL; \
182})
183 217
184#ifdef CONFIG_DEBUG_FS 218enum vmballoon_stat_general {
185struct vmballoon_stats { 219 VMW_BALLOON_STAT_TIMER,
186 unsigned int timer; 220 VMW_BALLOON_STAT_DOORBELL,
187 unsigned int doorbell; 221 VMW_BALLOON_STAT_RESET,
188 222 VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_RESET
189 /* allocation statistics */
190 unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
191 unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
192 unsigned int sleep_alloc;
193 unsigned int sleep_alloc_fail;
194 unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
195 unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
196 unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];
197
198 /* monitor operations */
199 unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
200 unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
201 unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
202 unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
203 unsigned int target;
204 unsigned int target_fail;
205 unsigned int start;
206 unsigned int start_fail;
207 unsigned int guest_type;
208 unsigned int guest_type_fail;
209 unsigned int doorbell_set;
210 unsigned int doorbell_unset;
211}; 223};
212 224
213#define STATS_INC(stat) (stat)++ 225#define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1)
214#else
215#define STATS_INC(stat)
216#endif
217 226
218struct vmballoon;
219 227
220struct vmballoon_ops { 228static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
221 void (*add_page)(struct vmballoon *b, int idx, struct page *p); 229static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
222 int (*lock)(struct vmballoon *b, unsigned int num_pages, 230
223 bool is_2m_pages, unsigned int *target); 231struct vmballoon_ctl {
224 int (*unlock)(struct vmballoon *b, unsigned int num_pages, 232 struct list_head pages;
225 bool is_2m_pages, unsigned int *target); 233 struct list_head refused_pages;
234 unsigned int n_refused_pages;
235 unsigned int n_pages;
236 enum vmballoon_page_size_type page_size;
237 enum vmballoon_op op;
226}; 238};
227 239
228struct vmballoon_page_size { 240struct vmballoon_page_size {
229 /* list of reserved physical pages */ 241 /* list of reserved physical pages */
230 struct list_head pages; 242 struct list_head pages;
231
232 /* transient list of non-balloonable pages */
233 struct list_head refused_pages;
234 unsigned int n_refused_pages;
235}; 243};
236 244
245/**
246 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
247 *
248 * @status: the status of the operation, which is written by the hypervisor.
249 * @reserved: reserved for future use. Must be set to zero.
250 * @pfn: the physical frame number of the page to be locked or unlocked.
251 */
252struct vmballoon_batch_entry {
253 u64 status : 5;
254 u64 reserved : PAGE_SHIFT - 5;
255 u64 pfn : 52;
256} __packed;
257
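
To make the new layout concrete, here is a minimal userspace sketch (not kernel code) of the packing that struct vmballoon_batch_entry describes; a 4KB base page, i.e. PAGE_SHIFT == 12, is assumed, so the three bitfields sum to exactly 64 bits:

#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SHIFT 12                        /* assumption: 4KB base pages */

struct sk_batch_entry {
        uint64_t status   : 5;                  /* written by the hypervisor */
        uint64_t reserved : SK_PAGE_SHIFT - 5;  /* must be zero */
        uint64_t pfn      : 52;                 /* page frame number */
};

int main(void)
{
        struct sk_batch_entry e = { .pfn = 0x12345 };

        e.status = 3;   /* pretend the host answered PPN_LOCKED */
        printf("entry uses %zu bytes: pfn=0x%llx status=%llu\n",
               sizeof(e), (unsigned long long)e.pfn,
               (unsigned long long)e.status);
        return 0;
}
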
237struct vmballoon { 258struct vmballoon {
238 struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES]; 259 struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];
239 260
240 /* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */ 261 /**
241 unsigned supported_page_sizes; 262 * @max_page_size: maximum supported page size for ballooning.
263 *
264 * Protected by @conf_sem
265 */
266 enum vmballoon_page_size_type max_page_size;
267
268 /**
269 * @size: balloon actual size in basic page size (frames).
270 *
 271 * While we currently do not support a size bigger than 32 bits, use
 272 * 64 bits in preparation for future support.
273 */
274 atomic64_t size;
242 275
243 /* balloon size in pages */ 276 /**
244 unsigned int size; 277 * @target: balloon target size in basic page size (frames).
245 unsigned int target; 278 *
279 * We do not protect the target under the assumption that setting the
280 * value is always done through a single write. If this assumption ever
281 * breaks, we would have to use X_ONCE for accesses, and suffer the less
282 * optimized code. Although we may read stale target value if multiple
283 * accesses happen at once, the performance impact should be minor.
284 */
285 unsigned long target;
246 286
247 /* reset flag */ 287 /**
288 * @reset_required: reset flag
289 *
290 * Setting this flag may introduce races, but the code is expected to
291 * handle them gracefully. In the worst case, another operation will
292 * fail as reset did not take place. Clearing the flag is done while
293 * holding @conf_sem for write.
294 */
248 bool reset_required; 295 bool reset_required;
249 296
297 /**
298 * @capabilities: hypervisor balloon capabilities.
299 *
300 * Protected by @conf_sem.
301 */
250 unsigned long capabilities; 302 unsigned long capabilities;
251 303
252 struct vmballoon_batch_page *batch_page; 304 /**
305 * @batch_page: pointer to communication batch page.
306 *
307 * When batching is used, batch_page points to a page, which holds up to
308 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
309 */
310 struct vmballoon_batch_entry *batch_page;
311
312 /**
313 * @batch_max_pages: maximum pages that can be locked/unlocked.
314 *
315 * Indicates the number of pages that the hypervisor can lock or unlock
316 * at once, according to whether batching is enabled. If batching is
 317 * disabled, only a single page can be locked/unlocked on each operation.
318 *
319 * Protected by @conf_sem.
320 */
253 unsigned int batch_max_pages; 321 unsigned int batch_max_pages;
254 struct page *page;
255 322
256 const struct vmballoon_ops *ops; 323 /**
324 * @page: page to be locked/unlocked by the hypervisor
325 *
326 * @page is only used when batching is disabled and a single page is
327 * reclaimed on each iteration.
328 *
329 * Protected by @comm_lock.
330 */
331 struct page *page;
257 332
258#ifdef CONFIG_DEBUG_FS
259 /* statistics */ 333 /* statistics */
260 struct vmballoon_stats stats; 334 struct vmballoon_stats *stats;
261 335
336#ifdef CONFIG_DEBUG_FS
262 /* debugfs file exporting statistics */ 337 /* debugfs file exporting statistics */
263 struct dentry *dbg_entry; 338 struct dentry *dbg_entry;
264#endif 339#endif
265 340
266 struct sysinfo sysinfo;
267
268 struct delayed_work dwork; 341 struct delayed_work dwork;
269 342
343 /**
 344 * @vmci_doorbell: handle of the doorbell invoked when the balloon size changes.
345 *
346 * Protected by @conf_sem.
347 */
270 struct vmci_handle vmci_doorbell; 348 struct vmci_handle vmci_doorbell;
349
350 /**
351 * @conf_sem: semaphore to protect the configuration and the statistics.
352 */
353 struct rw_semaphore conf_sem;
354
355 /**
356 * @comm_lock: lock to protect the communication with the host.
357 *
 358 * Lock ordering: @conf_sem -> @comm_lock.
359 */
360 spinlock_t comm_lock;
271}; 361};
272 362
273static struct vmballoon balloon; 363static struct vmballoon balloon;
274 364
365struct vmballoon_stats {
366 /* timer / doorbell operations */
367 atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
368
369 /* allocation statistics for huge and small pages */
370 atomic64_t
371 page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
372
373 /* Monitor operations: total operations, and failures */
374 atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
375};
376
377static inline bool is_vmballoon_stats_on(void)
378{
379 return IS_ENABLED(CONFIG_DEBUG_FS) &&
380 static_branch_unlikely(&balloon_stat_enabled);
381}
382
383static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
384 enum vmballoon_op_stat_type type)
385{
386 if (is_vmballoon_stats_on())
387 atomic64_inc(&b->stats->ops[op][type]);
388}
389
390static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
391 enum vmballoon_stat_general stat)
392{
393 if (is_vmballoon_stats_on())
394 atomic64_inc(&b->stats->general_stat[stat]);
395}
396
397static inline void vmballoon_stats_gen_add(struct vmballoon *b,
398 enum vmballoon_stat_general stat,
399 unsigned int val)
400{
401 if (is_vmballoon_stats_on())
402 atomic64_add(val, &b->stats->general_stat[stat]);
403}
404
405static inline void vmballoon_stats_page_inc(struct vmballoon *b,
406 enum vmballoon_stat_page stat,
407 enum vmballoon_page_size_type size)
408{
409 if (is_vmballoon_stats_on())
410 atomic64_inc(&b->stats->page_stat[stat][size]);
411}
412
413static inline void vmballoon_stats_page_add(struct vmballoon *b,
414 enum vmballoon_stat_page stat,
415 enum vmballoon_page_size_type size,
416 unsigned int val)
417{
418 if (is_vmballoon_stats_on())
419 atomic64_add(val, &b->stats->page_stat[stat][size]);
420}
421
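
All of the helpers above funnel through is_vmballoon_stats_on(), so every counter update vanishes at compile time when debugfs support is off and stays a patched-out branch when statistics are disabled at runtime. A rough userspace model of that pattern, with a compile-time flag standing in for the static key:

#include <stdatomic.h>
#include <stdio.h>

#define STATS_ENABLED 1         /* stands in for IS_ENABLED() + static key */

static atomic_long timer_stat;

static inline void stat_inc(atomic_long *stat)
{
        if (STATS_ENABLED)      /* folded away entirely when defined to 0 */
                atomic_fetch_add_explicit(stat, 1, memory_order_relaxed);
}

int main(void)
{
        stat_inc(&timer_stat);
        printf("timer=%ld\n", atomic_load(&timer_stat));
        return 0;
}
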
422static inline unsigned long
423__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
424 unsigned long arg2, unsigned long *result)
425{
426 unsigned long status, dummy1, dummy2, dummy3, local_result;
427
428 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
429
430 asm volatile ("inl %%dx" :
431 "=a"(status),
432 "=c"(dummy1),
433 "=d"(dummy2),
434 "=b"(local_result),
435 "=S"(dummy3) :
436 "0"(VMW_BALLOON_HV_MAGIC),
437 "1"(cmd),
438 "2"(VMW_BALLOON_HV_PORT),
439 "3"(arg1),
440 "4"(arg2) :
441 "memory");
442
443 /* update the result if needed */
444 if (result)
445 *result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
446 local_result;
447
448 /* update target when applicable */
449 if (status == VMW_BALLOON_SUCCESS &&
450 ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
451 WRITE_ONCE(b->target, local_result);
452
453 if (status != VMW_BALLOON_SUCCESS &&
454 status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
455 vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
456 pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
457 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
458 status);
459 }
460
461 /* mark reset required accordingly */
462 if (status == VMW_BALLOON_ERROR_RESET)
463 b->reset_required = true;
464
465 return status;
466}
467
468static __always_inline unsigned long
469vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
470 unsigned long arg2)
471{
472 unsigned long dummy;
473
474 return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
475}
476
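
Two side effects of __vmballoon_cmd() are easy to miss: on success of any command in VMW_BALLOON_CMD_WITH_TARGET_MASK the returned value doubles as the new balloon target, and a VMW_BALLOON_ERROR_RESET status latches reset_required for the next iteration. A small userspace model of just that bookkeeping, with a mocked backdoor in place of the inl instruction (values are hypothetical):

#include <stdio.h>

enum { SK_SUCCESS = 0, SK_ERROR_RESET = 7 };

static unsigned long target;
static int reset_required;

/* stands in for the "inl" hypervisor backdoor; answers are canned */
static unsigned long mock_backdoor(unsigned long cmd, unsigned long *result)
{
        *result = 4096;         /* pretend the host asks for 4096 frames */
        return SK_SUCCESS;
}

static unsigned long sk_cmd(unsigned long cmd, int reports_target)
{
        unsigned long result;
        unsigned long status = mock_backdoor(cmd, &result);

        if (status == SK_SUCCESS && reports_target)
                target = result;        /* the result doubles as the target */
        if (status == SK_ERROR_RESET)
                reset_required = 1;     /* forces a reset on the next cycle */
        return status;
}

int main(void)
{
        sk_cmd(1 /* GET_TARGET */, 1);
        printf("target=%lu reset_required=%d\n", target, reset_required);
        return 0;
}
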
275/* 477/*
276 * Send "start" command to the host, communicating supported version 478 * Send "start" command to the host, communicating supported version
277 * of the protocol. 479 * of the protocol.
278 */ 480 */
279static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps) 481static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
280{ 482{
281 unsigned long status, capabilities, dummy = 0; 483 unsigned long status, capabilities;
282 bool success;
283
284 STATS_INC(b->stats.start);
285 484
286 status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities); 485 status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
486 &capabilities);
287 487
288 switch (status) { 488 switch (status) {
289 case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES: 489 case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
290 b->capabilities = capabilities; 490 b->capabilities = capabilities;
291 success = true;
292 break; 491 break;
293 case VMW_BALLOON_SUCCESS: 492 case VMW_BALLOON_SUCCESS:
294 b->capabilities = VMW_BALLOON_BASIC_CMDS; 493 b->capabilities = VMW_BALLOON_BASIC_CMDS;
295 success = true;
296 break; 494 break;
297 default: 495 default:
298 success = false; 496 return -EIO;
299 } 497 }
300 498
301 /* 499 /*
@@ -303,626 +501,693 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
303 * reason disabled, do not use 2MB pages, since otherwise the legacy 501 * reason disabled, do not use 2MB pages, since otherwise the legacy
304 * mechanism is used with 2MB pages, causing a failure. 502 * mechanism is used with 2MB pages, causing a failure.
305 */ 503 */
504 b->max_page_size = VMW_BALLOON_4K_PAGE;
306 if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) && 505 if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
307 (b->capabilities & VMW_BALLOON_BATCHED_CMDS)) 506 (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
308 b->supported_page_sizes = 2; 507 b->max_page_size = VMW_BALLOON_2M_PAGE;
309 else
310 b->supported_page_sizes = 1;
311
312 if (!success) {
313 pr_debug("%s - failed, hv returns %ld\n", __func__, status);
314 STATS_INC(b->stats.start_fail);
315 }
316 return success;
317}
318 508
319static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
320{
321 switch (status) {
322 case VMW_BALLOON_SUCCESS:
323 return true;
324 509
325 case VMW_BALLOON_ERROR_RESET: 510 return 0;
326 b->reset_required = true;
327 /* fall through */
328
329 default:
330 return false;
331 }
332} 511}
333 512
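
The START handshake above picks between three outcomes. A sketch of that selection logic follows; the capability bit values here are illustrative stand-ins, not the real protocol constants:

#include <stdio.h>

#define SK_BASIC_CMDS           (1UL << 1)      /* illustrative bit values */
#define SK_BATCHED_CMDS         (1UL << 2)
#define SK_BATCHED_2M_CMDS      (1UL << 3)
#define SK_SUCCESS              0UL
#define SK_SUCCESS_WITH_CAPS    0x03000000UL

static int negotiate(unsigned long status, unsigned long host_caps,
                     unsigned long *caps, int *use_2m)
{
        switch (status) {
        case SK_SUCCESS_WITH_CAPS:
                *caps = host_caps;              /* host advertised a set */
                break;
        case SK_SUCCESS:
                *caps = SK_BASIC_CMDS;          /* legacy host: basic only */
                break;
        default:
                return -1;                      /* handshake failed */
        }
        /* 2MB pages need both batching capabilities, as explained above */
        *use_2m = (*caps & SK_BATCHED_2M_CMDS) && (*caps & SK_BATCHED_CMDS);
        return 0;
}

int main(void)
{
        unsigned long caps;
        int use_2m;

        if (!negotiate(SK_SUCCESS_WITH_CAPS,
                       SK_BATCHED_CMDS | SK_BATCHED_2M_CMDS, &caps, &use_2m))
                printf("caps=0x%lx use_2m=%d\n", caps, use_2m);
        return 0;
}
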
334/* 513/**
514 * vmballoon_send_guest_id - communicate guest type to the host.
515 *
516 * @b: pointer to the balloon.
517 *
335 * Communicate guest type to the host so that it can adjust ballooning 518 * Communicate guest type to the host so that it can adjust ballooning
336 * algorithm to the one most appropriate for the guest. This command 519 * algorithm to the one most appropriate for the guest. This command
337 * is normally issued after sending "start" command and is part of 520 * is normally issued after sending "start" command and is part of
338 * standard reset sequence. 521 * standard reset sequence.
522 *
523 * Return: zero on success or appropriate error code.
339 */ 524 */
340static bool vmballoon_send_guest_id(struct vmballoon *b) 525static int vmballoon_send_guest_id(struct vmballoon *b)
341{ 526{
342 unsigned long status, dummy = 0; 527 unsigned long status;
343
344 status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
345 dummy);
346
347 STATS_INC(b->stats.guest_type);
348 528
349 if (vmballoon_check_status(b, status)) 529 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
350 return true; 530 VMW_BALLOON_GUEST_ID, 0);
351 531
352 pr_debug("%s - failed, hv returns %ld\n", __func__, status); 532 return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
353 STATS_INC(b->stats.guest_type_fail);
354 return false;
355} 533}
356 534
357static u16 vmballoon_page_size(bool is_2m_page) 535/**
536 * vmballoon_page_order() - return the order of the page
537 * @page_size: the size of the page.
538 *
539 * Return: the allocation order.
540 */
541static inline
542unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
358{ 543{
359 if (is_2m_page) 544 return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
360 return 1 << VMW_BALLOON_2M_SHIFT; 545}
361 546
362 return 1; 547/**
548 * vmballoon_page_in_frames() - returns the number of frames in a page.
549 * @page_size: the size of the page.
550 *
551 * Return: the number of 4k frames.
552 */
553static inline unsigned int
554vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
555{
556 return 1 << vmballoon_page_order(page_size);
363} 557}
364 558
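
Assuming x86-64 with 4KB base pages, PMD_SHIFT is 21, so VMW_BALLOON_2M_ORDER works out to 9 and a 2MB page covers 512 basic frames; a one-liner confirms the arithmetic:

#include <stdio.h>

#define SK_PAGE_SHIFT   12      /* assumption: 4KB base pages */
#define SK_PMD_SHIFT    21      /* x86-64 huge page boundary */
#define SK_2M_ORDER     (SK_PMD_SHIFT - SK_PAGE_SHIFT)

int main(void)
{
        printf("order=%d frames=%d\n", SK_2M_ORDER, 1 << SK_2M_ORDER);
        /* prints: order=9 frames=512 */
        return 0;
}
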
365/* 559/**
366 * Retrieve desired balloon size from the host. 560 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
561 *
562 * @b: pointer to the balloon.
563 *
 564 * Return: zero on success, -EINVAL if the limit does not fit in 32 bits, as
 565 * required by the host-guest protocol, and -EIO if an error occurred in
 566 * communicating with the host.
367 */ 567 */
368static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target) 568static int vmballoon_send_get_target(struct vmballoon *b)
369{ 569{
370 unsigned long status; 570 unsigned long status;
371 unsigned long target;
372 unsigned long limit; 571 unsigned long limit;
373 unsigned long dummy = 0;
374 u32 limit32;
375 572
376 /* 573 limit = totalram_pages;
377 * si_meminfo() is cheap. Moreover, we want to provide dynamic
378 * max balloon size later. So let us call si_meminfo() every
379 * iteration.
380 */
381 si_meminfo(&b->sysinfo);
382 limit = b->sysinfo.totalram;
383 574
384 /* Ensure limit fits in 32-bits */ 575 /* Ensure limit fits in 32-bits */
385 limit32 = (u32)limit; 576 if (limit != (u32)limit)
386 if (limit != limit32) 577 return -EINVAL;
387 return false;
388
389 /* update stats */
390 STATS_INC(b->stats.target);
391 578
392 status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target); 579 status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
393 if (vmballoon_check_status(b, status)) {
394 *new_target = target;
395 return true;
396 }
397 580
398 pr_debug("%s - failed, hv returns %ld\n", __func__, status); 581 return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
399 STATS_INC(b->stats.target_fail);
400 return false;
401} 582}
402 583
403/* 584/**
404 * Notify the host about allocated page so that host can use it without 585 * vmballoon_alloc_page_list - allocates a list of pages.
405 * fear that guest will need it. Host may reject some pages, we need to 586 *
406 * check the return value and maybe submit a different page. 587 * @b: pointer to the balloon.
588 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
589 * @req_n_pages: the number of requested pages.
590 *
 591 * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
592 * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
593 *
594 * Return: zero on success or error code otherwise.
407 */ 595 */
408static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, 596static int vmballoon_alloc_page_list(struct vmballoon *b,
409 unsigned int *hv_status, unsigned int *target) 597 struct vmballoon_ctl *ctl,
598 unsigned int req_n_pages)
410{ 599{
411 unsigned long status, dummy = 0; 600 struct page *page;
412 u32 pfn32; 601 unsigned int i;
413
414 pfn32 = (u32)pfn;
415 if (pfn32 != pfn)
416 return -EINVAL;
417
418 STATS_INC(b->stats.lock[false]);
419 602
420 *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target); 603 for (i = 0; i < req_n_pages; i++) {
421 if (vmballoon_check_status(b, status)) 604 if (ctl->page_size == VMW_BALLOON_2M_PAGE)
422 return 0; 605 page = alloc_pages(VMW_HUGE_PAGE_ALLOC_FLAGS,
606 VMW_BALLOON_2M_ORDER);
607 else
608 page = alloc_page(VMW_PAGE_ALLOC_FLAGS);
423 609
424 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); 610 /* Update statistics */
425 STATS_INC(b->stats.lock_fail[false]); 611 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
426 return -EIO; 612 ctl->page_size);
427}
428 613
429static int vmballoon_send_batched_lock(struct vmballoon *b, 614 if (page) {
430 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 615 /* Success. Add the page to the list and continue. */
431{ 616 list_add(&page->lru, &ctl->pages);
432 unsigned long status; 617 continue;
433 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); 618 }
434 619
435 STATS_INC(b->stats.lock[is_2m_pages]); 620 /* Allocation failed. Update statistics and stop. */
621 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
622 ctl->page_size);
623 break;
624 }
436 625
437 if (is_2m_pages) 626 ctl->n_pages = i;
438 status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
439 *target);
440 else
441 status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
442 *target);
443 627
444 if (vmballoon_check_status(b, status)) 628 return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
445 return 0;
446
447 pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
448 STATS_INC(b->stats.lock_fail[is_2m_pages]);
449 return 1;
450} 629}
451 630
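
The contract of vmballoon_alloc_page_list() is worth spelling out: it stops at the first allocation failure, keeps whatever it already obtained, and reports -ENOMEM only for the shortfall. A userspace analogue of that contract, with malloc standing in for the page allocator:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int alloc_list(void **pages, unsigned int req, unsigned int *got)
{
        unsigned int i;

        for (i = 0; i < req; i++) {
                pages[i] = malloc(4096);        /* stands in for alloc_page() */
                if (!pages[i])
                        break;                  /* keep what we already have */
        }
        *got = i;
        return i == req ? 0 : -ENOMEM;
}

int main(void)
{
        void *pages[8];
        unsigned int got;

        printf("rc=%d got=%u\n", alloc_list(pages, 8, &got), got);
        for (unsigned int i = 0; i < got; i++)
                free(pages[i]);
        return 0;
}
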
452/* 631/**
453 * Notify the host that guest intends to release given page back into 632 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
454 * the pool of available (to the guest) pages. 633 *
634 * @b: pointer for %struct vmballoon.
635 * @page: pointer for the page whose result should be handled.
636 * @page_size: size of the page.
637 * @status: status of the operation as provided by the hypervisor.
455 */ 638 */
456static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn, 639static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
457 unsigned int *target) 640 enum vmballoon_page_size_type page_size,
641 unsigned long status)
458{ 642{
459 unsigned long status, dummy = 0; 643 /* On success do nothing. The page is already on the balloon list. */
460 u32 pfn32; 644 if (likely(status == VMW_BALLOON_SUCCESS))
461 645 return 0;
462 pfn32 = (u32)pfn;
463 if (pfn32 != pfn)
464 return false;
465 646
466 STATS_INC(b->stats.unlock[false]); 647 pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
648 page_to_pfn(page), status,
649 vmballoon_page_size_names[page_size]);
467 650
468 status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target); 651 /* Error occurred */
469 if (vmballoon_check_status(b, status)) 652 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
470 return true; 653 page_size);
471 654
472 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); 655 return -EIO;
473 STATS_INC(b->stats.unlock_fail[false]);
474 return false;
475} 656}
476 657
477static bool vmballoon_send_batched_unlock(struct vmballoon *b, 658/**
478 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 659 * vmballoon_status_page - returns the status of (un)lock operation
660 *
661 * @b: pointer to the balloon.
662 * @idx: index for the page for which the operation is performed.
663 * @p: pointer to where the page struct is returned.
664 *
665 * Following a lock or unlock operation, returns the status of the operation for
 666 * an individual page. Provides the page that the operation was performed on
 667 * in the @p argument.
 668 *
 669 * Return: The status of a lock or unlock operation for an individual page.
670 */
671static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
672 struct page **p)
479{ 673{
480 unsigned long status; 674 if (static_branch_likely(&vmw_balloon_batching)) {
481 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); 675 /* batching mode */
482 676 *p = pfn_to_page(b->batch_page[idx].pfn);
483 STATS_INC(b->stats.unlock[is_2m_pages]); 677 return b->batch_page[idx].status;
484 678 }
485 if (is_2m_pages)
486 status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
487 *target);
488 else
489 status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
490 *target);
491 679
492 if (vmballoon_check_status(b, status)) 680 /* non-batching mode */
493 return true; 681 *p = b->page;
494 682
495 pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status); 683 /*
496 STATS_INC(b->stats.unlock_fail[is_2m_pages]); 684 * If a failure occurs, the indication will be provided in the status
497 return false; 685 * of the entire operation, which is considered before the individual
686 * page status. So for non-batching mode, the indication is always of
687 * success.
688 */
689 return VMW_BALLOON_SUCCESS;
498} 690}
499 691
500static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page) 692/**
693 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
694 * @b: pointer to the balloon.
695 * @num_pages: number of inflated/deflated pages.
696 * @page_size: size of the page.
697 * @op: the type of operation (lock or unlock).
698 *
699 * Notify the host about page(s) that were ballooned (or removed from the
 700 * balloon) so that the host can use them without fear that the guest will
 701 * need them (or stop using them since the VM does). The host may reject some
 702 * pages; we need to check the return value and maybe submit a different page.
 703 * The pages that are inflated/deflated are pointed to by @b->page.
704 *
705 * Return: result as provided by the hypervisor.
706 */
707static unsigned long vmballoon_lock_op(struct vmballoon *b,
708 unsigned int num_pages,
709 enum vmballoon_page_size_type page_size,
710 enum vmballoon_op op)
501{ 711{
502 if (is_2m_page) 712 unsigned long cmd, pfn;
503 return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);
504 713
505 return alloc_page(flags); 714 lockdep_assert_held(&b->comm_lock);
506}
507 715
508static void vmballoon_free_page(struct page *page, bool is_2m_page) 716 if (static_branch_likely(&vmw_balloon_batching)) {
509{ 717 if (op == VMW_BALLOON_INFLATE)
510 if (is_2m_page) 718 cmd = page_size == VMW_BALLOON_2M_PAGE ?
511 __free_pages(page, VMW_BALLOON_2M_SHIFT); 719 VMW_BALLOON_CMD_BATCHED_2M_LOCK :
512 else 720 VMW_BALLOON_CMD_BATCHED_LOCK;
513 __free_page(page); 721 else
722 cmd = page_size == VMW_BALLOON_2M_PAGE ?
723 VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
724 VMW_BALLOON_CMD_BATCHED_UNLOCK;
725
726 pfn = PHYS_PFN(virt_to_phys(b->batch_page));
727 } else {
728 cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
729 VMW_BALLOON_CMD_UNLOCK;
730 pfn = page_to_pfn(b->page);
731
 732 /* In non-batching mode, PFNs must fit in 32 bits */
733 if (unlikely(pfn != (u32)pfn))
734 return VMW_BALLOON_ERROR_PPN_INVALID;
735 }
736
737 return vmballoon_cmd(b, cmd, pfn, num_pages);
514} 738}
515 739
516/* 740/**
517 * Quickly release all pages allocated for the balloon. This function is 741 * vmballoon_add_page - adds a page towards lock/unlock operation.
518 * called when host decides to "reset" balloon for one reason or another. 742 *
519 * Unlike normal "deflate" we do not (shall not) notify host of the pages 743 * @b: pointer to the balloon.
520 * being released. 744 * @idx: index of the page to be ballooned in this batch.
745 * @p: pointer to the page that is about to be ballooned.
746 *
747 * Adds the page to be ballooned. Must be called while holding @comm_lock.
521 */ 748 */
522static void vmballoon_pop(struct vmballoon *b) 749static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
750 struct page *p)
523{ 751{
524 struct page *page, *next; 752 lockdep_assert_held(&b->comm_lock);
525 unsigned is_2m_pages;
526
527 for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
528 is_2m_pages++) {
529 struct vmballoon_page_size *page_size =
530 &b->page_sizes[is_2m_pages];
531 u16 size_per_page = vmballoon_page_size(is_2m_pages);
532
533 list_for_each_entry_safe(page, next, &page_size->pages, lru) {
534 list_del(&page->lru);
535 vmballoon_free_page(page, is_2m_pages);
536 STATS_INC(b->stats.free[is_2m_pages]);
537 b->size -= size_per_page;
538 cond_resched();
539 }
540 }
541 753
542 /* Clearing the batch_page unconditionally has no adverse effect */ 754 if (static_branch_likely(&vmw_balloon_batching))
543 free_page((unsigned long)b->batch_page); 755 b->batch_page[idx] = (struct vmballoon_batch_entry)
544 b->batch_page = NULL; 756 { .pfn = page_to_pfn(p) };
757 else
758 b->page = p;
545} 759}
546 760
547/* 761/**
548 * Notify the host of a ballooned page. If host rejects the page put it on the 762 * vmballoon_lock - lock or unlock a batch of pages.
549 * refuse list, those refused page are then released at the end of the 763 *
550 * inflation cycle. 764 * @b: pointer to the balloon.
765 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
766 *
 767 * Notifies the host about ballooned pages (after inflation or deflation,
 768 * according to @ctl). If the host rejects a page, it is put on the
 769 * @ctl refuse list. These refused pages are then released when moving to the
770 * next size of pages.
771 *
 772 * Note that we neither free the pages here nor put them back on the ballooned
 773 * pages list. Instead we queue them for later processing. We do that for several
774 * reasons. First, we do not want to free the page under the lock. Second, it
775 * allows us to unify the handling of lock and unlock. In the inflate case, the
776 * caller will check if there are too many refused pages and release them.
777 * Although it is not identical to the past behavior, it should not affect
778 * performance.
551 */ 779 */
552static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, 780static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
553 bool is_2m_pages, unsigned int *target)
554{ 781{
555 int locked, hv_status; 782 unsigned long batch_status;
556 struct page *page = b->page; 783 struct page *page;
557 struct vmballoon_page_size *page_size = &b->page_sizes[false]; 784 unsigned int i, num_pages;
558
559 /* is_2m_pages can never happen as 2m pages support implies batching */
560
561 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
562 target);
563 if (locked) {
564 STATS_INC(b->stats.refused_alloc[false]);
565
566 if (locked == -EIO &&
567 (hv_status == VMW_BALLOON_ERROR_RESET ||
568 hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
569 vmballoon_free_page(page, false);
570 return -EIO;
571 }
572
573 /*
574 * Place page on the list of non-balloonable pages
575 * and retry allocation, unless we already accumulated
576 * too many of them, in which case take a breather.
577 */
578 if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
579 page_size->n_refused_pages++;
580 list_add(&page->lru, &page_size->refused_pages);
581 } else {
582 vmballoon_free_page(page, false);
583 }
584 return locked;
585 }
586
587 /* track allocated page */
588 list_add(&page->lru, &page_size->pages);
589 785
590 /* update balloon size */ 786 num_pages = ctl->n_pages;
591 b->size++; 787 if (num_pages == 0)
788 return 0;
592 789
593 return 0; 790 /* communication with the host is done under the communication lock */
594} 791 spin_lock(&b->comm_lock);
595 792
596static int vmballoon_lock_batched_page(struct vmballoon *b, 793 i = 0;
597 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 794 list_for_each_entry(page, &ctl->pages, lru)
598{ 795 vmballoon_add_page(b, i++, page);
599 int locked, i;
600 u16 size_per_page = vmballoon_page_size(is_2m_pages);
601 796
602 locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages, 797 batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
603 target); 798 ctl->op);
604 if (locked > 0) {
605 for (i = 0; i < num_pages; i++) {
606 u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
607 struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
608 799
609 vmballoon_free_page(p, is_2m_pages); 800 /*
610 } 801 * Iterate over the pages in the provided list. Since we are changing
802 * @ctl->n_pages we are saving the original value in @num_pages and
803 * use this value to bound the loop.
804 */
805 for (i = 0; i < num_pages; i++) {
806 unsigned long status;
611 807
612 return -EIO; 808 status = vmballoon_status_page(b, i, &page);
613 }
614 809
615 for (i = 0; i < num_pages; i++) { 810 /*
 616 u64 pa = vmballoon_batch_get_pa(b->batch_page, i); 811 * Failure of the whole batch overrides the individual page
 617 struct page *p = pfn_to_page(pa >> PAGE_SHIFT); 812 * results.
618 struct vmballoon_page_size *page_size = 813 */
619 &b->page_sizes[is_2m_pages]; 814 if (batch_status != VMW_BALLOON_SUCCESS)
815 status = batch_status;
620 816
621 locked = vmballoon_batch_get_status(b->batch_page, i); 817 /* Continue if no error happened */
818 if (!vmballoon_handle_one_result(b, page, ctl->page_size,
819 status))
820 continue;
622 821
623 switch (locked) { 822 /*
624 case VMW_BALLOON_SUCCESS: 823 * Error happened. Move the pages to the refused list and update
625 list_add(&p->lru, &page_size->pages); 824 * the pages number.
626 b->size += size_per_page; 825 */
627 break; 826 list_move(&page->lru, &ctl->refused_pages);
628 case VMW_BALLOON_ERROR_PPN_PINNED: 827 ctl->n_pages--;
629 case VMW_BALLOON_ERROR_PPN_INVALID: 828 ctl->n_refused_pages++;
630 if (page_size->n_refused_pages
631 < VMW_BALLOON_MAX_REFUSED) {
632 list_add(&p->lru, &page_size->refused_pages);
633 page_size->n_refused_pages++;
634 break;
635 }
636 /* Fallthrough */
637 case VMW_BALLOON_ERROR_RESET:
638 case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
639 vmballoon_free_page(p, is_2m_pages);
640 break;
641 default:
642 /* This should never happen */
643 WARN_ON_ONCE(true);
644 }
645 } 829 }
646 830
647 return 0; 831 spin_unlock(&b->comm_lock);
832
833 return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
648} 834}
649 835
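
The loop in vmballoon_lock() applies a simple precedence rule: a failed batch status overrides each page's individual status, and any failing page is moved to the refused list instead of being freed under the lock. Reduced to userspace pseudologic:

#include <stdio.h>

enum { SK_OK = 0, SK_ERR = 1 };

int main(void)
{
        int batch_status = SK_OK;
        int page_status[4] = { SK_OK, SK_ERR, SK_OK, SK_OK };
        int n_pages = 4, n_refused = 0;

        for (int i = 0; i < 4; i++) {
                int status = page_status[i];

                if (batch_status != SK_OK)
                        status = batch_status;  /* batch failure wins */
                if (status == SK_OK)
                        continue;               /* page stays ballooned */
                n_pages--;                      /* move it to the refused list */
                n_refused++;
        }
        printf("kept=%d refused=%d\n", n_pages, n_refused); /* kept=3 refused=1 */
        return 0;
}
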
650/* 836/**
651 * Release the page allocated for the balloon. Note that we first notify 837 * vmballoon_release_page_list() - Releases a page list
652 * the host so it can make sure the page will be available for the guest 838 *
653 * to use, if needed. 839 * @page_list: list of pages to release.
840 * @n_pages: pointer to the number of pages.
841 * @page_size: whether the pages in the list are 2MB (or else 4KB).
842 *
843 * Releases the list of pages and zeros the number of pages.
654 */ 844 */
655static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages, 845static void vmballoon_release_page_list(struct list_head *page_list,
656 bool is_2m_pages, unsigned int *target) 846 int *n_pages,
847 enum vmballoon_page_size_type page_size)
657{ 848{
658 struct page *page = b->page; 849 struct page *page, *tmp;
659 struct vmballoon_page_size *page_size = &b->page_sizes[false];
660
661 /* is_2m_pages can never happen as 2m pages support implies batching */
662 850
663 if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) { 851 list_for_each_entry_safe(page, tmp, page_list, lru) {
664 list_add(&page->lru, &page_size->pages); 852 list_del(&page->lru);
665 return -EIO; 853 __free_pages(page, vmballoon_page_order(page_size));
666 } 854 }
667 855
668 /* deallocate page */ 856 *n_pages = 0;
669 vmballoon_free_page(page, false); 857}
670 STATS_INC(b->stats.free[false]);
671 858
672 /* update balloon size */
673 b->size--;
674 859
675 return 0; 860/*
861 * Release pages that were allocated while attempting to inflate the
862 * balloon but were refused by the host for one reason or another.
863 */
864static void vmballoon_release_refused_pages(struct vmballoon *b,
865 struct vmballoon_ctl *ctl)
866{
867 vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
868 ctl->page_size);
869
870 vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
871 ctl->page_size);
676} 872}
677 873
678static int vmballoon_unlock_batched_page(struct vmballoon *b, 874/**
679 unsigned int num_pages, bool is_2m_pages, 875 * vmballoon_change - retrieve the required balloon change
680 unsigned int *target) 876 *
877 * @b: pointer for the balloon.
878 *
879 * Return: the required change for the balloon size. A positive number
880 * indicates inflation, a negative number indicates a deflation.
881 */
882static int64_t vmballoon_change(struct vmballoon *b)
681{ 883{
682 int locked, i, ret = 0; 884 int64_t size, target;
683 bool hv_success;
684 u16 size_per_page = vmballoon_page_size(is_2m_pages);
685 885
686 hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages, 886 size = atomic64_read(&b->size);
687 target); 887 target = READ_ONCE(b->target);
688 if (!hv_success)
689 ret = -EIO;
690 888
691 for (i = 0; i < num_pages; i++) { 889 /*
 692 u64 pa = vmballoon_batch_get_pa(b->batch_page, i); 890 * We must cast first because of int sizes.
 693 struct page *p = pfn_to_page(pa >> PAGE_SHIFT); 891 * Otherwise we might get huge positives instead of negatives.
694 struct vmballoon_page_size *page_size = 892 */
695 &b->page_sizes[is_2m_pages];
696 893
697 locked = vmballoon_batch_get_status(b->batch_page, i); 894 if (b->reset_required)
698 if (!hv_success || locked != VMW_BALLOON_SUCCESS) { 895 return 0;
699 /* 896
700 * That page wasn't successfully unlocked by the 897 /* consider a 2MB slack on deflate, unless the balloon is emptied */
701 * hypervisor, re-add it to the list of pages owned by 898 if (target < size && target != 0 &&
702 * the balloon driver. 899 size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
703 */ 900 return 0;
704 list_add(&p->lru, &page_size->pages);
705 } else {
706 /* deallocate page */
707 vmballoon_free_page(p, is_2m_pages);
708 STATS_INC(b->stats.free[is_2m_pages]);
709
710 /* update balloon size */
711 b->size -= size_per_page;
712 }
713 }
714 901
715 return ret; 902 return target - size;
716} 903}
717 904
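
The 2MB slack in vmballoon_change() avoids thrashing: a deflation smaller than one huge page (512 frames with 4KB base pages) is simply skipped, unless the target is zero and the balloon should empty. A plain C model of the decision:

#include <stdint.h>
#include <stdio.h>

#define HUGE_FRAMES 512         /* assumption: 2MB / 4KB */

static int64_t change(int64_t size, int64_t target, int reset_required)
{
        if (reset_required)
                return 0;
        if (target < size && target != 0 && size - target < HUGE_FRAMES)
                return 0;       /* within slack: do nothing */
        return target - size;   /* >0 inflate, <0 deflate */
}

int main(void)
{
        printf("%lld\n", (long long)change(1000, 900, 0));      /* 0: slack */
        printf("%lld\n", (long long)change(1000, 400, 0));      /* -600 */
        printf("%lld\n", (long long)change(400, 1000, 0));      /* 600 */
        return 0;
}
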
718/* 905/**
719 * Release pages that were allocated while attempting to inflate the 906 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
720 * balloon but were refused by the host for one reason or another. 907 *
908 * @b: pointer to balloon.
909 * @pages: list of pages to enqueue.
910 * @n_pages: pointer to number of pages in list. The value is zeroed.
911 * @page_size: whether the pages are 2MB or 4KB pages.
912 *
 913 * Enqueues the provided list of pages in the ballooned page list, clears the
914 * list and zeroes the number of pages that was provided.
721 */ 915 */
722static void vmballoon_release_refused_pages(struct vmballoon *b, 916static void vmballoon_enqueue_page_list(struct vmballoon *b,
723 bool is_2m_pages) 917 struct list_head *pages,
918 unsigned int *n_pages,
919 enum vmballoon_page_size_type page_size)
724{ 920{
725 struct page *page, *next; 921 struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
726 struct vmballoon_page_size *page_size =
727 &b->page_sizes[is_2m_pages];
728 922
729 list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) { 923 list_splice_init(pages, &page_size_info->pages);
730 list_del(&page->lru); 924 *n_pages = 0;
731 vmballoon_free_page(page, is_2m_pages);
732 STATS_INC(b->stats.refused_free[is_2m_pages]);
733 }
734
735 page_size->n_refused_pages = 0;
736} 925}
737 926
738static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p) 927/**
928 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
929 *
930 * @b: pointer to balloon.
 931 * @pages: list to which the dequeued pages are moved.
932 * @n_pages: pointer to number of pages in list. The value is zeroed.
933 * @page_size: whether the pages are 2MB or 4KB pages.
934 * @n_req_pages: the number of requested pages.
935 *
936 * Dequeues the number of requested pages from the balloon for deflation. The
937 * number of dequeued pages may be lower, if not enough pages in the requested
938 * size are available.
939 */
940static void vmballoon_dequeue_page_list(struct vmballoon *b,
941 struct list_head *pages,
942 unsigned int *n_pages,
943 enum vmballoon_page_size_type page_size,
944 unsigned int n_req_pages)
739{ 945{
740 b->page = p; 946 struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
741} 947 struct page *page, *tmp;
948 unsigned int i = 0;
742 949
743static void vmballoon_add_batched_page(struct vmballoon *b, int idx, 950 list_for_each_entry_safe(page, tmp, &page_size_info->pages, lru) {
744 struct page *p) 951 list_move(&page->lru, pages);
745{ 952 if (++i == n_req_pages)
746 vmballoon_batch_set_pa(b->batch_page, idx, 953 break;
747 (u64)page_to_pfn(p) << PAGE_SHIFT); 954 }
955 *n_pages = i;
748} 956}
749 957
750/* 958/**
751 * Inflate the balloon towards its target size. Note that we try to limit 959 * vmballoon_inflate() - Inflate the balloon towards its target size.
752 * the rate of allocation to make sure we are not choking the rest of the 960 *
753 * system. 961 * @b: pointer to the balloon.
754 */ 962 */
755static void vmballoon_inflate(struct vmballoon *b) 963static void vmballoon_inflate(struct vmballoon *b)
756{ 964{
757 unsigned int num_pages = 0; 965 int64_t to_inflate_frames;
758 int error = 0; 966 struct vmballoon_ctl ctl = {
759 gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP; 967 .pages = LIST_HEAD_INIT(ctl.pages),
760 bool is_2m_pages; 968 .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
969 .page_size = b->max_page_size,
970 .op = VMW_BALLOON_INFLATE
971 };
761 972
762 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); 973 while ((to_inflate_frames = vmballoon_change(b)) > 0) {
974 unsigned int to_inflate_pages, page_in_frames;
975 int alloc_error, lock_error = 0;
763 976
764 /* 977 VM_BUG_ON(!list_empty(&ctl.pages));
765 * First try NOSLEEP page allocations to inflate balloon. 978 VM_BUG_ON(ctl.n_pages != 0);
766 *
767 * If we do not throttle nosleep allocations, we can drain all
768 * free pages in the guest quickly (if the balloon target is high).
769 * As a side-effect, draining free pages helps to inform (force)
770 * the guest to start swapping if balloon target is not met yet,
771 * which is a desired behavior. However, balloon driver can consume
772 * all available CPU cycles if too many pages are allocated in a
773 * second. Therefore, we throttle nosleep allocations even when
774 * the guest is not under memory pressure. OTOH, if we have already
775 * predicted that the guest is under memory pressure, then we
776 * slowdown page allocations considerably.
777 */
778 979
779 /* 980 page_in_frames = vmballoon_page_in_frames(ctl.page_size);
780 * Start with no sleep allocation rate which may be higher
781 * than sleeping allocation rate.
782 */
783 is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
784 981
785 pr_debug("%s - goal: %d", __func__, b->target - b->size); 982 to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
983 DIV_ROUND_UP_ULL(to_inflate_frames,
984 page_in_frames));
786 985
787 while (!b->reset_required && 986 /* Start by allocating */
788 b->size + num_pages * vmballoon_page_size(is_2m_pages) 987 alloc_error = vmballoon_alloc_page_list(b, &ctl,
789 < b->target) { 988 to_inflate_pages);
790 struct page *page;
791 989
792 if (flags == VMW_PAGE_ALLOC_NOSLEEP) 990 /* Actually lock the pages by telling the hypervisor */
793 STATS_INC(b->stats.alloc[is_2m_pages]); 991 lock_error = vmballoon_lock(b, &ctl);
794 else 992
795 STATS_INC(b->stats.sleep_alloc); 993 /*
796 994 * If an error indicates that something serious went wrong,
797 page = vmballoon_alloc_page(flags, is_2m_pages); 995 * stop the inflation.
798 if (!page) { 996 */
799 STATS_INC(b->stats.alloc_fail[is_2m_pages]); 997 if (lock_error)
800 998 break;
801 if (is_2m_pages) { 999
802 b->ops->lock(b, num_pages, true, &b->target); 1000 /* Update the balloon size */
803 1001 atomic64_add(ctl.n_pages * page_in_frames, &b->size);
804 /* 1002
805 * ignore errors from locking as we now switch 1003 vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
806 * to 4k pages and we might get different 1004 ctl.page_size);
807 * errors. 1005
808 */ 1006 /*
809 1007 * If allocation failed or the number of refused pages exceeds
810 num_pages = 0; 1008 * the maximum allowed, move to the next page size.
811 is_2m_pages = false; 1009 */
812 continue; 1010 if (alloc_error ||
813 } 1011 ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
814 1012 if (ctl.page_size == VMW_BALLOON_4K_PAGE)
815 if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
816 /*
817 * CANSLEEP page allocation failed, so guest
818 * is under severe memory pressure. We just log
819 * the event, but do not stop the inflation
820 * due to its negative impact on performance.
821 */
822 STATS_INC(b->stats.sleep_alloc_fail);
823 break; 1013 break;
824 }
825 1014
826 /* 1015 /*
827 * NOSLEEP page allocation failed, so the guest is 1016 * Ignore errors from locking as we now switch to 4k
828 * under memory pressure. Slowing down page alloctions 1017 * pages and we might get different errors.
829 * seems to be reasonable, but doing so might actually
830 * cause the hypervisor to throttle us down, resulting
831 * in degraded performance. We will count on the
832 * scheduler and standard memory management mechanisms
833 * for now.
834 */ 1018 */
835 flags = VMW_PAGE_ALLOC_CANSLEEP; 1019 vmballoon_release_refused_pages(b, &ctl);
836 continue; 1020 ctl.page_size--;
837 }
838
839 b->ops->add_page(b, num_pages++, page);
840 if (num_pages == b->batch_max_pages) {
841 error = b->ops->lock(b, num_pages, is_2m_pages,
842 &b->target);
843 num_pages = 0;
844 if (error)
845 break;
846 } 1021 }
847 1022
848 cond_resched(); 1023 cond_resched();
849 } 1024 }
850 1025
851 if (num_pages > 0) 1026 /*
852 b->ops->lock(b, num_pages, is_2m_pages, &b->target); 1027 * Release pages that were allocated while attempting to inflate the
853 1028 * balloon but were refused by the host for one reason or another,
854 vmballoon_release_refused_pages(b, true); 1029 * and update the statistics.
855 vmballoon_release_refused_pages(b, false); 1030 */
1031 if (ctl.n_refused_pages != 0)
1032 vmballoon_release_refused_pages(b, &ctl);
856} 1033}
857 1034
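
The shape of the inflation loop is: size each chunk by batch_max_pages, try 2MB pages first, and fall back to 4KB pages once allocations fail or refusals pile up. A loose userspace model of that fallback, where a failing 2MB "allocator" triggers the switch:

#include <stdio.h>

#define SK_BATCH_MAX    512     /* chunk limit, like batch_max_pages */

/* mock allocator: pretend 2MB pages are exhausted, 4KB pages are not */
static int sk_alloc(int huge, int want)
{
        return huge ? 0 : want;
}

int main(void)
{
        int huge = 1;                   /* start with the largest size */
        long remaining = 1024;          /* frames still to inflate */

        while (remaining > 0) {
                int frames = huge ? 512 : 1;
                int want = (remaining + frames - 1) / frames;
                int got;

                if (want > SK_BATCH_MAX)
                        want = SK_BATCH_MAX;
                got = sk_alloc(huge, want);
                remaining -= (long)got * frames;
                printf("size=%s want=%d got=%d remaining=%ld\n",
                       huge ? "2M" : "4k", want, got, remaining);

                if (got < want) {       /* failure: fall back or give up */
                        if (!huge)
                                break;
                        huge = 0;
                }
        }
        return 0;
}
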
858/*
859 * Decrease the size of the balloon allowing guest to use more memory.
860 */
861static void vmballoon_deflate(struct vmballoon *b)
862{
863	unsigned is_2m_pages;
864
865	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
866
867	/* free pages to reach target */
868	for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
869			is_2m_pages++) {
870		struct page *page, *next;
871		unsigned int num_pages = 0;
872		struct vmballoon_page_size *page_size =
873				&b->page_sizes[is_2m_pages];
874
875		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
876			if (b->reset_required ||
877				(b->target > 0 &&
878					b->size - num_pages
879					* vmballoon_page_size(is_2m_pages)
880				< b->target + vmballoon_page_size(true)))
881				break;
1035/**
1036 * vmballoon_deflate() - Decrease the size of the balloon.
1037 *
1038 * @b: pointer to the balloon
1039 * @n_frames: the number of frames to deflate. If zero, automatically
1040 *            calculated according to the target size.
1041 * @coordinated: whether to coordinate with the host
1042 *
1043 * Decrease the size of the balloon allowing guest to use more memory.
1044 *
1045 * Return: The number of deflated frames (i.e., basic page size units)
1046 */
1047static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
1048				       bool coordinated)
1049{
1050	unsigned long deflated_frames = 0;
1051	unsigned long tried_frames = 0;
1052	struct vmballoon_ctl ctl = {
1053		.pages = LIST_HEAD_INIT(ctl.pages),
1054		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1055		.page_size = VMW_BALLOON_4K_PAGE,
1056		.op = VMW_BALLOON_DEFLATE
1057	};
1058
1059	/* free pages to reach target */
1060	while (true) {
1061		unsigned int to_deflate_pages, n_unlocked_frames;
1062		unsigned int page_in_frames;
1063		int64_t to_deflate_frames;
1064		bool deflated_all;
1065
1066		page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1067
1068		VM_BUG_ON(!list_empty(&ctl.pages));
1069		VM_BUG_ON(ctl.n_pages);
1070		VM_BUG_ON(!list_empty(&ctl.refused_pages));
1071		VM_BUG_ON(ctl.n_refused_pages);
1072
1073		/*
1074 * If we were requested a specific number of frames, we try to
1075 * deflate this number of frames. Otherwise, deflation is
1076 * performed according to the target and balloon size.
1077 */
1078 to_deflate_frames = n_frames ? n_frames - tried_frames :
1079 -vmballoon_change(b);
1080
1081 /* break if no work to do */
1082 if (to_deflate_frames <= 0)
1083 break;
1084
1085 /*
1086 * Calculate the number of frames based on current page size,
1087 * but limit the deflated frames to a single chunk
1088 */
1089 to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
1090 DIV_ROUND_UP_ULL(to_deflate_frames,
1091 page_in_frames));
1092
1093 /* First take the pages from the balloon pages. */
1094 vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
1095 ctl.page_size, to_deflate_pages);
1096
1097 /*
1098		 * Before pages are moved to the refused list, count their
1099 * frames as frames that we tried to deflate.
1100 */
1101 tried_frames += ctl.n_pages * page_in_frames;
1102
1103 /*
1104 * Unlock the pages by communicating with the hypervisor if the
1105 * communication is coordinated (i.e., not pop). We ignore the
1106		 * return code. Instead we check whether we managed to unlock
1107		 * all the pages. If we failed, we will move to the next page
1108		 * size, and will eventually try again later.
1109 */
1110 if (coordinated)
1111 vmballoon_lock(b, &ctl);
1112
1113 /*
1114 * Check if we deflated enough. We will move to the next page
1115 * size if we did not manage to do so. This calculation takes
1116 * place now, as once the pages are released, the number of
1117 * pages is zeroed.
1118 */
1119 deflated_all = (ctl.n_pages == to_deflate_pages);
1120
1121 /* Update local and global counters */
1122 n_unlocked_frames = ctl.n_pages * page_in_frames;
1123 atomic64_sub(n_unlocked_frames, &b->size);
1124 deflated_frames += n_unlocked_frames;
882
883			list_del(&page->lru);
884			b->ops->add_page(b, num_pages++, page);
885
886			if (num_pages == b->batch_max_pages) {
887				int error;
888
889				error = b->ops->unlock(b, num_pages,
890						is_2m_pages, &b->target);
891				num_pages = 0;
892				if (error)
893					return;
894			}
895
896			cond_resched();
897		}
898
899		if (num_pages > 0)
900			b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
901	}
902}
903
904static const struct vmballoon_ops vmballoon_basic_ops = {
905	.add_page = vmballoon_add_page,
906	.lock = vmballoon_lock_page,
907	.unlock = vmballoon_unlock_page
908};
909
910static const struct vmballoon_ops vmballoon_batched_ops = {
911	.add_page = vmballoon_add_batched_page,
912	.lock = vmballoon_lock_batched_page,
913	.unlock = vmballoon_unlock_batched_page
914};
1125
1126		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
1127					 ctl.page_size, ctl.n_pages);
1128
1129		/* free the ballooned pages */
1130		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
1131					    ctl.page_size);
1132
1133		/* Return the refused pages to the ballooned list. */
1134		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
1135					    &ctl.n_refused_pages,
1136					    ctl.page_size);
1137
1138		/* If we failed to unlock all the pages, move to next size. */
1139		if (!deflated_all) {
1140			if (ctl.page_size == b->max_page_size)
1141				break;
1142			ctl.page_size++;
1143		}
1144
1145		cond_resched();
1146	}
1147
1148	return deflated_frames;
1149}
1150
1151/**
1152 * vmballoon_deinit_batching - disables batching mode.
1153 *
1154 * @b: pointer to &struct vmballoon.
1155 *
1156 * Disables batching, by deallocating the page for communication with the
1157 * hypervisor and disabling the static key to indicate that batching is off.
1158 */
1159static void vmballoon_deinit_batching(struct vmballoon *b)
1160{
1161 free_page((unsigned long)b->batch_page);
1162 b->batch_page = NULL;
1163 static_branch_disable(&vmw_balloon_batching);
1164 b->batch_max_pages = 1;
1165}
915 1166
916static bool vmballoon_init_batching(struct vmballoon *b)
1167/**
1168 * vmballoon_init_batching - enable batching mode.
1169 *
1170 * @b: pointer to &struct vmballoon.
1171 *
1172 * Enables batching, by allocating a page for communication with the hypervisor
1173 * and enabling the static_key to use batching.
1174 *
1175 * Return: zero on success or an appropriate error-code.
1176 */
1177static int vmballoon_init_batching(struct vmballoon *b)
917{ 1178{
918 struct page *page; 1179 struct page *page;
919 1180
920 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 1181 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
921 if (!page) 1182 if (!page)
922 return false; 1183 return -ENOMEM;
923 1184
924 b->batch_page = page_address(page); 1185 b->batch_page = page_address(page);
925 return true; 1186 b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
1187
1188 static_branch_enable(&vmw_balloon_batching);
1189
1190 return 0;
926} 1191}
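
The init/deinit pair above replaces the old vmballoon_ops dispatch with a static key, so the hot path pays one patched jump instead of an indirect call. A minimal sketch of the dispatch this enables, assuming the key name and batch-entry layout from this patch rather than any stable API:

	#include <linux/jump_label.h>
	#include <linux/mm.h>

	static DEFINE_STATIC_KEY_FALSE(vmw_balloon_batching);

	struct vmballoon_batch_entry {
		u64 pfn : 52;			/* one slot per staged page */
	};

	struct vmballoon {
		struct vmballoon_batch_entry *batch_page;
		struct page *page;
		/* ... remaining fields as in the patch ... */
	};

	static void vmballoon_add_page(struct vmballoon *b,
				       unsigned int idx, struct page *p)
	{
		if (static_branch_likely(&vmw_balloon_batching))
			/* batching on: stage the PFN in the shared page */
			b->batch_page[idx] = (struct vmballoon_batch_entry)
						{ .pfn = page_to_pfn(p) };
		else
			/* batching off: hand a single page to the monitor */
			b->page = p;
	}
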
927 1192
928/* 1193/*
@@ -932,7 +1197,7 @@ static void vmballoon_doorbell(void *client_data)
932{ 1197{
933 struct vmballoon *b = client_data; 1198 struct vmballoon *b = client_data;
934 1199
935 STATS_INC(b->stats.doorbell); 1200 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
936 1201
937 mod_delayed_work(system_freezable_wq, &b->dwork, 0); 1202 mod_delayed_work(system_freezable_wq, &b->dwork, 0);
938} 1203}
@@ -942,11 +1207,8 @@ static void vmballoon_doorbell(void *client_data)
942 */ 1207 */
943static void vmballoon_vmci_cleanup(struct vmballoon *b) 1208static void vmballoon_vmci_cleanup(struct vmballoon *b)
944{ 1209{
945 int error; 1210 vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
946 1211 VMCI_INVALID_ID, VMCI_INVALID_ID);
947 VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
948 VMCI_INVALID_ID, error);
949 STATS_INC(b->stats.doorbell_unset);
950 1212
951 if (!vmci_handle_is_invalid(b->vmci_doorbell)) { 1213 if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
952 vmci_doorbell_destroy(b->vmci_doorbell); 1214 vmci_doorbell_destroy(b->vmci_doorbell);
@@ -954,12 +1216,19 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
954 } 1216 }
955} 1217}
956 1218
957/* 1219/**
958 * Initialize vmci doorbell, to get notified as soon as balloon changes 1220 * vmballoon_vmci_init - Initialize vmci doorbell.
1221 *
1222 * @b: pointer to the balloon.
1223 *
1224 * Return: zero on success, or when the wakeup command is not supported;
1225 * an error code otherwise.
1226 *
1227 * Initialize vmci doorbell, to get notified as soon as balloon changes.
959 */ 1228 */
960static int vmballoon_vmci_init(struct vmballoon *b) 1229static int vmballoon_vmci_init(struct vmballoon *b)
961{ 1230{
962 unsigned long error, dummy; 1231 unsigned long error;
963 1232
964 if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0) 1233 if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
965 return 0; 1234 return 0;
@@ -971,10 +1240,9 @@ static int vmballoon_vmci_init(struct vmballoon *b)
971 if (error != VMCI_SUCCESS) 1240 if (error != VMCI_SUCCESS)
972 goto fail; 1241 goto fail;
973 1242
974 error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context, 1243 error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
975 b->vmci_doorbell.resource, dummy); 1244 b->vmci_doorbell.context,
976 1245 b->vmci_doorbell.resource, NULL);
977 STATS_INC(b->stats.doorbell_set);
978 1246
979 if (error != VMW_BALLOON_SUCCESS) 1247 if (error != VMW_BALLOON_SUCCESS)
980 goto fail; 1248 goto fail;
@@ -985,6 +1253,23 @@ fail:
985 return -EIO; 1253 return -EIO;
986} 1254}
987 1255
1256/**
1257 * vmballoon_pop - Quickly release all pages allocated for the balloon.
1258 *
1259 * @b: pointer to the balloon.
1260 *
1261 * This function is called when the host decides to "reset" the balloon for
1262 * one reason or another. Unlike a normal "deflate" we do not (and shall not)
1263 * notify the host of the pages being released.
1264 */
1265static void vmballoon_pop(struct vmballoon *b)
1266{
1267 unsigned long size;
1268
1269 while ((size = atomic64_read(&b->size)))
1270 vmballoon_deflate(b, size, false);
1271}
1272
988/* 1273/*
989 * Perform standard reset sequence by popping the balloon (in case it 1274 * Perform standard reset sequence by popping the balloon (in case it
990 * is not empty) and then restarting protocol. This operation normally 1275 * is not empty) and then restarting protocol. This operation normally
@@ -994,18 +1279,18 @@ static void vmballoon_reset(struct vmballoon *b)
994{ 1279{
995 int error; 1280 int error;
996 1281
1282 down_write(&b->conf_sem);
1283
997 vmballoon_vmci_cleanup(b); 1284 vmballoon_vmci_cleanup(b);
998 1285
999 /* free all pages, skipping monitor unlock */ 1286 /* free all pages, skipping monitor unlock */
1000 vmballoon_pop(b); 1287 vmballoon_pop(b);
1001 1288
1002 if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES)) 1289 if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
1003 return; 1290 return;
1004 1291
1005 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) { 1292 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
1006 b->ops = &vmballoon_batched_ops; 1293 if (vmballoon_init_batching(b)) {
1007 b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
1008 if (!vmballoon_init_batching(b)) {
1009 /* 1294 /*
1010 * We failed to initialize batching, inform the monitor 1295 * We failed to initialize batching, inform the monitor
1011 * about it by sending a null capability. 1296 * about it by sending a null capability.
@@ -1016,52 +1301,70 @@ static void vmballoon_reset(struct vmballoon *b)
1016 return; 1301 return;
1017 } 1302 }
1018 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) { 1303 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
1019 b->ops = &vmballoon_basic_ops; 1304 vmballoon_deinit_batching(b);
1020 b->batch_max_pages = 1;
1021 } 1305 }
1022 1306
1307 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
1023 b->reset_required = false; 1308 b->reset_required = false;
1024 1309
1025 error = vmballoon_vmci_init(b); 1310 error = vmballoon_vmci_init(b);
1026 if (error) 1311 if (error)
1027 pr_err("failed to initialize vmci doorbell\n"); 1312 pr_err("failed to initialize vmci doorbell\n");
1028 1313
1029 if (!vmballoon_send_guest_id(b)) 1314 if (vmballoon_send_guest_id(b))
1030 pr_err("failed to send guest ID to the host\n"); 1315 pr_err("failed to send guest ID to the host\n");
1316
1317 up_write(&b->conf_sem);
1031} 1318}
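
The reset path now takes b->conf_sem for writing while the worker takes it for reading, so reconfiguration excludes inflation and deflation without serializing readers against each other. The bare pattern, with invented names:

	#include <linux/rwsem.h>

	struct balloon_cfg {
		struct rw_semaphore conf_sem;	/* init_rwsem() at probe */
	};

	static void balloon_reconfigure(struct balloon_cfg *c)
	{
		down_write(&c->conf_sem);	/* excludes every fast path */
		/* tear down and renegotiate capabilities here */
		up_write(&c->conf_sem);
	}

	static void balloon_fast_path(struct balloon_cfg *c)
	{
		down_read(&c->conf_sem);	/* concurrent readers allowed */
		/* inflate/deflate against a stable configuration */
		up_read(&c->conf_sem);
	}
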
1032 1319
1033/* 1320/**
1034 * Balloon work function: reset protocol, if needed, get the new size and 1321 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
1035 * adjust balloon as needed. Repeat in 1 sec. 1322 *
1323 * @work: pointer to the &work_struct which is provided by the workqueue.
1324 *
1325 * Resets the protocol if needed, gets the new size and adjusts balloon as
1326 * needed. Repeat in 1 sec.
1036 */ 1327 */
1037static void vmballoon_work(struct work_struct *work) 1328static void vmballoon_work(struct work_struct *work)
1038{ 1329{
1039 struct delayed_work *dwork = to_delayed_work(work); 1330 struct delayed_work *dwork = to_delayed_work(work);
1040 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork); 1331 struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
1041 unsigned int target; 1332 int64_t change = 0;
1042
1043 STATS_INC(b->stats.timer);
1044 1333
1045 if (b->reset_required) 1334 if (b->reset_required)
1046 vmballoon_reset(b); 1335 vmballoon_reset(b);
1047 1336
1048 if (!b->reset_required && vmballoon_send_get_target(b, &target)) { 1337 down_read(&b->conf_sem);
1049 /* update target, adjust size */ 1338
1050 b->target = target; 1339 /*
1340 * Update the stats while holding the semaphore to ensure that
1341 * @stats_enabled is consistent with whether the stats are actually
1342 * enabled
1343 */
1344 vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
1345
1346 if (!vmballoon_send_get_target(b))
1347 change = vmballoon_change(b);
1348
1349 if (change != 0) {
1350 pr_debug("%s - size: %llu, target %lu\n", __func__,
1351 atomic64_read(&b->size), READ_ONCE(b->target));
1051 1352
1052 if (b->size < target) 1353 if (change > 0)
1053 vmballoon_inflate(b); 1354 vmballoon_inflate(b);
1054 else if (target == 0 || 1355 else /* (change < 0) */
1055 b->size > target + vmballoon_page_size(true)) 1356 vmballoon_deflate(b, 0, true);
1056 vmballoon_deflate(b);
1057 } 1357 }
1058 1358
1359 up_read(&b->conf_sem);
1360
1059 /* 1361 /*
1060 * We are using a freezable workqueue so that balloon operations are 1362 * We are using a freezable workqueue so that balloon operations are
1061 * stopped while the system transitions to/from sleep/hibernation. 1363 * stopped while the system transitions to/from sleep/hibernation.
1062 */ 1364 */
1063 queue_delayed_work(system_freezable_wq, 1365 queue_delayed_work(system_freezable_wq,
1064 dwork, round_jiffies_relative(HZ)); 1366 dwork, round_jiffies_relative(HZ));
1367
1065} 1368}
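
vmballoon_work() is self-rearming: each run queues the next about a second out on the freezable workqueue, so balloon traffic stops across suspend and resume. The skeleton of that idiom (my_work_fn and my_dwork are invented names):

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static void my_work_fn(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);

		/* ... one round of periodic work ... */

		/* round_jiffies_relative() lets the kernel coalesce
		 * ~1 s timers from many subsystems into one wakeup */
		queue_delayed_work(system_freezable_wq, dwork,
				   round_jiffies_relative(HZ));
	}

	static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

	/* start: queue_delayed_work(system_freezable_wq, &my_dwork, 0); */
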
1066 1369
1067/* 1370/*
@@ -1069,64 +1372,100 @@ static void vmballoon_work(struct work_struct *work)
1069 */ 1372 */
1070#ifdef CONFIG_DEBUG_FS 1373#ifdef CONFIG_DEBUG_FS
1071 1374
1375static const char * const vmballoon_stat_page_names[] = {
1376 [VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc",
1377 [VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail",
1378 [VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc",
1379 [VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree",
1380 [VMW_BALLOON_PAGE_STAT_FREE] = "free"
1381};
1382
1383static const char * const vmballoon_stat_names[] = {
1384 [VMW_BALLOON_STAT_TIMER] = "timer",
1385 [VMW_BALLOON_STAT_DOORBELL] = "doorbell",
1386 [VMW_BALLOON_STAT_RESET] = "reset",
1387};
1388
1389static int vmballoon_enable_stats(struct vmballoon *b)
1390{
1391 int r = 0;
1392
1393 down_write(&b->conf_sem);
1394
1395 /* did we somehow race with another reader which enabled stats? */
1396 if (b->stats)
1397 goto out;
1398
1399 b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
1400
1401 if (!b->stats) {
1402 /* allocation failed */
1403 r = -ENOMEM;
1404 goto out;
1405 }
1406 static_key_enable(&balloon_stat_enabled.key);
1407out:
1408 up_write(&b->conf_sem);
1409 return r;
1410}
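
vmballoon_enable_stats() allocates the counters lazily and turns on a static key, so workloads that never read the debugfs file never pay for accounting. The increment helpers used throughout the patch presumably test the same key; a hedged guess at their shape (not taken from the patch itself):

	static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
						   enum vmballoon_stat stat)
	{
		/* a patched-out jump while stats stay disabled */
		if (static_branch_unlikely(&balloon_stat_enabled))
			atomic64_inc(&b->stats->general_stat[stat]);
	}
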
1411
1412/**
1413 * vmballoon_debug_show - shows statistics of balloon operations.
1414 * @f: pointer to the &struct seq_file.
1415 * @offset: ignored.
1416 *
1417 * Provides the statistics that can be accessed via the vmmemctl file in debugfs.
1418 * To avoid the overhead - mainly that of memory - of collecting the statistics,
1419 * we only collect statistics after the first time the counters are read.
1420 *
1421 * Return: zero on success or an error code.
1422 */
1072static int vmballoon_debug_show(struct seq_file *f, void *offset) 1423static int vmballoon_debug_show(struct seq_file *f, void *offset)
1073{ 1424{
1074 struct vmballoon *b = f->private; 1425 struct vmballoon *b = f->private;
1075 struct vmballoon_stats *stats = &b->stats; 1426 int i, j;
1427
1428 /* enables stats if they are disabled */
1429 if (!b->stats) {
1430 int r = vmballoon_enable_stats(b);
1431
1432 if (r)
1433 return r;
1434 }
1076 1435
1077 /* format capabilities info */ 1436 /* format capabilities info */
1078 seq_printf(f, 1437 seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
1079 "balloon capabilities: %#4x\n" 1438 VMW_BALLOON_CAPABILITIES);
1080 "used capabilities: %#4lx\n" 1439 seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
1081 "is resetting: %c\n", 1440 seq_printf(f, "%-22s: %16s\n", "is resetting",
1082 VMW_BALLOON_CAPABILITIES, b->capabilities, 1441 b->reset_required ? "y" : "n");
1083 b->reset_required ? 'y' : 'n');
1084 1442
1085 /* format size info */ 1443 /* format size info */
1086 seq_printf(f, 1444 seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
1087 "target: %8d pages\n" 1445 seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
1088 "current: %8d pages\n", 1446
1089 b->target, b->size); 1447 for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
1090 1448 if (vmballoon_cmd_names[i] == NULL)
1091 seq_printf(f, 1449 continue;
1092 "\n" 1450
1093 "timer: %8u\n" 1451 seq_printf(f, "%-22s: %16llu (%llu failed)\n",
1094 "doorbell: %8u\n" 1452 vmballoon_cmd_names[i],
1095 "start: %8u (%4u failed)\n" 1453 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
1096 "guestType: %8u (%4u failed)\n" 1454 atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
1097 "2m-lock: %8u (%4u failed)\n" 1455 }
1098 "lock: %8u (%4u failed)\n" 1456
1099 "2m-unlock: %8u (%4u failed)\n" 1457 for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
1100 "unlock: %8u (%4u failed)\n" 1458 seq_printf(f, "%-22s: %16llu\n",
1101 "target: %8u (%4u failed)\n" 1459 vmballoon_stat_names[i],
1102 "prim2mAlloc: %8u (%4u failed)\n" 1460 atomic64_read(&b->stats->general_stat[i]));
1103 "primNoSleepAlloc: %8u (%4u failed)\n" 1461
1104 "primCanSleepAlloc: %8u (%4u failed)\n" 1462 for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
1105 "prim2mFree: %8u\n" 1463 for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
1106 "primFree: %8u\n" 1464 seq_printf(f, "%-18s(%s): %16llu\n",
1107 "err2mAlloc: %8u\n" 1465 vmballoon_stat_page_names[i],
1108 "errAlloc: %8u\n" 1466 vmballoon_page_size_names[j],
1109 "err2mFree: %8u\n" 1467 atomic64_read(&b->stats->page_stat[i][j]));
1110 "errFree: %8u\n" 1468 }
1111 "doorbellSet: %8u\n"
1112 "doorbellUnset: %8u\n",
1113 stats->timer,
1114 stats->doorbell,
1115 stats->start, stats->start_fail,
1116 stats->guest_type, stats->guest_type_fail,
1117 stats->lock[true], stats->lock_fail[true],
1118 stats->lock[false], stats->lock_fail[false],
1119 stats->unlock[true], stats->unlock_fail[true],
1120 stats->unlock[false], stats->unlock_fail[false],
1121 stats->target, stats->target_fail,
1122 stats->alloc[true], stats->alloc_fail[true],
1123 stats->alloc[false], stats->alloc_fail[false],
1124 stats->sleep_alloc, stats->sleep_alloc_fail,
1125 stats->free[true],
1126 stats->free[false],
1127 stats->refused_alloc[true], stats->refused_alloc[false],
1128 stats->refused_free[true], stats->refused_free[false],
1129 stats->doorbell_set, stats->doorbell_unset);
1130 1469
1131 return 0; 1470 return 0;
1132} 1471}
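
A seq_file show() routine like this is typically exposed through the stock single_open() glue; one plausible wiring uses the standard DEFINE_SHOW_ATTRIBUTE() helper (the "vmmemctl" file name matches the driver's traditional one, the rest is boilerplate):

	DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);	/* builds vmballoon_debug_fops */

	static int __init vmballoon_debugfs_init(struct vmballoon *b)
	{
		b->dbg_entry = debugfs_create_file("vmmemctl", 0444, NULL, b,
						   &vmballoon_debug_fops);
		return PTR_ERR_OR_ZERO(b->dbg_entry);
	}
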
@@ -1161,7 +1500,10 @@ static int __init vmballoon_debugfs_init(struct vmballoon *b)
1161 1500
1162static void __exit vmballoon_debugfs_exit(struct vmballoon *b) 1501static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
1163{ 1502{
1503 static_key_disable(&balloon_stat_enabled.key);
1164 debugfs_remove(b->dbg_entry); 1504 debugfs_remove(b->dbg_entry);
1505 kfree(b->stats);
1506 b->stats = NULL;
1165} 1507}
1166 1508
1167#else 1509#else
@@ -1179,8 +1521,9 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
1179 1521
1180static int __init vmballoon_init(void) 1522static int __init vmballoon_init(void)
1181{ 1523{
1524 enum vmballoon_page_size_type page_size;
1182 int error; 1525 int error;
1183 unsigned is_2m_pages; 1526
1184 /* 1527 /*
1185 * Check if we are running on VMware's hypervisor and bail out 1528 * Check if we are running on VMware's hypervisor and bail out
1186 * if we are not. 1529 * if we are not.
@@ -1188,11 +1531,10 @@ static int __init vmballoon_init(void)
1188 if (x86_hyper_type != X86_HYPER_VMWARE) 1531 if (x86_hyper_type != X86_HYPER_VMWARE)
1189 return -ENODEV; 1532 return -ENODEV;
1190 1533
1191 for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES; 1534 for (page_size = VMW_BALLOON_4K_PAGE;
1192 is_2m_pages++) { 1535 page_size <= VMW_BALLOON_LAST_SIZE; page_size++)
1193 INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages); 1536 INIT_LIST_HEAD(&balloon.page_sizes[page_size].pages);
1194 INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages); 1537
1195 }
1196 1538
1197 INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); 1539 INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
1198 1540
@@ -1200,6 +1542,8 @@ static int __init vmballoon_init(void)
1200 if (error) 1542 if (error)
1201 return error; 1543 return error;
1202 1544
1545 spin_lock_init(&balloon.comm_lock);
1546 init_rwsem(&balloon.conf_sem);
1203 balloon.vmci_doorbell = VMCI_INVALID_HANDLE; 1547 balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
1204 balloon.batch_page = NULL; 1548 balloon.batch_page = NULL;
1205 balloon.page = NULL; 1549 balloon.page = NULL;
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index d7eaf1eb11e7..003bfba40758 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
113 113
114MODULE_AUTHOR("VMware, Inc."); 114MODULE_AUTHOR("VMware, Inc.");
115MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); 115MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
116MODULE_VERSION("1.1.5.0-k"); 116MODULE_VERSION("1.1.6.0-k");
117MODULE_LICENSE("GPL v2"); 117MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 83e0c95d20a4..edfffc9699ba 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -15,7 +15,6 @@
15 15
16#include <linux/vmw_vmci_defs.h> 16#include <linux/vmw_vmci_defs.h>
17#include <linux/vmw_vmci_api.h> 17#include <linux/vmw_vmci_api.h>
18#include <linux/moduleparam.h>
19#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
20#include <linux/interrupt.h> 19#include <linux/interrupt.h>
21#include <linux/highmem.h> 20#include <linux/highmem.h>
@@ -448,15 +447,12 @@ static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
448 struct vmci_handle handle; 447 struct vmci_handle handle;
449 int vmci_status; 448 int vmci_status;
450 int __user *retptr; 449 int __user *retptr;
451 u32 cid;
452 450
453 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 451 if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
454 vmci_ioctl_err("only valid for contexts\n"); 452 vmci_ioctl_err("only valid for contexts\n");
455 return -EINVAL; 453 return -EINVAL;
456 } 454 }
457 455
458 cid = vmci_ctx_get_id(vmci_host_dev->context);
459
460 if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) { 456 if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
461 struct vmci_qp_alloc_info_vmvm alloc_info; 457 struct vmci_qp_alloc_info_vmvm alloc_info;
462 struct vmci_qp_alloc_info_vmvm __user *info = uptr; 458 struct vmci_qp_alloc_info_vmvm __user *info = uptr;
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
index 1ab6e8737a5f..da1ee2e1ba99 100644
--- a/drivers/misc/vmw_vmci/vmci_resource.c
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -57,7 +57,8 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
57 57
58 if (r->type == type && 58 if (r->type == type &&
59 rid == handle.resource && 59 rid == handle.resource &&
60 (cid == handle.context || cid == VMCI_INVALID_ID)) { 60 (cid == handle.context || cid == VMCI_INVALID_ID ||
61 handle.context == VMCI_INVALID_ID)) {
61 resource = r; 62 resource = r;
62 break; 63 break;
63 } 64 }
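
The added clause makes the context wildcard symmetric: a caller passing VMCI_INVALID_ID could already match any context, and now a resource whose handle carries VMCI_INVALID_ID matches any caller as well. Restated as a predicate (for illustration only; no such helper exists in the file):

	#include <linux/vmw_vmci_defs.h>

	static bool vmci_resource_cid_matches(u32 cid, struct vmci_handle handle)
	{
		return cid == handle.context ||		  /* exact match */
		       cid == VMCI_INVALID_ID ||	  /* caller wildcard */
		       handle.context == VMCI_INVALID_ID; /* owner wildcard */
	}
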
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index aa1657831b70..9b18ce90f907 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -1,17 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * nvmem framework core. 3 * nvmem framework core.
3 * 4 *
4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> 6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 and
9 * only version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */ 7 */
16 8
17#include <linux/device.h> 9#include <linux/device.h>
@@ -19,6 +11,7 @@
19#include <linux/fs.h> 11#include <linux/fs.h>
20#include <linux/idr.h> 12#include <linux/idr.h>
21#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/kref.h>
22#include <linux/module.h> 15#include <linux/module.h>
23#include <linux/nvmem-consumer.h> 16#include <linux/nvmem-consumer.h>
24#include <linux/nvmem-provider.h> 17#include <linux/nvmem-provider.h>
@@ -26,18 +19,18 @@
26#include <linux/slab.h> 19#include <linux/slab.h>
27 20
28struct nvmem_device { 21struct nvmem_device {
29 const char *name;
30 struct module *owner; 22 struct module *owner;
31 struct device dev; 23 struct device dev;
32 int stride; 24 int stride;
33 int word_size; 25 int word_size;
34 int id; 26 int id;
35 int users; 27 struct kref refcnt;
36 size_t size; 28 size_t size;
37 bool read_only; 29 bool read_only;
38 int flags; 30 int flags;
39 struct bin_attribute eeprom; 31 struct bin_attribute eeprom;
40 struct device *base_dev; 32 struct device *base_dev;
33 struct list_head cells;
41 nvmem_reg_read_t reg_read; 34 nvmem_reg_read_t reg_read;
42 nvmem_reg_write_t reg_write; 35 nvmem_reg_write_t reg_write;
43 void *priv; 36 void *priv;
@@ -58,8 +51,13 @@ struct nvmem_cell {
58static DEFINE_MUTEX(nvmem_mutex); 51static DEFINE_MUTEX(nvmem_mutex);
59static DEFINE_IDA(nvmem_ida); 52static DEFINE_IDA(nvmem_ida);
60 53
61static LIST_HEAD(nvmem_cells); 54static DEFINE_MUTEX(nvmem_cell_mutex);
62static DEFINE_MUTEX(nvmem_cells_mutex); 55static LIST_HEAD(nvmem_cell_tables);
56
57static DEFINE_MUTEX(nvmem_lookup_mutex);
58static LIST_HEAD(nvmem_lookup_list);
59
60static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
63 61
64#ifdef CONFIG_DEBUG_LOCK_ALLOC 62#ifdef CONFIG_DEBUG_LOCK_ALLOC
65static struct lock_class_key eeprom_lock_key; 63static struct lock_class_key eeprom_lock_key;
@@ -156,7 +154,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
156static struct bin_attribute bin_attr_rw_nvmem = { 154static struct bin_attribute bin_attr_rw_nvmem = {
157 .attr = { 155 .attr = {
158 .name = "nvmem", 156 .name = "nvmem",
159 .mode = S_IWUSR | S_IRUGO, 157 .mode = 0644,
160 }, 158 },
161 .read = bin_attr_nvmem_read, 159 .read = bin_attr_nvmem_read,
162 .write = bin_attr_nvmem_write, 160 .write = bin_attr_nvmem_write,
@@ -180,7 +178,7 @@ static const struct attribute_group *nvmem_rw_dev_groups[] = {
180static struct bin_attribute bin_attr_ro_nvmem = { 178static struct bin_attribute bin_attr_ro_nvmem = {
181 .attr = { 179 .attr = {
182 .name = "nvmem", 180 .name = "nvmem",
183 .mode = S_IRUGO, 181 .mode = 0444,
184 }, 182 },
185 .read = bin_attr_nvmem_read, 183 .read = bin_attr_nvmem_read,
186}; 184};
@@ -203,7 +201,7 @@ static const struct attribute_group *nvmem_ro_dev_groups[] = {
203static struct bin_attribute bin_attr_rw_root_nvmem = { 201static struct bin_attribute bin_attr_rw_root_nvmem = {
204 .attr = { 202 .attr = {
205 .name = "nvmem", 203 .name = "nvmem",
206 .mode = S_IWUSR | S_IRUSR, 204 .mode = 0600,
207 }, 205 },
208 .read = bin_attr_nvmem_read, 206 .read = bin_attr_nvmem_read,
209 .write = bin_attr_nvmem_write, 207 .write = bin_attr_nvmem_write,
@@ -227,7 +225,7 @@ static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
227static struct bin_attribute bin_attr_ro_root_nvmem = { 225static struct bin_attribute bin_attr_ro_root_nvmem = {
228 .attr = { 226 .attr = {
229 .name = "nvmem", 227 .name = "nvmem",
230 .mode = S_IRUSR, 228 .mode = 0400,
231 }, 229 },
232 .read = bin_attr_nvmem_read, 230 .read = bin_attr_nvmem_read,
233}; 231};
@@ -282,48 +280,42 @@ static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
282 return to_nvmem_device(d); 280 return to_nvmem_device(d);
283} 281}
284 282
285static struct nvmem_cell *nvmem_find_cell(const char *cell_id) 283static struct nvmem_device *nvmem_find(const char *name)
286{ 284{
287 struct nvmem_cell *p; 285 struct device *d;
288
289 mutex_lock(&nvmem_cells_mutex);
290 286
291 list_for_each_entry(p, &nvmem_cells, node) 287 d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
292 if (!strcmp(p->name, cell_id)) {
293 mutex_unlock(&nvmem_cells_mutex);
294 return p;
295 }
296 288
297 mutex_unlock(&nvmem_cells_mutex); 289 if (!d)
290 return NULL;
298 291
299 return NULL; 292 return to_nvmem_device(d);
300} 293}
301 294
302static void nvmem_cell_drop(struct nvmem_cell *cell) 295static void nvmem_cell_drop(struct nvmem_cell *cell)
303{ 296{
304 mutex_lock(&nvmem_cells_mutex); 297 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
298 mutex_lock(&nvmem_mutex);
305 list_del(&cell->node); 299 list_del(&cell->node);
306 mutex_unlock(&nvmem_cells_mutex); 300 mutex_unlock(&nvmem_mutex);
301 kfree(cell->name);
307 kfree(cell); 302 kfree(cell);
308} 303}
309 304
310static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem) 305static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
311{ 306{
312 struct nvmem_cell *cell; 307 struct nvmem_cell *cell, *p;
313 struct list_head *p, *n;
314 308
315 list_for_each_safe(p, n, &nvmem_cells) { 309 list_for_each_entry_safe(cell, p, &nvmem->cells, node)
316 cell = list_entry(p, struct nvmem_cell, node); 310 nvmem_cell_drop(cell);
317 if (cell->nvmem == nvmem)
318 nvmem_cell_drop(cell);
319 }
320} 311}
321 312
322static void nvmem_cell_add(struct nvmem_cell *cell) 313static void nvmem_cell_add(struct nvmem_cell *cell)
323{ 314{
324 mutex_lock(&nvmem_cells_mutex); 315 mutex_lock(&nvmem_mutex);
325 list_add_tail(&cell->node, &nvmem_cells); 316 list_add_tail(&cell->node, &cell->nvmem->cells);
326 mutex_unlock(&nvmem_cells_mutex); 317 mutex_unlock(&nvmem_mutex);
318 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
327} 319}
328 320
329static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, 321static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
@@ -361,7 +353,7 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
361 * 353 *
362 * Return: 0 or negative error code on failure. 354 * Return: 0 or negative error code on failure.
363 */ 355 */
364int nvmem_add_cells(struct nvmem_device *nvmem, 356static int nvmem_add_cells(struct nvmem_device *nvmem,
365 const struct nvmem_cell_info *info, 357 const struct nvmem_cell_info *info,
366 int ncells) 358 int ncells)
367{ 359{
@@ -400,7 +392,6 @@ err:
400 392
401 return rval; 393 return rval;
402} 394}
403EXPORT_SYMBOL_GPL(nvmem_add_cells);
404 395
405/* 396/*
406 * nvmem_setup_compat() - Create an additional binary entry in 397 * nvmem_setup_compat() - Create an additional binary entry in
@@ -440,6 +431,136 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
440} 431}
441 432
442/** 433/**
434 * nvmem_register_notifier() - Register a notifier block for nvmem events.
435 *
436 * @nb: notifier block to be called on nvmem events.
437 *
438 * Return: 0 on success, negative error number on failure.
439 */
440int nvmem_register_notifier(struct notifier_block *nb)
441{
442 return blocking_notifier_chain_register(&nvmem_notifier, nb);
443}
444EXPORT_SYMBOL_GPL(nvmem_register_notifier);
445
446/**
447 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
448 *
449 * @nb: notifier block to be unregistered.
450 *
451 * Return: 0 on success, negative error number on failure.
452 */
453int nvmem_unregister_notifier(struct notifier_block *nb)
454{
455 return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
456}
457EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
458
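
With the notifier chain in place, other drivers can react to providers and cells coming and going instead of polling for them. A hedged consumer sketch using the event codes added by this series:

	#include <linux/notifier.h>
	#include <linux/nvmem-consumer.h>

	static int my_nvmem_event(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		/* data is the affected nvmem_device or nvmem_cell */
		if (event == NVMEM_ADD)
			pr_info("an nvmem provider was registered\n");
		return NOTIFY_OK;
	}

	static struct notifier_block my_nvmem_nb = {
		.notifier_call = my_nvmem_event,
	};

	/* nvmem_register_notifier(&my_nvmem_nb) at init,
	 * nvmem_unregister_notifier(&my_nvmem_nb) at exit */
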
459static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
460{
461 const struct nvmem_cell_info *info;
462 struct nvmem_cell_table *table;
463 struct nvmem_cell *cell;
464 int rval = 0, i;
465
466 mutex_lock(&nvmem_cell_mutex);
467 list_for_each_entry(table, &nvmem_cell_tables, node) {
468 if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
469 for (i = 0; i < table->ncells; i++) {
470 info = &table->cells[i];
471
472 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
473 if (!cell) {
474 rval = -ENOMEM;
475 goto out;
476 }
477
478 rval = nvmem_cell_info_to_nvmem_cell(nvmem,
479 info,
480 cell);
481 if (rval) {
482 kfree(cell);
483 goto out;
484 }
485
486 nvmem_cell_add(cell);
487 }
488 }
489 }
490
491out:
492 mutex_unlock(&nvmem_cell_mutex);
493 return rval;
494}
495
496static struct nvmem_cell *
497nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
498{
499 struct nvmem_cell *cell = NULL;
500
501 mutex_lock(&nvmem_mutex);
502 list_for_each_entry(cell, &nvmem->cells, node) {
503 if (strcmp(cell_id, cell->name) == 0)
504 break;
505 }
506 mutex_unlock(&nvmem_mutex);
507
508 return cell;
509}
510
511static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
512{
513 struct device_node *parent, *child;
514 struct device *dev = &nvmem->dev;
515 struct nvmem_cell *cell;
516 const __be32 *addr;
517 int len;
518
519 parent = dev->of_node;
520
521 for_each_child_of_node(parent, child) {
522 addr = of_get_property(child, "reg", &len);
523 if (!addr || (len < 2 * sizeof(u32))) {
524 dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
525 return -EINVAL;
526 }
527
528 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
529 if (!cell)
530 return -ENOMEM;
531
532 cell->nvmem = nvmem;
533 cell->offset = be32_to_cpup(addr++);
534 cell->bytes = be32_to_cpup(addr);
535 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
536
537 addr = of_get_property(child, "bits", &len);
538 if (addr && len == (2 * sizeof(u32))) {
539 cell->bit_offset = be32_to_cpup(addr++);
540 cell->nbits = be32_to_cpup(addr);
541 }
542
543 if (cell->nbits)
544 cell->bytes = DIV_ROUND_UP(
545 cell->nbits + cell->bit_offset,
546 BITS_PER_BYTE);
547
548 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
549 dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
550 cell->name, nvmem->stride);
551 /* Cells already added will be freed later. */
552 kfree(cell->name);
553 kfree(cell);
554 return -EINVAL;
555 }
556
557 nvmem_cell_add(cell);
558 }
559
560 return 0;
561}
562
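
nvmem_add_cells_from_of() derives each cell's geometry from the child node: "reg" supplies <offset bytes>, an optional "bits" supplies <bit_offset nbits>, and a non-zero nbits overrides the byte length. The size rule in isolation (a restatement, not a helper from this file):

	#include <linux/kernel.h>
	#include <linux/bits.h>

	/* e.g. bit_offset = 6, nbits = 10 covers bits 6..15,
	 * i.e. DIV_ROUND_UP(6 + 10, 8) == 2 bytes */
	static u32 cell_bytes(u32 reg_bytes, u32 bit_offset, u32 nbits)
	{
		return nbits ? DIV_ROUND_UP(bit_offset + nbits, BITS_PER_BYTE)
			     : reg_bytes;
	}
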
563/**
443 * nvmem_register() - Register a nvmem device for given nvmem_config. 564 * nvmem_register() - Register a nvmem device for given nvmem_config.
444 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem 565 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
445 * 566 *
@@ -467,6 +588,9 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
467 return ERR_PTR(rval); 588 return ERR_PTR(rval);
468 } 589 }
469 590
591 kref_init(&nvmem->refcnt);
592 INIT_LIST_HEAD(&nvmem->cells);
593
470 nvmem->id = rval; 594 nvmem->id = rval;
471 nvmem->owner = config->owner; 595 nvmem->owner = config->owner;
472 if (!nvmem->owner && config->dev->driver) 596 if (!nvmem->owner && config->dev->driver)
@@ -516,11 +640,31 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
516 goto err_device_del; 640 goto err_device_del;
517 } 641 }
518 642
519 if (config->cells) 643 if (config->cells) {
520 nvmem_add_cells(nvmem, config->cells, config->ncells); 644 rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
645 if (rval)
646 goto err_teardown_compat;
647 }
648
649 rval = nvmem_add_cells_from_table(nvmem);
650 if (rval)
651 goto err_remove_cells;
652
653 rval = nvmem_add_cells_from_of(nvmem);
654 if (rval)
655 goto err_remove_cells;
656
657 rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
658 if (rval)
659 goto err_remove_cells;
521 660
522 return nvmem; 661 return nvmem;
523 662
663err_remove_cells:
664 nvmem_device_remove_all_cells(nvmem);
665err_teardown_compat:
666 if (config->compat)
667 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
524err_device_del: 668err_device_del:
525 device_del(&nvmem->dev); 669 device_del(&nvmem->dev);
526err_put_device: 670err_put_device:
@@ -530,21 +674,13 @@ err_put_device:
530} 674}
531EXPORT_SYMBOL_GPL(nvmem_register); 675EXPORT_SYMBOL_GPL(nvmem_register);
532 676
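
Registration now either fully succeeds or unwinds everything it created, so providers need no partial-failure cleanup of their own. A hedged probe sketch using the devm_ variant that the converted drivers below rely on (all names and geometry invented):

	#include <linux/nvmem-provider.h>
	#include <linux/platform_device.h>

	static int my_otp_probe(struct platform_device *pdev)
	{
		struct nvmem_config config = {
			.name = "my-otp",		/* invented */
			.dev = &pdev->dev,
			.read_only = true,
			.size = 256,
			.word_size = 1,
			.stride = 1,
			.reg_read = my_otp_reg_read,	/* assumed callback */
		};
		struct nvmem_device *nvmem;

		/* unregisters automatically on driver detach */
		nvmem = devm_nvmem_register(&pdev->dev, &config);
		return PTR_ERR_OR_ZERO(nvmem);
	}
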
533/** 677static void nvmem_device_release(struct kref *kref)
534 * nvmem_unregister() - Unregister previously registered nvmem device
535 *
536 * @nvmem: Pointer to previously registered nvmem device.
537 *
538 * Return: Will be an negative on error or a zero on success.
539 */
540int nvmem_unregister(struct nvmem_device *nvmem)
541{ 678{
542 mutex_lock(&nvmem_mutex); 679 struct nvmem_device *nvmem;
543 if (nvmem->users) { 680
544 mutex_unlock(&nvmem_mutex); 681 nvmem = container_of(kref, struct nvmem_device, refcnt);
545 return -EBUSY; 682
546 } 683 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
547 mutex_unlock(&nvmem_mutex);
548 684
549 if (nvmem->flags & FLAG_COMPAT) 685 if (nvmem->flags & FLAG_COMPAT)
550 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); 686 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
@@ -552,14 +688,22 @@ int nvmem_unregister(struct nvmem_device *nvmem)
552 nvmem_device_remove_all_cells(nvmem); 688 nvmem_device_remove_all_cells(nvmem);
553 device_del(&nvmem->dev); 689 device_del(&nvmem->dev);
554 put_device(&nvmem->dev); 690 put_device(&nvmem->dev);
691}
555 692
556 return 0; 693/**
694 * nvmem_unregister() - Unregister previously registered nvmem device
695 *
696 * @nvmem: Pointer to previously registered nvmem device.
697 */
698void nvmem_unregister(struct nvmem_device *nvmem)
699{
700 kref_put(&nvmem->refcnt, nvmem_device_release);
557} 701}
558EXPORT_SYMBOL_GPL(nvmem_unregister); 702EXPORT_SYMBOL_GPL(nvmem_unregister);
559 703
560static void devm_nvmem_release(struct device *dev, void *res) 704static void devm_nvmem_release(struct device *dev, void *res)
561{ 705{
562 WARN_ON(nvmem_unregister(*(struct nvmem_device **)res)); 706 nvmem_unregister(*(struct nvmem_device **)res);
563} 707}
564 708
565/** 709/**
@@ -617,71 +761,34 @@ int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
617} 761}
618EXPORT_SYMBOL(devm_nvmem_unregister); 762EXPORT_SYMBOL(devm_nvmem_unregister);
619 763
620
621static struct nvmem_device *__nvmem_device_get(struct device_node *np, 764static struct nvmem_device *__nvmem_device_get(struct device_node *np,
622 struct nvmem_cell **cellp, 765 const char *nvmem_name)
623 const char *cell_id)
624{ 766{
625 struct nvmem_device *nvmem = NULL; 767 struct nvmem_device *nvmem = NULL;
626 768
627 mutex_lock(&nvmem_mutex); 769 mutex_lock(&nvmem_mutex);
628 770 nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
629 if (np) {
630 nvmem = of_nvmem_find(np);
631 if (!nvmem) {
632 mutex_unlock(&nvmem_mutex);
633 return ERR_PTR(-EPROBE_DEFER);
634 }
635 } else {
636 struct nvmem_cell *cell = nvmem_find_cell(cell_id);
637
638 if (cell) {
639 nvmem = cell->nvmem;
640 *cellp = cell;
641 }
642
643 if (!nvmem) {
644 mutex_unlock(&nvmem_mutex);
645 return ERR_PTR(-ENOENT);
646 }
647 }
648
649 nvmem->users++;
650 mutex_unlock(&nvmem_mutex); 771 mutex_unlock(&nvmem_mutex);
772 if (!nvmem)
773 return ERR_PTR(-EPROBE_DEFER);
651 774
652 if (!try_module_get(nvmem->owner)) { 775 if (!try_module_get(nvmem->owner)) {
653 dev_err(&nvmem->dev, 776 dev_err(&nvmem->dev,
654 "could not increase module refcount for cell %s\n", 777 "could not increase module refcount for cell %s\n",
655 nvmem->name); 778 nvmem_dev_name(nvmem));
656
657 mutex_lock(&nvmem_mutex);
658 nvmem->users--;
659 mutex_unlock(&nvmem_mutex);
660 779
661 return ERR_PTR(-EINVAL); 780 return ERR_PTR(-EINVAL);
662 } 781 }
663 782
783 kref_get(&nvmem->refcnt);
784
664 return nvmem; 785 return nvmem;
665} 786}
666 787
667static void __nvmem_device_put(struct nvmem_device *nvmem) 788static void __nvmem_device_put(struct nvmem_device *nvmem)
668{ 789{
669 module_put(nvmem->owner); 790 module_put(nvmem->owner);
670 mutex_lock(&nvmem_mutex); 791 kref_put(&nvmem->refcnt, nvmem_device_release);
671 nvmem->users--;
672 mutex_unlock(&nvmem_mutex);
673}
674
675static struct nvmem_device *nvmem_find(const char *name)
676{
677 struct device *d;
678
679 d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
680
681 if (!d)
682 return NULL;
683
684 return to_nvmem_device(d);
685} 792}
686 793
687#if IS_ENABLED(CONFIG_OF) 794#if IS_ENABLED(CONFIG_OF)
@@ -706,7 +813,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
706 if (!nvmem_np) 813 if (!nvmem_np)
707 return ERR_PTR(-EINVAL); 814 return ERR_PTR(-EINVAL);
708 815
709 return __nvmem_device_get(nvmem_np, NULL, NULL); 816 return __nvmem_device_get(nvmem_np, NULL);
710} 817}
711EXPORT_SYMBOL_GPL(of_nvmem_device_get); 818EXPORT_SYMBOL_GPL(of_nvmem_device_get);
712#endif 819#endif
@@ -810,44 +917,86 @@ struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
810} 917}
811EXPORT_SYMBOL_GPL(devm_nvmem_device_get); 918EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
812 919
813static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id) 920static struct nvmem_cell *
921nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
814{ 922{
815 struct nvmem_cell *cell = NULL; 923 struct nvmem_cell *cell = ERR_PTR(-ENOENT);
924 struct nvmem_cell_lookup *lookup;
816 struct nvmem_device *nvmem; 925 struct nvmem_device *nvmem;
926 const char *dev_id;
817 927
818 nvmem = __nvmem_device_get(NULL, &cell, cell_id); 928 if (!dev)
819 if (IS_ERR(nvmem)) 929 return ERR_PTR(-EINVAL);
820 return ERR_CAST(nvmem);
821 930
931 dev_id = dev_name(dev);
932
933 mutex_lock(&nvmem_lookup_mutex);
934
935 list_for_each_entry(lookup, &nvmem_lookup_list, node) {
936 if ((strcmp(lookup->dev_id, dev_id) == 0) &&
937 (strcmp(lookup->con_id, con_id) == 0)) {
938 /* This is the right entry. */
939 nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
940 if (IS_ERR(nvmem)) {
941 /* Provider may not be registered yet. */
942 cell = ERR_CAST(nvmem);
943 goto out;
944 }
945
946 cell = nvmem_find_cell_by_name(nvmem,
947 lookup->cell_name);
948 if (!cell) {
949 __nvmem_device_put(nvmem);
950 cell = ERR_PTR(-ENOENT);
951 goto out;
952 }
953 }
954 }
955
956out:
957 mutex_unlock(&nvmem_lookup_mutex);
822 return cell; 958 return cell;
823} 959}
824 960
825#if IS_ENABLED(CONFIG_OF) 961#if IS_ENABLED(CONFIG_OF)
962static struct nvmem_cell *
963nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index)
964{
965 struct nvmem_cell *cell = NULL;
966 int i = 0;
967
968 mutex_lock(&nvmem_mutex);
969 list_for_each_entry(cell, &nvmem->cells, node) {
970 if (index == i++)
971 break;
972 }
973 mutex_unlock(&nvmem_mutex);
974
975 return cell;
976}
977
826/** 978/**
827 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id 979 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
828 * 980 *
829 * @np: Device tree node that uses the nvmem cell. 981 * @np: Device tree node that uses the nvmem cell.
830 * @name: nvmem cell name from nvmem-cell-names property, or NULL 982 * @id: nvmem cell name from nvmem-cell-names property, or NULL
831 * for the cell at index 0 (the lone cell with no accompanying 983 * for the cell at index 0 (the lone cell with no accompanying
832 * nvmem-cell-names property). 984 * nvmem-cell-names property).
833 * 985 *
834 * Return: Will be an ERR_PTR() on error or a valid pointer 986 * Return: Will be an ERR_PTR() on error or a valid pointer
835 * to a struct nvmem_cell. The nvmem_cell will be freed by the 987 * to a struct nvmem_cell. The nvmem_cell will be freed by the
836 * nvmem_cell_put(). 988 * nvmem_cell_put().
837 */ 989 */
838struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, 990struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
839 const char *name)
840{ 991{
841 struct device_node *cell_np, *nvmem_np; 992 struct device_node *cell_np, *nvmem_np;
842 struct nvmem_cell *cell;
843 struct nvmem_device *nvmem; 993 struct nvmem_device *nvmem;
844 const __be32 *addr; 994 struct nvmem_cell *cell;
845 int rval, len;
846 int index = 0; 995 int index = 0;
847 996
848 /* if cell name exists, find index to the name */ 997 /* if cell name exists, find index to the name */
849 if (name) 998 if (id)
850 index = of_property_match_string(np, "nvmem-cell-names", name); 999 index = of_property_match_string(np, "nvmem-cell-names", id);
851 1000
852 cell_np = of_parse_phandle(np, "nvmem-cells", index); 1001 cell_np = of_parse_phandle(np, "nvmem-cells", index);
853 if (!cell_np) 1002 if (!cell_np)
@@ -857,59 +1006,18 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
857 if (!nvmem_np) 1006 if (!nvmem_np)
858 return ERR_PTR(-EINVAL); 1007 return ERR_PTR(-EINVAL);
859 1008
860 nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); 1009 nvmem = __nvmem_device_get(nvmem_np, NULL);
861 of_node_put(nvmem_np); 1010 of_node_put(nvmem_np);
862 if (IS_ERR(nvmem)) 1011 if (IS_ERR(nvmem))
863 return ERR_CAST(nvmem); 1012 return ERR_CAST(nvmem);
864 1013
865 addr = of_get_property(cell_np, "reg", &len); 1014 cell = nvmem_find_cell_by_index(nvmem, index);
866 if (!addr || (len < 2 * sizeof(u32))) {
867 dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
868 cell_np);
869 rval = -EINVAL;
870 goto err_mem;
871 }
872
873 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
874 if (!cell) { 1015 if (!cell) {
875 rval = -ENOMEM; 1016 __nvmem_device_put(nvmem);
876 goto err_mem; 1017 return ERR_PTR(-ENOENT);
877 }
878
879 cell->nvmem = nvmem;
880 cell->offset = be32_to_cpup(addr++);
881 cell->bytes = be32_to_cpup(addr);
882 cell->name = cell_np->name;
883
884 addr = of_get_property(cell_np, "bits", &len);
885 if (addr && len == (2 * sizeof(u32))) {
886 cell->bit_offset = be32_to_cpup(addr++);
887 cell->nbits = be32_to_cpup(addr);
888 } 1018 }
889 1019
890 if (cell->nbits)
891 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
892 BITS_PER_BYTE);
893
894 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
895 dev_err(&nvmem->dev,
896 "cell %s unaligned to nvmem stride %d\n",
897 cell->name, nvmem->stride);
898 rval = -EINVAL;
899 goto err_sanity;
900 }
901
902 nvmem_cell_add(cell);
903
904 return cell; 1020 return cell;
905
906err_sanity:
907 kfree(cell);
908
909err_mem:
910 __nvmem_device_put(nvmem);
911
912 return ERR_PTR(rval);
913} 1021}
914EXPORT_SYMBOL_GPL(of_nvmem_cell_get); 1022EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
915#endif 1023#endif
@@ -918,27 +1026,29 @@ EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
918 * nvmem_cell_get() - Get nvmem cell of device from a given cell name 1026 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
919 * 1027 *
920 * @dev: Device that requests the nvmem cell. 1028 * @dev: Device that requests the nvmem cell.
921 * @cell_id: nvmem cell name to get. 1029 * @id: nvmem cell name to get (this corresponds with the name from the
1030 * nvmem-cell-names property for DT systems and with the con_id from
1031 * the lookup entry for non-DT systems).
922 * 1032 *
923 * Return: Will be an ERR_PTR() on error or a valid pointer 1033 * Return: Will be an ERR_PTR() on error or a valid pointer
924 * to a struct nvmem_cell. The nvmem_cell will be freed by the 1034 * to a struct nvmem_cell. The nvmem_cell will be freed by the
925 * nvmem_cell_put(). 1035 * nvmem_cell_put().
926 */ 1036 */
927struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id) 1037struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
928{ 1038{
929 struct nvmem_cell *cell; 1039 struct nvmem_cell *cell;
930 1040
931 if (dev->of_node) { /* try dt first */ 1041 if (dev->of_node) { /* try dt first */
932 cell = of_nvmem_cell_get(dev->of_node, cell_id); 1042 cell = of_nvmem_cell_get(dev->of_node, id);
933 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) 1043 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
934 return cell; 1044 return cell;
935 } 1045 }
936 1046
937 /* NULL cell_id only allowed for device tree; invalid otherwise */ 1047 /* NULL cell id only allowed for device tree; invalid otherwise */
938 if (!cell_id) 1048 if (!id)
939 return ERR_PTR(-EINVAL); 1049 return ERR_PTR(-EINVAL);
940 1050
941 return nvmem_cell_get_from_list(cell_id); 1051 return nvmem_cell_get_from_lookup(dev, id);
942} 1052}
943EXPORT_SYMBOL_GPL(nvmem_cell_get); 1053EXPORT_SYMBOL_GPL(nvmem_cell_get);
944 1054
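
Consumer code is unchanged by the lookup rework: on DT systems the id is matched against "nvmem-cell-names", elsewhere against a lookup entry's con_id. A hedged read sketch ("mac-address" is an invented cell name):

	#include <linux/nvmem-consumer.h>
	#include <linux/if_ether.h>
	#include <linux/slab.h>

	static int read_mac(struct device *dev, u8 *mac)
	{
		struct nvmem_cell *cell;
		size_t len;
		void *buf;

		cell = nvmem_cell_get(dev, "mac-address");
		if (IS_ERR(cell))
			return PTR_ERR(cell);	/* may be -EPROBE_DEFER */

		buf = nvmem_cell_read(cell, &len);
		nvmem_cell_put(cell);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		if (len == ETH_ALEN)
			memcpy(mac, buf, ETH_ALEN);
		kfree(buf);		/* nvmem_cell_read() kmallocs */
		return len == ETH_ALEN ? 0 : -EINVAL;
	}
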
@@ -1015,7 +1125,6 @@ void nvmem_cell_put(struct nvmem_cell *cell)
1015 struct nvmem_device *nvmem = cell->nvmem; 1125 struct nvmem_device *nvmem = cell->nvmem;
1016 1126
1017 __nvmem_device_put(nvmem); 1127 __nvmem_device_put(nvmem);
1018 nvmem_cell_drop(cell);
1019} 1128}
1020EXPORT_SYMBOL_GPL(nvmem_cell_put); 1129EXPORT_SYMBOL_GPL(nvmem_cell_put);
1021 1130
@@ -1267,7 +1376,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1267 * @buf: buffer to be written to cell. 1376 * @buf: buffer to be written to cell.
1268 * 1377 *
1269 * Return: length of bytes written or negative error code on failure. 1378 * Return: length of bytes written or negative error code on failure.
1270 * */ 1379 */
1271int nvmem_device_cell_write(struct nvmem_device *nvmem, 1380int nvmem_device_cell_write(struct nvmem_device *nvmem,
1272 struct nvmem_cell_info *info, void *buf) 1381 struct nvmem_cell_info *info, void *buf)
1273{ 1382{
@@ -1323,7 +1432,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_read);
1323 * @buf: buffer to be written. 1432 * @buf: buffer to be written.
1324 * 1433 *
1325 * Return: length of bytes written or negative error code on failure. 1434 * Return: length of bytes written or negative error code on failure.
1326 * */ 1435 */
1327int nvmem_device_write(struct nvmem_device *nvmem, 1436int nvmem_device_write(struct nvmem_device *nvmem,
1328 unsigned int offset, 1437 unsigned int offset,
1329 size_t bytes, void *buf) 1438 size_t bytes, void *buf)
@@ -1343,6 +1452,80 @@ int nvmem_device_write(struct nvmem_device *nvmem,
1343} 1452}
1344EXPORT_SYMBOL_GPL(nvmem_device_write); 1453EXPORT_SYMBOL_GPL(nvmem_device_write);
1345 1454
1455/**
1456 * nvmem_add_cell_table() - register a table of cell info entries
1457 *
1458 * @table: table of cell info entries
1459 */
1460void nvmem_add_cell_table(struct nvmem_cell_table *table)
1461{
1462 mutex_lock(&nvmem_cell_mutex);
1463 list_add_tail(&table->node, &nvmem_cell_tables);
1464 mutex_unlock(&nvmem_cell_mutex);
1465}
1466EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
1467
1468/**
1469 * nvmem_del_cell_table() - remove a previously registered cell info table
1470 *
1471 * @table: table of cell info entries
1472 */
1473void nvmem_del_cell_table(struct nvmem_cell_table *table)
1474{
1475 mutex_lock(&nvmem_cell_mutex);
1476 list_del(&table->node);
1477 mutex_unlock(&nvmem_cell_mutex);
1478}
1479EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1480
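
A cell table lets platform code describe cells for a provider that registers later; entries are matched by provider name inside nvmem_register(). Sketch with invented names and offsets:

	static struct nvmem_cell_info board_cells[] = {
		{
			.name	= "mac",
			.offset	= 0x40,
			.bytes	= 6,
		},
	};

	static struct nvmem_cell_table board_cell_table = {
		.nvmem_name	= "my-otp",	/* must match the provider */
		.cells		= board_cells,
		.ncells		= ARRAY_SIZE(board_cells),
	};

	/* nvmem_add_cell_table(&board_cell_table) before "my-otp"
	 * probes; the cells are created during nvmem_register() */
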
1481/**
1482 * nvmem_add_cell_lookups() - register a list of cell lookup entries
1483 *
1484 * @entries: array of cell lookup entries
1485 * @nentries: number of cell lookup entries in the array
1486 */
1487void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1488{
1489 int i;
1490
1491 mutex_lock(&nvmem_lookup_mutex);
1492 for (i = 0; i < nentries; i++)
1493 list_add_tail(&entries[i].node, &nvmem_lookup_list);
1494 mutex_unlock(&nvmem_lookup_mutex);
1495}
1496EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
1497
1498/**
1499 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1500 * entries
1501 *
1502 * @entries: array of cell lookup entries
1503 * @nentries: number of cell lookup entries in the array
1504 */
1505void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1506{
1507 int i;
1508
1509 mutex_lock(&nvmem_lookup_mutex);
1510 for (i = 0; i < nentries; i++)
1511 list_del(&entries[i].node);
1512 mutex_unlock(&nvmem_lookup_mutex);
1513}
1514EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1515
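
Lookup entries are the board-file counterpart of the DT "nvmem-cell-names" property: they bind a consumer device and con_id to a provider and cell name so that nvmem_cell_get() can resolve them. Sketch with invented names:

	static struct nvmem_cell_lookup board_nvmem_lookups[] = {
		{
			.nvmem_name	= "my-otp",	/* provider */
			.cell_name	= "mac",	/* cell on it */
			.dev_id		= "foo.0",	/* consumer dev_name() */
			.con_id		= "mac-address",
		},
	};

	static int __init board_nvmem_init(void)
	{
		nvmem_add_cell_lookups(board_nvmem_lookups,
				       ARRAY_SIZE(board_nvmem_lookups));
		return 0;
	}
	postcore_initcall(board_nvmem_init);
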
1516/**
1517 * nvmem_dev_name() - Get the name of a given nvmem device.
1518 *
1519 * @nvmem: nvmem device.
1520 *
1521 * Return: name of the nvmem device.
1522 */
1523const char *nvmem_dev_name(struct nvmem_device *nvmem)
1524{
1525 return dev_name(&nvmem->dev);
1526}
1527EXPORT_SYMBOL_GPL(nvmem_dev_name);
1528
1346static int __init nvmem_init(void) 1529static int __init nvmem_init(void)
1347{ 1530{
1348 return bus_register(&nvmem_bus_type); 1531 return bus_register(&nvmem_bus_type);
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index a9534a6e8636..66cff1e2147a 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -236,7 +236,7 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
236 lpc18xx_nvmem_config.dev = dev; 236 lpc18xx_nvmem_config.dev = dev;
237 lpc18xx_nvmem_config.priv = eeprom; 237 lpc18xx_nvmem_config.priv = eeprom;
238 238
239 eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config); 239 eeprom->nvmem = devm_nvmem_register(dev, &lpc18xx_nvmem_config);
240 if (IS_ERR(eeprom->nvmem)) { 240 if (IS_ERR(eeprom->nvmem)) {
241 ret = PTR_ERR(eeprom->nvmem); 241 ret = PTR_ERR(eeprom->nvmem);
242 goto err_clk; 242 goto err_clk;
@@ -255,11 +255,6 @@ err_clk:
255static int lpc18xx_eeprom_remove(struct platform_device *pdev) 255static int lpc18xx_eeprom_remove(struct platform_device *pdev)
256{ 256{
257 struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev); 257 struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
258 int ret;
259
260 ret = nvmem_unregister(eeprom->nvmem);
261 if (ret < 0)
262 return ret;
263 258
264 clk_disable_unprepare(eeprom->clk); 259 clk_disable_unprepare(eeprom->clk);
265 260
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 7018e2ef5714..53122f59c4b2 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -177,7 +177,7 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
177 ocotp_config.size = data->size; 177 ocotp_config.size = data->size;
178 ocotp_config.priv = otp; 178 ocotp_config.priv = otp;
179 ocotp_config.dev = dev; 179 ocotp_config.dev = dev;
180 otp->nvmem = nvmem_register(&ocotp_config); 180 otp->nvmem = devm_nvmem_register(dev, &ocotp_config);
181 if (IS_ERR(otp->nvmem)) { 181 if (IS_ERR(otp->nvmem)) {
182 ret = PTR_ERR(otp->nvmem); 182 ret = PTR_ERR(otp->nvmem);
183 goto err_clk; 183 goto err_clk;
@@ -199,7 +199,7 @@ static int mxs_ocotp_remove(struct platform_device *pdev)
199 199
200 clk_unprepare(otp->clk); 200 clk_unprepare(otp->clk);
201 201
202 return nvmem_unregister(otp->nvmem); 202 return 0;
203} 203}
204 204
205static struct platform_driver mxs_ocotp_driver = { 205static struct platform_driver mxs_ocotp_driver = {
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index d020f89248fd..570a2e354f30 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -154,7 +154,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
154 struct resource *res; 154 struct resource *res;
155 struct nvmem_device *nvmem; 155 struct nvmem_device *nvmem;
156 struct sunxi_sid *sid; 156 struct sunxi_sid *sid;
157 int ret, i, size; 157 int i, size;
158 char *randomness; 158 char *randomness;
159 const struct sunxi_sid_cfg *cfg; 159 const struct sunxi_sid_cfg *cfg;
160 160
@@ -181,15 +181,13 @@ static int sunxi_sid_probe(struct platform_device *pdev)
181 else 181 else
182 econfig.reg_read = sunxi_sid_read; 182 econfig.reg_read = sunxi_sid_read;
183 econfig.priv = sid; 183 econfig.priv = sid;
184 nvmem = nvmem_register(&econfig); 184 nvmem = devm_nvmem_register(dev, &econfig);
185 if (IS_ERR(nvmem)) 185 if (IS_ERR(nvmem))
186 return PTR_ERR(nvmem); 186 return PTR_ERR(nvmem);
187 187
188 randomness = kzalloc(size, GFP_KERNEL); 188 randomness = kzalloc(size, GFP_KERNEL);
189 if (!randomness) { 189 if (!randomness)
190 ret = -EINVAL; 190 return -ENOMEM;
191 goto err_unreg_nvmem;
192 }
193 191
194 for (i = 0; i < size; i++) 192 for (i = 0; i < size; i++)
195 econfig.reg_read(sid, i, &randomness[i], 1); 193 econfig.reg_read(sid, i, &randomness[i], 1);
@@ -200,17 +198,6 @@ static int sunxi_sid_probe(struct platform_device *pdev)
200 platform_set_drvdata(pdev, nvmem); 198 platform_set_drvdata(pdev, nvmem);
201 199
202 return 0; 200 return 0;
203
204err_unreg_nvmem:
205 nvmem_unregister(nvmem);
206 return ret;
207}
208
209static int sunxi_sid_remove(struct platform_device *pdev)
210{
211 struct nvmem_device *nvmem = platform_get_drvdata(pdev);
212
213 return nvmem_unregister(nvmem);
214} 201}
215 202
216static const struct sunxi_sid_cfg sun4i_a10_cfg = { 203static const struct sunxi_sid_cfg sun4i_a10_cfg = {
@@ -243,7 +230,6 @@ MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
243 230
244static struct platform_driver sunxi_sid_driver = { 231static struct platform_driver sunxi_sid_driver = {
245 .probe = sunxi_sid_probe, 232 .probe = sunxi_sid_probe,
246 .remove = sunxi_sid_remove,
247 .driver = { 233 .driver = {
248 .name = "eeprom-sunxi-sid", 234 .name = "eeprom-sunxi-sid",
249 .of_match_table = sunxi_sid_of_match, 235 .of_match_table = sunxi_sid_of_match,
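The three nvmem conversions above share one pattern: switching to the device-managed devm_nvmem_register() lets the driver core unregister the nvmem device automatically when the underlying struct device goes away, which in turn deletes the nvmem_unregister() calls, the error-unwind labels, and (for sunxi_sid) the whole .remove callback. A minimal sketch of the resulting probe, with hypothetical foo_* names; only devm_nvmem_register() itself is the real API:

    #include <linux/module.h>
    #include <linux/nvmem-provider.h>
    #include <linux/platform_device.h>

    static int foo_nvmem_probe(struct platform_device *pdev)
    {
            struct nvmem_config config = {
                    .dev = &pdev->dev,
                    .name = "foo-nvmem",
                    /* .size, .reg_read, .priv, ... as in the drivers above */
            };
            struct nvmem_device *nvmem;

            /* lifetime is now tied to &pdev->dev; no unregister needed */
            nvmem = devm_nvmem_register(&pdev->dev, &config);
            if (IS_ERR(nvmem))
                    return PTR_ERR(nvmem);

            return 0;
    }

Resource ordering still matters: anything the nvmem callbacks depend on (clocks, regmaps) must itself be devm-managed and acquired earlier in probe, so it is released after the nvmem device on teardown.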
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 2da567540c2d..7c639006252e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Copyright (C) 2012 Intel, Inc. 3 * Copyright (C) 2012 Intel, Inc.
3 * Copyright (C) 2013 Intel, Inc. 4 * Copyright (C) 2013 Intel, Inc.
@@ -46,7 +47,6 @@
46 * exchange is properly mapped during a transfer. 47 * exchange is properly mapped during a transfer.
47 */ 48 */
48 49
49
50#include <linux/module.h> 50#include <linux/module.h>
51#include <linux/mod_devicetable.h> 51#include <linux/mod_devicetable.h>
52#include <linux/interrupt.h> 52#include <linux/interrupt.h>
@@ -59,10 +59,11 @@
59#include <linux/bitops.h> 59#include <linux/bitops.h>
60#include <linux/slab.h> 60#include <linux/slab.h>
61#include <linux/io.h> 61#include <linux/io.h>
62#include <linux/goldfish.h>
63#include <linux/dma-mapping.h> 62#include <linux/dma-mapping.h>
64#include <linux/mm.h> 63#include <linux/mm.h>
65#include <linux/acpi.h> 64#include <linux/acpi.h>
65#include <linux/bug.h>
66#include "goldfish_pipe_qemu.h"
66 67
67/* 68/*
68 * Update this when something changes in the driver's behavior so the host 69 * Update this when something changes in the driver's behavior so the host
@@ -73,71 +74,6 @@ enum {
73 PIPE_CURRENT_DEVICE_VERSION = 2 74 PIPE_CURRENT_DEVICE_VERSION = 2
74}; 75};
75 76
76/*
77 * IMPORTANT: The following constants must match the ones used and defined
78 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
79 */
80
81/* List of bitflags returned in status of CMD_POLL command */
82enum PipePollFlags {
83 PIPE_POLL_IN = 1 << 0,
84 PIPE_POLL_OUT = 1 << 1,
85 PIPE_POLL_HUP = 1 << 2
86};
87
88/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
89enum PipeErrors {
90 PIPE_ERROR_INVAL = -1,
91 PIPE_ERROR_AGAIN = -2,
92 PIPE_ERROR_NOMEM = -3,
93 PIPE_ERROR_IO = -4
94};
95
96/* Bit-flags used to signal events from the emulator */
97enum PipeWakeFlags {
98 PIPE_WAKE_CLOSED = 1 << 0, /* emulator closed pipe */
99 PIPE_WAKE_READ = 1 << 1, /* pipe can now be read from */
100 PIPE_WAKE_WRITE = 1 << 2 /* pipe can now be written to */
101};
102
103/* Bit flags for the 'flags' field */
104enum PipeFlagsBits {
105 BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
106 BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
107 BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
108};
109
110enum PipeRegs {
111 PIPE_REG_CMD = 0,
112
113 PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
114 PIPE_REG_SIGNAL_BUFFER = 8,
115 PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
116
117 PIPE_REG_OPEN_BUFFER_HIGH = 20,
118 PIPE_REG_OPEN_BUFFER = 24,
119
120 PIPE_REG_VERSION = 36,
121
122 PIPE_REG_GET_SIGNALLED = 48,
123};
124
125enum PipeCmdCode {
126 PIPE_CMD_OPEN = 1, /* to be used by the pipe device itself */
127 PIPE_CMD_CLOSE,
128 PIPE_CMD_POLL,
129 PIPE_CMD_WRITE,
130 PIPE_CMD_WAKE_ON_WRITE,
131 PIPE_CMD_READ,
132 PIPE_CMD_WAKE_ON_READ,
133
134 /*
135 * TODO(zyy): implement a deferred read/write execution to allow
136 * parallel processing of pipe operations on the host.
137 */
138 PIPE_CMD_WAKE_ON_DONE_IO,
139};
140
141enum { 77enum {
142 MAX_BUFFERS_PER_COMMAND = 336, 78 MAX_BUFFERS_PER_COMMAND = 336,
143 MAX_SIGNALLED_PIPES = 64, 79 MAX_SIGNALLED_PIPES = 64,
@@ -145,14 +81,12 @@ enum {
145}; 81};
146 82
147struct goldfish_pipe_dev; 83struct goldfish_pipe_dev;
148struct goldfish_pipe;
149struct goldfish_pipe_command;
150 84
151/* A per-pipe command structure, shared with the host */ 85/* A per-pipe command structure, shared with the host */
152struct goldfish_pipe_command { 86struct goldfish_pipe_command {
153 s32 cmd; /* PipeCmdCode, guest -> host */ 87 s32 cmd; /* PipeCmdCode, guest -> host */
154 s32 id; /* pipe id, guest -> host */ 88 s32 id; /* pipe id, guest -> host */
155 s32 status; /* command execution status, host -> guest */ 89 s32 status; /* command execution status, host -> guest */
156 s32 reserved; /* to pad to 64-bit boundary */ 90 s32 reserved; /* to pad to 64-bit boundary */
157 union { 91 union {
158 /* Parameters for PIPE_CMD_{READ,WRITE} */ 92 /* Parameters for PIPE_CMD_{READ,WRITE} */
@@ -184,19 +118,21 @@ struct open_command_param {
184/* Device-level set of buffers shared with the host */ 118/* Device-level set of buffers shared with the host */
185struct goldfish_pipe_dev_buffers { 119struct goldfish_pipe_dev_buffers {
186 struct open_command_param open_command_params; 120 struct open_command_param open_command_params;
187 struct signalled_pipe_buffer signalled_pipe_buffers[ 121 struct signalled_pipe_buffer
188 MAX_SIGNALLED_PIPES]; 122 signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
189}; 123};
190 124
191/* This data type models a given pipe instance */ 125/* This data type models a given pipe instance */
192struct goldfish_pipe { 126struct goldfish_pipe {
193 /* pipe ID - index into goldfish_pipe_dev::pipes array */ 127 /* pipe ID - index into goldfish_pipe_dev::pipes array */
194 u32 id; 128 u32 id;
129
195	/* The wake flags the pipe is waiting for 130	/* The wake flags the pipe is waiting for
196 * Note: not protected with any lock, uses atomic operations 131 * Note: not protected with any lock, uses atomic operations
197 * and barriers to make it thread-safe. 132 * and barriers to make it thread-safe.
198 */ 133 */
199 unsigned long flags; 134 unsigned long flags;
135
200	/* wake flags the host has signalled, 136	/* wake flags the host has signalled,
201 * - protected by goldfish_pipe_dev::lock 137 * - protected by goldfish_pipe_dev::lock
202 */ 138 */
@@ -220,8 +156,12 @@ struct goldfish_pipe {
220 156
221 /* A wake queue for sleeping until host signals an event */ 157 /* A wake queue for sleeping until host signals an event */
222 wait_queue_head_t wake_queue; 158 wait_queue_head_t wake_queue;
159
223 /* Pointer to the parent goldfish_pipe_dev instance */ 160 /* Pointer to the parent goldfish_pipe_dev instance */
224 struct goldfish_pipe_dev *dev; 161 struct goldfish_pipe_dev *dev;
162
163 /* A buffer of pages, too large to fit into a stack frame */
164 struct page *pages[MAX_BUFFERS_PER_COMMAND];
225}; 165};
226 166
227/* The global driver data. Holds a reference to the i/o page used to 167/* The global driver data. Holds a reference to the i/o page used to
@@ -229,6 +169,9 @@ struct goldfish_pipe {
229 * waiting to be awoken. 169 * waiting to be awoken.
230 */ 170 */
231struct goldfish_pipe_dev { 171struct goldfish_pipe_dev {
172 /* A magic number to check if this is an instance of this struct */
173 void *magic;
174
232 /* 175 /*
233 * Global device spinlock. Protects the following members: 176 * Global device spinlock. Protects the following members:
234 * - pipes, pipes_capacity 177 * - pipes, pipes_capacity
@@ -261,15 +204,22 @@ struct goldfish_pipe_dev {
261 /* Head of a doubly linked list of signalled pipes */ 204 /* Head of a doubly linked list of signalled pipes */
262 struct goldfish_pipe *first_signalled_pipe; 205 struct goldfish_pipe *first_signalled_pipe;
263 206
207 /* ptr to platform device's device struct */
208 struct device *pdev_dev;
209
264 /* Some device-specific data */ 210 /* Some device-specific data */
265 int irq; 211 int irq;
266 int version; 212 int version;
267 unsigned char __iomem *base; 213 unsigned char __iomem *base;
268};
269 214
270static struct goldfish_pipe_dev pipe_dev[1] = {}; 215 /* an irq tasklet to run goldfish_interrupt_task */
216 struct tasklet_struct irq_tasklet;
271 217
272static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd) 218 struct miscdevice miscdev;
219};
220
221static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
222 enum PipeCmdCode cmd)
273{ 223{
274 pipe->command_buffer->cmd = cmd; 224 pipe->command_buffer->cmd = cmd;
275 /* failure by default */ 225 /* failure by default */
@@ -278,13 +228,13 @@ static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
278 return pipe->command_buffer->status; 228 return pipe->command_buffer->status;
279} 229}
280 230
281static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd) 231static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
282{ 232{
283 int status; 233 int status;
284 234
285 if (mutex_lock_interruptible(&pipe->lock)) 235 if (mutex_lock_interruptible(&pipe->lock))
286 return PIPE_ERROR_IO; 236 return PIPE_ERROR_IO;
287 status = goldfish_cmd_locked(pipe, cmd); 237 status = goldfish_pipe_cmd_locked(pipe, cmd);
288 mutex_unlock(&pipe->lock); 238 mutex_unlock(&pipe->lock);
289 return status; 239 return status;
290} 240}
@@ -307,10 +257,12 @@ static int goldfish_pipe_error_convert(int status)
307 } 257 }
308} 258}
309 259
310static int pin_user_pages(unsigned long first_page, unsigned long last_page, 260static int pin_user_pages(unsigned long first_page,
311 unsigned int last_page_size, int is_write, 261 unsigned long last_page,
312 struct page *pages[MAX_BUFFERS_PER_COMMAND], 262 unsigned int last_page_size,
313 unsigned int *iter_last_page_size) 263 int is_write,
264 struct page *pages[MAX_BUFFERS_PER_COMMAND],
265 unsigned int *iter_last_page_size)
314{ 266{
315 int ret; 267 int ret;
316 int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1; 268 int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
@@ -322,18 +274,18 @@ static int pin_user_pages(unsigned long first_page, unsigned long last_page,
322 *iter_last_page_size = last_page_size; 274 *iter_last_page_size = last_page_size;
323 } 275 }
324 276
325 ret = get_user_pages_fast( 277 ret = get_user_pages_fast(first_page, requested_pages, !is_write,
326 first_page, requested_pages, !is_write, pages); 278 pages);
327 if (ret <= 0) 279 if (ret <= 0)
328 return -EFAULT; 280 return -EFAULT;
329 if (ret < requested_pages) 281 if (ret < requested_pages)
330 *iter_last_page_size = PAGE_SIZE; 282 *iter_last_page_size = PAGE_SIZE;
331 return ret;
332 283
284 return ret;
333} 285}
334 286
335static void release_user_pages(struct page **pages, int pages_count, 287static void release_user_pages(struct page **pages, int pages_count,
336 int is_write, s32 consumed_size) 288 int is_write, s32 consumed_size)
337{ 289{
338 int i; 290 int i;
339 291
@@ -345,12 +297,15 @@ static void release_user_pages(struct page **pages, int pages_count,
345} 297}
346 298
347/* Populate the call parameters, merging adjacent pages together */ 299/* Populate the call parameters, merging adjacent pages together */
348static void populate_rw_params( 300static void populate_rw_params(struct page **pages,
349 struct page **pages, int pages_count, 301 int pages_count,
350 unsigned long address, unsigned long address_end, 302 unsigned long address,
351 unsigned long first_page, unsigned long last_page, 303 unsigned long address_end,
352 unsigned int iter_last_page_size, int is_write, 304 unsigned long first_page,
353 struct goldfish_pipe_command *command) 305 unsigned long last_page,
306 unsigned int iter_last_page_size,
307 int is_write,
308 struct goldfish_pipe_command *command)
354{ 309{
355 /* 310 /*
356 * Process the first page separately - it's the only page that 311 * Process the first page separately - it's the only page that
@@ -382,55 +337,59 @@ static void populate_rw_params(
382} 337}
383 338
384static int transfer_max_buffers(struct goldfish_pipe *pipe, 339static int transfer_max_buffers(struct goldfish_pipe *pipe,
385 unsigned long address, unsigned long address_end, int is_write, 340 unsigned long address,
386 unsigned long last_page, unsigned int last_page_size, 341 unsigned long address_end,
387 s32 *consumed_size, int *status) 342 int is_write,
343 unsigned long last_page,
344 unsigned int last_page_size,
345 s32 *consumed_size,
346 int *status)
388{ 347{
389 static struct page *pages[MAX_BUFFERS_PER_COMMAND];
390 unsigned long first_page = address & PAGE_MASK; 348 unsigned long first_page = address & PAGE_MASK;
391 unsigned int iter_last_page_size; 349 unsigned int iter_last_page_size;
392 int pages_count = pin_user_pages(first_page, last_page, 350 int pages_count;
393 last_page_size, is_write,
394 pages, &iter_last_page_size);
395
396 if (pages_count < 0)
397 return pages_count;
398 351
399 /* Serialize access to the pipe command buffers */ 352 /* Serialize access to the pipe command buffers */
400 if (mutex_lock_interruptible(&pipe->lock)) 353 if (mutex_lock_interruptible(&pipe->lock))
401 return -ERESTARTSYS; 354 return -ERESTARTSYS;
402 355
403 populate_rw_params(pages, pages_count, address, address_end, 356 pages_count = pin_user_pages(first_page, last_page,
404 first_page, last_page, iter_last_page_size, is_write, 357 last_page_size, is_write,
405 pipe->command_buffer); 358 pipe->pages, &iter_last_page_size);
359 if (pages_count < 0) {
360 mutex_unlock(&pipe->lock);
361 return pages_count;
362 }
363
364 populate_rw_params(pipe->pages, pages_count, address, address_end,
365 first_page, last_page, iter_last_page_size, is_write,
366 pipe->command_buffer);
406 367
407 /* Transfer the data */ 368 /* Transfer the data */
408 *status = goldfish_cmd_locked(pipe, 369 *status = goldfish_pipe_cmd_locked(pipe,
409 is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ); 370 is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
410 371
411 *consumed_size = pipe->command_buffer->rw_params.consumed_size; 372 *consumed_size = pipe->command_buffer->rw_params.consumed_size;
412 373
413 release_user_pages(pages, pages_count, is_write, *consumed_size); 374 release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);
414 375
415 mutex_unlock(&pipe->lock); 376 mutex_unlock(&pipe->lock);
416
417 return 0; 377 return 0;
418} 378}
419 379
420static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write) 380static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
421{ 381{
422 u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ; 382 u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
423 383
424 set_bit(wakeBit, &pipe->flags); 384 set_bit(wake_bit, &pipe->flags);
425 385
426 /* Tell the emulator we're going to wait for a wake event */ 386 /* Tell the emulator we're going to wait for a wake event */
427 (void)goldfish_cmd(pipe, 387 goldfish_pipe_cmd(pipe,
428 is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ); 388 is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
429 389
430 while (test_bit(wakeBit, &pipe->flags)) { 390 while (test_bit(wake_bit, &pipe->flags)) {
431 if (wait_event_interruptible( 391 if (wait_event_interruptible(pipe->wake_queue,
432 pipe->wake_queue, 392 !test_bit(wake_bit, &pipe->flags)))
433 !test_bit(wakeBit, &pipe->flags)))
434 return -ERESTARTSYS; 393 return -ERESTARTSYS;
435 394
436 if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) 395 if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
@@ -441,7 +400,9 @@ static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
441} 400}
442 401
443static ssize_t goldfish_pipe_read_write(struct file *filp, 402static ssize_t goldfish_pipe_read_write(struct file *filp,
444 char __user *buffer, size_t bufflen, int is_write) 403 char __user *buffer,
404 size_t bufflen,
405 int is_write)
445{ 406{
446 struct goldfish_pipe *pipe = filp->private_data; 407 struct goldfish_pipe *pipe = filp->private_data;
447 int count = 0, ret = -EINVAL; 408 int count = 0, ret = -EINVAL;
@@ -456,7 +417,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
456 return 0; 417 return 0;
457 /* Check the buffer range for access */ 418 /* Check the buffer range for access */
458 if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ, 419 if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
459 buffer, bufflen))) 420 buffer, bufflen)))
460 return -EFAULT; 421 return -EFAULT;
461 422
462 address = (unsigned long)buffer; 423 address = (unsigned long)buffer;
@@ -469,8 +430,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
469 int status; 430 int status;
470 431
471 ret = transfer_max_buffers(pipe, address, address_end, is_write, 432 ret = transfer_max_buffers(pipe, address, address_end, is_write,
472 last_page, last_page_size, &consumed_size, 433 last_page, last_page_size,
473 &status); 434 &consumed_size, &status);
474 if (ret < 0) 435 if (ret < 0)
475 break; 436 break;
476 437
@@ -496,7 +457,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
496 * err. 457 * err.
497 */ 458 */
498 if (status != PIPE_ERROR_AGAIN) 459 if (status != PIPE_ERROR_AGAIN)
499 pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n", 460 dev_err_ratelimited(pipe->dev->pdev_dev,
461 "backend error %d on %s\n",
500 status, is_write ? "write" : "read"); 462 status, is_write ? "write" : "read");
501 break; 463 break;
502 } 464 }
@@ -522,19 +484,21 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
522} 484}
523 485
524static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer, 486static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
525 size_t bufflen, loff_t *ppos) 487 size_t bufflen, loff_t *ppos)
526{ 488{
527 return goldfish_pipe_read_write(filp, buffer, bufflen, 489 return goldfish_pipe_read_write(filp, buffer, bufflen,
528 /* is_write */ 0); 490 /* is_write */ 0);
529} 491}
530 492
531static ssize_t goldfish_pipe_write(struct file *filp, 493static ssize_t goldfish_pipe_write(struct file *filp,
532 const char __user *buffer, size_t bufflen, 494 const char __user *buffer, size_t bufflen,
533 loff_t *ppos) 495 loff_t *ppos)
534{ 496{
535 return goldfish_pipe_read_write(filp, 497 /* cast away the const */
536 /* cast away the const */(char __user *)buffer, bufflen, 498 char __user *no_const_buffer = (char __user *)buffer;
537 /* is_write */ 1); 499
500 return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
501 /* is_write */ 1);
538} 502}
539 503
540static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait) 504static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
@@ -545,7 +509,7 @@ static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
545 509
546 poll_wait(filp, &pipe->wake_queue, wait); 510 poll_wait(filp, &pipe->wake_queue, wait);
547 511
548 status = goldfish_cmd(pipe, PIPE_CMD_POLL); 512 status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
549 if (status < 0) 513 if (status < 0)
550 return -ERESTARTSYS; 514 return -ERESTARTSYS;
551 515
@@ -562,7 +526,7 @@ static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
562} 526}
563 527
564static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev, 528static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
565 u32 id, u32 flags) 529 u32 id, u32 flags)
566{ 530{
567 struct goldfish_pipe *pipe; 531 struct goldfish_pipe *pipe;
568 532
@@ -574,8 +538,8 @@ static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
574 return; 538 return;
575 pipe->signalled_flags |= flags; 539 pipe->signalled_flags |= flags;
576 540
577 if (pipe->prev_signalled || pipe->next_signalled 541 if (pipe->prev_signalled || pipe->next_signalled ||
578 || dev->first_signalled_pipe == pipe) 542 dev->first_signalled_pipe == pipe)
579 return; /* already in the list */ 543 return; /* already in the list */
580 pipe->next_signalled = dev->first_signalled_pipe; 544 pipe->next_signalled = dev->first_signalled_pipe;
581 if (dev->first_signalled_pipe) 545 if (dev->first_signalled_pipe)
@@ -584,7 +548,8 @@ static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
584} 548}
585 549
586static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev, 550static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
587 struct goldfish_pipe *pipe) { 551 struct goldfish_pipe *pipe)
552{
588 if (pipe->prev_signalled) 553 if (pipe->prev_signalled)
589 pipe->prev_signalled->next_signalled = pipe->next_signalled; 554 pipe->prev_signalled->next_signalled = pipe->next_signalled;
590 if (pipe->next_signalled) 555 if (pipe->next_signalled)
@@ -623,10 +588,10 @@ static struct goldfish_pipe *signalled_pipes_pop_front(
623 return pipe; 588 return pipe;
624} 589}
625 590
626static void goldfish_interrupt_task(unsigned long unused) 591static void goldfish_interrupt_task(unsigned long dev_addr)
627{ 592{
628 struct goldfish_pipe_dev *dev = pipe_dev;
629 /* Iterate over the signalled pipes and wake them one by one */ 593 /* Iterate over the signalled pipes and wake them one by one */
594 struct goldfish_pipe_dev *dev = (struct goldfish_pipe_dev *)dev_addr;
630 struct goldfish_pipe *pipe; 595 struct goldfish_pipe *pipe;
631 int wakes; 596 int wakes;
632 597
@@ -646,7 +611,9 @@ static void goldfish_interrupt_task(unsigned long unused)
646 wake_up_interruptible(&pipe->wake_queue); 611 wake_up_interruptible(&pipe->wake_queue);
647 } 612 }
648} 613}
649static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0); 614
615static void goldfish_pipe_device_deinit(struct platform_device *pdev,
616 struct goldfish_pipe_dev *dev);
650 617
651/* 618/*
652 * The general idea of the interrupt handling: 619 * The general idea of the interrupt handling:
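Replacing the file-scope DECLARE_TASKLET() with a tasklet embedded in goldfish_pipe_dev is what allows the driver to drop its pipe_dev[1] singleton: each device instance gets its own bottom half, with the instance pointer carried in the tasklet's unsigned long argument. A generic sketch of that pattern, assuming the pre-5.9 tasklet_init() API used here (foo_* names hypothetical):

    #include <linux/interrupt.h>

    struct foo_dev {
            struct tasklet_struct irq_tasklet;
            /* ... per-instance state ... */
    };

    static void foo_task(unsigned long data)
    {
            struct foo_dev *foo = (struct foo_dev *)data;

            /* bottom-half work too heavy for hard-irq context */
            (void)foo;
    }

    static irqreturn_t foo_irq(int irq, void *dev_id)
    {
            struct foo_dev *foo = dev_id;

            tasklet_schedule(&foo->irq_tasklet);
            return IRQ_HANDLED;
    }

    static void foo_init(struct foo_dev *foo)
    {
            /* bind this instance as the tasklet argument */
            tasklet_init(&foo->irq_tasklet, foo_task, (unsigned long)foo);
    }

    static void foo_exit(struct foo_dev *foo)
    {
            /* waits for a running tasklet before returning */
            tasklet_kill(&foo->irq_tasklet);
    }

The matching tasklet_kill() appears below in goldfish_pipe_device_deinit(); forgetting it would let the tasklet run against freed memory.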
@@ -668,7 +635,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
668 unsigned long flags; 635 unsigned long flags;
669 struct goldfish_pipe_dev *dev = dev_id; 636 struct goldfish_pipe_dev *dev = dev_id;
670 637
671 if (dev != pipe_dev) 638 if (dev->magic != &goldfish_pipe_device_deinit)
672 return IRQ_NONE; 639 return IRQ_NONE;
673 640
674 /* Request the signalled pipes from the device */ 641 /* Request the signalled pipes from the device */
@@ -689,7 +656,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
689 656
690 spin_unlock_irqrestore(&dev->lock, flags); 657 spin_unlock_irqrestore(&dev->lock, flags);
691 658
692 tasklet_schedule(&goldfish_interrupt_tasklet); 659 tasklet_schedule(&dev->irq_tasklet);
693 return IRQ_HANDLED; 660 return IRQ_HANDLED;
694} 661}
695 662
@@ -702,7 +669,10 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
702 return id; 669 return id;
703 670
704 { 671 {
705 /* Reallocate the array */ 672 /* Reallocate the array.
673 * Since get_free_pipe_id_locked runs with interrupts disabled,
674 * we don't want to make calls that could lead to sleep.
675 */
706 u32 new_capacity = 2 * dev->pipes_capacity; 676 u32 new_capacity = 2 * dev->pipes_capacity;
707 struct goldfish_pipe **pipes = 677 struct goldfish_pipe **pipes =
708 kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC); 678 kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
@@ -717,6 +687,14 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
717 return id; 687 return id;
718} 688}
719 689
690/* A helper function to get the instance of goldfish_pipe_dev from file */
691static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
692{
693 struct miscdevice *miscdev = file->private_data;
694
695 return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
696}
697
720/** 698/**
721 * goldfish_pipe_open - open a channel to the AVD 699 * goldfish_pipe_open - open a channel to the AVD
722 * @inode: inode of device 700 * @inode: inode of device
@@ -730,14 +708,15 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
730 */ 708 */
731static int goldfish_pipe_open(struct inode *inode, struct file *file) 709static int goldfish_pipe_open(struct inode *inode, struct file *file)
732{ 710{
733 struct goldfish_pipe_dev *dev = pipe_dev; 711 struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
734 unsigned long flags; 712 unsigned long flags;
735 int id; 713 int id;
736 int status; 714 int status;
737 715
738 /* Allocate new pipe kernel object */ 716 /* Allocate new pipe kernel object */
739 struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL); 717 struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
740 if (pipe == NULL) 718
719 if (!pipe)
741 return -ENOMEM; 720 return -ENOMEM;
742 721
743 pipe->dev = dev; 722 pipe->dev = dev;
@@ -748,6 +727,7 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
748 * Command buffer needs to be allocated on its own page to make sure 727 * Command buffer needs to be allocated on its own page to make sure
749 * it is physically contiguous in host's address space. 728 * it is physically contiguous in host's address space.
750 */ 729 */
730 BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
751 pipe->command_buffer = 731 pipe->command_buffer =
752 (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL); 732 (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
753 if (!pipe->command_buffer) { 733 if (!pipe->command_buffer) {
@@ -772,7 +752,7 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
772 MAX_BUFFERS_PER_COMMAND; 752 MAX_BUFFERS_PER_COMMAND;
773 dev->buffers->open_command_params.command_buffer_ptr = 753 dev->buffers->open_command_params.command_buffer_ptr =
774 (u64)(unsigned long)__pa(pipe->command_buffer); 754 (u64)(unsigned long)__pa(pipe->command_buffer);
775 status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN); 755 status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
776 spin_unlock_irqrestore(&dev->lock, flags); 756 spin_unlock_irqrestore(&dev->lock, flags);
777 if (status < 0) 757 if (status < 0)
778 goto err_cmd; 758 goto err_cmd;
@@ -798,7 +778,7 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp)
798 struct goldfish_pipe_dev *dev = pipe->dev; 778 struct goldfish_pipe_dev *dev = pipe->dev;
799 779
800 /* The guest is closing the channel, so tell the emulator right now */ 780 /* The guest is closing the channel, so tell the emulator right now */
801 (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE); 781 goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
802 782
803 spin_lock_irqsave(&dev->lock, flags); 783 spin_lock_irqsave(&dev->lock, flags);
804 dev->pipes[pipe->id] = NULL; 784 dev->pipes[pipe->id] = NULL;
@@ -820,36 +800,55 @@ static const struct file_operations goldfish_pipe_fops = {
820 .release = goldfish_pipe_release, 800 .release = goldfish_pipe_release,
821}; 801};
822 802
823static struct miscdevice goldfish_pipe_dev = { 803static void init_miscdevice(struct miscdevice *miscdev)
824 .minor = MISC_DYNAMIC_MINOR, 804{
825 .name = "goldfish_pipe", 805 memset(miscdev, 0, sizeof(*miscdev));
826 .fops = &goldfish_pipe_fops, 806
827}; 807 miscdev->minor = MISC_DYNAMIC_MINOR;
808 miscdev->name = "goldfish_pipe";
809 miscdev->fops = &goldfish_pipe_fops;
810}
811
812static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
813{
814 const unsigned long paddr = __pa(addr);
815
816 writel(upper_32_bits(paddr), porth);
817 writel(lower_32_bits(paddr), portl);
818}
828 819
829static int goldfish_pipe_device_init(struct platform_device *pdev) 820static int goldfish_pipe_device_init(struct platform_device *pdev,
821 struct goldfish_pipe_dev *dev)
830{ 822{
831 char *page; 823 int err;
832 struct goldfish_pipe_dev *dev = pipe_dev; 824
833 int err = devm_request_irq(&pdev->dev, dev->irq, 825 tasklet_init(&dev->irq_tasklet, &goldfish_interrupt_task,
834 goldfish_pipe_interrupt, 826 (unsigned long)dev);
835 IRQF_SHARED, "goldfish_pipe", dev); 827
828 err = devm_request_irq(&pdev->dev, dev->irq,
829 goldfish_pipe_interrupt,
830 IRQF_SHARED, "goldfish_pipe", dev);
836 if (err) { 831 if (err) {
837 dev_err(&pdev->dev, "unable to allocate IRQ for v2\n"); 832 dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
838 return err; 833 return err;
839 } 834 }
840 835
841 err = misc_register(&goldfish_pipe_dev); 836 init_miscdevice(&dev->miscdev);
837 err = misc_register(&dev->miscdev);
842 if (err) { 838 if (err) {
843 dev_err(&pdev->dev, "unable to register v2 device\n"); 839 dev_err(&pdev->dev, "unable to register v2 device\n");
844 return err; 840 return err;
845 } 841 }
846 842
843 dev->pdev_dev = &pdev->dev;
847 dev->first_signalled_pipe = NULL; 844 dev->first_signalled_pipe = NULL;
848 dev->pipes_capacity = INITIAL_PIPES_CAPACITY; 845 dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
849 dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes), 846 dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
850 GFP_KERNEL); 847 GFP_KERNEL);
851 if (!dev->pipes) 848 if (!dev->pipes) {
849 misc_deregister(&dev->miscdev);
852 return -ENOMEM; 850 return -ENOMEM;
851 }
853 852
854 /* 853 /*
855 * We're going to pass two buffers, open_command_params and 854 * We're going to pass two buffers, open_command_params and
@@ -857,75 +856,67 @@ static int goldfish_pipe_device_init(struct platform_device *pdev)
857 * needs to be contained in a single physical page. The easiest choice 856 * needs to be contained in a single physical page. The easiest choice
858 * is to just allocate a page and place the buffers in it. 857 * is to just allocate a page and place the buffers in it.
859 */ 858 */
860 if (WARN_ON(sizeof(*dev->buffers) > PAGE_SIZE)) 859 BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
861 return -ENOMEM; 860 dev->buffers = (struct goldfish_pipe_dev_buffers *)
862 861 __get_free_page(GFP_KERNEL);
863 page = (char *)__get_free_page(GFP_KERNEL); 862 if (!dev->buffers) {
864 if (!page) {
865 kfree(dev->pipes); 863 kfree(dev->pipes);
864 misc_deregister(&dev->miscdev);
866 return -ENOMEM; 865 return -ENOMEM;
867 } 866 }
868 dev->buffers = (struct goldfish_pipe_dev_buffers *)page;
869 867
870 /* Send the buffer addresses to the host */ 868 /* Send the buffer addresses to the host */
871 { 869 write_pa_addr(&dev->buffers->signalled_pipe_buffers,
872 u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers); 870 dev->base + PIPE_REG_SIGNAL_BUFFER,
873 871 dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
874 writel((u32)(unsigned long)(paddr >> 32), 872
875 dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH); 873 writel(MAX_SIGNALLED_PIPES,
876 writel((u32)(unsigned long)paddr, 874 dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
877 dev->base + PIPE_REG_SIGNAL_BUFFER); 875
878 writel((u32)MAX_SIGNALLED_PIPES, 876 write_pa_addr(&dev->buffers->open_command_params,
879 dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT); 877 dev->base + PIPE_REG_OPEN_BUFFER,
880 878 dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
881 paddr = __pa(&dev->buffers->open_command_params); 879
882 writel((u32)(unsigned long)(paddr >> 32), 880 platform_set_drvdata(pdev, dev);
883 dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
884 writel((u32)(unsigned long)paddr,
885 dev->base + PIPE_REG_OPEN_BUFFER);
886 }
887 return 0; 881 return 0;
888} 882}
889 883
890static void goldfish_pipe_device_deinit(struct platform_device *pdev) 884static void goldfish_pipe_device_deinit(struct platform_device *pdev,
885 struct goldfish_pipe_dev *dev)
891{ 886{
892 struct goldfish_pipe_dev *dev = pipe_dev; 887 misc_deregister(&dev->miscdev);
893 888 tasklet_kill(&dev->irq_tasklet);
894 misc_deregister(&goldfish_pipe_dev);
895 kfree(dev->pipes); 889 kfree(dev->pipes);
896 free_page((unsigned long)dev->buffers); 890 free_page((unsigned long)dev->buffers);
897} 891}
898 892
899static int goldfish_pipe_probe(struct platform_device *pdev) 893static int goldfish_pipe_probe(struct platform_device *pdev)
900{ 894{
901 int err;
902 struct resource *r; 895 struct resource *r;
903 struct goldfish_pipe_dev *dev = pipe_dev; 896 struct goldfish_pipe_dev *dev;
904 897
905 if (WARN_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE)) 898 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
899 if (!dev)
906 return -ENOMEM; 900 return -ENOMEM;
907 901
908 /* not thread safe, but this should not happen */ 902 dev->magic = &goldfish_pipe_device_deinit;
909 WARN_ON(dev->base != NULL);
910
911 spin_lock_init(&dev->lock); 903 spin_lock_init(&dev->lock);
912 904
913 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 905 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
914 if (r == NULL || resource_size(r) < PAGE_SIZE) { 906 if (!r || resource_size(r) < PAGE_SIZE) {
915 dev_err(&pdev->dev, "can't allocate i/o page\n"); 907 dev_err(&pdev->dev, "can't allocate i/o page\n");
916 return -EINVAL; 908 return -EINVAL;
917 } 909 }
918 dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); 910 dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
919 if (dev->base == NULL) { 911 if (!dev->base) {
920 dev_err(&pdev->dev, "ioremap failed\n"); 912 dev_err(&pdev->dev, "ioremap failed\n");
921 return -EINVAL; 913 return -EINVAL;
922 } 914 }
923 915
924 r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 916 r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
925 if (r == NULL) { 917 if (!r)
926 err = -EINVAL; 918 return -EINVAL;
927 goto error; 919
928 }
929 dev->irq = r->start; 920 dev->irq = r->start;
930 921
931 /* 922 /*
@@ -935,25 +926,19 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
935 * reading device version back: this allows the host implementation to 926 * reading device version back: this allows the host implementation to
936 * detect the old driver (if there was no version write before read). 927 * detect the old driver (if there was no version write before read).
937 */ 928 */
938 writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION); 929 writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
939 dev->version = readl(dev->base + PIPE_REG_VERSION); 930 dev->version = readl(dev->base + PIPE_REG_VERSION);
940 if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION)) 931 if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
941 return -EINVAL; 932 return -EINVAL;
942 933
943 err = goldfish_pipe_device_init(pdev); 934 return goldfish_pipe_device_init(pdev, dev);
944 if (!err)
945 return 0;
946
947error:
948 dev->base = NULL;
949 return err;
950} 935}
951 936
952static int goldfish_pipe_remove(struct platform_device *pdev) 937static int goldfish_pipe_remove(struct platform_device *pdev)
953{ 938{
954 struct goldfish_pipe_dev *dev = pipe_dev; 939 struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);
955 goldfish_pipe_device_deinit(pdev); 940
956 dev->base = NULL; 941 goldfish_pipe_device_deinit(pdev, dev);
957 return 0; 942 return 0;
958} 943}
959 944
@@ -981,4 +966,4 @@ static struct platform_driver goldfish_pipe_driver = {
981 966
982module_platform_driver(goldfish_pipe_driver); 967module_platform_driver(goldfish_pipe_driver);
983MODULE_AUTHOR("David Turner <digit@google.com>"); 968MODULE_AUTHOR("David Turner <digit@google.com>");
984MODULE_LICENSE("GPL"); 969MODULE_LICENSE("GPL v2");
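Taken together, the goldfish changes convert a single-instance driver into a properly instanced one: state moves from the static pipe_dev[1] into a devm_kzalloc()'d goldfish_pipe_dev hung off platform drvdata, and the character-device path recovers that state through the embedded miscdevice. The load-bearing detail is that the misc core points file->private_data at the registered struct miscdevice before calling the driver's open, so container_of() gets back to the owner. A minimal sketch of the idiom (foo_* names hypothetical):

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/miscdevice.h>

    struct foo_dev {
            /* ... per-device state ... */
            struct miscdevice miscdev;      /* embedded, not a pointer */
    };

    static int foo_open(struct inode *inode, struct file *file)
    {
            /* misc core has set file->private_data = &foo->miscdev */
            struct miscdevice *miscdev = file->private_data;
            struct foo_dev *foo =
                    container_of(miscdev, struct foo_dev, miscdev);

            /* typically re-point private_data at a per-open session */
            (void)foo;
            return 0;
    }

The same series also swaps the runtime WARN_ON() size checks for BUILD_BUG_ON(), turning an impossible condition into a compile failure, and moves the per-command pages[] array into struct goldfish_pipe, where pipe->lock already serializes its users; the old function-local static was shared across pipes without its own locking.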
diff --git a/drivers/platform/goldfish/goldfish_pipe_qemu.h b/drivers/platform/goldfish/goldfish_pipe_qemu.h
new file mode 100644
index 000000000000..b4d78c108afd
--- /dev/null
+++ b/drivers/platform/goldfish/goldfish_pipe_qemu.h
@@ -0,0 +1,98 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * IMPORTANT: The following constants must match the ones used and defined in
4 * external/qemu/include/hw/misc/goldfish_pipe.h
5 */
6
7#ifndef GOLDFISH_PIPE_QEMU_H
8#define GOLDFISH_PIPE_QEMU_H
9
10/* List of bitflags returned in status of CMD_POLL command */
11enum PipePollFlags {
12 PIPE_POLL_IN = 1 << 0,
13 PIPE_POLL_OUT = 1 << 1,
14 PIPE_POLL_HUP = 1 << 2
15};
16
17/* Possible status values used to signal errors */
18enum PipeErrors {
19 PIPE_ERROR_INVAL = -1,
20 PIPE_ERROR_AGAIN = -2,
21 PIPE_ERROR_NOMEM = -3,
22 PIPE_ERROR_IO = -4
23};
24
25/* Bit-flags used to signal events from the emulator */
26enum PipeWakeFlags {
27 /* emulator closed pipe */
28 PIPE_WAKE_CLOSED = 1 << 0,
29
30 /* pipe can now be read from */
31 PIPE_WAKE_READ = 1 << 1,
32
33 /* pipe can now be written to */
34 PIPE_WAKE_WRITE = 1 << 2,
35
36 /* unlock this pipe's DMA buffer */
37 PIPE_WAKE_UNLOCK_DMA = 1 << 3,
38
39 /* unlock DMA buffer of the pipe shared to this pipe */
40 PIPE_WAKE_UNLOCK_DMA_SHARED = 1 << 4,
41};
42
43/* Possible pipe closing reasons */
44enum PipeCloseReason {
45 /* guest sent a close command */
46 PIPE_CLOSE_GRACEFUL = 0,
47
48 /* guest rebooted, we're closing the pipes */
49 PIPE_CLOSE_REBOOT = 1,
50
51 /* close old pipes on snapshot load */
52 PIPE_CLOSE_LOAD_SNAPSHOT = 2,
53
54 /* some unrecoverable error on the pipe */
55 PIPE_CLOSE_ERROR = 3,
56};
57
58/* Bit flags for the 'flags' field */
59enum PipeFlagsBits {
60 BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
61 BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
62 BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
63};
64
65enum PipeRegs {
66 PIPE_REG_CMD = 0,
67
68 PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
69 PIPE_REG_SIGNAL_BUFFER = 8,
70 PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
71
72 PIPE_REG_OPEN_BUFFER_HIGH = 20,
73 PIPE_REG_OPEN_BUFFER = 24,
74
75 PIPE_REG_VERSION = 36,
76
77 PIPE_REG_GET_SIGNALLED = 48,
78};
79
80enum PipeCmdCode {
81 /* to be used by the pipe device itself */
82 PIPE_CMD_OPEN = 1,
83
84 PIPE_CMD_CLOSE,
85 PIPE_CMD_POLL,
86 PIPE_CMD_WRITE,
87 PIPE_CMD_WAKE_ON_WRITE,
88 PIPE_CMD_READ,
89 PIPE_CMD_WAKE_ON_READ,
90
91 /*
92 * TODO(zyy): implement a deferred read/write execution to allow
93 * parallel processing of pipe operations on the host.
94 */
95 PIPE_CMD_WAKE_ON_DONE_IO,
96};
97
98#endif /* GOLDFISH_PIPE_QEMU_H */
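The new header only relocates and extends the protocol constants; the transport itself is unchanged. For orientation, a hedged sketch of how the driver drives these registers, mirroring goldfish_pipe_cmd_locked() above (the doorbell write sits in unchanged lines between the two hunks shown earlier, so it does not appear in the diff):

    /* fill the shared page, ring the doorbell, read back the status */
    static int pipe_cmd_sketch(struct goldfish_pipe *pipe,
                               enum PipeCmdCode cmd)
    {
            pipe->command_buffer->cmd = cmd;
            /* assume failure until the host overwrites it */
            pipe->command_buffer->status = PIPE_ERROR_INVAL;
            /* traps to the emulator, which executes cmd synchronously */
            writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
            return pipe->command_buffer->status;
    }

Because the host consumes the command page by physical address, the buffer must stay within one page, which is exactly what the new BUILD_BUG_ON() in goldfish_pipe_open() enforces.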
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 95b00d28ad6e..55eda5863a6b 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -9,6 +9,7 @@
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/idr.h> 10#include <linux/idr.h>
11#include <linux/of.h> 11#include <linux/of.h>
12#include <linux/of_device.h>
12#include <linux/pm_runtime.h> 13#include <linux/pm_runtime.h>
13#include <linux/slimbus.h> 14#include <linux/slimbus.h>
14#include "slimbus.h" 15#include "slimbus.h"
@@ -32,6 +33,10 @@ static int slim_device_match(struct device *dev, struct device_driver *drv)
32 struct slim_device *sbdev = to_slim_device(dev); 33 struct slim_device *sbdev = to_slim_device(dev);
33 struct slim_driver *sbdrv = to_slim_driver(drv); 34 struct slim_driver *sbdrv = to_slim_driver(drv);
34 35
36 /* Attempt an OF style match first */
37 if (of_driver_match_device(dev, drv))
38 return 1;
39
35 return !!slim_match(sbdrv->id_table, sbdev); 40 return !!slim_match(sbdrv->id_table, sbdev);
36} 41}
37 42
@@ -39,8 +44,23 @@ static int slim_device_probe(struct device *dev)
39{ 44{
40 struct slim_device *sbdev = to_slim_device(dev); 45 struct slim_device *sbdev = to_slim_device(dev);
41 struct slim_driver *sbdrv = to_slim_driver(dev->driver); 46 struct slim_driver *sbdrv = to_slim_driver(dev->driver);
47 int ret;
42 48
43 return sbdrv->probe(sbdev); 49 ret = sbdrv->probe(sbdev);
50 if (ret)
51 return ret;
52
53 /* try getting the logical address after probe */
54 ret = slim_get_logical_addr(sbdev);
55 if (!ret) {
56 if (sbdrv->device_status)
57 sbdrv->device_status(sbdev, sbdev->status);
58 } else {
59 dev_err(&sbdev->dev, "Failed to get logical address\n");
60 ret = -EPROBE_DEFER;
61 }
62
63 return ret;
44} 64}
45 65
46static int slim_device_remove(struct device *dev) 66static int slim_device_remove(struct device *dev)
@@ -57,11 +77,24 @@ static int slim_device_remove(struct device *dev)
57 return 0; 77 return 0;
58} 78}
59 79
80static int slim_device_uevent(struct device *dev, struct kobj_uevent_env *env)
81{
82 struct slim_device *sbdev = to_slim_device(dev);
83 int ret;
84
85 ret = of_device_uevent_modalias(dev, env);
86 if (ret != -ENODEV)
87 return ret;
88
89 return add_uevent_var(env, "MODALIAS=slim:%s", dev_name(&sbdev->dev));
90}
91
60struct bus_type slimbus_bus = { 92struct bus_type slimbus_bus = {
61 .name = "slimbus", 93 .name = "slimbus",
62 .match = slim_device_match, 94 .match = slim_device_match,
63 .probe = slim_device_probe, 95 .probe = slim_device_probe,
64 .remove = slim_device_remove, 96 .remove = slim_device_remove,
97 .uevent = slim_device_uevent,
65}; 98};
66EXPORT_SYMBOL_GPL(slimbus_bus); 99EXPORT_SYMBOL_GPL(slimbus_bus);
67 100
@@ -77,7 +110,7 @@ EXPORT_SYMBOL_GPL(slimbus_bus);
77int __slim_driver_register(struct slim_driver *drv, struct module *owner) 110int __slim_driver_register(struct slim_driver *drv, struct module *owner)
78{ 111{
79 /* ID table and probe are mandatory */ 112 /* ID table and probe are mandatory */
80 if (!drv->id_table || !drv->probe) 113 if (!(drv->driver.of_match_table || drv->id_table) || !drv->probe)
81 return -EINVAL; 114 return -EINVAL;
82 115
83 drv->driver.bus = &slimbus_bus; 116 drv->driver.bus = &slimbus_bus;
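The relaxed registration check now accepts a driver that matches purely by OF compatible string, which the new slim_device_match() path above supports. A sketch of such a DT-only SLIMbus driver (compatible string and foo_* names hypothetical):

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/slimbus.h>

    static const struct of_device_id foo_slim_of_match[] = {
            { .compatible = "vendor,foo-codec" },
            { }
    };
    MODULE_DEVICE_TABLE(of, foo_slim_of_match);

    static int foo_slim_probe(struct slim_device *sbdev)
    {
            /* device_status() fires once the logical address is known */
            return 0;
    }

    static struct slim_driver foo_slim_driver = {
            .probe = foo_slim_probe,
            .driver = {
                    .name = "foo-slim",
                    .of_match_table = foo_slim_of_match,
            },
            /* no id_table: OF match alone now passes registration */
    };
    module_slim_driver(foo_slim_driver);

Note the probe-side consequence above: if the logical address cannot be obtained yet, slim_device_probe() returns -EPROBE_DEFER, so the driver core retries the whole probe later rather than leaving the device half-bound.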
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 8be4d6786c61..7218fb963d0a 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1004,6 +1004,7 @@ static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
1004 struct slim_eaddr *ea, u8 *laddr) 1004 struct slim_eaddr *ea, u8 *laddr)
1005{ 1005{
1006 struct slim_val_inf msg = {0}; 1006 struct slim_val_inf msg = {0};
1007 u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
1007 struct slim_msg_txn txn; 1008 struct slim_msg_txn txn;
1008 u8 wbuf[10] = {0}; 1009 u8 wbuf[10] = {0};
1009 u8 rbuf[10] = {0}; 1010 u8 rbuf[10] = {0};
@@ -1034,6 +1035,9 @@ static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
1034 return ret; 1035 return ret;
1035 } 1036 }
1036 1037
1038 if (!memcmp(rbuf, failed_ea, 6))
1039 return -ENXIO;
1040
1037 *laddr = rbuf[6]; 1041 *laddr = rbuf[6];
1038 1042
1039 return ret; 1043 return ret;
@@ -1234,8 +1238,17 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
1234 pm_runtime_resume(ctrl->dev); 1238 pm_runtime_resume(ctrl->dev);
1235 pm_runtime_mark_last_busy(ctrl->dev); 1239 pm_runtime_mark_last_busy(ctrl->dev);
1236 pm_runtime_put(ctrl->dev); 1240 pm_runtime_put(ctrl->dev);
1241
1242 ret = slim_register_controller(&ctrl->ctrl);
1243 if (ret) {
1244 dev_err(ctrl->dev, "error adding slim controller\n");
1245 return ret;
1246 }
1247
1248 dev_info(ctrl->dev, "SLIM controller Registered\n");
1237 } else { 1249 } else {
1238 qcom_slim_qmi_exit(ctrl); 1250 qcom_slim_qmi_exit(ctrl);
1251 slim_unregister_controller(&ctrl->ctrl);
1239 } 1252 }
1240 1253
1241 return 0; 1254 return 0;
@@ -1342,7 +1355,6 @@ static int of_qcom_slim_ngd_register(struct device *parent,
1342 ngd->base = ctrl->base + ngd->id * data->offset + 1355 ngd->base = ctrl->base + ngd->id * data->offset +
1343 (ngd->id - 1) * data->size; 1356 (ngd->id - 1) * data->size;
1344 ctrl->ngd = ngd; 1357 ctrl->ngd = ngd;
1345 platform_driver_register(&qcom_slim_ngd_driver);
1346 1358
1347 return 0; 1359 return 0;
1348 } 1360 }
@@ -1357,11 +1369,6 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
1357 int ret; 1369 int ret;
1358 1370
1359 ctrl->ctrl.dev = dev; 1371 ctrl->ctrl.dev = dev;
1360 ret = slim_register_controller(&ctrl->ctrl);
1361 if (ret) {
1362 dev_err(dev, "error adding slim controller\n");
1363 return ret;
1364 }
1365 1372
1366 pm_runtime_use_autosuspend(dev); 1373 pm_runtime_use_autosuspend(dev);
1367 pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND); 1374 pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
@@ -1371,7 +1378,7 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
1371 ret = qcom_slim_ngd_qmi_svc_event_init(ctrl); 1378 ret = qcom_slim_ngd_qmi_svc_event_init(ctrl);
1372 if (ret) { 1379 if (ret) {
1373 dev_err(&pdev->dev, "QMI service registration failed:%d", ret); 1380 dev_err(&pdev->dev, "QMI service registration failed:%d", ret);
1374 goto err; 1381 return ret;
1375 } 1382 }
1376 1383
1377 INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker); 1384 INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker);
@@ -1383,14 +1390,12 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
1383 } 1390 }
1384 1391
1385 return 0; 1392 return 0;
1386err:
1387 slim_unregister_controller(&ctrl->ctrl);
1388wq_err: 1393wq_err:
1389 qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi); 1394 qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
1390 if (ctrl->mwq) 1395 if (ctrl->mwq)
1391 destroy_workqueue(ctrl->mwq); 1396 destroy_workqueue(ctrl->mwq);
1392 1397
1393 return 0; 1398 return ret;
1394} 1399}
1395 1400
1396static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) 1401static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
@@ -1441,6 +1446,7 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
1441 init_completion(&ctrl->reconf); 1446 init_completion(&ctrl->reconf);
1442 init_completion(&ctrl->qmi.qmi_comp); 1447 init_completion(&ctrl->qmi.qmi_comp);
1443 1448
1449 platform_driver_register(&qcom_slim_ngd_driver);
1444 return of_qcom_slim_ngd_register(dev, ctrl); 1450 return of_qcom_slim_ngd_register(dev, ctrl);
1445} 1451}
1446 1452
@@ -1456,7 +1462,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev)
1456 struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev); 1462 struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
1457 1463
1458 pm_runtime_disable(&pdev->dev); 1464 pm_runtime_disable(&pdev->dev);
1459 slim_unregister_controller(&ctrl->ctrl); 1465 qcom_slim_ngd_enable(ctrl, false);
1460 qcom_slim_ngd_exit_dma(ctrl); 1466 qcom_slim_ngd_exit_dma(ctrl);
1461 qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi); 1467 qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
1462 if (ctrl->mwq) 1468 if (ctrl->mwq)
@@ -1467,7 +1473,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev)
1467 return 0; 1473 return 0;
1468} 1474}
1469 1475
1470static int qcom_slim_ngd_runtime_idle(struct device *dev) 1476static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev)
1471{ 1477{
1472 struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); 1478 struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
1473 1479
@@ -1477,8 +1483,7 @@ static int qcom_slim_ngd_runtime_idle(struct device *dev)
1477 return -EAGAIN; 1483 return -EAGAIN;
1478} 1484}
1479 1485
1480#ifdef CONFIG_PM 1486static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
1481static int qcom_slim_ngd_runtime_suspend(struct device *dev)
1482{ 1487{
1483 struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); 1488 struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
1484 int ret = 0; 1489 int ret = 0;
@@ -1491,7 +1496,6 @@ static int qcom_slim_ngd_runtime_suspend(struct device *dev)
1491 1496
1492 return ret; 1497 return ret;
1493} 1498}
1494#endif
1495 1499
1496static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = { 1500static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
1497 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 1501 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
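Dropping the #ifdef CONFIG_PM block works because of how the PM macros degrade: SET_RUNTIME_PM_OPS() expands to nothing when runtime PM is disabled, and __maybe_unused tells the compiler not to warn about the then-unreferenced callbacks, which it simply discards. The generic shape of the idiom:

    #include <linux/pm.h>

    static int __maybe_unused foo_runtime_suspend(struct device *dev)
    {
            /* quiesce the hardware */
            return 0;
    }

    static int __maybe_unused foo_runtime_resume(struct device *dev)
    {
            /* bring the hardware back up */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            /* empty when CONFIG_PM is off; callbacks get dropped */
            SET_RUNTIME_PM_OPS(foo_runtime_suspend,
                               foo_runtime_resume, NULL)
    };

This trades a little dead code in !CONFIG_PM builds for keeping both paths compile-tested in every configuration.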
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index dcc0ff9f0c22..1cbfedfc20ef 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -35,6 +35,11 @@ int sdw_add_bus_master(struct sdw_bus *bus)
35 INIT_LIST_HEAD(&bus->slaves); 35 INIT_LIST_HEAD(&bus->slaves);
36 INIT_LIST_HEAD(&bus->m_rt_list); 36 INIT_LIST_HEAD(&bus->m_rt_list);
37 37
38 /*
39 * Initialize multi_link flag
40 * TODO: populate this flag by reading property from FW node
41 */
42 bus->multi_link = false;
38 if (bus->ops->read_prop) { 43 if (bus->ops->read_prop) {
39 ret = bus->ops->read_prop(bus); 44 ret = bus->ops->read_prop(bus);
40 if (ret < 0) { 45 if (ret < 0) {
@@ -175,6 +180,7 @@ static inline int do_transfer_defer(struct sdw_bus *bus,
175 180
176 defer->msg = msg; 181 defer->msg = msg;
177 defer->length = msg->len; 182 defer->length = msg->len;
183 init_completion(&defer->complete);
178 184
179 for (i = 0; i <= retry; i++) { 185 for (i = 0; i <= retry; i++) {
180 resp = bus->ops->xfer_msg_defer(bus, msg, defer); 186 resp = bus->ops->xfer_msg_defer(bus, msg, defer);
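The one-line init_completion() addition is load-bearing: the multi-link bank-switch code added later in this series blocks on defer->complete via wait_for_completion_timeout(), and waiting on an uninitialized completion is undefined. The lifecycle, as a minimal sketch (defer_sketch stands in for struct sdw_defer; error handling elided):

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct defer_sketch {
            struct completion complete;
    };

    /* submitter: arm the completion before queueing the transfer */
    static void submit(struct defer_sketch *defer)
    {
            init_completion(&defer->complete);
            /* ... hand the message to the controller ... */
    }

    /* response path (e.g. controller irq): wake the waiter */
    static void done(struct defer_sketch *defer)
    {
            complete(&defer->complete);
    }

    /* waiter: bounded wait; zero jiffies left means timeout */
    static int wait(struct defer_sketch *defer, unsigned long timeout)
    {
            if (!wait_for_completion_timeout(&defer->complete, timeout))
                    return -ETIMEDOUT;
            return 0;
    }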
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index 3b15c4e25a3a..c77de05b8100 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -4,6 +4,8 @@
4#ifndef __SDW_BUS_H 4#ifndef __SDW_BUS_H
5#define __SDW_BUS_H 5#define __SDW_BUS_H
6 6
7#define DEFAULT_BANK_SWITCH_TIMEOUT 3000
8
7#if IS_ENABLED(CONFIG_ACPI) 9#if IS_ENABLED(CONFIG_ACPI)
8int sdw_acpi_find_slaves(struct sdw_bus *bus); 10int sdw_acpi_find_slaves(struct sdw_bus *bus);
9#else 11#else
@@ -99,6 +101,7 @@ struct sdw_slave_runtime {
99 * this stream, can be zero. 101 * this stream, can be zero.
100 * @slave_rt_list: Slave runtime list 102 * @slave_rt_list: Slave runtime list
101 * @port_list: List of Master Ports configured for this stream, can be zero. 103 * @port_list: List of Master Ports configured for this stream, can be zero.
104 * @stream_node: sdw_stream_runtime master_list node
102 * @bus_node: sdw_bus m_rt_list node 105 * @bus_node: sdw_bus m_rt_list node
103 */ 106 */
104struct sdw_master_runtime { 107struct sdw_master_runtime {
@@ -108,6 +111,7 @@ struct sdw_master_runtime {
108 unsigned int ch_count; 111 unsigned int ch_count;
109 struct list_head slave_rt_list; 112 struct list_head slave_rt_list;
110 struct list_head port_list; 113 struct list_head port_list;
114 struct list_head stream_node;
111 struct list_head bus_node; 115 struct list_head bus_node;
112}; 116};
113 117
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 0a8990e758f9..c5ee97ee7886 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -398,6 +398,69 @@ static int intel_config_stream(struct sdw_intel *sdw,
398} 398}
399 399
400/* 400/*
401 * bank switch routines
402 */
403
404static int intel_pre_bank_switch(struct sdw_bus *bus)
405{
406 struct sdw_cdns *cdns = bus_to_cdns(bus);
407 struct sdw_intel *sdw = cdns_to_intel(cdns);
408 void __iomem *shim = sdw->res->shim;
409 int sync_reg;
410
411 /* Write to register only for multi-link */
412 if (!bus->multi_link)
413 return 0;
414
415 /* Read SYNC register */
416 sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
417 sync_reg |= SDW_SHIM_SYNC_CMDSYNC << sdw->instance;
418 intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
419
420 return 0;
421}
422
423static int intel_post_bank_switch(struct sdw_bus *bus)
424{
425 struct sdw_cdns *cdns = bus_to_cdns(bus);
426 struct sdw_intel *sdw = cdns_to_intel(cdns);
427 void __iomem *shim = sdw->res->shim;
428 int sync_reg, ret;
429
430 /* Write to register only for multi-link */
431 if (!bus->multi_link)
432 return 0;
433
434 /* Read SYNC register */
435 sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
436
437 /*
 439 	 * post_bank_switch() ops is called from the bus in a loop for
 440 	 * all the Masters in the stream with the expectation that
 441 	 * we trigger the bank switch only for the first Master in the list
441 * and do nothing for the other Masters
442 *
443 * So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
444 */
445 if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK))
446 return 0;
447
448 /*
449 * Set SyncGO bit to synchronously trigger a bank switch for
450 * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
451 * the Masters.
452 */
453 sync_reg |= SDW_SHIM_SYNC_SYNCGO;
454
455 ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
456 SDW_SHIM_SYNC_SYNCGO);
457 if (ret < 0)
458 dev_err(sdw->cdns.dev, "Post bank switch failed: %d", ret);
459
460 return ret;
461}
462
463/*
401 * DAI routines 464 * DAI routines
402 */ 465 */
403 466
@@ -750,6 +813,8 @@ static struct sdw_master_ops sdw_intel_ops = {
750 .xfer_msg_defer = cdns_xfer_msg_defer, 813 .xfer_msg_defer = cdns_xfer_msg_defer,
751 .reset_page_addr = cdns_reset_page_addr, 814 .reset_page_addr = cdns_reset_page_addr,
752 .set_bus_conf = cdns_bus_conf, 815 .set_bus_conf = cdns_bus_conf,
816 .pre_bank_switch = intel_pre_bank_switch,
817 .post_bank_switch = intel_post_bank_switch,
753}; 818};
754 819
755/* 820/*
@@ -780,9 +845,6 @@ static int intel_probe(struct platform_device *pdev)
780 sdw_intel_ops.read_prop = intel_prop_read; 845 sdw_intel_ops.read_prop = intel_prop_read;
781 sdw->cdns.bus.ops = &sdw_intel_ops; 846 sdw->cdns.bus.ops = &sdw_intel_ops;
782 847
783 sdw_intel_ops.read_prop = intel_prop_read;
784 sdw->cdns.bus.ops = &sdw_intel_ops;
785
786 platform_set_drvdata(pdev, sdw); 848 platform_set_drvdata(pdev, sdw);
787 849
788 ret = sdw_add_bus_master(&sdw->cdns.bus); 850 ret = sdw_add_bus_master(&sdw->cdns.bus);
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index d1ea6b4d0ad3..5c8a20d99878 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -151,7 +151,7 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
151 struct acpi_device *adev; 151 struct acpi_device *adev;
152 152
153 if (acpi_bus_get_device(handle, &adev)) { 153 if (acpi_bus_get_device(handle, &adev)) {
154 dev_err(&adev->dev, "Couldn't find ACPI handle\n"); 154 pr_err("%s: Couldn't find ACPI handle\n", __func__);
155 return AE_NOT_FOUND; 155 return AE_NOT_FOUND;
156 } 156 }
157 157
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index e5c7e1ef6318..bd879b1a76c8 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -626,9 +626,10 @@ static int sdw_program_params(struct sdw_bus *bus)
626 return ret; 626 return ret;
627} 627}
628 628
629static int sdw_bank_switch(struct sdw_bus *bus) 629static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
630{ 630{
631 int col_index, row_index; 631 int col_index, row_index;
632 bool multi_link;
632 struct sdw_msg *wr_msg; 633 struct sdw_msg *wr_msg;
633 u8 *wbuf = NULL; 634 u8 *wbuf = NULL;
634 int ret = 0; 635 int ret = 0;
@@ -638,6 +639,8 @@ static int sdw_bank_switch(struct sdw_bus *bus)
638 if (!wr_msg) 639 if (!wr_msg)
639 return -ENOMEM; 640 return -ENOMEM;
640 641
642 bus->defer_msg.msg = wr_msg;
643
641 wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL); 644 wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL);
642 if (!wbuf) { 645 if (!wbuf) {
643 ret = -ENOMEM; 646 ret = -ENOMEM;
@@ -658,17 +661,29 @@ static int sdw_bank_switch(struct sdw_bus *bus)
658 SDW_MSG_FLAG_WRITE, wbuf); 661 SDW_MSG_FLAG_WRITE, wbuf);
659 wr_msg->ssp_sync = true; 662 wr_msg->ssp_sync = true;
660 663
661 ret = sdw_transfer(bus, wr_msg); 664 /*
 665 	 * Set the multi_link flag only when the hardware supports it
 666 	 * and the stream is handled by multiple Masters
667 */
668 multi_link = bus->multi_link && (m_rt_count > 1);
669
670 if (multi_link)
671 ret = sdw_transfer_defer(bus, wr_msg, &bus->defer_msg);
672 else
673 ret = sdw_transfer(bus, wr_msg);
674
662 if (ret < 0) { 675 if (ret < 0) {
663 dev_err(bus->dev, "Slave frame_ctrl reg write failed"); 676 dev_err(bus->dev, "Slave frame_ctrl reg write failed");
664 goto error; 677 goto error;
665 } 678 }
666 679
667 kfree(wr_msg); 680 if (!multi_link) {
668 kfree(wbuf); 681 kfree(wr_msg);
669 bus->defer_msg.msg = NULL; 682 kfree(wbuf);
670 bus->params.curr_bank = !bus->params.curr_bank; 683 bus->defer_msg.msg = NULL;
671 bus->params.next_bank = !bus->params.next_bank; 684 bus->params.curr_bank = !bus->params.curr_bank;
685 bus->params.next_bank = !bus->params.next_bank;
686 }
672 687
673 return 0; 688 return 0;
674 689
@@ -679,37 +694,138 @@ error_1:
679 return ret; 694 return ret;
680} 695}
681 696
697/**
698 * sdw_ml_sync_bank_switch: Multilink register bank switch
699 *
700 * @bus: SDW bus instance
701 *
702 * Caller function should free the buffers on error
703 */
704static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
705{
706 unsigned long time_left;
707
708 if (!bus->multi_link)
709 return 0;
710
711 /* Wait for completion of transfer */
712 time_left = wait_for_completion_timeout(&bus->defer_msg.complete,
713 bus->bank_switch_timeout);
714
715 if (!time_left) {
716 dev_err(bus->dev, "Controller Timed out on bank switch");
717 return -ETIMEDOUT;
718 }
719
720 bus->params.curr_bank = !bus->params.curr_bank;
721 bus->params.next_bank = !bus->params.next_bank;
722
723 if (bus->defer_msg.msg) {
724 kfree(bus->defer_msg.msg->buf);
725 kfree(bus->defer_msg.msg);
726 }
727
728 return 0;
729}
730
682static int do_bank_switch(struct sdw_stream_runtime *stream) 731static int do_bank_switch(struct sdw_stream_runtime *stream)
683{ 732{
684 struct sdw_master_runtime *m_rt = stream->m_rt; 733 struct sdw_master_runtime *m_rt = NULL;
685 const struct sdw_master_ops *ops; 734 const struct sdw_master_ops *ops;
686 struct sdw_bus *bus = m_rt->bus; 735 struct sdw_bus *bus = NULL;
736 bool multi_link = false;
687 int ret = 0; 737 int ret = 0;
688 738
689 ops = bus->ops; 739 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
740 bus = m_rt->bus;
741 ops = bus->ops;
742
743 if (bus->multi_link) {
744 multi_link = true;
745 mutex_lock(&bus->msg_lock);
746 }
747
748 /* Pre-bank switch */
749 if (ops->pre_bank_switch) {
750 ret = ops->pre_bank_switch(bus);
751 if (ret < 0) {
752 dev_err(bus->dev,
753 "Pre bank switch op failed: %d", ret);
754 goto msg_unlock;
755 }
756 }
690 757
691 /* Pre-bank switch */ 758 /*
692 if (ops->pre_bank_switch) { 759 * Perform Bank switch operation.
693 ret = ops->pre_bank_switch(bus); 760 * For multi link cases, the actual bank switch is
761 * synchronized across all Masters and happens later as a
762 * part of post_bank_switch ops.
763 */
764 ret = sdw_bank_switch(bus, stream->m_rt_count);
694 if (ret < 0) { 765 if (ret < 0) {
695 dev_err(bus->dev, "Pre bank switch op failed: %d", ret); 766 dev_err(bus->dev, "Bank switch failed: %d", ret);
696 return ret; 767 goto error;
768
697 } 769 }
698 } 770 }
699 771
700 /* Bank switch */ 772 /*
 701	ret = sdw_bank_switch(bus);				 773	 * For multi link cases, it is expected that the bank switch is
 702	if (ret < 0) {						 774	 * triggered by post_bank_switch() for the first Master in the
 703		dev_err(bus->dev, "Bank switch failed: %d", ret);	 775	 * list, while post_bank_switch() for the other Masters returns
 704		return ret;					 776	 * without doing anything.
 705	}							 777	 */
778 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
779 bus = m_rt->bus;
780 ops = bus->ops;
781
782 /* Post-bank switch */
783 if (ops->post_bank_switch) {
784 ret = ops->post_bank_switch(bus);
785 if (ret < 0) {
786 dev_err(bus->dev,
787 "Post bank switch op failed: %d", ret);
788 goto error;
789 }
790 } else if (bus->multi_link && stream->m_rt_count > 1) {
 791			dev_err(bus->dev, "Post bank switch ops not implemented");
 792			ret = -EINVAL;
 793			goto error;
794 }
795
796 /* Set the bank switch timeout to default, if not set */
797 if (!bus->bank_switch_timeout)
798 bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT;
706 799
707 /* Post-bank switch */ 800 /* Check if bank switch was successful */
708 if (ops->post_bank_switch) { 801 ret = sdw_ml_sync_bank_switch(bus);
709 ret = ops->post_bank_switch(bus);
710 if (ret < 0) { 802 if (ret < 0) {
711 dev_err(bus->dev, 803 dev_err(bus->dev,
712 "Post bank switch op failed: %d", ret); 804 "multi link bank switch failed: %d", ret);
805 goto error;
806 }
 807		if (bus->multi_link)
 808			mutex_unlock(&bus->msg_lock);
809 }
810
811 return ret;
812
813error:
814 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
 815		bus = m_rt->bus;
 816
 817		if (bus->defer_msg.msg) {
 818			kfree(bus->defer_msg.msg->buf);
 819			kfree(bus->defer_msg.msg);
 820		}
 821	}
822msg_unlock:
823
824 if (multi_link) {
825 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
826 bus = m_rt->bus;
827 if (mutex_is_locked(&bus->msg_lock))
828 mutex_unlock(&bus->msg_lock);
713 } 829 }
714 } 830 }
715 831
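
The contract spelled out in the comments above implies a particular shape for a multi-link controller's post_bank_switch op: the first link to reach it fires one synchronized trigger for all links, and the remaining links do nothing. A hedged sketch; the controller struct, helper, and register below are hypothetical:

	/*
	 * Sketch only: hypothetical multi-link controller op. The first
	 * bus to reach post_bank_switch fires the shared trigger; the
	 * other buses return without doing anything, as the core expects.
	 */
	static int example_post_bank_switch(struct sdw_bus *bus)
	{
		struct example_ctrl *ctrl = example_bus_to_ctrl(bus); /* assumed */

		if (!ctrl->sync_armed)
			return 0;	/* another link already triggered */

		ctrl->sync_armed = false;
		writel(EXAMPLE_SYNC_GO, ctrl->sync_reg);	/* assumed register */
		return 0;
	}
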
@@ -747,12 +863,29 @@ struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name)
747 return NULL; 863 return NULL;
748 864
749 stream->name = stream_name; 865 stream->name = stream_name;
866 INIT_LIST_HEAD(&stream->master_list);
750 stream->state = SDW_STREAM_ALLOCATED; 867 stream->state = SDW_STREAM_ALLOCATED;
868 stream->m_rt_count = 0;
751 869
752 return stream; 870 return stream;
753} 871}
754EXPORT_SYMBOL(sdw_alloc_stream); 872EXPORT_SYMBOL(sdw_alloc_stream);
755 873
874static struct sdw_master_runtime
875*sdw_find_master_rt(struct sdw_bus *bus,
876 struct sdw_stream_runtime *stream)
877{
878 struct sdw_master_runtime *m_rt = NULL;
879
 880	/* Return the Master runtime handle if one already exists for this bus */
881 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
882 if (m_rt->bus == bus)
883 return m_rt;
884 }
885
886 return NULL;
887}
888
756/** 889/**
 757 * sdw_alloc_master_rt() - Allocates and initializes Master runtime handle	 890 * sdw_alloc_master_rt() - Allocates and initializes Master runtime handle
758 * 891 *
@@ -769,12 +902,11 @@ static struct sdw_master_runtime
769{ 902{
770 struct sdw_master_runtime *m_rt; 903 struct sdw_master_runtime *m_rt;
771 904
772 m_rt = stream->m_rt;
773
774 /* 905 /*
775 * check if Master is already allocated (as a result of Slave adding 906 * check if Master is already allocated (as a result of Slave adding
776 * it first), if so skip allocation and go to configure 907 * it first), if so skip allocation and go to configure
777 */ 908 */
909 m_rt = sdw_find_master_rt(bus, stream);
778 if (m_rt) 910 if (m_rt)
779 goto stream_config; 911 goto stream_config;
780 912
@@ -785,7 +917,7 @@ static struct sdw_master_runtime
785 /* Initialization of Master runtime handle */ 917 /* Initialization of Master runtime handle */
786 INIT_LIST_HEAD(&m_rt->port_list); 918 INIT_LIST_HEAD(&m_rt->port_list);
787 INIT_LIST_HEAD(&m_rt->slave_rt_list); 919 INIT_LIST_HEAD(&m_rt->slave_rt_list);
788 stream->m_rt = m_rt; 920 list_add_tail(&m_rt->stream_node, &stream->master_list);
789 921
790 list_add_tail(&m_rt->bus_node, &bus->m_rt_list); 922 list_add_tail(&m_rt->bus_node, &bus->m_rt_list);
791 923
@@ -843,17 +975,21 @@ static void sdw_slave_port_release(struct sdw_bus *bus,
843 struct sdw_stream_runtime *stream) 975 struct sdw_stream_runtime *stream)
844{ 976{
845 struct sdw_port_runtime *p_rt, *_p_rt; 977 struct sdw_port_runtime *p_rt, *_p_rt;
846 struct sdw_master_runtime *m_rt = stream->m_rt; 978 struct sdw_master_runtime *m_rt;
847 struct sdw_slave_runtime *s_rt; 979 struct sdw_slave_runtime *s_rt;
848 980
849 list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { 981 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
850 if (s_rt->slave != slave) 982 list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
851 continue;
852 983
853 list_for_each_entry_safe(p_rt, _p_rt, 984 if (s_rt->slave != slave)
854 &s_rt->port_list, port_node) { 985 continue;
855 list_del(&p_rt->port_node); 986
856 kfree(p_rt); 987 list_for_each_entry_safe(p_rt, _p_rt,
988 &s_rt->port_list, port_node) {
989
990 list_del(&p_rt->port_node);
991 kfree(p_rt);
992 }
857 } 993 }
858 } 994 }
859} 995}
@@ -870,16 +1006,18 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
870 struct sdw_stream_runtime *stream) 1006 struct sdw_stream_runtime *stream)
871{ 1007{
872 struct sdw_slave_runtime *s_rt, *_s_rt; 1008 struct sdw_slave_runtime *s_rt, *_s_rt;
873 struct sdw_master_runtime *m_rt = stream->m_rt; 1009 struct sdw_master_runtime *m_rt;
874 1010
875 /* Retrieve Slave runtime handle */ 1011 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
876 list_for_each_entry_safe(s_rt, _s_rt, 1012 /* Retrieve Slave runtime handle */
877 &m_rt->slave_rt_list, m_rt_node) { 1013 list_for_each_entry_safe(s_rt, _s_rt,
1014 &m_rt->slave_rt_list, m_rt_node) {
878 1015
879 if (s_rt->slave == slave) { 1016 if (s_rt->slave == slave) {
880 list_del(&s_rt->m_rt_node); 1017 list_del(&s_rt->m_rt_node);
881 kfree(s_rt); 1018 kfree(s_rt);
882 return; 1019 return;
1020 }
883 } 1021 }
884 } 1022 }
885} 1023}
@@ -887,6 +1025,7 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
887/** 1025/**
888 * sdw_release_master_stream() - Free Master runtime handle 1026 * sdw_release_master_stream() - Free Master runtime handle
889 * 1027 *
1028 * @m_rt: Master runtime node
890 * @stream: Stream runtime handle. 1029 * @stream: Stream runtime handle.
891 * 1030 *
892 * This function is to be called with bus_lock held 1031 * This function is to be called with bus_lock held
@@ -894,9 +1033,9 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
894 * handle. If this is called first then sdw_release_slave_stream() will have 1033 * handle. If this is called first then sdw_release_slave_stream() will have
895 * no effect as Slave(s) runtime handle would already be freed up. 1034 * no effect as Slave(s) runtime handle would already be freed up.
896 */ 1035 */
897static void sdw_release_master_stream(struct sdw_stream_runtime *stream) 1036static void sdw_release_master_stream(struct sdw_master_runtime *m_rt,
1037 struct sdw_stream_runtime *stream)
898{ 1038{
899 struct sdw_master_runtime *m_rt = stream->m_rt;
900 struct sdw_slave_runtime *s_rt, *_s_rt; 1039 struct sdw_slave_runtime *s_rt, *_s_rt;
901 1040
902 list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) { 1041 list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
@@ -904,7 +1043,9 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
904 sdw_release_slave_stream(s_rt->slave, stream); 1043 sdw_release_slave_stream(s_rt->slave, stream);
905 } 1044 }
906 1045
1046 list_del(&m_rt->stream_node);
907 list_del(&m_rt->bus_node); 1047 list_del(&m_rt->bus_node);
1048 kfree(m_rt);
908} 1049}
909 1050
910/** 1051/**
@@ -918,13 +1059,23 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
918int sdw_stream_remove_master(struct sdw_bus *bus, 1059int sdw_stream_remove_master(struct sdw_bus *bus,
919 struct sdw_stream_runtime *stream) 1060 struct sdw_stream_runtime *stream)
920{ 1061{
1062 struct sdw_master_runtime *m_rt, *_m_rt;
1063
921 mutex_lock(&bus->bus_lock); 1064 mutex_lock(&bus->bus_lock);
922 1065
923 sdw_release_master_stream(stream); 1066 list_for_each_entry_safe(m_rt, _m_rt,
924 sdw_master_port_release(bus, stream->m_rt); 1067 &stream->master_list, stream_node) {
925 stream->state = SDW_STREAM_RELEASED; 1068
926 kfree(stream->m_rt); 1069 if (m_rt->bus != bus)
927 stream->m_rt = NULL; 1070 continue;
1071
1072 sdw_master_port_release(bus, m_rt);
1073 sdw_release_master_stream(m_rt, stream);
1074 stream->m_rt_count--;
1075 }
1076
1077 if (list_empty(&stream->master_list))
1078 stream->state = SDW_STREAM_RELEASED;
928 1079
929 mutex_unlock(&bus->bus_lock); 1080 mutex_unlock(&bus->bus_lock);
930 1081
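
Note the new teardown semantics: with several Masters attached, removal is per bus, and the stream only transitions to SDW_STREAM_RELEASED once master_list is empty. A short usage sketch, with the bus pointers assumed:

	/* Sketch: removing a two-Master stream one bus at a time. */
	sdw_stream_remove_master(b0, stream);	/* m_rt_count: 2 -> 1 */
	sdw_stream_remove_master(b1, stream);	/* list empty: state -> SDW_STREAM_RELEASED */
	sdw_release_stream(stream);
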
@@ -1107,6 +1258,18 @@ int sdw_stream_add_master(struct sdw_bus *bus,
1107 1258
1108 mutex_lock(&bus->bus_lock); 1259 mutex_lock(&bus->bus_lock);
1109 1260
1261 /*
 1262	 * For multi link streams, add a second Master only if
 1263	 * the bus supports it, i.e. only when the
 1264	 * bus->multi_link flag is set
1265 */
1266 if (!bus->multi_link && stream->m_rt_count > 0) {
1267 dev_err(bus->dev,
1268 "Multilink not supported, link %d", bus->link_id);
1269 ret = -EINVAL;
1270 goto unlock;
1271 }
1272
1110 m_rt = sdw_alloc_master_rt(bus, stream_config, stream); 1273 m_rt = sdw_alloc_master_rt(bus, stream_config, stream);
1111 if (!m_rt) { 1274 if (!m_rt) {
1112 dev_err(bus->dev, 1275 dev_err(bus->dev,
@@ -1124,10 +1287,12 @@ int sdw_stream_add_master(struct sdw_bus *bus,
1124 if (ret) 1287 if (ret)
1125 goto stream_error; 1288 goto stream_error;
1126 1289
1290 stream->m_rt_count++;
1291
1127 goto unlock; 1292 goto unlock;
1128 1293
1129stream_error: 1294stream_error:
1130 sdw_release_master_stream(stream); 1295 sdw_release_master_stream(m_rt, stream);
1131unlock: 1296unlock:
1132 mutex_unlock(&bus->bus_lock); 1297 mutex_unlock(&bus->bus_lock);
1133 return ret; 1298 return ret;
@@ -1205,7 +1370,7 @@ stream_error:
1205 * we hit error so cleanup the stream, release all Slave(s) and 1370 * we hit error so cleanup the stream, release all Slave(s) and
1206 * Master runtime 1371 * Master runtime
1207 */ 1372 */
1208 sdw_release_master_stream(stream); 1373 sdw_release_master_stream(m_rt, stream);
1209error: 1374error:
1210 mutex_unlock(&slave->bus->bus_lock); 1375 mutex_unlock(&slave->bus->bus_lock);
1211 return ret; 1376 return ret;
@@ -1245,33 +1410,82 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
1245 return NULL; 1410 return NULL;
1246} 1411}
1247 1412
1413/**
 1414 * sdw_acquire_bus_lock() - Acquire bus lock for all Master runtime(s)
1415 *
1416 * @stream: SoundWire stream
1417 *
1418 * Acquire bus_lock for each of the master runtime(m_rt) part of this
1419 * stream to reconfigure the bus.
 1420 * NOTE: This function is called from SoundWire stream ops; it is
 1421 * expected that a global lock is held before acquiring bus_lock.
1422 */
1423static void sdw_acquire_bus_lock(struct sdw_stream_runtime *stream)
1424{
1425 struct sdw_master_runtime *m_rt = NULL;
1426 struct sdw_bus *bus = NULL;
1427
 1428	/* Iterate over all Master(s) in the Master list */
1429 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
1430 bus = m_rt->bus;
1431
1432 mutex_lock(&bus->bus_lock);
1433 }
1434}
1435
1436/**
 1437 * sdw_release_bus_lock() - Release bus lock for all Master runtime(s)
1438 *
1439 * @stream: SoundWire stream
1440 *
1441 * Release the previously held bus_lock after reconfiguring the bus.
 1442 * NOTE: This function is called from SoundWire stream ops; it is
 1443 * expected that a global lock is held before releasing bus_lock.
1444 */
1445static void sdw_release_bus_lock(struct sdw_stream_runtime *stream)
1446{
1447 struct sdw_master_runtime *m_rt = NULL;
1448 struct sdw_bus *bus = NULL;
1449
 1450	/* Iterate over all Master(s) in the Master list */
1451 list_for_each_entry_reverse(m_rt, &stream->master_list, stream_node) {
1452 bus = m_rt->bus;
1453 mutex_unlock(&bus->bus_lock);
1454 }
1455}
1456
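
Every exported stream op below follows the same wrapper shape around these two helpers; a condensed sketch, with the op name a placeholder:

	int sdw_example_stream_op(struct sdw_stream_runtime *stream)
	{
		int ret;

		sdw_acquire_bus_lock(stream);	/* lock every bus on master_list */
		ret = _sdw_example_stream_op(stream);
		if (ret < 0)
			pr_err("op for stream:%s failed: %d", stream->name, ret);
		sdw_release_bus_lock(stream);	/* unlock in reverse list order */
		return ret;
	}
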
1248static int _sdw_prepare_stream(struct sdw_stream_runtime *stream) 1457static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
1249{ 1458{
1250 struct sdw_master_runtime *m_rt = stream->m_rt; 1459 struct sdw_master_runtime *m_rt = NULL;
1251 struct sdw_bus *bus = m_rt->bus; 1460 struct sdw_bus *bus = NULL;
1252 struct sdw_master_prop *prop = NULL; 1461 struct sdw_master_prop *prop = NULL;
1253 struct sdw_bus_params params; 1462 struct sdw_bus_params params;
1254 int ret; 1463 int ret;
1255 1464
1256 prop = &bus->prop; 1465 /* Prepare Master(s) and Slave(s) port(s) associated with stream */
1257 memcpy(&params, &bus->params, sizeof(params)); 1466 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
1467 bus = m_rt->bus;
1468 prop = &bus->prop;
1469 memcpy(&params, &bus->params, sizeof(params));
1258 1470
1259 /* TODO: Support Asynchronous mode */ 1471 /* TODO: Support Asynchronous mode */
1260 if ((prop->max_freq % stream->params.rate) != 0) { 1472 if ((prop->max_freq % stream->params.rate) != 0) {
1261 dev_err(bus->dev, "Async mode not supported"); 1473 dev_err(bus->dev, "Async mode not supported");
1262 return -EINVAL; 1474 return -EINVAL;
1263 } 1475 }
1264 1476
1265 /* Increment cumulative bus bandwidth */ 1477 /* Increment cumulative bus bandwidth */
1266 /* TODO: Update this during Device-Device support */ 1478 /* TODO: Update this during Device-Device support */
1267 bus->params.bandwidth += m_rt->stream->params.rate * 1479 bus->params.bandwidth += m_rt->stream->params.rate *
1268 m_rt->ch_count * m_rt->stream->params.bps; 1480 m_rt->ch_count * m_rt->stream->params.bps;
1481
1482 /* Program params */
1483 ret = sdw_program_params(bus);
1484 if (ret < 0) {
1485 dev_err(bus->dev, "Program params failed: %d", ret);
1486 goto restore_params;
1487 }
1269 1488
1270 /* Program params */
1271 ret = sdw_program_params(bus);
1272 if (ret < 0) {
1273 dev_err(bus->dev, "Program params failed: %d", ret);
1274 goto restore_params;
1275 } 1489 }
1276 1490
1277 ret = do_bank_switch(stream); 1491 ret = do_bank_switch(stream);
@@ -1280,12 +1494,16 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
1280 goto restore_params; 1494 goto restore_params;
1281 } 1495 }
1282 1496
1283 /* Prepare port(s) on the new clock configuration */ 1497 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
1284 ret = sdw_prep_deprep_ports(m_rt, true); 1498 bus = m_rt->bus;
1285 if (ret < 0) { 1499
1286 dev_err(bus->dev, "Prepare port(s) failed ret = %d", 1500 /* Prepare port(s) on the new clock configuration */
1287 ret); 1501 ret = sdw_prep_deprep_ports(m_rt, true);
1288 return ret; 1502 if (ret < 0) {
1503 dev_err(bus->dev, "Prepare port(s) failed ret = %d",
1504 ret);
1505 return ret;
1506 }
1289 } 1507 }
1290 1508
1291 stream->state = SDW_STREAM_PREPARED; 1509 stream->state = SDW_STREAM_PREPARED;
@@ -1313,35 +1531,40 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
1313 return -EINVAL; 1531 return -EINVAL;
1314 } 1532 }
1315 1533
1316 mutex_lock(&stream->m_rt->bus->bus_lock); 1534 sdw_acquire_bus_lock(stream);
1317 1535
1318 ret = _sdw_prepare_stream(stream); 1536 ret = _sdw_prepare_stream(stream);
1319 if (ret < 0) 1537 if (ret < 0)
1320 pr_err("Prepare for stream:%s failed: %d", stream->name, ret); 1538 pr_err("Prepare for stream:%s failed: %d", stream->name, ret);
1321 1539
1322 mutex_unlock(&stream->m_rt->bus->bus_lock); 1540 sdw_release_bus_lock(stream);
1323 return ret; 1541 return ret;
1324} 1542}
1325EXPORT_SYMBOL(sdw_prepare_stream); 1543EXPORT_SYMBOL(sdw_prepare_stream);
1326 1544
1327static int _sdw_enable_stream(struct sdw_stream_runtime *stream) 1545static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
1328{ 1546{
1329 struct sdw_master_runtime *m_rt = stream->m_rt; 1547 struct sdw_master_runtime *m_rt = NULL;
1330 struct sdw_bus *bus = m_rt->bus; 1548 struct sdw_bus *bus = NULL;
1331 int ret; 1549 int ret;
1332 1550
1333 /* Program params */ 1551 /* Enable Master(s) and Slave(s) port(s) associated with stream */
1334 ret = sdw_program_params(bus); 1552 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
1335 if (ret < 0) { 1553 bus = m_rt->bus;
1336 dev_err(bus->dev, "Program params failed: %d", ret);
1337 return ret;
1338 }
1339 1554
1340 /* Enable port(s) */ 1555 /* Program params */
1341 ret = sdw_enable_disable_ports(m_rt, true); 1556 ret = sdw_program_params(bus);
1342 if (ret < 0) { 1557 if (ret < 0) {
1343 dev_err(bus->dev, "Enable port(s) failed ret: %d", ret); 1558 dev_err(bus->dev, "Program params failed: %d", ret);
1344 return ret; 1559 return ret;
1560 }
1561
1562 /* Enable port(s) */
1563 ret = sdw_enable_disable_ports(m_rt, true);
1564 if (ret < 0) {
1565 dev_err(bus->dev, "Enable port(s) failed ret: %d", ret);
1566 return ret;
1567 }
1345 } 1568 }
1346 1569
1347 ret = do_bank_switch(stream); 1570 ret = do_bank_switch(stream);
@@ -1370,37 +1593,42 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
1370 return -EINVAL; 1593 return -EINVAL;
1371 } 1594 }
1372 1595
1373 mutex_lock(&stream->m_rt->bus->bus_lock); 1596 sdw_acquire_bus_lock(stream);
1374 1597
1375 ret = _sdw_enable_stream(stream); 1598 ret = _sdw_enable_stream(stream);
1376 if (ret < 0) 1599 if (ret < 0)
1377 pr_err("Enable for stream:%s failed: %d", stream->name, ret); 1600 pr_err("Enable for stream:%s failed: %d", stream->name, ret);
1378 1601
1379 mutex_unlock(&stream->m_rt->bus->bus_lock); 1602 sdw_release_bus_lock(stream);
1380 return ret; 1603 return ret;
1381} 1604}
1382EXPORT_SYMBOL(sdw_enable_stream); 1605EXPORT_SYMBOL(sdw_enable_stream);
1383 1606
1384static int _sdw_disable_stream(struct sdw_stream_runtime *stream) 1607static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
1385{ 1608{
1386 struct sdw_master_runtime *m_rt = stream->m_rt; 1609 struct sdw_master_runtime *m_rt = NULL;
1387 struct sdw_bus *bus = m_rt->bus; 1610 struct sdw_bus *bus = NULL;
1388 int ret; 1611 int ret;
1389 1612
1390 /* Disable port(s) */ 1613 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
1391 ret = sdw_enable_disable_ports(m_rt, false); 1614 bus = m_rt->bus;
1392 if (ret < 0) { 1615 /* Disable port(s) */
1393 dev_err(bus->dev, "Disable port(s) failed: %d", ret); 1616 ret = sdw_enable_disable_ports(m_rt, false);
1394 return ret; 1617 if (ret < 0) {
1618 dev_err(bus->dev, "Disable port(s) failed: %d", ret);
1619 return ret;
1620 }
1395 } 1621 }
1396
1397 stream->state = SDW_STREAM_DISABLED; 1622 stream->state = SDW_STREAM_DISABLED;
1398 1623
1399 /* Program params */ 1624 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
1400 ret = sdw_program_params(bus); 1625 bus = m_rt->bus;
1401 if (ret < 0) { 1626 /* Program params */
1402 dev_err(bus->dev, "Program params failed: %d", ret); 1627 ret = sdw_program_params(bus);
1403 return ret; 1628 if (ret < 0) {
1629 dev_err(bus->dev, "Program params failed: %d", ret);
1630 return ret;
1631 }
1404 } 1632 }
1405 1633
1406 return do_bank_switch(stream); 1634 return do_bank_switch(stream);
@@ -1422,43 +1650,46 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
1422 return -EINVAL; 1650 return -EINVAL;
1423 } 1651 }
1424 1652
1425 mutex_lock(&stream->m_rt->bus->bus_lock); 1653 sdw_acquire_bus_lock(stream);
1426 1654
1427 ret = _sdw_disable_stream(stream); 1655 ret = _sdw_disable_stream(stream);
1428 if (ret < 0) 1656 if (ret < 0)
1429 pr_err("Disable for stream:%s failed: %d", stream->name, ret); 1657 pr_err("Disable for stream:%s failed: %d", stream->name, ret);
1430 1658
1431 mutex_unlock(&stream->m_rt->bus->bus_lock); 1659 sdw_release_bus_lock(stream);
1432 return ret; 1660 return ret;
1433} 1661}
1434EXPORT_SYMBOL(sdw_disable_stream); 1662EXPORT_SYMBOL(sdw_disable_stream);
1435 1663
1436static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream) 1664static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
1437{ 1665{
1438 struct sdw_master_runtime *m_rt = stream->m_rt; 1666 struct sdw_master_runtime *m_rt = NULL;
1439 struct sdw_bus *bus = m_rt->bus; 1667 struct sdw_bus *bus = NULL;
1440 int ret = 0; 1668 int ret = 0;
1441 1669
1442 /* De-prepare port(s) */ 1670 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
1443 ret = sdw_prep_deprep_ports(m_rt, false); 1671 bus = m_rt->bus;
1444 if (ret < 0) { 1672 /* De-prepare port(s) */
1445 dev_err(bus->dev, "De-prepare port(s) failed: %d", ret); 1673 ret = sdw_prep_deprep_ports(m_rt, false);
1446 return ret; 1674 if (ret < 0) {
1447 } 1675 dev_err(bus->dev, "De-prepare port(s) failed: %d", ret);
1676 return ret;
1677 }
1448 1678
1449 stream->state = SDW_STREAM_DEPREPARED; 1679 /* TODO: Update this during Device-Device support */
1680 bus->params.bandwidth -= m_rt->stream->params.rate *
1681 m_rt->ch_count * m_rt->stream->params.bps;
1450 1682
1451 /* TODO: Update this during Device-Device support */ 1683 /* Program params */
1452 bus->params.bandwidth -= m_rt->stream->params.rate * 1684 ret = sdw_program_params(bus);
1453 m_rt->ch_count * m_rt->stream->params.bps; 1685 if (ret < 0) {
1686 dev_err(bus->dev, "Program params failed: %d", ret);
1687 return ret;
1688 }
1454 1689
1455 /* Program params */
1456 ret = sdw_program_params(bus);
1457 if (ret < 0) {
1458 dev_err(bus->dev, "Program params failed: %d", ret);
1459 return ret;
1460 } 1690 }
1461 1691
1692 stream->state = SDW_STREAM_DEPREPARED;
1462 return do_bank_switch(stream); 1693 return do_bank_switch(stream);
1463} 1694}
1464 1695
@@ -1478,13 +1709,12 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
1478 return -EINVAL; 1709 return -EINVAL;
1479 } 1710 }
1480 1711
1481 mutex_lock(&stream->m_rt->bus->bus_lock); 1712 sdw_acquire_bus_lock(stream);
1482
1483 ret = _sdw_deprepare_stream(stream); 1713 ret = _sdw_deprepare_stream(stream);
1484 if (ret < 0) 1714 if (ret < 0)
 1485		pr_err("De-prepare for stream:%s failed: %d", stream->name, ret);	 1715		pr_err("De-prepare for stream:%s failed: %d", stream->name, ret);
1486 1716
1487 mutex_unlock(&stream->m_rt->bus->bus_lock); 1717 sdw_release_bus_lock(stream);
1488 return ret; 1718 return ret;
1489} 1719}
1490EXPORT_SYMBOL(sdw_deprepare_stream); 1720EXPORT_SYMBOL(sdw_deprepare_stream);
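
Taken together, the stream.c changes let a single stream span several links. A hedged end-to-end usage sketch; configs and full error handling are elided, and both buses are assumed to set bus->multi_link:

	#include <linux/soundwire/sdw.h>

	static int example_multi_link_start(struct sdw_bus *b0, struct sdw_bus *b1,
					    struct sdw_stream_config *scfg,
					    struct sdw_port_config *pcfg)
	{
		struct sdw_stream_runtime *stream;
		int ret;

		stream = sdw_alloc_stream("example-playback");
		if (!stream)
			return -ENOMEM;

		/* Each call appends one Master runtime to master_list and
		 * bumps m_rt_count; the second call fails with -EINVAL if
		 * the bus does not advertise multi_link. */
		ret = sdw_stream_add_master(b0, scfg, pcfg, 1, stream);
		if (!ret)
			ret = sdw_stream_add_master(b1, scfg, pcfg, 1, stream);
		if (!ret)
			ret = sdw_prepare_stream(stream); /* params + synchronized bank switch */
		if (!ret)
			ret = sdw_enable_stream(stream);
		if (ret) {
			sdw_stream_remove_master(b0, stream);
			sdw_stream_remove_master(b1, stream);
			sdw_release_stream(stream);
		}
		return ret;
	}
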
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index c2277b8ee88d..9553305c63ea 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -1,8 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - capabilities lookup 3 * Thunderbolt driver - capabilities lookup
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
6 */ 7 */
7 8
8#include <linux/slab.h> 9#include <linux/slab.h>
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 37a7f4c735d0..73b386de4d15 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -1,8 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - control channel and configuration commands 3 * Thunderbolt driver - control channel and configuration commands
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
6 */ 7 */
7 8
8#include <linux/crc32.h> 9#include <linux/crc32.h>
@@ -631,7 +632,7 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
631 ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback; 632 ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
632 } 633 }
633 634
634 tb_ctl_info(ctl, "control channel created\n"); 635 tb_ctl_dbg(ctl, "control channel created\n");
635 return ctl; 636 return ctl;
636err: 637err:
637 tb_ctl_free(ctl); 638 tb_ctl_free(ctl);
@@ -662,8 +663,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
662 tb_ctl_pkg_free(ctl->rx_packets[i]); 663 tb_ctl_pkg_free(ctl->rx_packets[i]);
663 664
664 665
665 if (ctl->frame_pool) 666 dma_pool_destroy(ctl->frame_pool);
666 dma_pool_destroy(ctl->frame_pool);
667 kfree(ctl); 667 kfree(ctl);
668} 668}
669 669
@@ -673,7 +673,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
673void tb_ctl_start(struct tb_ctl *ctl) 673void tb_ctl_start(struct tb_ctl *ctl)
674{ 674{
675 int i; 675 int i;
676 tb_ctl_info(ctl, "control channel starting...\n"); 676 tb_ctl_dbg(ctl, "control channel starting...\n");
677 tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */ 677 tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
678 tb_ring_start(ctl->rx); 678 tb_ring_start(ctl->rx);
679 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) 679 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
@@ -702,7 +702,7 @@ void tb_ctl_stop(struct tb_ctl *ctl)
702 if (!list_empty(&ctl->request_queue)) 702 if (!list_empty(&ctl->request_queue))
703 tb_ctl_WARN(ctl, "dangling request in request_queue\n"); 703 tb_ctl_WARN(ctl, "dangling request in request_queue\n");
704 INIT_LIST_HEAD(&ctl->request_queue); 704 INIT_LIST_HEAD(&ctl->request_queue);
705 tb_ctl_info(ctl, "control channel stopped\n"); 705 tb_ctl_dbg(ctl, "control channel stopped\n");
706} 706}
707 707
708/* public interface, commands */ 708/* public interface, commands */
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 3062e0b5f71e..2f1a1e111110 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -1,8 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - control channel and configuration commands 3 * Thunderbolt driver - control channel and configuration commands
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
6 */ 7 */
7 8
8#ifndef _TB_CFG 9#ifndef _TB_CFG
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index f2701194f810..847dd07a7b17 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -1,13 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Thunderbolt DMA configuration based mailbox support 3 * Thunderbolt DMA configuration based mailbox support
3 * 4 *
4 * Copyright (C) 2017, Intel Corporation 5 * Copyright (C) 2017, Intel Corporation
5 * Authors: Michael Jamet <michael.jamet@intel.com> 6 * Authors: Michael Jamet <michael.jamet@intel.com>
6 * Mika Westerberg <mika.westerberg@linux.intel.com> 7 * Mika Westerberg <mika.westerberg@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#include <linux/delay.h> 10#include <linux/delay.h>
diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h
index c4a69e0fbff7..7deadd97ce31 100644
--- a/drivers/thunderbolt/dma_port.h
+++ b/drivers/thunderbolt/dma_port.h
@@ -1,13 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Thunderbolt DMA configuration based mailbox support 3 * Thunderbolt DMA configuration based mailbox support
3 * 4 *
4 * Copyright (C) 2017, Intel Corporation 5 * Copyright (C) 2017, Intel Corporation
5 * Authors: Michael Jamet <michael.jamet@intel.com> 6 * Authors: Michael Jamet <michael.jamet@intel.com>
6 * Mika Westerberg <mika.westerberg@linux.intel.com> 7 * Mika Westerberg <mika.westerberg@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#ifndef DMA_PORT_H_ 10#ifndef DMA_PORT_H_
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 092381e2accf..93e562f18d40 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Thunderbolt bus support 3 * Thunderbolt bus support
3 * 4 *
4 * Copyright (C) 2017, Intel Corporation 5 * Copyright (C) 2017, Intel Corporation
5 * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */ 7 */
11 8
12#include <linux/device.h> 9#include <linux/device.h>
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 3e8caf22c294..81e8ac4c5805 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -1,8 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - eeprom access 3 * Thunderbolt driver - eeprom access
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
6 */ 7 */
7 8
8#include <linux/crc32.h> 9#include <linux/crc32.h>
@@ -540,7 +541,7 @@ int tb_drom_read(struct tb_switch *sw)
540 return res; 541 return res;
541 size &= 0x3ff; 542 size &= 0x3ff;
542 size += TB_DROM_DATA_START; 543 size += TB_DROM_DATA_START;
543 tb_sw_info(sw, "reading drom (length: %#x)\n", size); 544 tb_sw_dbg(sw, "reading drom (length: %#x)\n", size);
544 if (size < sizeof(*header)) { 545 if (size < sizeof(*header)) {
545 tb_sw_warn(sw, "drom too small, aborting\n"); 546 tb_sw_warn(sw, "drom too small, aborting\n");
546 return -EIO; 547 return -EIO;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 28fc4ce75edb..e3fc920af682 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Internal Thunderbolt Connection Manager. This is a firmware running on 3 * Internal Thunderbolt Connection Manager. This is a firmware running on
3 * the Thunderbolt host controller performing most of the low-level 4 * the Thunderbolt host controller performing most of the low-level
@@ -6,10 +7,6 @@
6 * Copyright (C) 2017, Intel Corporation 7 * Copyright (C) 2017, Intel Corporation
7 * Authors: Michael Jamet <michael.jamet@intel.com> 8 * Authors: Michael Jamet <michael.jamet@intel.com>
8 * Mika Westerberg <mika.westerberg@linux.intel.com> 9 * Mika Westerberg <mika.westerberg@linux.intel.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */ 10 */
14 11
15#include <linux/delay.h> 12#include <linux/delay.h>
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 5cd6bdfa068f..9aa44f9762a3 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * Thunderbolt Cactus Ridge driver - NHI driver 2 * Thunderbolt driver - NHI driver
3 * 3 *
4 * The NHI (native host interface) is the pci device that allows us to send and 4 * The NHI (native host interface) is the pci device that allows us to send and
5 * receive frames from the thunderbolt bus. 5 * receive frames from the thunderbolt bus.
6 * 6 *
7 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 7 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
8 * Copyright (C) 2018, Intel Corporation
8 */ 9 */
9 10
10#include <linux/pm_runtime.h> 11#include <linux/pm_runtime.h>
@@ -95,9 +96,9 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
95 else 96 else
96 new = old & ~mask; 97 new = old & ~mask;
97 98
98 dev_info(&ring->nhi->pdev->dev, 99 dev_dbg(&ring->nhi->pdev->dev,
99 "%s interrupt at register %#x bit %d (%#x -> %#x)\n", 100 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
100 active ? "enabling" : "disabling", reg, bit, old, new); 101 active ? "enabling" : "disabling", reg, bit, old, new);
101 102
102 if (new == old) 103 if (new == old)
103 dev_WARN(&ring->nhi->pdev->dev, 104 dev_WARN(&ring->nhi->pdev->dev,
@@ -476,8 +477,9 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
476 void *poll_data) 477 void *poll_data)
477{ 478{
478 struct tb_ring *ring = NULL; 479 struct tb_ring *ring = NULL;
479 dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", 480
480 transmit ? "TX" : "RX", hop, size); 481 dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
482 transmit ? "TX" : "RX", hop, size);
481 483
482 /* Tx Ring 2 is reserved for E2E workaround */ 484 /* Tx Ring 2 is reserved for E2E workaround */
483 if (transmit && hop == RING_E2E_UNUSED_HOPID) 485 if (transmit && hop == RING_E2E_UNUSED_HOPID)
@@ -585,8 +587,8 @@ void tb_ring_start(struct tb_ring *ring)
585 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n"); 587 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
586 goto err; 588 goto err;
587 } 589 }
588 dev_info(&ring->nhi->pdev->dev, "starting %s %d\n", 590 dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
589 RING_TYPE(ring), ring->hop); 591 RING_TYPE(ring), ring->hop);
590 592
591 if (ring->flags & RING_FLAG_FRAME) { 593 if (ring->flags & RING_FLAG_FRAME) {
592 /* Means 4096 */ 594 /* Means 4096 */
@@ -647,8 +649,8 @@ void tb_ring_stop(struct tb_ring *ring)
647{ 649{
648 spin_lock_irq(&ring->nhi->lock); 650 spin_lock_irq(&ring->nhi->lock);
649 spin_lock(&ring->lock); 651 spin_lock(&ring->lock);
650 dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n", 652 dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
651 RING_TYPE(ring), ring->hop); 653 RING_TYPE(ring), ring->hop);
652 if (ring->nhi->going_away) 654 if (ring->nhi->going_away)
653 goto err; 655 goto err;
654 if (!ring->running) { 656 if (!ring->running) {
@@ -716,10 +718,8 @@ void tb_ring_free(struct tb_ring *ring)
716 ring->descriptors_dma = 0; 718 ring->descriptors_dma = 0;
717 719
718 720
719 dev_info(&ring->nhi->pdev->dev, 721 dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
720 "freeing %s %d\n", 722 ring->hop);
721 RING_TYPE(ring),
722 ring->hop);
723 723
724 /** 724 /**
725 * ring->work can no longer be scheduled (it is scheduled only 725 * ring->work can no longer be scheduled (it is scheduled only
@@ -931,7 +931,8 @@ static int nhi_runtime_resume(struct device *dev)
931static void nhi_shutdown(struct tb_nhi *nhi) 931static void nhi_shutdown(struct tb_nhi *nhi)
932{ 932{
933 int i; 933 int i;
934 dev_info(&nhi->pdev->dev, "shutdown\n"); 934
935 dev_dbg(&nhi->pdev->dev, "shutdown\n");
935 936
936 for (i = 0; i < nhi->hop_count; i++) { 937 for (i = 0; i < nhi->hop_count; i++) {
937 if (nhi->tx_rings[i]) 938 if (nhi->tx_rings[i])
@@ -1059,7 +1060,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1059 return -ENODEV; 1060 return -ENODEV;
1060 } 1061 }
1061 1062
1062 dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n"); 1063 dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
1063 1064
1064 res = tb_domain_add(tb); 1065 res = tb_domain_add(tb);
1065 if (res) { 1066 if (res) {
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 1696a4560948..1b5d47ecd3ed 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -1,8 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - NHI driver 3 * Thunderbolt driver - NHI driver
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
6 */ 7 */
7 8
8#ifndef DSL3510_H_ 9#ifndef DSL3510_H_
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index b3e49d19c01e..a60bd98c1d04 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -3,6 +3,7 @@
3 * Thunderbolt driver - NHI registers 3 * Thunderbolt driver - NHI registers
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
6 */ 7 */
7 8
8#ifndef NHI_REGS_H_ 9#ifndef NHI_REGS_H_
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index ff49ad880bfd..a11956522bac 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -13,19 +13,19 @@
13 13
14static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop) 14static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
15{ 15{
16 tb_port_info(port, " Hop through port %d to hop %d (%s)\n", 16 tb_port_dbg(port, " Hop through port %d to hop %d (%s)\n",
17 hop->out_port, hop->next_hop, 17 hop->out_port, hop->next_hop,
18 hop->enable ? "enabled" : "disabled"); 18 hop->enable ? "enabled" : "disabled");
19 tb_port_info(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n", 19 tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
20 hop->weight, hop->priority, 20 hop->weight, hop->priority,
21 hop->initial_credits, hop->drop_packages); 21 hop->initial_credits, hop->drop_packages);
22 tb_port_info(port, " Counter enabled: %d Counter index: %d\n", 22 tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
23 hop->counter_enable, hop->counter); 23 hop->counter_enable, hop->counter);
24 tb_port_info(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n", 24 tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
25 hop->ingress_fc, hop->egress_fc, 25 hop->ingress_fc, hop->egress_fc,
26 hop->ingress_shared_buffer, hop->egress_shared_buffer); 26 hop->ingress_shared_buffer, hop->egress_shared_buffer);
27 tb_port_info(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n", 27 tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
28 hop->unknown1, hop->unknown2, hop->unknown3); 28 hop->unknown1, hop->unknown2, hop->unknown3);
29} 29}
30 30
31/** 31/**
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
index 8fe913a95b4a..b2f0d6386cee 100644
--- a/drivers/thunderbolt/property.c
+++ b/drivers/thunderbolt/property.c
@@ -1,13 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Thunderbolt XDomain property support 3 * Thunderbolt XDomain property support
3 * 4 *
4 * Copyright (C) 2017, Intel Corporation 5 * Copyright (C) 2017, Intel Corporation
5 * Authors: Michael Jamet <michael.jamet@intel.com> 6 * Authors: Michael Jamet <michael.jamet@intel.com>
6 * Mika Westerberg <mika.westerberg@linux.intel.com> 7 * Mika Westerberg <mika.westerberg@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#include <linux/err.h> 10#include <linux/err.h>
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 7442bc4c6433..52ff854f0d6c 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1,8 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - switch/port utility functions 3 * Thunderbolt driver - switch/port utility functions
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
6 */ 7 */
7 8
8#include <linux/delay.h> 9#include <linux/delay.h>
@@ -436,15 +437,15 @@ static const char *tb_port_type(struct tb_regs_port_header *port)
436 437
437static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port) 438static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
438{ 439{
439 tb_info(tb, 440 tb_dbg(tb,
440 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n", 441 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
441 port->port_number, port->vendor_id, port->device_id, 442 port->port_number, port->vendor_id, port->device_id,
442 port->revision, port->thunderbolt_version, tb_port_type(port), 443 port->revision, port->thunderbolt_version, tb_port_type(port),
443 port->type); 444 port->type);
444 tb_info(tb, " Max hop id (in/out): %d/%d\n", 445 tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
445 port->max_in_hop_id, port->max_out_hop_id); 446 port->max_in_hop_id, port->max_out_hop_id);
446 tb_info(tb, " Max counters: %d\n", port->max_counters); 447 tb_dbg(tb, " Max counters: %d\n", port->max_counters);
447 tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits); 448 tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
448} 449}
449 450
450/** 451/**
@@ -605,20 +606,18 @@ static int tb_init_port(struct tb_port *port)
605 606
606static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw) 607static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
607{ 608{
608 tb_info(tb, 609 tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
609 " Switch: %x:%x (Revision: %d, TB Version: %d)\n", 610 sw->vendor_id, sw->device_id, sw->revision,
610 sw->vendor_id, sw->device_id, sw->revision, 611 sw->thunderbolt_version);
611 sw->thunderbolt_version); 612 tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number);
612 tb_info(tb, " Max Port Number: %d\n", sw->max_port_number); 613 tb_dbg(tb, " Config:\n");
613 tb_info(tb, " Config:\n"); 614 tb_dbg(tb,
614 tb_info(tb,
615 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n", 615 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
616 sw->upstream_port_number, sw->depth, 616 sw->upstream_port_number, sw->depth,
617 (((u64) sw->route_hi) << 32) | sw->route_lo, 617 (((u64) sw->route_hi) << 32) | sw->route_lo,
618 sw->enabled, sw->plug_events_delay); 618 sw->enabled, sw->plug_events_delay);
619 tb_info(tb, 619 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
620 " unknown1: %#x unknown4: %#x\n", 620 sw->__unknown1, sw->__unknown4);
621 sw->__unknown1, sw->__unknown4);
622} 621}
623 622
624/** 623/**
@@ -634,7 +633,7 @@ int tb_switch_reset(struct tb *tb, u64 route)
634 header.route_lo = route, 633 header.route_lo = route,
635 header.enabled = true, 634 header.enabled = true,
636 }; 635 };
637 tb_info(tb, "resetting switch at %llx\n", route); 636 tb_dbg(tb, "resetting switch at %llx\n", route);
638 res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route, 637 res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
639 0, 2, 2, 2); 638 0, 2, 2, 2);
640 if (res.err) 639 if (res.err)
@@ -1139,7 +1138,7 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1139 if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5)) 1138 if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
1140 goto err_free_sw_ports; 1139 goto err_free_sw_ports;
1141 1140
1142 tb_info(tb, "current switch config:\n"); 1141 tb_dbg(tb, "current switch config:\n");
1143 tb_dump_switch(tb, &sw->config); 1142 tb_dump_switch(tb, &sw->config);
1144 1143
1145 /* configure switch */ 1144 /* configure switch */
@@ -1246,9 +1245,8 @@ int tb_switch_configure(struct tb_switch *sw)
1246 int ret; 1245 int ret;
1247 1246
1248 route = tb_route(sw); 1247 route = tb_route(sw);
1249 tb_info(tb, 1248 tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
1250 "initializing Switch at %#llx (depth: %d, up port: %d)\n", 1249 route, tb_route_length(route), sw->config.upstream_port_number);
1251 route, tb_route_length(route), sw->config.upstream_port_number);
1252 1250
1253 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) 1251 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
1254 tb_sw_warn(sw, "unknown switch vendor id %#x\n", 1252 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
@@ -1386,13 +1384,13 @@ int tb_switch_add(struct tb_switch *sw)
1386 tb_sw_warn(sw, "tb_eeprom_read_rom failed\n"); 1384 tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
1387 return ret; 1385 return ret;
1388 } 1386 }
1389 tb_sw_info(sw, "uid: %#llx\n", sw->uid); 1387 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
1390 1388
1391 tb_switch_set_uuid(sw); 1389 tb_switch_set_uuid(sw);
1392 1390
1393 for (i = 0; i <= sw->config.max_port_number; i++) { 1391 for (i = 0; i <= sw->config.max_port_number; i++) {
1394 if (sw->ports[i].disabled) { 1392 if (sw->ports[i].disabled) {
1395 tb_port_info(&sw->ports[i], "disabled by eeprom\n"); 1393 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
1396 continue; 1394 continue;
1397 } 1395 }
1398 ret = tb_init_port(&sw->ports[i]); 1396 ret = tb_init_port(&sw->ports[i]);
@@ -1405,6 +1403,14 @@ int tb_switch_add(struct tb_switch *sw)
1405 if (ret) 1403 if (ret)
1406 return ret; 1404 return ret;
1407 1405
1406 if (tb_route(sw)) {
1407 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
1408 sw->vendor, sw->device);
1409 if (sw->vendor_name && sw->device_name)
1410 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
1411 sw->device_name);
1412 }
1413
1408 ret = tb_switch_nvm_add(sw); 1414 ret = tb_switch_nvm_add(sw);
1409 if (ret) { 1415 if (ret) {
1410 device_del(&sw->dev); 1416 device_del(&sw->dev);
@@ -1456,6 +1462,9 @@ void tb_switch_remove(struct tb_switch *sw)
1456 tb_plug_events_active(sw, false); 1462 tb_plug_events_active(sw, false);
1457 1463
1458 tb_switch_nvm_remove(sw); 1464 tb_switch_nvm_remove(sw);
1465
1466 if (tb_route(sw))
1467 dev_info(&sw->dev, "device disconnected\n");
1459 device_unregister(&sw->dev); 1468 device_unregister(&sw->dev);
1460} 1469}
1461 1470
@@ -1483,7 +1492,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
1483int tb_switch_resume(struct tb_switch *sw) 1492int tb_switch_resume(struct tb_switch *sw)
1484{ 1493{
1485 int i, err; 1494 int i, err;
1486 tb_sw_info(sw, "resuming switch\n"); 1495 tb_sw_dbg(sw, "resuming switch\n");
1487 1496
1488 /* 1497 /*
1489 * Check for UID of the connected switches except for root 1498 * Check for UID of the connected switches except for root
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1424581fd9af..30e02c716f6c 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -404,10 +404,10 @@ static int tb_suspend_noirq(struct tb *tb)
404{ 404{
405 struct tb_cm *tcm = tb_priv(tb); 405 struct tb_cm *tcm = tb_priv(tb);
406 406
407 tb_info(tb, "suspending...\n"); 407 tb_dbg(tb, "suspending...\n");
408 tb_switch_suspend(tb->root_switch); 408 tb_switch_suspend(tb->root_switch);
409 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ 409 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
410 tb_info(tb, "suspend finished\n"); 410 tb_dbg(tb, "suspend finished\n");
411 411
412 return 0; 412 return 0;
413} 413}
@@ -417,7 +417,7 @@ static int tb_resume_noirq(struct tb *tb)
417 struct tb_cm *tcm = tb_priv(tb); 417 struct tb_cm *tcm = tb_priv(tb);
418 struct tb_pci_tunnel *tunnel, *n; 418 struct tb_pci_tunnel *tunnel, *n;
419 419
420 tb_info(tb, "resuming...\n"); 420 tb_dbg(tb, "resuming...\n");
421 421
422 /* remove any pci devices the firmware might have setup */ 422 /* remove any pci devices the firmware might have setup */
423 tb_switch_reset(tb, 0); 423 tb_switch_reset(tb, 0);
@@ -432,12 +432,12 @@ static int tb_resume_noirq(struct tb *tb)
432 * the pcie links need some time to get going. 432 * the pcie links need some time to get going.
433 * 100ms works for me... 433 * 100ms works for me...
434 */ 434 */
435 tb_info(tb, "tunnels restarted, sleeping for 100ms\n"); 435 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
436 msleep(100); 436 msleep(100);
437 } 437 }
438 /* Allow tb_handle_hotplug to progress events */ 438 /* Allow tb_handle_hotplug to progress events */
439 tcm->hotplug_active = true; 439 tcm->hotplug_active = true;
440 tb_info(tb, "resume finished\n"); 440 tb_dbg(tb, "resume finished\n");
441 441
442 return 0; 442 return 0;
443} 443}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 5067d69d0501..52584c4003e3 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1,8 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent) 3 * Thunderbolt driver - bus logic (NHI independent)
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
6 */ 7 */
7 8
8#ifndef TB_H_ 9#ifndef TB_H_
@@ -327,7 +328,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
327#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg) 328#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
328#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg) 329#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
329#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg) 330#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
330 331#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)
331 332
332#define __TB_SW_PRINT(level, sw, fmt, arg...) \ 333#define __TB_SW_PRINT(level, sw, fmt, arg...) \
333 do { \ 334 do { \
@@ -338,7 +339,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
338#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg) 339#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
339#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg) 340#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
340#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg) 341#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
341 342#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)
342 343
343#define __TB_PORT_PRINT(level, _port, fmt, arg...) \ 344#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
344 do { \ 345 do { \
@@ -352,6 +353,8 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
352 __TB_PORT_PRINT(tb_warn, port, fmt, ##arg) 353 __TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
353#define tb_port_info(port, fmt, arg...) \ 354#define tb_port_info(port, fmt, arg...) \
354 __TB_PORT_PRINT(tb_info, port, fmt, ##arg) 355 __TB_PORT_PRINT(tb_info, port, fmt, ##arg)
356#define tb_port_dbg(port, fmt, arg...) \
357 __TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)
355 358
356struct tb *icm_probe(struct tb_nhi *nhi); 359struct tb *icm_probe(struct tb_nhi *nhi);
357struct tb *tb_probe(struct tb_nhi *nhi); 360struct tb *tb_probe(struct tb_nhi *nhi);
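
Since the new wrappers route through dev_dbg(), the messages demoted from info level in this series remain reachable at runtime through dynamic debug. A short usage sketch:

	/* Same calling convention as the tb_info()/tb_sw_info() macros: */
	tb_dbg(tb, "control channel created\n");
	tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
	tb_port_dbg(port, "disabled by eeprom\n");

	/* From user space, with CONFIG_DYNAMIC_DEBUG enabled:
	 * # echo 'module thunderbolt +p' > /sys/kernel/debug/dynamic_debug/control
	 */
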
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 2487e162c885..02c84aa3d018 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Thunderbolt control channel messages 3 * Thunderbolt control channel messages
3 * 4 *
4 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
5 * Copyright (C) 2017, Intel Corporation 6 * Copyright (C) 2017, Intel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */ 7 */
11 8
12#ifndef _TB_MSGS 9#ifndef _TB_MSGS
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 693b0353c3fe..6f1ff04ee195 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -1,12 +1,13 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - Port/Switch config area registers 3 * Thunderbolt driver - Port/Switch config area registers
4 * 4 *
5 * Every thunderbolt device consists (logically) of a switch with multiple 5 * Every thunderbolt device consists (logically) of a switch with multiple
6 * ports. Every port contains up to four config regions (HOPS, PORT, SWITCH, 6 * ports. Every port contains up to four config regions (HOPS, PORT, SWITCH,
7 * COUNTERS) which are used to configure the device. 7 * COUNTERS) which are used to configure the device.
8 * 8 *
9 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 9 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
10 * Copyright (C) 2018, Intel Corporation
10 */ 11 */
11 12
12#ifndef _TB_REGS 13#ifndef _TB_REGS
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index db8bece63327..e27dd8beb94b 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1,13 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Thunderbolt XDomain discovery protocol support 3 * Thunderbolt XDomain discovery protocol support
3 * 4 *
4 * Copyright (C) 2017, Intel Corporation 5 * Copyright (C) 2017, Intel Corporation
5 * Authors: Michael Jamet <michael.jamet@intel.com> 6 * Authors: Michael Jamet <michael.jamet@intel.com>
6 * Mika Westerberg <mika.westerberg@linux.intel.com> 7 * Mika Westerberg <mika.westerberg@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#include <linux/device.h> 10#include <linux/device.h>
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 70a7981b94b3..85644669fbe7 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -274,6 +274,8 @@ static struct class uio_class = {
274 .dev_groups = uio_groups, 274 .dev_groups = uio_groups,
275}; 275};
276 276
277static bool uio_class_registered;
278
277/* 279/*
278 * device functions 280 * device functions
279 */ 281 */
@@ -668,7 +670,7 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
668 struct page *page; 670 struct page *page;
669 unsigned long offset; 671 unsigned long offset;
670 void *addr; 672 void *addr;
671 int ret = 0; 673 vm_fault_t ret = 0;
672 int mi; 674 int mi;
673 675
674 mutex_lock(&idev->info_lock); 676 mutex_lock(&idev->info_lock);
@@ -736,7 +738,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
736 return -EINVAL; 738 return -EINVAL;
737 739
738 vma->vm_ops = &uio_physical_vm_ops; 740 vma->vm_ops = &uio_physical_vm_ops;
739 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 741 if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
742 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
740 743
741 /* 744 /*
742 * We cannot use the vm_iomap_memory() helper here, 745 * We cannot use the vm_iomap_memory() helper here,
@@ -793,18 +796,19 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
793 } 796 }
794 797
795 switch (idev->info->mem[mi].memtype) { 798 switch (idev->info->mem[mi].memtype) {
796 case UIO_MEM_PHYS: 799 case UIO_MEM_IOVA:
797 ret = uio_mmap_physical(vma); 800 case UIO_MEM_PHYS:
798 break; 801 ret = uio_mmap_physical(vma);
799 case UIO_MEM_LOGICAL: 802 break;
800 case UIO_MEM_VIRTUAL: 803 case UIO_MEM_LOGICAL:
801 ret = uio_mmap_logical(vma); 804 case UIO_MEM_VIRTUAL:
802 break; 805 ret = uio_mmap_logical(vma);
803 default: 806 break;
804 ret = -EINVAL; 807 default:
808 ret = -EINVAL;
805 } 809 }
806 810
807out: 811 out:
808 mutex_unlock(&idev->info_lock); 812 mutex_unlock(&idev->info_lock);
809 return ret; 813 return ret;
810} 814}
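
The reworked switch above routes UIO_MEM_IOVA through the physical-mmap path, while the pgprot_noncached() change keeps such mappings cacheable (only UIO_MEM_PHYS stays non-cached). A hedged sketch of the producer side in a driver; the names and the IOVA source are assumptions:

	/* Sketch: exporting an IOVA-backed buffer to user space. */
	info->mem[0].name = "example-ring";
	info->mem[0].addr = iova;		/* e.g. from the DMA API, assumed */
	info->mem[0].size = ring_bytes;
	info->mem[0].memtype = UIO_MEM_IOVA;	/* mmap'd cacheable, unlike UIO_MEM_PHYS */
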
@@ -876,6 +880,9 @@ static int init_uio_class(void)
876 printk(KERN_ERR "class_register failed for uio\n"); 880 printk(KERN_ERR "class_register failed for uio\n");
877 goto err_class_register; 881 goto err_class_register;
878 } 882 }
883
884 uio_class_registered = true;
885
879 return 0; 886 return 0;
880 887
881err_class_register: 888err_class_register:
@@ -886,6 +893,7 @@ exit:
886 893
887static void release_uio_class(void) 894static void release_uio_class(void)
888{ 895{
896 uio_class_registered = false;
889 class_unregister(&uio_class); 897 class_unregister(&uio_class);
890 uio_major_cleanup(); 898 uio_major_cleanup();
891} 899}
@@ -912,6 +920,9 @@ int __uio_register_device(struct module *owner,
912 struct uio_device *idev; 920 struct uio_device *idev;
913 int ret = 0; 921 int ret = 0;
914 922
923 if (!uio_class_registered)
924 return -EPROBE_DEFER;
925
915 if (!parent || !info || !info->name || !info->version) 926 if (!parent || !info || !info->name || !info->version)
916 return -EINVAL; 927 return -EINVAL;
917 928
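The new uio_class_registered guard turns a registration attempt that races module init into -EPROBE_DEFER instead of a late failure. A minimal sketch of how a caller benefits, assuming a hypothetical platform driver: returning the registration status unchanged lets the driver core re-run the probe once the UIO core is up.

#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static int example_uio_probe(struct platform_device *pdev)
{
	struct uio_info *info;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->name = "example";
	info->version = "0";
	info->irq = UIO_IRQ_NONE;

	/* may now be -EPROBE_DEFER; hand it back unmodified */
	return uio_register_device(&pdev->dev, info);
}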
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index e1134a4d97f3..003badaef5f3 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -163,7 +163,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
163 dev_err(&pdev->dev, "unable to kmalloc\n"); 163 dev_err(&pdev->dev, "unable to kmalloc\n");
164 goto bad2; 164 goto bad2;
165 } 165 }
166 uioinfo->name = pdev->dev.of_node->name; 166 uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
167 pdev->dev.of_node);
167 uioinfo->version = "devicetree"; 168 uioinfo->version = "devicetree";
168 169
169 /* Multiple IRQs are not supported */ 170 /* Multiple IRQs are not supported */
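This driver, uio_fsl_elbc_gpcm and uio_pdrv_genirq below all switch from borrowing of_node->name to formatting it with the %pOFn printk specifier, which keeps working once the OF core stops storing a separate name string. A sketch of the shared pattern, with a hypothetical platform device:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const char *example_dt_name(struct platform_device *pdev)
{
	/* "%pOFn" prints the node's name; the copy is devm-managed,
	 * so it is freed together with the device */
	return devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
			      pdev->dev.of_node);
}

Callers must still check the result for NULL, since devm_kasprintf() can fail to allocate.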
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index bbc17effae5e..9cc37fe07d35 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -382,8 +382,7 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
382 } 382 }
383 383
384 /* set all UIO data */ 384 /* set all UIO data */
385 if (node->name) 385 info->mem[0].name = kasprintf(GFP_KERNEL, "%pOFn", node);
386 info->mem[0].name = kstrdup(node->name, GFP_KERNEL);
387 info->mem[0].addr = res.start; 386 info->mem[0].addr = res.start;
388 info->mem[0].size = resource_size(&res); 387 info->mem[0].size = resource_size(&res);
389 info->mem[0].memtype = UIO_MEM_PHYS; 388 info->mem[0].memtype = UIO_MEM_PHYS;
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index e401be8321ab..c2493d011225 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -17,7 +17,6 @@
17 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \ 17 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
18 * > /sys/bus/vmbus/drivers/uio_hv_generic/bind 18 * > /sys/bus/vmbus/drivers/uio_hv_generic/bind
19 */ 19 */
20#define DEBUG 1
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 21
23#include <linux/device.h> 22#include <linux/device.h>
@@ -33,13 +32,13 @@
33 32
34#include "../hv/hyperv_vmbus.h" 33#include "../hv/hyperv_vmbus.h"
35 34
36#define DRIVER_VERSION "0.02.0" 35#define DRIVER_VERSION "0.02.1"
37#define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>" 36#define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>"
38#define DRIVER_DESC "Generic UIO driver for VMBus devices" 37#define DRIVER_DESC "Generic UIO driver for VMBus devices"
39 38
40#define HV_RING_SIZE 512 /* pages */ 39#define HV_RING_SIZE 512 /* pages */
41#define SEND_BUFFER_SIZE (15 * 1024 * 1024) 40#define SEND_BUFFER_SIZE (16 * 1024 * 1024)
42#define RECV_BUFFER_SIZE (15 * 1024 * 1024) 41#define RECV_BUFFER_SIZE (31 * 1024 * 1024)
43 42
44/* 43/*
45 * List of resources to be mapped to user space 44 * List of resources to be mapped to user space
@@ -56,6 +55,7 @@ enum hv_uio_map {
56struct hv_uio_private_data { 55struct hv_uio_private_data {
57 struct uio_info info; 56 struct uio_info info;
58 struct hv_device *device; 57 struct hv_device *device;
58 atomic_t refcnt;
59 59
60 void *recv_buf; 60 void *recv_buf;
61 u32 recv_gpadl; 61 u32 recv_gpadl;
@@ -129,13 +129,12 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
129{ 129{
130 struct vmbus_channel *channel 130 struct vmbus_channel *channel
131 = container_of(kobj, struct vmbus_channel, kobj); 131 = container_of(kobj, struct vmbus_channel, kobj);
132 struct hv_device *dev = channel->primary_channel->device_obj; 132 void *ring_buffer = page_address(channel->ringbuffer_page);
133 u16 q_idx = channel->offermsg.offer.sub_channel_index;
134 133
135 dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n", 134 if (channel->state != CHANNEL_OPENED_STATE)
136 q_idx, vma_pages(vma), vma->vm_pgoff); 135 return -ENODEV;
137 136
138 return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages), 137 return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
139 channel->ringbuffer_pagecount << PAGE_SHIFT); 138 channel->ringbuffer_pagecount << PAGE_SHIFT);
140} 139}
141 140
@@ -176,58 +175,104 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
176 } 175 }
177} 176}
178 177
178/* free the reserved buffers for send and receive */
179static void 179static void
180hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata) 180hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
181{ 181{
182 if (pdata->send_gpadl) 182 if (pdata->send_gpadl) {
183 vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl); 183 vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
184 vfree(pdata->send_buf); 184 pdata->send_gpadl = 0;
185 vfree(pdata->send_buf);
186 }
185 187
186 if (pdata->recv_gpadl) 188 if (pdata->recv_gpadl) {
187 vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl); 189 vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
188 vfree(pdata->recv_buf); 190 pdata->recv_gpadl = 0;
191 vfree(pdata->recv_buf);
192 }
193}
194
195/* VMBus primary channel is opened on first use */
196static int
197hv_uio_open(struct uio_info *info, struct inode *inode)
198{
199 struct hv_uio_private_data *pdata
200 = container_of(info, struct hv_uio_private_data, info);
201 struct hv_device *dev = pdata->device;
202 int ret;
203
204 if (atomic_inc_return(&pdata->refcnt) != 1)
205 return 0;
206
207 ret = vmbus_connect_ring(dev->channel,
208 hv_uio_channel_cb, dev->channel);
209
210 if (ret == 0)
211 dev->channel->inbound.ring_buffer->interrupt_mask = 1;
212 else
213 atomic_dec(&pdata->refcnt);
214
215 return ret;
216}
217
218/* VMBus primary channel is closed on last close */
219static int
220hv_uio_release(struct uio_info *info, struct inode *inode)
221{
222 struct hv_uio_private_data *pdata
223 = container_of(info, struct hv_uio_private_data, info);
224 struct hv_device *dev = pdata->device;
225 int ret = 0;
226
227 if (atomic_dec_and_test(&pdata->refcnt))
228 ret = vmbus_disconnect_ring(dev->channel);
229
230 return ret;
189} 231}
190 232
191static int 233static int
192hv_uio_probe(struct hv_device *dev, 234hv_uio_probe(struct hv_device *dev,
193 const struct hv_vmbus_device_id *dev_id) 235 const struct hv_vmbus_device_id *dev_id)
194{ 236{
237 struct vmbus_channel *channel = dev->channel;
195 struct hv_uio_private_data *pdata; 238 struct hv_uio_private_data *pdata;
239 void *ring_buffer;
196 int ret; 240 int ret;
197 241
242 /* Communicating with host has to be via shared memory not hypercall */
243 if (!channel->offermsg.monitor_allocated) {
244 dev_err(&dev->device, "vmbus channel requires hypercall\n");
245 return -ENOTSUPP;
246 }
247
198 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 248 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
199 if (!pdata) 249 if (!pdata)
200 return -ENOMEM; 250 return -ENOMEM;
201 251
202 ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE, 252 ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE,
203 HV_RING_SIZE * PAGE_SIZE, NULL, 0, 253 HV_RING_SIZE * PAGE_SIZE);
204 hv_uio_channel_cb, dev->channel);
205 if (ret) 254 if (ret)
206 goto fail; 255 goto fail;
207 256
208 /* Communicating with host has to be via shared memory not hypercall */ 257 set_channel_read_mode(channel, HV_CALL_ISR);
209 if (!dev->channel->offermsg.monitor_allocated) {
210 dev_err(&dev->device, "vmbus channel requires hypercall\n");
211 ret = -ENOTSUPP;
212 goto fail_close;
213 }
214
215 dev->channel->inbound.ring_buffer->interrupt_mask = 1;
216 set_channel_read_mode(dev->channel, HV_CALL_ISR);
217 258
218 /* Fill general uio info */ 259 /* Fill general uio info */
219 pdata->info.name = "uio_hv_generic"; 260 pdata->info.name = "uio_hv_generic";
220 pdata->info.version = DRIVER_VERSION; 261 pdata->info.version = DRIVER_VERSION;
221 pdata->info.irqcontrol = hv_uio_irqcontrol; 262 pdata->info.irqcontrol = hv_uio_irqcontrol;
263 pdata->info.open = hv_uio_open;
264 pdata->info.release = hv_uio_release;
222 pdata->info.irq = UIO_IRQ_CUSTOM; 265 pdata->info.irq = UIO_IRQ_CUSTOM;
266 atomic_set(&pdata->refcnt, 0);
223 267
224 /* mem resources */ 268 /* mem resources */
225 pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings"; 269 pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
270 ring_buffer = page_address(channel->ringbuffer_page);
226 pdata->info.mem[TXRX_RING_MAP].addr 271 pdata->info.mem[TXRX_RING_MAP].addr
227 = (uintptr_t)dev->channel->ringbuffer_pages; 272 = (uintptr_t)virt_to_phys(ring_buffer);
228 pdata->info.mem[TXRX_RING_MAP].size 273 pdata->info.mem[TXRX_RING_MAP].size
229 = dev->channel->ringbuffer_pagecount << PAGE_SHIFT; 274 = channel->ringbuffer_pagecount << PAGE_SHIFT;
230 pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL; 275 pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;
231 276
232 pdata->info.mem[INT_PAGE_MAP].name = "int_page"; 277 pdata->info.mem[INT_PAGE_MAP].name = "int_page";
233 pdata->info.mem[INT_PAGE_MAP].addr 278 pdata->info.mem[INT_PAGE_MAP].addr
@@ -247,7 +292,7 @@ hv_uio_probe(struct hv_device *dev,
247 goto fail_close; 292 goto fail_close;
248 } 293 }
249 294
250 ret = vmbus_establish_gpadl(dev->channel, pdata->recv_buf, 295 ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
251 RECV_BUFFER_SIZE, &pdata->recv_gpadl); 296 RECV_BUFFER_SIZE, &pdata->recv_gpadl);
252 if (ret) 297 if (ret)
253 goto fail_close; 298 goto fail_close;
@@ -261,14 +306,13 @@ hv_uio_probe(struct hv_device *dev,
261 pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE; 306 pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
262 pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL; 307 pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
263 308
264
265 pdata->send_buf = vzalloc(SEND_BUFFER_SIZE); 309 pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
266 if (pdata->send_buf == NULL) { 310 if (pdata->send_buf == NULL) {
267 ret = -ENOMEM; 311 ret = -ENOMEM;
268 goto fail_close; 312 goto fail_close;
269 } 313 }
270 314
271 ret = vmbus_establish_gpadl(dev->channel, pdata->send_buf, 315 ret = vmbus_establish_gpadl(channel, pdata->send_buf,
272 SEND_BUFFER_SIZE, &pdata->send_gpadl); 316 SEND_BUFFER_SIZE, &pdata->send_gpadl);
273 if (ret) 317 if (ret)
274 goto fail_close; 318 goto fail_close;
@@ -290,10 +334,10 @@ hv_uio_probe(struct hv_device *dev,
290 goto fail_close; 334 goto fail_close;
291 } 335 }
292 336
293 vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind); 337 vmbus_set_chn_rescind_callback(channel, hv_uio_rescind);
294 vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel); 338 vmbus_set_sc_create_callback(channel, hv_uio_new_channel);
295 339
296 ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr); 340 ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
297 if (ret) 341 if (ret)
298 dev_notice(&dev->device, 342 dev_notice(&dev->device,
299 "sysfs create ring bin file failed; %d\n", ret); 343 "sysfs create ring bin file failed; %d\n", ret);
@@ -304,7 +348,6 @@ hv_uio_probe(struct hv_device *dev,
304 348
305fail_close: 349fail_close:
306 hv_uio_cleanup(dev, pdata); 350 hv_uio_cleanup(dev, pdata);
307 vmbus_close(dev->channel);
308fail: 351fail:
309 kfree(pdata); 352 kfree(pdata);
310 353
@@ -322,7 +365,8 @@ hv_uio_remove(struct hv_device *dev)
322 uio_unregister_device(&pdata->info); 365 uio_unregister_device(&pdata->info);
323 hv_uio_cleanup(dev, pdata); 366 hv_uio_cleanup(dev, pdata);
324 hv_set_drvdata(dev, NULL); 367 hv_set_drvdata(dev, NULL);
325 vmbus_close(dev->channel); 368
369 vmbus_free_ring(dev->channel);
326 kfree(pdata); 370 kfree(pdata);
327 return 0; 371 return 0;
328} 372}
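The probe path now only allocates ring memory with vmbus_alloc_ring(); the host connection is made on first open and torn down on last release, so a userspace consumer can detach and reattach cleanly. A condensed sketch of the refcounted open/release pattern, where example_channel and example_cb stand in for the driver's channel pointer and interrupt callback (both assumed):

static struct vmbus_channel *example_channel;	/* set at probe (assumed) */
static atomic_t example_refcnt = ATOMIC_INIT(0);

static void example_cb(void *context)
{
	/* interrupt callback; would signal userspace via uio_event_notify() */
}

static int example_open(struct uio_info *info, struct inode *inode)
{
	int ret = 0;

	if (atomic_inc_return(&example_refcnt) == 1) {
		ret = vmbus_connect_ring(example_channel, example_cb,
					 example_channel);
		if (ret)
			atomic_dec(&example_refcnt);
	}
	return ret;
}

static int example_release(struct uio_info *info, struct inode *inode)
{
	if (atomic_dec_and_test(&example_refcnt))
		return vmbus_disconnect_ring(example_channel);
	return 0;
}

The ring memory itself persists across opens and is only returned by vmbus_free_ring() at remove time, matching the probe/remove hunks above.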
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index f598ecddc8a7..6c759934bff3 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -118,7 +118,8 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
118 dev_err(&pdev->dev, "unable to kmalloc\n"); 118 dev_err(&pdev->dev, "unable to kmalloc\n");
119 return -ENOMEM; 119 return -ENOMEM;
120 } 120 }
121 uioinfo->name = pdev->dev.of_node->name; 121 uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
122 pdev->dev.of_node);
122 uioinfo->version = "devicetree"; 123 uioinfo->version = "devicetree";
123 /* Multiple IRQs are not supported */ 124 /* Multiple IRQs are not supported */
124 } 125 }
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 92500f6bdad1..520a5f9c27de 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1890,7 +1890,6 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
1890 1890
1891err_reg: 1891err_reg:
1892 put_device(&vdev->dev); 1892 put_device(&vdev->dev);
1893 kfree(vdev);
1894err_devalloc: 1893err_devalloc:
1895 list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) { 1894 list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
1896 list_del(&vdev->drv_list); 1895 list_del(&vdev->drv_list);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 83fc9aab34e8..3099052e1243 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -763,6 +763,8 @@ static int omap_hdq_remove(struct platform_device *pdev)
763 /* remove module dependency */ 763 /* remove module dependency */
764 pm_runtime_disable(&pdev->dev); 764 pm_runtime_disable(&pdev->dev);
765 765
766 w1_remove_master_device(&omap_w1_master);
767
766 return 0; 768 return 0;
767} 769}
768 770
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index bf641a191d07..7c4e33dbee4d 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -186,8 +186,8 @@ static int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
186 return -1; 186 return -1;
187} 187}
188 188
189static uint16_t w1_ds2438_get_voltage(struct w1_slave *sl, 189static int w1_ds2438_get_voltage(struct w1_slave *sl,
190 int adc_input, uint16_t *voltage) 190 int adc_input, uint16_t *voltage)
191{ 191{
192 unsigned int retries = W1_DS2438_RETRIES; 192 unsigned int retries = W1_DS2438_RETRIES;
193 u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/]; 193 u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
@@ -235,6 +235,25 @@ post_unlock:
235 return ret; 235 return ret;
236} 236}
237 237
238static int w1_ds2438_get_current(struct w1_slave *sl, int16_t *voltage)
239{
240 u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
241 int ret;
242
243 mutex_lock(&sl->master->bus_mutex);
244
245 if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
246 /* The voltage measured across current sense resistor RSENS. */
247 *voltage = (((int16_t) w1_buf[DS2438_CURRENT_MSB]) << 8) | ((int16_t) w1_buf[DS2438_CURRENT_LSB]);
248 ret = 0;
249 } else
250 ret = -1;
251
252 mutex_unlock(&sl->master->bus_mutex);
253
254 return ret;
255}
256
238static ssize_t iad_write(struct file *filp, struct kobject *kobj, 257static ssize_t iad_write(struct file *filp, struct kobject *kobj,
239 struct bin_attribute *bin_attr, char *buf, 258 struct bin_attribute *bin_attr, char *buf,
240 loff_t off, size_t count) 259 loff_t off, size_t count)
@@ -257,6 +276,27 @@ static ssize_t iad_write(struct file *filp, struct kobject *kobj,
257 return ret; 276 return ret;
258} 277}
259 278
279static ssize_t iad_read(struct file *filp, struct kobject *kobj,
280 struct bin_attribute *bin_attr, char *buf,
281 loff_t off, size_t count)
282{
283 struct w1_slave *sl = kobj_to_w1_slave(kobj);
284 int ret;
285 int16_t voltage;
286
287 if (off != 0)
288 return 0;
289 if (!buf)
290 return -EINVAL;
291
292 if (w1_ds2438_get_current(sl, &voltage) == 0) {
293 ret = snprintf(buf, count, "%i\n", voltage);
294 } else
295 ret = -EIO;
296
297 return ret;
298}
299
260static ssize_t page0_read(struct file *filp, struct kobject *kobj, 300static ssize_t page0_read(struct file *filp, struct kobject *kobj,
261 struct bin_attribute *bin_attr, char *buf, 301 struct bin_attribute *bin_attr, char *buf,
262 loff_t off, size_t count) 302 loff_t off, size_t count)
@@ -272,9 +312,13 @@ static ssize_t page0_read(struct file *filp, struct kobject *kobj,
272 312
273 mutex_lock(&sl->master->bus_mutex); 313 mutex_lock(&sl->master->bus_mutex);
274 314
315 /* Read no more than page0 size */
316 if (count > DS2438_PAGE_SIZE)
317 count = DS2438_PAGE_SIZE;
318
275 if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) { 319 if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
276 memcpy(buf, &w1_buf, DS2438_PAGE_SIZE); 320 memcpy(buf, &w1_buf, count);
277 ret = DS2438_PAGE_SIZE; 321 ret = count;
278 } else 322 } else
279 ret = -EIO; 323 ret = -EIO;
280 324
@@ -289,7 +333,6 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
289{ 333{
290 struct w1_slave *sl = kobj_to_w1_slave(kobj); 334 struct w1_slave *sl = kobj_to_w1_slave(kobj);
291 int ret; 335 int ret;
292 ssize_t c = PAGE_SIZE;
293 int16_t temp; 336 int16_t temp;
294 337
295 if (off != 0) 338 if (off != 0)
@@ -298,8 +341,7 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
298 return -EINVAL; 341 return -EINVAL;
299 342
300 if (w1_ds2438_get_temperature(sl, &temp) == 0) { 343 if (w1_ds2438_get_temperature(sl, &temp) == 0) {
301 c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", temp); 344 ret = snprintf(buf, count, "%i\n", temp);
302 ret = PAGE_SIZE - c;
303 } else 345 } else
304 ret = -EIO; 346 ret = -EIO;
305 347
@@ -312,7 +354,6 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
312{ 354{
313 struct w1_slave *sl = kobj_to_w1_slave(kobj); 355 struct w1_slave *sl = kobj_to_w1_slave(kobj);
314 int ret; 356 int ret;
315 ssize_t c = PAGE_SIZE;
316 uint16_t voltage; 357 uint16_t voltage;
317 358
318 if (off != 0) 359 if (off != 0)
@@ -321,8 +362,7 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
321 return -EINVAL; 362 return -EINVAL;
322 363
323 if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0) { 364 if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0) {
324 c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage); 365 ret = snprintf(buf, count, "%u\n", voltage);
325 ret = PAGE_SIZE - c;
326 } else 366 } else
327 ret = -EIO; 367 ret = -EIO;
328 368
@@ -335,7 +375,6 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
335{ 375{
336 struct w1_slave *sl = kobj_to_w1_slave(kobj); 376 struct w1_slave *sl = kobj_to_w1_slave(kobj);
337 int ret; 377 int ret;
338 ssize_t c = PAGE_SIZE;
339 uint16_t voltage; 378 uint16_t voltage;
340 379
341 if (off != 0) 380 if (off != 0)
@@ -344,15 +383,14 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
344 return -EINVAL; 383 return -EINVAL;
345 384
346 if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0) { 385 if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0) {
347 c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage); 386 ret = snprintf(buf, count, "%u\n", voltage);
348 ret = PAGE_SIZE - c;
349 } else 387 } else
350 ret = -EIO; 388 ret = -EIO;
351 389
352 return ret; 390 return ret;
353} 391}
354 392
355static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, NULL, iad_write, 1); 393static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, iad_read, iad_write, 0);
356static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE); 394static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
357static BIN_ATTR_RO(temperature, 0/* real length varies */); 395static BIN_ATTR_RO(temperature, 0/* real length varies */);
358static BIN_ATTR_RO(vad, 0/* real length varies */); 396static BIN_ATTR_RO(vad, 0/* real length varies */);
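The iad attribute becomes readable and returns the signed voltage measured across the current-sense resistor as decimal text. A hypothetical userspace read; the device directory name below is a placeholder, as it depends on the slave's 1-Wire id (family 0x26 is the DS2438):

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd;

	/* the "26-0123456789ab" id is a placeholder */
	fd = open("/sys/bus/w1/devices/26-0123456789ab/iad", O_RDONLY);
	if (fd < 0)
		err(1, "open");
	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0)
		err(1, "read");
	buf[n] = '\0';
	printf("current-sense voltage: %s", buf);
	return 0;
}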
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index d828a6efe0b1..46c67a764877 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -94,20 +94,15 @@ union coresight_dev_subtype {
94 * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. 94 * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
95 * @name: name of the component as shown under sysfs. 95 * @name: name of the component as shown under sysfs.
96 * @nr_inport: number of input ports for this component. 96 * @nr_inport: number of input ports for this component.
97 * @outports: list of remote endpoint port number.
98 * @child_names:name of all child components connected to this device.
99 * @child_ports:child component port number the current component is
100 connected to.
101 * @nr_outport: number of output ports for this component. 97 * @nr_outport: number of output ports for this component.
98 * @conns: Array of nr_outport connections from this component
102 */ 99 */
103struct coresight_platform_data { 100struct coresight_platform_data {
104 int cpu; 101 int cpu;
105 const char *name; 102 const char *name;
106 int nr_inport; 103 int nr_inport;
107 int *outports;
108 const char **child_names;
109 int *child_ports;
110 int nr_outport; 104 int nr_outport;
105 struct coresight_connection *conns;
111}; 106};
112 107
113/** 108/**
@@ -190,23 +185,15 @@ struct coresight_device {
190 * @disable: disables the sink. 185 * @disable: disables the sink.
191 * @alloc_buffer: initialises perf's ring buffer for trace collection. 186 * @alloc_buffer: initialises perf's ring buffer for trace collection.
192 * @free_buffer: release memory allocated in @get_config. 187 * @free_buffer: release memory allocated in @get_config.
193 * @set_buffer: initialises buffer mechanic before a trace session.
194 * @reset_buffer: finalises buffer mechanic after a trace session.
195 * @update_buffer: update buffer pointers after a trace session. 188 * @update_buffer: update buffer pointers after a trace session.
196 */ 189 */
197struct coresight_ops_sink { 190struct coresight_ops_sink {
198 int (*enable)(struct coresight_device *csdev, u32 mode); 191 int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
199 void (*disable)(struct coresight_device *csdev); 192 void (*disable)(struct coresight_device *csdev);
200 void *(*alloc_buffer)(struct coresight_device *csdev, int cpu, 193 void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
201 void **pages, int nr_pages, bool overwrite); 194 void **pages, int nr_pages, bool overwrite);
202 void (*free_buffer)(void *config); 195 void (*free_buffer)(void *config);
203 int (*set_buffer)(struct coresight_device *csdev, 196 unsigned long (*update_buffer)(struct coresight_device *csdev,
204 struct perf_output_handle *handle,
205 void *sink_config);
206 unsigned long (*reset_buffer)(struct coresight_device *csdev,
207 struct perf_output_handle *handle,
208 void *sink_config);
209 void (*update_buffer)(struct coresight_device *csdev,
210 struct perf_output_handle *handle, 197 struct perf_output_handle *handle,
211 void *sink_config); 198 void *sink_config);
212}; 199};
@@ -270,6 +257,13 @@ extern int coresight_enable(struct coresight_device *csdev);
270extern void coresight_disable(struct coresight_device *csdev); 257extern void coresight_disable(struct coresight_device *csdev);
271extern int coresight_timeout(void __iomem *addr, u32 offset, 258extern int coresight_timeout(void __iomem *addr, u32 offset,
272 int position, int value); 259 int position, int value);
260
261extern int coresight_claim_device(void __iomem *base);
262extern int coresight_claim_device_unlocked(void __iomem *base);
263
264extern void coresight_disclaim_device(void __iomem *base);
265extern void coresight_disclaim_device_unlocked(void __iomem *base);
266
273#else 267#else
274static inline struct coresight_device * 268static inline struct coresight_device *
275coresight_register(struct coresight_desc *desc) { return NULL; } 269coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -279,6 +273,19 @@ coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
279static inline void coresight_disable(struct coresight_device *csdev) {} 273static inline void coresight_disable(struct coresight_device *csdev) {}
280static inline int coresight_timeout(void __iomem *addr, u32 offset, 274static inline int coresight_timeout(void __iomem *addr, u32 offset,
281 int position, int value) { return 1; } 275 int position, int value) { return 1; }
276static inline int coresight_claim_device_unlocked(void __iomem *base)
277{
278 return -EINVAL;
279}
280
281static inline int coresight_claim_device(void __iomem *base)
282{
283 return -EINVAL;
284}
285
286static inline void coresight_disclaim_device(void __iomem *base) {}
287static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
288
282#endif 289#endif
283 290
284#ifdef CONFIG_OF 291#ifdef CONFIG_OF
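The new claim/disclaim helpers arbitrate component ownership between self-hosted kernel tracing and an external debugger. A hedged sketch of the intended call pattern in a driver's enable/disable paths, with the example_* names assumed:

static int example_enable_hw(void __iomem *base)
{
	int ret;

	/* back off if an external agent has already claimed the device */
	ret = coresight_claim_device(base);
	if (ret)
		return ret;

	/* ... program the component registers ... */
	return 0;
}

static void example_disable_hw(void __iomem *base)
{
	/* ... stop the component ... */
	coresight_disclaim_device(base);
}

The _unlocked variants exist for callers that already hold the component's register lock.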
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index ce550fcf6360..817600a32c93 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -69,4 +69,8 @@ void fpga_bridge_free(struct fpga_bridge *br);
69int fpga_bridge_register(struct fpga_bridge *br); 69int fpga_bridge_register(struct fpga_bridge *br);
70void fpga_bridge_unregister(struct fpga_bridge *br); 70void fpga_bridge_unregister(struct fpga_bridge *br);
71 71
72struct fpga_bridge
73*devm_fpga_bridge_create(struct device *dev, const char *name,
74 const struct fpga_bridge_ops *br_ops, void *priv);
75
72#endif /* _LINUX_FPGA_BRIDGE_H */ 76#endif /* _LINUX_FPGA_BRIDGE_H */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 8ab5df769923..e8ca62b2cb5b 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -198,4 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr);
198int fpga_mgr_register(struct fpga_manager *mgr); 198int fpga_mgr_register(struct fpga_manager *mgr);
199void fpga_mgr_unregister(struct fpga_manager *mgr); 199void fpga_mgr_unregister(struct fpga_manager *mgr);
200 200
201struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
202 const struct fpga_manager_ops *mops,
203 void *priv);
204
201#endif /*_LINUX_FPGA_MGR_H */ 205#endif /*_LINUX_FPGA_MGR_H */
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 0521b7f577a4..27cb706275db 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -44,4 +44,8 @@ void fpga_region_free(struct fpga_region *region);
44int fpga_region_register(struct fpga_region *region); 44int fpga_region_register(struct fpga_region *region);
45void fpga_region_unregister(struct fpga_region *region); 45void fpga_region_unregister(struct fpga_region *region);
46 46
47struct fpga_region
48*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr,
49 int (*get_bridges)(struct fpga_region *));
50
47#endif /* _FPGA_REGION_H */ 51#endif /* _FPGA_REGION_H */
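All three new devm_*_create() helpers (bridge, manager, region) follow the same pattern: device-managed allocation replaces the create/free pair, leaving only register/unregister in the driver's hands. A sketch for the manager case, assuming a hypothetical ops table:

static const struct fpga_manager_ops example_mgr_ops;	/* ops assumed */

static int example_probe(struct platform_device *pdev)
{
	struct fpga_manager *mgr;

	mgr = devm_fpga_mgr_create(&pdev->dev, "Example FPGA Manager",
				   &example_mgr_ops, NULL);
	if (!mgr)
		return -ENOMEM;

	platform_set_drvdata(pdev, mgr);

	/* pair with fpga_mgr_unregister() in .remove(); no explicit free */
	return fpga_mgr_register(mgr);
}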
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index efda23cf32c7..b3e24368930a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -739,8 +739,9 @@ struct vmbus_channel {
739 u32 ringbuffer_gpadlhandle; 739 u32 ringbuffer_gpadlhandle;
740 740
741 /* Allocated memory for ring buffer */ 741 /* Allocated memory for ring buffer */
742 void *ringbuffer_pages; 742 struct page *ringbuffer_page;
743 u32 ringbuffer_pagecount; 743 u32 ringbuffer_pagecount;
744 u32 ringbuffer_send_offset;
744 struct hv_ring_buffer_info outbound; /* send to parent */ 745 struct hv_ring_buffer_info outbound; /* send to parent */
745 struct hv_ring_buffer_info inbound; /* receive from parent */ 746 struct hv_ring_buffer_info inbound; /* receive from parent */
746 747
@@ -1021,6 +1022,14 @@ struct vmbus_packet_mpb_array {
1021 struct hv_mpb_array range; 1022 struct hv_mpb_array range;
1022} __packed; 1023} __packed;
1023 1024
1025int vmbus_alloc_ring(struct vmbus_channel *channel,
1026 u32 send_size, u32 recv_size);
1027void vmbus_free_ring(struct vmbus_channel *channel);
1028
1029int vmbus_connect_ring(struct vmbus_channel *channel,
1030 void (*onchannel_callback)(void *context),
1031 void *context);
1032int vmbus_disconnect_ring(struct vmbus_channel *channel);
1024 1033
1025extern int vmbus_open(struct vmbus_channel *channel, 1034extern int vmbus_open(struct vmbus_channel *channel,
1026 u32 send_ringbuffersize, 1035 u32 send_ringbuffersize,
@@ -1125,6 +1134,7 @@ struct hv_device {
1125 u16 device_id; 1134 u16 device_id;
1126 1135
1127 struct device device; 1136 struct device device;
1137 char *driver_override; /* Driver name to force a match */
1128 1138
1129 struct vmbus_channel *channel; 1139 struct vmbus_channel *channel;
1130 struct kset *channels_kset; 1140 struct kset *channels_kset;
@@ -1442,7 +1452,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1442 const int *srv_version, int srv_vercnt, 1452 const int *srv_version, int srv_vercnt,
1443 int *nego_fw_version, int *nego_srv_version); 1453 int *nego_fw_version, int *nego_srv_version);
1444 1454
1445void hv_process_channel_removal(u32 relid); 1455void hv_process_channel_removal(struct vmbus_channel *channel);
1446 1456
1447void vmbus_setevent(struct vmbus_channel *channel); 1457void vmbus_setevent(struct vmbus_channel *channel);
1448/* 1458/*
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 4e85447f7860..312bfa5efd80 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * nvmem framework consumer. 3 * nvmem framework consumer.
3 * 4 *
4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> 6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */ 7 */
11 8
12#ifndef _LINUX_NVMEM_CONSUMER_H 9#ifndef _LINUX_NVMEM_CONSUMER_H
@@ -14,6 +11,7 @@
14 11
15#include <linux/err.h> 12#include <linux/err.h>
16#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/notifier.h>
17 15
18struct device; 16struct device;
19struct device_node; 17struct device_node;
@@ -29,11 +27,36 @@ struct nvmem_cell_info {
29 unsigned int nbits; 27 unsigned int nbits;
30}; 28};
31 29
30/**
31 * struct nvmem_cell_lookup - cell lookup entry
32 *
33 * @nvmem_name: Name of the provider.
34 * @cell_name: Name of the nvmem cell as defined in the name field of
35 * struct nvmem_cell_info.
36 * @dev_id: Name of the consumer device that will be associated with
37 * this cell.
38 * @con_id: Connector id for this cell lookup.
39 */
40struct nvmem_cell_lookup {
41 const char *nvmem_name;
42 const char *cell_name;
43 const char *dev_id;
44 const char *con_id;
45 struct list_head node;
46};
47
48enum {
49 NVMEM_ADD = 1,
50 NVMEM_REMOVE,
51 NVMEM_CELL_ADD,
52 NVMEM_CELL_REMOVE,
53};
54
32#if IS_ENABLED(CONFIG_NVMEM) 55#if IS_ENABLED(CONFIG_NVMEM)
33 56
34/* Cell based interface */ 57/* Cell based interface */
35struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name); 58struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id);
36struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name); 59struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id);
37void nvmem_cell_put(struct nvmem_cell *cell); 60void nvmem_cell_put(struct nvmem_cell *cell);
38void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); 61void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
39void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); 62void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
@@ -55,18 +78,28 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
55int nvmem_device_cell_write(struct nvmem_device *nvmem, 78int nvmem_device_cell_write(struct nvmem_device *nvmem,
56 struct nvmem_cell_info *info, void *buf); 79 struct nvmem_cell_info *info, void *buf);
57 80
81const char *nvmem_dev_name(struct nvmem_device *nvmem);
82
83void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
84 size_t nentries);
85void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries,
86 size_t nentries);
87
88int nvmem_register_notifier(struct notifier_block *nb);
89int nvmem_unregister_notifier(struct notifier_block *nb);
90
58#else 91#else
59 92
60static inline struct nvmem_cell *nvmem_cell_get(struct device *dev, 93static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
61 const char *name) 94 const char *id)
62{ 95{
63 return ERR_PTR(-ENOSYS); 96 return ERR_PTR(-EOPNOTSUPP);
64} 97}
65 98
66static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, 99static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
67 const char *name) 100 const char *id)
68{ 101{
69 return ERR_PTR(-ENOSYS); 102 return ERR_PTR(-EOPNOTSUPP);
70} 103}
71 104
72static inline void devm_nvmem_cell_put(struct device *dev, 105static inline void devm_nvmem_cell_put(struct device *dev,
@@ -80,31 +113,31 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell)
80 113
81static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) 114static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
82{ 115{
83 return ERR_PTR(-ENOSYS); 116 return ERR_PTR(-EOPNOTSUPP);
84} 117}
85 118
86static inline int nvmem_cell_write(struct nvmem_cell *cell, 119static inline int nvmem_cell_write(struct nvmem_cell *cell,
87 const char *buf, size_t len) 120 const char *buf, size_t len)
88{ 121{
89 return -ENOSYS; 122 return -EOPNOTSUPP;
90} 123}
91 124
92static inline int nvmem_cell_read_u32(struct device *dev, 125static inline int nvmem_cell_read_u32(struct device *dev,
93 const char *cell_id, u32 *val) 126 const char *cell_id, u32 *val)
94{ 127{
95 return -ENOSYS; 128 return -EOPNOTSUPP;
96} 129}
97 130
98static inline struct nvmem_device *nvmem_device_get(struct device *dev, 131static inline struct nvmem_device *nvmem_device_get(struct device *dev,
99 const char *name) 132 const char *name)
100{ 133{
101 return ERR_PTR(-ENOSYS); 134 return ERR_PTR(-EOPNOTSUPP);
102} 135}
103 136
104static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev, 137static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
105 const char *name) 138 const char *name)
106{ 139{
107 return ERR_PTR(-ENOSYS); 140 return ERR_PTR(-EOPNOTSUPP);
108} 141}
109 142
110static inline void nvmem_device_put(struct nvmem_device *nvmem) 143static inline void nvmem_device_put(struct nvmem_device *nvmem)
@@ -120,47 +153,68 @@ static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
120 struct nvmem_cell_info *info, 153 struct nvmem_cell_info *info,
121 void *buf) 154 void *buf)
122{ 155{
123 return -ENOSYS; 156 return -EOPNOTSUPP;
124} 157}
125 158
126static inline int nvmem_device_cell_write(struct nvmem_device *nvmem, 159static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
127 struct nvmem_cell_info *info, 160 struct nvmem_cell_info *info,
128 void *buf) 161 void *buf)
129{ 162{
130 return -ENOSYS; 163 return -EOPNOTSUPP;
131} 164}
132 165
133static inline int nvmem_device_read(struct nvmem_device *nvmem, 166static inline int nvmem_device_read(struct nvmem_device *nvmem,
134 unsigned int offset, size_t bytes, 167 unsigned int offset, size_t bytes,
135 void *buf) 168 void *buf)
136{ 169{
137 return -ENOSYS; 170 return -EOPNOTSUPP;
138} 171}
139 172
140static inline int nvmem_device_write(struct nvmem_device *nvmem, 173static inline int nvmem_device_write(struct nvmem_device *nvmem,
141 unsigned int offset, size_t bytes, 174 unsigned int offset, size_t bytes,
142 void *buf) 175 void *buf)
143{ 176{
144 return -ENOSYS; 177 return -EOPNOTSUPP;
145} 178}
179
180static inline const char *nvmem_dev_name(struct nvmem_device *nvmem)
181{
182 return NULL;
183}
184
185static inline void
186nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
187static inline void
188nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
189
190static inline int nvmem_register_notifier(struct notifier_block *nb)
191{
192 return -EOPNOTSUPP;
193}
194
195static inline int nvmem_unregister_notifier(struct notifier_block *nb)
196{
197 return -EOPNOTSUPP;
198}
199
146#endif /* CONFIG_NVMEM */ 200#endif /* CONFIG_NVMEM */
147 201
148#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) 202#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
149struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, 203struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
150 const char *name); 204 const char *id);
151struct nvmem_device *of_nvmem_device_get(struct device_node *np, 205struct nvmem_device *of_nvmem_device_get(struct device_node *np,
152 const char *name); 206 const char *name);
153#else 207#else
154static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, 208static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
155 const char *name) 209 const char *id)
156{ 210{
157 return ERR_PTR(-ENOSYS); 211 return ERR_PTR(-EOPNOTSUPP);
158} 212}
159 213
160static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np, 214static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
161 const char *name) 215 const char *name)
162{ 216{
163 return ERR_PTR(-ENOSYS); 217 return ERR_PTR(-EOPNOTSUPP);
164} 218}
165#endif /* CONFIG_NVMEM && CONFIG_OF */ 219#endif /* CONFIG_NVMEM && CONFIG_OF */
166 220
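With cell lookups, non-DT board code can route a provider's cell to a consumer device, after which the consumer's plain nvmem_cell_get(dev, "mac-address") call resolves. A sketch with hypothetical provider and consumer names:

#include <linux/kernel.h>
#include <linux/nvmem-consumer.h>

static struct nvmem_cell_lookup example_lookups[] = {
	{
		.nvmem_name = "eeprom0",
		.cell_name  = "mac-address",
		.dev_id     = "ethernet0",
		.con_id     = "mac-address",
	},
};

static int __init example_board_init(void)
{
	nvmem_add_cell_lookups(example_lookups,
			       ARRAY_SIZE(example_lookups));
	return 0;
}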
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 24def6ad09bb..1e3283c2af77 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * nvmem framework provider. 3 * nvmem framework provider.
3 * 4 *
4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> 6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */ 7 */
11 8
12#ifndef _LINUX_NVMEM_PROVIDER_H 9#ifndef _LINUX_NVMEM_PROVIDER_H
@@ -67,30 +64,46 @@ struct nvmem_config {
67 struct device *base_dev; 64 struct device *base_dev;
68}; 65};
69 66
67/**
68 * struct nvmem_cell_table - NVMEM cell definitions for given provider
69 *
70 * @nvmem_name: Provider name.
71 * @cells: Array of cell definitions.
72 * @ncells: Number of cell definitions in the array.
73 * @node: List node.
74 *
75 * This structure together with related helper functions is provided for users
76 * that can't access the nvmem provider structure but wish to register
77 * cell definitions for it, e.g. board files registering an EEPROM device.
78 */
79struct nvmem_cell_table {
80 const char *nvmem_name;
81 const struct nvmem_cell_info *cells;
82 size_t ncells;
83 struct list_head node;
84};
85
70#if IS_ENABLED(CONFIG_NVMEM) 86#if IS_ENABLED(CONFIG_NVMEM)
71 87
72struct nvmem_device *nvmem_register(const struct nvmem_config *cfg); 88struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
73int nvmem_unregister(struct nvmem_device *nvmem); 89void nvmem_unregister(struct nvmem_device *nvmem);
74 90
75struct nvmem_device *devm_nvmem_register(struct device *dev, 91struct nvmem_device *devm_nvmem_register(struct device *dev,
76 const struct nvmem_config *cfg); 92 const struct nvmem_config *cfg);
77 93
78int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); 94int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
79 95
80int nvmem_add_cells(struct nvmem_device *nvmem, 96void nvmem_add_cell_table(struct nvmem_cell_table *table);
81 const struct nvmem_cell_info *info, 97void nvmem_del_cell_table(struct nvmem_cell_table *table);
82 int ncells); 98
83#else 99#else
84 100
85static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) 101static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
86{ 102{
87 return ERR_PTR(-ENOSYS); 103 return ERR_PTR(-EOPNOTSUPP);
88} 104}
89 105
90static inline int nvmem_unregister(struct nvmem_device *nvmem) 106static inline void nvmem_unregister(struct nvmem_device *nvmem) {}
91{
92 return -ENOSYS;
93}
94 107
95static inline struct nvmem_device * 108static inline struct nvmem_device *
96devm_nvmem_register(struct device *dev, const struct nvmem_config *c) 109devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
@@ -101,16 +114,11 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
101static inline int 114static inline int
102devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) 115devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
103{ 116{
104 return nvmem_unregister(nvmem); 117 return -EOPNOTSUPP;
105
106} 118}
107 119
108static inline int nvmem_add_cells(struct nvmem_device *nvmem, 120static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
109 const struct nvmem_cell_info *info, 121static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
110 int ncells)
111{
112 return -ENOSYS;
113}
114 122
115#endif /* CONFIG_NVMEM */ 123#endif /* CONFIG_NVMEM */
116#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ 124#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
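nvmem_add_cell_table() is the provider-side counterpart of the consumer lookups above: board files can attach cell definitions to a provider they cannot otherwise reach. A sketch, with the provider name and cell layout assumed:

#include <linux/kernel.h>
#include <linux/nvmem-provider.h>

static const struct nvmem_cell_info example_cells[] = {
	{
		.name	= "mac-address",
		.offset	= 0x00,
		.bytes	= 6,
	},
};

static struct nvmem_cell_table example_table = {
	.nvmem_name = "eeprom0",
	.cells	    = example_cells,
	.ncells	    = ARRAY_SIZE(example_cells),
};

static int __init example_cells_init(void)
{
	nvmem_add_cell_table(&example_table);
	return 0;
}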
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 962971e6a9c7..df313913e856 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -678,6 +678,9 @@ struct sdw_master_ops {
678 * @defer_msg: Defer message 678 * @defer_msg: Defer message
679 * @clk_stop_timeout: Clock stop timeout computed 679 * @clk_stop_timeout: Clock stop timeout computed
680 * @bank_switch_timeout: Bank switch timeout computed 680 * @bank_switch_timeout: Bank switch timeout computed
681 * @multi_link: Store bus property that indicates if multi links
682 * are supported. This flag is populated by drivers after reading
683 * appropriate firmware (ACPI/DT).
681 */ 684 */
682struct sdw_bus { 685struct sdw_bus {
683 struct device *dev; 686 struct device *dev;
@@ -694,6 +697,7 @@ struct sdw_bus {
694 struct sdw_defer defer_msg; 697 struct sdw_defer defer_msg;
695 unsigned int clk_stop_timeout; 698 unsigned int clk_stop_timeout;
696 u32 bank_switch_timeout; 699 u32 bank_switch_timeout;
700 bool multi_link;
697}; 701};
698 702
699int sdw_add_bus_master(struct sdw_bus *bus); 703int sdw_add_bus_master(struct sdw_bus *bus);
@@ -768,14 +772,18 @@ struct sdw_stream_params {
768 * @params: Stream parameters 772 * @params: Stream parameters
769 * @state: Current state of the stream 773 * @state: Current state of the stream
770 * @type: Stream type PCM or PDM 774 * @type: Stream type PCM or PDM
771 * @m_rt: Master runtime 775 * @master_list: List of Master runtime(s) in this stream.
776 * master_list can contain only one m_rt per Master instance
777 * for a stream
778 * @m_rt_count: Count of Master runtime(s) in this stream
772 */ 779 */
773struct sdw_stream_runtime { 780struct sdw_stream_runtime {
774 char *name; 781 char *name;
775 struct sdw_stream_params params; 782 struct sdw_stream_params params;
776 enum sdw_stream_state state; 783 enum sdw_stream_state state;
777 enum sdw_stream_type type; 784 enum sdw_stream_type type;
778 struct sdw_master_runtime *m_rt; 785 struct list_head master_list;
786 int m_rt_count;
779}; 787};
780 788
781struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name); 789struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name);
diff --git a/include/linux/string.h b/include/linux/string.h
index 4a5a0eb7df51..27d0482e5e05 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -131,6 +131,13 @@ static inline void *memset_p(void **p, void *v, __kernel_size_t n)
131 return memset64((uint64_t *)p, (uintptr_t)v, n); 131 return memset64((uint64_t *)p, (uintptr_t)v, n);
132} 132}
133 133
134extern void **__memcat_p(void **a, void **b);
135#define memcat_p(a, b) ({ \
136 BUILD_BUG_ON_MSG(!__same_type(*(a), *(b)), \
137 "type mismatch in memcat_p()"); \
138 (typeof(*a) *)__memcat_p((void **)(a), (void **)(b)); \
139})
140
134#ifndef __HAVE_ARCH_MEMCPY 141#ifndef __HAVE_ARCH_MEMCPY
135extern void * memcpy(void *,const void *,__kernel_size_t); 142extern void * memcpy(void *,const void *,__kernel_size_t);
136#endif 143#endif
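memcat_p() merges two NULL-terminated pointer arrays of the same element type into one freshly allocated, NULL-terminated array; the BUILD_BUG_ON_MSG rejects mismatched types at compile time. A sketch with hypothetical attribute groups:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static const struct attribute_group example_grp0 = { .name = "grp0" };
static const struct attribute_group example_grp1 = { .name = "grp1" };

static const struct attribute_group *a[] = { &example_grp0, NULL };
static const struct attribute_group *b[] = { &example_grp1, NULL };

static int example_merge(void)
{
	const struct attribute_group **merged;

	/* merged is {&example_grp0, &example_grp1, NULL} */
	merged = memcat_p(a, b);
	if (!merged)
		return -ENOMEM;

	/* result comes from kmalloc_array(); kfree() when done */
	kfree(merged);
	return 0;
}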
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index a3ed26082bc1..bf6ec83e60ee 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Thunderbolt service API 3 * Thunderbolt service API
3 * 4 *
@@ -5,10 +6,6 @@
5 * Copyright (C) 2017, Intel Corporation 6 * Copyright (C) 2017, Intel Corporation
6 * Authors: Michael Jamet <michael.jamet@intel.com> 7 * Authors: Michael Jamet <michael.jamet@intel.com>
7 * Mika Westerberg <mika.westerberg@linux.intel.com> 8 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */ 9 */
13 10
14#ifndef THUNDERBOLT_H_ 11#ifndef THUNDERBOLT_H_
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 6f8b68cd460f..a3cd7cb67a69 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -133,6 +133,7 @@ extern void uio_event_notify(struct uio_info *info);
133#define UIO_MEM_PHYS 1 133#define UIO_MEM_PHYS 1
134#define UIO_MEM_LOGICAL 2 134#define UIO_MEM_LOGICAL 2
135#define UIO_MEM_VIRTUAL 3 135#define UIO_MEM_VIRTUAL 3
136#define UIO_MEM_IOVA 4
136 137
137/* defines for uio_port->porttype */ 138/* defines for uio_port->porttype */
138#define UIO_PORT_NONE 0 139#define UIO_PORT_NONE 0
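UIO_MEM_IOVA lets a driver export a region by its bus address; uio_mmap() above maps it like UIO_MEM_PHYS but without forcing non-cached protection. A sketch of filling such a region, mirroring what uio_hv_generic does for its ring (field names from struct uio_mem, driver context assumed):

static void example_fill_iova_map(struct uio_info *info, void *ring,
				  size_t size)
{
	info->mem[0].name    = "txrx_rings";
	info->mem[0].addr    = (uintptr_t)virt_to_phys(ring);
	info->mem[0].size    = size;
	info->mem[0].memtype = UIO_MEM_IOVA;
}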
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index bfaec6903b8b..b9ba520f7e4b 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -200,6 +200,15 @@ struct binder_node_debug_info {
200 __u32 has_weak_ref; 200 __u32 has_weak_ref;
201}; 201};
202 202
203struct binder_node_info_for_ref {
204 __u32 handle;
205 __u32 strong_count;
206 __u32 weak_count;
207 __u32 reserved1;
208 __u32 reserved2;
209 __u32 reserved3;
210};
211
203#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) 212#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
204#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) 213#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
205#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) 214#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -208,6 +217,7 @@ struct binder_node_debug_info {
208#define BINDER_THREAD_EXIT _IOW('b', 8, __s32) 217#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
209#define BINDER_VERSION _IOWR('b', 9, struct binder_version) 218#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
210#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) 219#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
220#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
211 221
212/* 222/*
213 * NOTE: Two special error codes you should check for when calling 223 * NOTE: Two special error codes you should check for when calling
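The new ioctl reports a node's reference counts given a handle held by the calling process; the kernel rejects requests whose count or reserved fields are nonzero, so the struct must be cleared first. A hypothetical userspace query (binder_fd and handle assumed to be valid):

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int example_query_counts(int binder_fd, __u32 handle)
{
	struct binder_node_info_for_ref info = {
		.handle = handle,	/* all other fields must stay zero */
	};

	if (ioctl(binder_fd, BINDER_GET_NODE_INFO_FOR_REF, &info) < 0)
		return -errno;

	printf("strong %u weak %u\n", info.strong_count, info.weak_count);
	return 0;
}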
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8d24f4ed66fd..04adfc3b185e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1965,6 +1965,14 @@ config TEST_DEBUG_VIRTUAL
1965 1965
1966 If unsure, say N. 1966 If unsure, say N.
1967 1967
1968config TEST_MEMCAT_P
1969 tristate "Test memcat_p() helper function"
1970 help
1971 Test the memcat_p() helper for correctly merging two
1972 pointer arrays together.
1973
1974 If unsure, say N.
1975
1968endif # RUNTIME_TESTING_MENU 1976endif # RUNTIME_TESTING_MENU
1969 1977
1970config MEMTEST 1978config MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index 56a8d9c23ef3..fa3eb1b4c0e3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -24,7 +24,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
24 flex_proportions.o ratelimit.o show_mem.o \ 24 flex_proportions.o ratelimit.o show_mem.o \
25 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 25 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
26 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ 26 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
27 nmi_backtrace.o nodemask.o win_minmax.o 27 nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
28 28
29lib-$(CONFIG_PRINTK) += dump_stack.o 29lib-$(CONFIG_PRINTK) += dump_stack.o
30lib-$(CONFIG_MMU) += ioremap.o 30lib-$(CONFIG_MMU) += ioremap.o
@@ -71,6 +71,7 @@ obj-$(CONFIG_TEST_UUID) += test_uuid.o
71obj-$(CONFIG_TEST_PARMAN) += test_parman.o 71obj-$(CONFIG_TEST_PARMAN) += test_parman.o
72obj-$(CONFIG_TEST_KMOD) += test_kmod.o 72obj-$(CONFIG_TEST_KMOD) += test_kmod.o
73obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o 73obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
74obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
74 75
75ifeq ($(CONFIG_DEBUG_KOBJECT),y) 76ifeq ($(CONFIG_DEBUG_KOBJECT),y)
76CFLAGS_kobject.o += -DDEBUG 77CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/memcat_p.c b/lib/memcat_p.c
new file mode 100644
index 000000000000..b810fbc66962
--- /dev/null
+++ b/lib/memcat_p.c
@@ -0,0 +1,34 @@
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/slab.h>
4
5/*
6 * Merge two NULL-terminated pointer arrays into a newly allocated
7 * array, which is also NULL-terminated. Nomenclature is inspired by
8 * memset_p() and memcat() found elsewhere in the kernel source tree.
9 */
10void **__memcat_p(void **a, void **b)
11{
12 void **p = a, **new;
13 int nr;
14
15 /* count the elements in both arrays */
16 for (nr = 0, p = a; *p; nr++, p++)
17 ;
18 for (p = b; *p; nr++, p++)
19 ;
20 /* one for the NULL-terminator */
21 nr++;
22
23 new = kmalloc_array(nr, sizeof(void *), GFP_KERNEL);
24 if (!new)
25 return NULL;
26
27 /* nr -> last index; p points to NULL in b[] */
28 for (nr--; nr >= 0; nr--, p = p == b ? &a[nr] : p - 1)
29 new[nr] = *p;
30
31 return new;
32}
33EXPORT_SYMBOL_GPL(__memcat_p);
34
diff --git a/lib/string.c b/lib/string.c
index 2c0900a5d51a..38e4ca08e757 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,6 +27,7 @@
27#include <linux/export.h> 27#include <linux/export.h>
28#include <linux/bug.h> 28#include <linux/bug.h>
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/slab.h>
30 31
31#include <asm/byteorder.h> 32#include <asm/byteorder.h>
32#include <asm/word-at-a-time.h> 33#include <asm/word-at-a-time.h>
diff --git a/lib/test_memcat_p.c b/lib/test_memcat_p.c
new file mode 100644
index 000000000000..849c477d49d0
--- /dev/null
+++ b/lib/test_memcat_p.c
@@ -0,0 +1,115 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Test cases for memcat_p() in lib/memcat_p.c
4 */
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6
7#include <linux/string.h>
8#include <linux/slab.h>
9#include <linux/module.h>
10
11struct test_struct {
12 int num;
13 unsigned int magic;
14};
15
16#define MAGIC 0xf00ff00f
17/* Size of each of the NULL-terminated input arrays */
18#define INPUT_MAX 128
19/* Expected number of non-NULL elements in the output array */
20#define EXPECT (INPUT_MAX * 2 - 2)
21
22static int __init test_memcat_p_init(void)
23{
24 struct test_struct **in0, **in1, **out, **p;
25 int err = -ENOMEM, i, r, total = 0;
26
27 in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL);
28 if (!in0)
29 return err;
30
31 in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL);
32 if (!in1)
33 goto err_free_in0;
34
35 for (i = 0, r = 1; i < INPUT_MAX - 1; i++) {
36 in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL);
37 if (!in0[i])
38 goto err_free_elements;
39
40 in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL);
41 if (!in1[i]) {
42 kfree(in0[i]);
43 goto err_free_elements;
44 }
45
46 /* lifted from test_sort.c */
47 r = (r * 725861) % 6599;
48 in0[i]->num = r;
49 in1[i]->num = -r;
50 in0[i]->magic = MAGIC;
51 in1[i]->magic = MAGIC;
52 }
53
54 in0[i] = in1[i] = NULL;
55
56 out = memcat_p(in0, in1);
57 if (!out)
58 goto err_free_all_elements;
59
60 err = -EINVAL;
61 for (i = 0, p = out; *p && (i < INPUT_MAX * 2 - 1); p++, i++) {
62 total += (*p)->num;
63
64 if ((*p)->magic != MAGIC) {
65 pr_err("test failed: wrong magic at %d: %u\n", i,
66 (*p)->magic);
67 goto err_free_out;
68 }
69 }
70
71 if (total) {
72 pr_err("test failed: expected zero total, got %d\n", total);
73 goto err_free_out;
74 }
75
76 if (i != EXPECT) {
77 pr_err("test failed: expected output size %d, got %d\n",
78 EXPECT, i);
79 goto err_free_out;
80 }
81
82 for (i = 0; i < INPUT_MAX - 1; i++)
83 if (out[i] != in0[i] || out[i + INPUT_MAX - 1] != in1[i]) {
84 pr_err("test failed: wrong element order at %d\n", i);
85 goto err_free_out;
86 }
87
88 err = 0;
89 pr_info("test passed\n");
90
91err_free_out:
92 kfree(out);
93err_free_all_elements:
94 i = INPUT_MAX;
95err_free_elements:
96 for (i--; i >= 0; i--) {
97 kfree(in1[i]);
98 kfree(in0[i]);
99 }
100
101 kfree(in1);
102err_free_in0:
103 kfree(in0);
104
105 return err;
106}
107
108static void __exit test_memcat_p_exit(void)
109{
110}
111
112module_init(test_memcat_p_init);
113module_exit(test_memcat_p_exit);
114
115MODULE_LICENSE("GPL");
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
index 57d0d871dcf7..33e67bd1dc34 100644
--- a/samples/mei/mei-amt-version.c
+++ b/samples/mei/mei-amt-version.c
@@ -370,7 +370,7 @@ static uint32_t amt_host_if_call(struct amt_host_if *acmd,
370 unsigned int expected_sz) 370 unsigned int expected_sz)
371{ 371{
372 uint32_t in_buf_sz; 372 uint32_t in_buf_sz;
373 uint32_t out_buf_sz; 373 ssize_t out_buf_sz;
374 ssize_t written; 374 ssize_t written;
375 uint32_t status; 375 uint32_t status;
376 struct amt_host_if_resp_header *msg_hdr; 376 struct amt_host_if_resp_header *msg_hdr;