author		Linus Torvalds <torvalds@linux-foundation.org>	2015-11-05 01:15:15 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-05 01:15:15 -0500
commit		8e483ed1342a4ea45b70f0f33ac54eff7a33d918 (patch)
tree		66c9f9ad196581966bdb06802e11e9856b1c0779
parent		e880e87488d5bbf630dd716e6de8a53585614568 (diff)
parent		e2d8680741edec84f843f783a7f4a44418b818d7 (diff)
Merge tag 'char-misc-4.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big char/misc driver update for 4.4-rc1.  Lots of
  different driver and subsystem updates, hwtracing being the largest
  with the addition of some new platforms that are now supported.  Full
  details in the shortlog.

  All of these have been in linux-next for a long time with no reported
  issues"

* tag 'char-misc-4.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (181 commits)
  fpga: socfpga: Fix check of return value of devm_request_irq
  lkdtm: fix ACCESS_USERSPACE test
  mcb: Destroy IDA on module unload
  mcb: Do not return zero on error path in mcb_pci_probe()
  mei: bus: set the device name before running fixup
  mei: bus: use correct lock ordering
  mei: Fix debugfs filename in error output
  char: ipmi: ipmi_ssif: Replace timeval with timespec64
  fpga: zynq-fpga: Fix issue with drvdata being overwritten.
  fpga manager: remove unnecessary null pointer checks
  fpga manager: ensure lifetime with of_fpga_mgr_get
  fpga: zynq-fpga: Change fw format to handle bin instead of bit.
  fpga: zynq-fpga: Fix unbalanced clock handling
  misc: sram: partition base address belongs to __iomem space
  coresight: etm3x: adding documentation for sysFS's cpu interface
  vme: 8-bit status/id takes 256 values, not 255
  fpga manager: Adding FPGA Manager support for Xilinx Zynq 7000
  ARM: zynq: dt: Updated devicetree for Zynq 7000 platform.
  ARM: dt: fpga: Added binding docs for Xilinx Zynq FPGA manager.
  ver_linux: proc/modules, limit text processing to 'sed'
  ...
-rw-r--r--  Documentation/ABI/testing/configfs-stp-policy | 48
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x | 83
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth | 49
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc | 33
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-intel_th-devices-pti | 24
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-intel_th-output-devices | 13
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-mei | 7
-rw-r--r--  Documentation/ABI/testing/sysfs-class-fpga-manager | 37
-rw-r--r--  Documentation/ABI/testing/sysfs-class-mic.txt | 29
-rw-r--r--  Documentation/ABI/testing/sysfs-class-stm | 14
-rw-r--r--  Documentation/ABI/testing/sysfs-class-stm_source | 11
-rw-r--r--  Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/misc/sram.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/nvmem/imx-ocotp.txt | 20
-rw-r--r--  Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt | 38
-rw-r--r--  Documentation/devicetree/bindings/nvmem/vf610-ocotp.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/w1/omap-hdq.txt | 7
-rw-r--r--  Documentation/fpga/fpga-mgr.txt | 171
-rw-r--r--  Documentation/ioctl/ioctl-number.txt | 3
-rw-r--r--  Documentation/mic/mic_overview.txt | 31
-rwxr-xr-x  Documentation/mic/mpssd/mpss | 4
-rw-r--r--  Documentation/mic/mpssd/mpssd.c | 362
-rw-r--r--  Documentation/mic/mpssd/mpssd.h | 1
-rw-r--r--  Documentation/trace/intel_th.txt | 99
-rw-r--r--  Documentation/trace/stm.txt | 80
-rw-r--r--  Documentation/w1/masters/omap-hdq | 6
-rw-r--r--  MAINTAINERS | 37
-rw-r--r--  arch/arm/boot/dts/zynq-7000.dtsi | 5
-rw-r--r--  drivers/Kconfig | 6
-rw-r--r--  drivers/Makefile | 3
-rw-r--r--  drivers/char/efirtc.c | 13
-rw-r--r--  drivers/char/hpet.c | 25
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 9
-rw-r--r--  drivers/char/snsc.c | 5
-rw-r--r--  drivers/dma/mic_x100_dma.c | 39
-rw-r--r--  drivers/extcon/extcon-arizona.c | 164
-rw-r--r--  drivers/extcon/extcon-axp288.c | 35
-rw-r--r--  drivers/extcon/extcon-gpio.c | 130
-rw-r--r--  drivers/extcon/extcon-max14577.c | 17
-rw-r--r--  drivers/extcon/extcon-max77693.c | 32
-rw-r--r--  drivers/extcon/extcon-max77843.c | 27
-rw-r--r--  drivers/extcon/extcon-max8997.c | 21
-rw-r--r--  drivers/extcon/extcon-rt8973a.c | 7
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 7
-rw-r--r--  drivers/extcon/extcon.c | 61
-rw-r--r--  drivers/fpga/Kconfig | 29
-rw-r--r--  drivers/fpga/Makefile | 10
-rw-r--r--  drivers/fpga/fpga-mgr.c | 380
-rw-r--r--  drivers/fpga/socfpga.c | 616
-rw-r--r--  drivers/fpga/zynq-fpga.c | 514
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c | 107
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 22
-rw-r--r--  drivers/hwtracing/coresight/coresight.c | 5
-rw-r--r--  drivers/hwtracing/intel_th/Kconfig | 72
-rw-r--r--  drivers/hwtracing/intel_th/Makefile | 18
-rw-r--r--  drivers/hwtracing/intel_th/core.c | 692
-rw-r--r--  drivers/hwtracing/intel_th/debug.c | 36
-rw-r--r--  drivers/hwtracing/intel_th/debug.h | 34
-rw-r--r--  drivers/hwtracing/intel_th/gth.c | 706
-rw-r--r--  drivers/hwtracing/intel_th/gth.h | 66
-rw-r--r--  drivers/hwtracing/intel_th/intel_th.h | 244
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 1509
-rw-r--r--  drivers/hwtracing/intel_th/msu.h | 116
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 86
-rw-r--r--  drivers/hwtracing/intel_th/pti.c | 252
-rw-r--r--  drivers/hwtracing/intel_th/pti.h | 29
-rw-r--r--  drivers/hwtracing/intel_th/sth.c | 259
-rw-r--r--  drivers/hwtracing/intel_th/sth.h | 42
-rw-r--r--  drivers/hwtracing/stm/Kconfig | 26
-rw-r--r--  drivers/hwtracing/stm/Makefile | 9
-rw-r--r--  drivers/hwtracing/stm/console.c | 80
-rw-r--r--  drivers/hwtracing/stm/core.c | 1032
-rw-r--r--  drivers/hwtracing/stm/dummy_stm.c | 66
-rw-r--r--  drivers/hwtracing/stm/policy.c | 529
-rw-r--r--  drivers/hwtracing/stm/stm.h | 87
-rw-r--r--  drivers/mcb/mcb-core.c | 1
-rw-r--r--  drivers/mcb/mcb-pci.c | 1
-rw-r--r--  drivers/memory/fsl-corenet-cf.c | 1
-rw-r--r--  drivers/memory/ti-aemif.c | 1
-rw-r--r--  drivers/mfd/wm5110-tables.c | 6
-rw-r--r--  drivers/misc/Kconfig | 2
-rw-r--r--  drivers/misc/ad525x_dpot-i2c.c | 1
-rw-r--r--  drivers/misc/genwqe/card_base.h | 2
-rw-r--r--  drivers/misc/genwqe/card_ddcb.c | 2
-rw-r--r--  drivers/misc/genwqe/card_dev.c | 4
-rw-r--r--  drivers/misc/genwqe/card_utils.c | 5
-rw-r--r--  drivers/misc/hpilo.c | 6
-rw-r--r--  drivers/misc/kgdbts.c | 10
-rw-r--r--  drivers/misc/lkdtm.c | 8
-rw-r--r--  drivers/misc/mei/amthif.c | 2
-rw-r--r--  drivers/misc/mei/bus-fixup.c | 4
-rw-r--r--  drivers/misc/mei/bus.c | 218
-rw-r--r--  drivers/misc/mei/client.h | 12
-rw-r--r--  drivers/misc/mei/debugfs.c | 2
-rw-r--r--  drivers/misc/mei/hbm.c | 6
-rw-r--r--  drivers/misc/mei/hw-me.c | 4
-rw-r--r--  drivers/misc/mei/init.c | 4
-rw-r--r--  drivers/misc/mei/interrupt.c | 4
-rw-r--r--  drivers/misc/mei/mei_dev.h | 19
-rw-r--r--  drivers/misc/mic/Kconfig | 25
-rw-r--r--  drivers/misc/mic/Makefile | 2
-rw-r--r--  drivers/misc/mic/bus/Makefile | 1
-rw-r--r--  drivers/misc/mic/bus/cosm_bus.c | 141
-rw-r--r--  drivers/misc/mic/bus/cosm_bus.h | 134
-rw-r--r--  drivers/misc/mic/bus/mic_bus.c | 24
-rw-r--r--  drivers/misc/mic/bus/scif_bus.c | 9
-rw-r--r--  drivers/misc/mic/bus/scif_bus.h | 6
-rw-r--r--  drivers/misc/mic/card/mic_device.c | 88
-rw-r--r--  drivers/misc/mic/card/mic_x100.c | 2
-rw-r--r--  drivers/misc/mic/common/mic_dev.h | 13
-rw-r--r--  drivers/misc/mic/cosm/Makefile | 10
-rw-r--r--  drivers/misc/mic/cosm/cosm_debugfs.c | 156
-rw-r--r--  drivers/misc/mic/cosm/cosm_main.c | 388
-rw-r--r--  drivers/misc/mic/cosm/cosm_main.h | 70
-rw-r--r--  drivers/misc/mic/cosm/cosm_scif_server.c | 405
-rw-r--r--  drivers/misc/mic/cosm/cosm_sysfs.c | 461
-rw-r--r--  drivers/misc/mic/cosm_client/Makefile | 7
-rw-r--r--  drivers/misc/mic/cosm_client/cosm_scif_client.c | 275
-rw-r--r--  drivers/misc/mic/host/Makefile | 1
-rw-r--r--  drivers/misc/mic/host/mic_boot.c | 317
-rw-r--r--  drivers/misc/mic/host/mic_debugfs.c | 114
-rw-r--r--  drivers/misc/mic/host/mic_device.h | 88
-rw-r--r--  drivers/misc/mic/host/mic_fops.c | 4
-rw-r--r--  drivers/misc/mic/host/mic_intr.c | 46
-rw-r--r--  drivers/misc/mic/host/mic_main.c | 223
-rw-r--r--  drivers/misc/mic/host/mic_smpt.c | 30
-rw-r--r--  drivers/misc/mic/host/mic_sysfs.c | 459
-rw-r--r--  drivers/misc/mic/host/mic_virtio.c | 17
-rw-r--r--  drivers/misc/mic/host/mic_virtio.h | 2
-rw-r--r--  drivers/misc/mic/host/mic_x100.c | 46
-rw-r--r--  drivers/misc/mic/scif/Makefile | 5
-rw-r--r--  drivers/misc/mic/scif/scif_api.c | 234
-rw-r--r--  drivers/misc/mic/scif/scif_debugfs.c | 85
-rw-r--r--  drivers/misc/mic/scif/scif_dma.c | 1979
-rw-r--r--  drivers/misc/mic/scif/scif_epd.c | 26
-rw-r--r--  drivers/misc/mic/scif/scif_epd.h | 50
-rw-r--r--  drivers/misc/mic/scif/scif_fd.c | 178
-rw-r--r--  drivers/misc/mic/scif/scif_fence.c | 771
-rw-r--r--  drivers/misc/mic/scif/scif_main.c | 111
-rw-r--r--  drivers/misc/mic/scif/scif_main.h | 37
-rw-r--r--  drivers/misc/mic/scif/scif_map.h | 25
-rw-r--r--  drivers/misc/mic/scif/scif_mmap.c | 699
-rw-r--r--  drivers/misc/mic/scif/scif_nm.c | 20
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.c | 149
-rw-r--r--  drivers/misc/mic/scif/scif_nodeqp.h | 42
-rw-r--r--  drivers/misc/mic/scif/scif_peer_bus.c | 179
-rw-r--r--  drivers/misc/mic/scif/scif_peer_bus.h | 42
-rw-r--r--  drivers/misc/mic/scif/scif_rma.c | 1775
-rw-r--r--  drivers/misc/mic/scif/scif_rma.h | 464
-rw-r--r--  drivers/misc/mic/scif/scif_rma_list.c | 291
-rw-r--r--  drivers/misc/mic/scif/scif_rma_list.h | 57
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.c | 6
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.h | 1
-rw-r--r--  drivers/misc/sgi-gru/grukdump.c | 16
-rw-r--r--  drivers/misc/sgi-gru/grukservices.c | 15
-rw-r--r--  drivers/misc/sgi-gru/grumain.c | 4
-rw-r--r--  drivers/misc/sgi-gru/grutlbpurge.c | 25
-rw-r--r--  drivers/misc/sram.c | 196
-rw-r--r--  drivers/misc/ti-st/st_core.c | 18
-rw-r--r--  drivers/misc/vmw_balloon.c | 843
-rw-r--r--  drivers/misc/vmw_vmci/vmci_datagram.c | 3
-rw-r--r--  drivers/nfc/mei_phy.c | 31
-rw-r--r--  drivers/nfc/mei_phy.h | 4
-rw-r--r--  drivers/nfc/microread/mei.c | 14
-rw-r--r--  drivers/nfc/pn544/mei.c | 14
-rw-r--r--  drivers/nvmem/Kconfig | 42
-rw-r--r--  drivers/nvmem/Makefile | 8
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 154
-rw-r--r--  drivers/nvmem/mxs-ocotp.c | 257
-rw-r--r--  drivers/nvmem/rockchip-efuse.c | 186
-rw-r--r--  drivers/nvmem/vf610-ocotp.c | 302
-rw-r--r--  drivers/pcmcia/ds.c | 4
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 11
-rw-r--r--  drivers/spmi/spmi.c | 5
-rw-r--r--  drivers/uio/uio.c | 1
-rw-r--r--  drivers/uio/uio_fsl_elbc_gpcm.c | 1
-rw-r--r--  drivers/vme/vme_bridge.h | 4
-rw-r--r--  drivers/w1/masters/omap_hdq.c | 224
-rw-r--r--  drivers/w1/masters/w1-gpio.c | 20
-rw-r--r--  drivers/w1/w1_int.c | 3
-rw-r--r--  include/dt-bindings/mfd/arizona.h | 2
-rw-r--r--  include/linux/coresight.h | 2
-rw-r--r--  include/linux/dmaengine.h | 4
-rw-r--r--  include/linux/extcon.h | 62
-rw-r--r--  include/linux/extcon/extcon-gpio.h | 24
-rw-r--r--  include/linux/fpga/fpga-mgr.h | 127
-rw-r--r--  include/linux/mei_cl_bus.h | 40
-rw-r--r--  include/linux/mfd/arizona/pdata.h | 6
-rw-r--r--  include/linux/mfd/arizona/registers.h | 14
-rw-r--r--  include/linux/mic_bus.h | 3
-rw-r--r--  include/linux/mod_devicetable.h | 7
-rw-r--r--  include/linux/msm_mdp.h | 79
-rw-r--r--  include/linux/scif.h | 366
-rw-r--r--  include/linux/spmi.h | 4
-rw-r--r--  include/linux/stm.h | 126
-rw-r--r--  include/linux/ti_wilink_st.h | 1
-rw-r--r--  include/linux/vme.h | 3
-rw-r--r--  include/uapi/linux/mic_common.h | 16
-rw-r--r--  include/uapi/linux/scif_ioctl.h | 85
-rw-r--r--  include/uapi/linux/stm.h | 50
-rw-r--r--  scripts/mod/devicetable-offsets.c | 1
-rw-r--r--  scripts/mod/file2alias.c | 12
-rwxr-xr-x  scripts/ver_linux | 223
204 files changed, 21697 insertions, 2645 deletions
diff --git a/Documentation/ABI/testing/configfs-stp-policy b/Documentation/ABI/testing/configfs-stp-policy
new file mode 100644
index 000000000000..421ce6825c66
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-stp-policy
@@ -0,0 +1,48 @@
+What:		/config/stp-policy
+Date:		June 2015
+KernelVersion:	4.3
+Description:
+		This group contains policies mandating Master/Channel allocation
+		for software sources wishing to send trace data over an STM
+		device.
+
+What:		/config/stp-policy/<device>.<policy>
+Date:		June 2015
+KernelVersion:	4.3
+Description:
+		This group is the root of a policy; its name is a concatenation
+		of the stm device name to which this policy applies and an
+		arbitrary string. If the <device> part doesn't match an existing
+		stm device, mkdir will fail with ENODEV; if that device already
+		has a policy assigned to it, mkdir will fail with EBUSY.
+
+What:		/config/stp-policy/<device>.<policy>/device
+Date:		June 2015
+KernelVersion:	4.3
+Description:
+		STM device to which this policy applies, read only. Same as the
+		<device> component of its parent directory.
+
+What:		/config/stp-policy/<device>.<policy>/<node>
+Date:		June 2015
+KernelVersion:	4.3
+Description:
+		A policy node is a string identifier that software clients will
+		use to request a master/channel to be allocated and assigned to
+		them.
+
+What:		/config/stp-policy/<device>.<policy>/<node>/masters
+Date:		June 2015
+KernelVersion:	4.3
+Description:
+		Range of masters from which to allocate for users of this node.
+		Write two numbers: the first master and the last master number.
+
+What:		/config/stp-policy/<device>.<policy>/<node>/channels
+Date:		June 2015
+KernelVersion:	4.3
+Description:
+		Range of channels from which to allocate for users of this node.
+		Write two numbers: the first channel and the last channel
+		number.
+
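For a concrete sense of the flow this group implies, here is a minimal shell sketch; the device name "dummy_stm", the policy name "my-policy", the node name "default", and the space-separated two-number write format are illustrative assumptions, not specifics taken from the text above:

	# create a policy for an existing stm device ("<device>.<policy>";
	# mkdir fails with ENODEV if the device part matches no stm device)
	mkdir /config/stp-policy/dummy_stm.my-policy
	# add a policy node that software clients will request by name
	mkdir /config/stp-policy/dummy_stm.my-policy/default
	# ranges of masters and channels to allocate for users of this node
	echo "32 63" > /config/stp-policy/dummy_stm.my-policy/default/masters
	echo "0 15"  > /config/stp-policy/dummy_stm.my-policy/default/channels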
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x
index d72ca1736ba4..924265a1295d 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x
@@ -8,13 +8,6 @@ Description:	(RW) Enable/disable tracing on this specific trace entity.
 		of coresight components linking the source to the sink is
 		configured and managed automatically by the coresight framework.
 
-What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/status
-Date:		November 2014
-KernelVersion:	3.19
-Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
-Description:	(R) List various control and status registers. The specific
-		layout and content is driver specific.
-
 What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/addr_idx
 Date:		November 2014
 KernelVersion:	3.19
@@ -251,3 +244,79 @@ Date:		November 2014
 KernelVersion:	3.19
 Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
 Description:	(RW) Define the event that controls the trigger.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/cpu
+Date:		October 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Holds the cpu number this tracer is affined to.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmccr
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM Configuration Code register
+		(0x004). The value is read directly from the HW.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmccer
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM Configuration Code Extension
+		register (0x1e8). The value is read directly from the HW.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmscr
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM System Configuration
+		register (0x014). The value is read directly from the HW.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmidr
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM ID register (0x1e4). The
+		value is read directly from the HW.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmcr
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM Main Control register (0x000).
+		The value is read directly from the HW.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmtraceidr
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM Trace ID register (0x200).
+		The value is read directly from the HW.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmteevr
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM Trace Enable Event register
+		(0x020). The value is read directly from the HW.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmtsscr
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM Trace Start/Stop Control
+		register (0x018). The value is read directly from the HW.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmtecr1
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM Enable Control #1
+		register (0x024). The value is read directly from the HW.
+
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/mgmt/etmtecr2
+Date:		September 2015
+KernelVersion:	4.4
+Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RO) Print the content of the ETM Enable Control #2
+		register (0x01c). The value is read directly from the HW.
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth
new file mode 100644
index 000000000000..22d0843849a8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth
@@ -0,0 +1,49 @@
+What:		/sys/bus/intel_th/devices/<intel_th_id>-gth/masters/*
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Configure output ports for STP masters. Writing -1
+		disables a master; any other value assigns it to the output
+		port with that number.
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-gth/outputs/[0-7]_port
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RO) Output port type:
+		  0: not present,
+		  1: MSU (Memory Storage Unit),
+		  2: CTP (Common Trace Port),
+		  4: PTI (MIPI PTI).
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-gth/outputs/[0-7]_drop
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Data retention policy setting: keep (0) or drop (1)
+		incoming data while output port is in reset.
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-gth/outputs/[0-7]_null
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) STP NULL packet generation: enabled (1) or disabled (0).
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-gth/outputs/[0-7]_flush
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Force flush data from byte packing buffer for the output
+		port.
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-gth/outputs/[0-7]_reset
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RO) Output port is in reset (1).
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-gth/outputs/[0-7]_smcfreq
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) STP sync packet frequency for the port. Specifies the
+		number of clocks between maintenance packets.
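As a sketch of how these attributes combine (the instance name "0-gth", the master number, and the port index here are all hypothetical):

	# send master 32 to output port 1; writing -1 would disable it
	echo 1 > /sys/bus/intel_th/devices/0-gth/masters/32
	# drop incoming data for port 1 while that port is in reset
	echo 1 > /sys/bus/intel_th/devices/0-gth/outputs/1_drop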
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
new file mode 100644
index 000000000000..b940c5d91cf7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
@@ -0,0 +1,33 @@
+What:		/sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/wrap
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Configure MSC buffer wrapping. 1 == wrapping enabled.
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/mode
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Configure MSC operating mode:
+		  - "single", for contiguous buffer mode (high-order alloc);
+		  - "multi", for multiblock mode;
+		  - "ExI", for DCI handler mode;
+		  - "debug", for debug mode.
+		If the operating mode changes, the existing buffer is
+		deallocated, provided there are no active users and tracing
+		is not enabled; otherwise the write will fail.
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/nr_pages
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Configure MSC buffer size for "single" or "multi" modes.
+		In single mode, this is a single number of pages, which has
+		to be a power of 2. In multiblock mode, this is a
+		comma-separated list of numbers of pages for each window to
+		be allocated. The number of windows is not limited.
+		Writing to this file deallocates the existing buffer (provided
+		there are no active users and tracing is not enabled) and then
+		allocates a new one.
+
+
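A plausible configuration sequence for one MSC, assuming a hypothetical instance "0-msc0" (per the description above, the mode change only succeeds while the buffer has no active users and tracing is disabled):

	echo multi > /sys/bus/intel_th/devices/0-msc0/mode
	# two windows of 64 pages each in multiblock mode
	echo 64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
	echo 1     > /sys/bus/intel_th/devices/0-msc0/wrap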
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-pti b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-pti
new file mode 100644
index 000000000000..df0b24fd0218
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-pti
@@ -0,0 +1,24 @@
+What:		/sys/bus/intel_th/devices/<intel_th_id>-pti/mode
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Configure PTI output width. Currently supported values
+		are 4, 8, 12, 16.
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-pti/freerunning_clock
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) 0: PTI trace clock acts as a strobe which only toggles
+		when there is trace data to send. 1: PTI trace clock is a
+		free-running clock.
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-pti/clock_divider
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Configure PTI port clock divider:
+		  - 0: Intel TH clock rate,
+		  - 1: 1/2 Intel TH clock rate,
+		  - 2: 1/4 Intel TH clock rate,
+		  - 3: 1/8 Intel TH clock rate.
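Putting the three attributes together (the instance name "0-pti" is an assumption):

	# 16-bit wide output, free-running clock at 1/4 the Intel TH rate
	echo 16 > /sys/bus/intel_th/devices/0-pti/mode
	echo 1  > /sys/bus/intel_th/devices/0-pti/freerunning_clock
	echo 2  > /sys/bus/intel_th/devices/0-pti/clock_divider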
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
new file mode 100644
index 000000000000..4d48a9451866
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices
@@ -0,0 +1,13 @@
+What:		/sys/bus/intel_th/devices/<intel_th_id>-<device><id>/active
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Writes of 1 or 0 enable or disable trace output to this
+		output device. Reads return current status.
+
+What:		/sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/port
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RO) Port number, corresponding to this output device on the
+		switch (GTH).
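For example, to find which GTH port an output device sits on and start tracing to it (the "0-msc0" instance is illustrative):

	cat /sys/bus/intel_th/devices/0-msc0/port
	echo 1 > /sys/bus/intel_th/devices/0-msc0/active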
diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei
index 20e4d1638bac..6bd45346ac7e 100644
--- a/Documentation/ABI/testing/sysfs-bus-mei
+++ b/Documentation/ABI/testing/sysfs-bus-mei
@@ -19,3 +19,10 @@ KernelVersion: 4.2
 Contact:	Tomas Winkler <tomas.winkler@intel.com>
 Description:	Stores mei client device uuid
 		Format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+
+What:		/sys/bus/mei/devices/.../version
+Date:		Aug 2015
+KernelVersion:	4.3
+Contact:	Tomas Winkler <tomas.winkler@intel.com>
+Description:	Stores mei client protocol version
+		Format: %d
diff --git a/Documentation/ABI/testing/sysfs-class-fpga-manager b/Documentation/ABI/testing/sysfs-class-fpga-manager
new file mode 100644
index 000000000000..23056c532fdd
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-fpga-manager
@@ -0,0 +1,37 @@
+What:		/sys/class/fpga_manager/<fpga>/name
+Date:		August 2015
+KernelVersion:	4.3
+Contact:	Alan Tull <atull@opensource.altera.com>
+Description:	Name of the low level fpga manager driver.
+
+What:		/sys/class/fpga_manager/<fpga>/state
+Date:		August 2015
+KernelVersion:	4.3
+Contact:	Alan Tull <atull@opensource.altera.com>
+Description:	Read fpga manager state as a string.
+		The intent is to provide enough detail that if something goes
+		wrong during FPGA programming (something that the driver can't
+		fix) then userspace can know, i.e. if the firmware request
+		fails, that could be due to not being able to find the firmware
+		file.
+
+		This is a superset of FPGA states and fpga manager driver
+		states. The fpga manager driver is walking through these steps
+		to get the FPGA into a known operating state. It's a sequence,
+		though some steps may get skipped. Valid FPGA states will vary
+		by manufacturer; this is a superset.
+
+		* unknown		 = can't determine state
+		* power off		 = FPGA power is off
+		* power up		 = FPGA reports power is up
+		* reset			 = FPGA held in reset state
+		* firmware request	 = firmware class request in progress
+		* firmware request error = firmware request failed
+		* write init		 = preparing FPGA for programming
+		* write init error	 = error while preparing FPGA for
+					   programming
+		* write			 = FPGA ready to receive image data
+		* write error		 = error while programming
+		* write complete	 = doing post programming steps
+		* write complete error	 = error while doing post programming
+		* operating		 = FPGA is programmed and operating
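A userspace health check built on this ABI could look like the following; the "fpga0" instance name is an assumption:

	cat /sys/class/fpga_manager/fpga0/name
	# expect "operating" after a successful load, or one of the
	# error states listed above if programming failed
	cat /sys/class/fpga_manager/fpga0/state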
diff --git a/Documentation/ABI/testing/sysfs-class-mic.txt b/Documentation/ABI/testing/sysfs-class-mic.txt
index 13f48afc534f..d45eed2bf128 100644
--- a/Documentation/ABI/testing/sysfs-class-mic.txt
+++ b/Documentation/ABI/testing/sysfs-class-mic.txt
@@ -41,18 +41,15 @@ Description:
 		When read, this entry provides the current state of an Intel
 		MIC device in the context of the card OS. Possible values that
 		will be read are:
-		"offline" - The MIC device is ready to boot the card OS. On
+		"ready" - The MIC device is ready to boot the card OS. On
 		reading this entry after an OSPM resume, a "boot" has to be
 		written to this entry if the card was previously shutdown
 		during OSPM suspend.
-		"online" - The MIC device has initiated booting a card OS.
+		"booting" - The MIC device has initiated booting a card OS.
+		"online" - The MIC device has completed boot and is online
 		"shutting_down" - The card OS is shutting down.
+		"resetting" - A reset has been initiated for the MIC device
 		"reset_failed" - The MIC device has failed to reset.
-		"suspending" - The MIC device is currently being prepared for
-		suspend. On reading this entry, a "suspend" has to be written
-		to the state sysfs entry to ensure the card is shutdown during
-		OSPM suspend.
-		"suspended" - The MIC device has been suspended.
 
 		When written, this sysfs entry triggers different state change
 		operations depending upon the current state of the card OS.
@@ -62,8 +59,6 @@ Description:
 		sysfs entries.
 		"reset" - Initiates device reset.
 		"shutdown" - Initiates card OS shutdown.
-		"suspend" - Initiates card OS shutdown and also marks the card
-		as suspended.
 
 What:		/sys/class/mic/mic(x)/shutdown_status
 Date:		October 2013
@@ -126,7 +121,7 @@ Description:
 		the card. This sysfs entry can be written with the following
 		valid strings:
 		a) linux - Boot a Linux image.
-		b) elf - Boot an elf image for flash updates.
+		b) flash - Boot an image for flash updates.
 
 What:		/sys/class/mic/mic(x)/log_buf_addr
 Date:		October 2013
@@ -155,3 +150,17 @@ Description:
 		daemon to set the log buffer length address. The correct log
 		buffer length address to be written can be found in the
 		System.map file of the card OS.
+
+What:		/sys/class/mic/mic(x)/heartbeat_enable
+Date:		March 2015
+KernelVersion:	3.20
+Contact:	Ashutosh Dixit <ashutosh.dixit@intel.com>
+Description:
+		The MIC drivers detect and inform user space about card crashes
+		via a heartbeat mechanism (see the description of
+		shutdown_status above). User space can turn off this
+		notification by setting heartbeat_enable to 0 and enable it by
+		setting this entry to 1. If this notification is disabled it is
+		the responsibility of user space to detect card crashes via
+		alternative means such as a network ping. This setting is
+		enabled by default.
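Tying the state machine above together, a minimal boot interaction could look like this ("mic0" stands in for mic(x)):

	cat /sys/class/mic/mic0/state              # expect "ready"
	echo boot > /sys/class/mic/mic0/state
	# optionally hand crash detection over to userspace
	echo 0 > /sys/class/mic/mic0/heartbeat_enable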
diff --git a/Documentation/ABI/testing/sysfs-class-stm b/Documentation/ABI/testing/sysfs-class-stm
new file mode 100644
index 000000000000..c9aa4f3fc9a7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-stm
@@ -0,0 +1,14 @@
+What:		/sys/class/stm/<stm>/masters
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:
+		Shows the first and last master numbers that are available
+		to software on this STM device.
+
+What:		/sys/class/stm/<stm>/channels
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:
+		Shows the number of channels per master on this STM device.
diff --git a/Documentation/ABI/testing/sysfs-class-stm_source b/Documentation/ABI/testing/sysfs-class-stm_source
new file mode 100644
index 000000000000..57b8dd39bbf7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-stm_source
@@ -0,0 +1,11 @@
+What:		/sys/class/stm_source/<stm_source>/stm_source_link
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:
+		stm_source device linkage to an stm device, to which its
+		tracing data is directed. Reads return the existing connection
+		or "<none>" if this stm_source is not connected to any stm
+		device yet. Write an existing (registered) stm device's name
+		here to connect that device. If a device is already connected
+		to this stm_source, it will first be disconnected.
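A linking sketch, assuming a registered stm device named "dummy_stm" and an stm_source called "console" (both names illustrative):

	cat /sys/class/stm_source/console/stm_source_link    # "<none>"
	echo dummy_stm > /sys/class/stm_source/console/stm_source_link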
diff --git a/Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt
new file mode 100644
index 000000000000..7018aa896835
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt
@@ -0,0 +1,19 @@
+Xilinx Zynq FPGA Manager
+
+Required properties:
+- compatible:		should contain "xlnx,zynq-devcfg-1.0"
+- reg:			base address and size for memory mapped io
+- interrupts:		interrupt for the FPGA manager device
+- clocks:		phandle for the clock required for operation
+- clock-names:		name for the clock, should be "ref_clk"
+- syscon:		phandle for access to SLCR registers
+
+Example:
+	devcfg: devcfg@f8007000 {
+		compatible = "xlnx,zynq-devcfg-1.0";
+		reg = <0xf8007000 0x100>;
+		interrupts = <0 8 4>;
+		clocks = <&clkc 12>;
+		clock-names = "ref_clk";
+		syscon = <&slcr>;
+	};
diff --git a/Documentation/devicetree/bindings/misc/sram.txt b/Documentation/devicetree/bindings/misc/sram.txt
index 36cbe5aea990..42ee9438b771 100644
--- a/Documentation/devicetree/bindings/misc/sram.txt
+++ b/Documentation/devicetree/bindings/misc/sram.txt
@@ -33,6 +33,12 @@ Optional properties in the area nodes:
 
 - compatible : standard definition, should contain a vendor specific string
   in the form <vendor>,[<device>-]<usage>
+- pool : indicates that the particular reserved SRAM area is addressable
+  and in use by another device or devices
+- export : indicates that the reserved SRAM area may be accessed outside
+  of the kernel, e.g. by bootloader or userspace
+- label : the name for the reserved partition, if omitted, the label
+  is taken from the node name excluding the unit address.
 
 Example:
 
@@ -48,4 +54,14 @@ sram: sram@5c000000 {
 		compatible = "socvendor,smp-sram";
 		reg = <0x100 0x50>;
 	};
+
+	device-sram@1000 {
+		reg = <0x1000 0x1000>;
+		pool;
+	};
+
+	exported@20000 {
+		reg = <0x20000 0x20000>;
+		export;
+	};
 };
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
new file mode 100644
index 000000000000..383d5889e95a
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -0,0 +1,20 @@
+Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
+
+This binding represents the on-chip eFuse OTP controller found on
+i.MX6Q/D, i.MX6DL/S, i.MX6SL, and i.MX6SX SoCs.
+
+Required properties:
+- compatible: should be one of
+	"fsl,imx6q-ocotp" (i.MX6Q/D/DL/S),
+	"fsl,imx6sl-ocotp" (i.MX6SL), or
+	"fsl,imx6sx-ocotp" (i.MX6SX), followed by "syscon".
+- reg: Should contain the register base and length.
+- clocks: Should contain a phandle pointing to the gated peripheral clock.
+
+Example:
+
+	ocotp: ocotp@021bc000 {
+		compatible = "fsl,imx6q-ocotp", "syscon";
+		reg = <0x021bc000 0x4000>;
+		clocks = <&clks IMX6QDL_CLK_IIM>;
+	};
diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt
new file mode 100644
index 000000000000..daebce9e6b07
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt
@@ -0,0 +1,25 @@
+On-Chip OTP Memory for Freescale i.MX23/i.MX28
+
+Required properties:
+- compatible :
+	- "fsl,imx23-ocotp" for i.MX23
+	- "fsl,imx28-ocotp" for i.MX28
+- #address-cells : Should be 1
+- #size-cells : Should be 1
+- reg : Address and length of OTP controller registers
+- clocks : Should contain a reference to the hbus clock
+
+= Data cells =
+Data cells are child nodes of mxs-ocotp, with bindings as described in
+bindings/nvmem/nvmem.txt.
+
+Example for i.MX28:
+
+	ocotp: ocotp@8002c000 {
+		compatible = "fsl,imx28-ocotp", "fsl,ocotp";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x8002c000 0x2000>;
+		clocks = <&clks 25>;
+		status = "okay";
+	};
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
new file mode 100644
index 000000000000..8f86ab3b1046
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
@@ -0,0 +1,38 @@
+= Rockchip eFuse device tree bindings =
+
+Required properties:
+- compatible: Should be "rockchip,rockchip-efuse"
+- reg: Should contain the registers location and exact eFuse size
+- clocks: Should be the clock id of the eFuse
+- clock-names: Should be "pclk_efuse"
+
+= Data cells =
+Data cells are child nodes of the eFuse node, with bindings as described
+in bindings/nvmem/nvmem.txt.
+
+Example:
+
+	efuse: efuse@ffb40000 {
+		compatible = "rockchip,rockchip-efuse";
+		reg = <0xffb40000 0x20>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		clocks = <&cru PCLK_EFUSE256>;
+		clock-names = "pclk_efuse";
+
+		/* Data cells */
+		cpu_leakage: cpu_leakage {
+			reg = <0x17 0x1>;
+		};
+	};
+
+= Data consumers =
+Data consumers are device nodes which consume nvmem data cells.
+
+Example:
+
+	cpu_leakage {
+		...
+		nvmem-cells = <&cpu_leakage>;
+		nvmem-cell-names = "cpu_leakage";
+	};
diff --git a/Documentation/devicetree/bindings/nvmem/vf610-ocotp.txt b/Documentation/devicetree/bindings/nvmem/vf610-ocotp.txt
new file mode 100644
index 000000000000..56ed481c3e26
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/vf610-ocotp.txt
@@ -0,0 +1,19 @@
+On-Chip OTP Memory for Freescale Vybrid
+
+Required Properties:
+  compatible:
+  - "fsl,vf610-ocotp" for VF5xx/VF6xx
+  #address-cells : Should be 1
+  #size-cells : Should be 1
+  reg : Address and length of OTP controller and fuse map registers
+  clocks : ipg clock we associate with the OCOTP peripheral
+
+Example for Vybrid VF5xx/VF6xx:
+
+	ocotp: ocotp@400a5000 {
+		compatible = "fsl,vf610-ocotp";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x400a5000 0xCF0>;
+		clocks = <&clks VF610_CLK_OCOTP>;
+	};
diff --git a/Documentation/devicetree/bindings/w1/omap-hdq.txt b/Documentation/devicetree/bindings/w1/omap-hdq.txt
index fef794741bd1..913c5f91a0f9 100644
--- a/Documentation/devicetree/bindings/w1/omap-hdq.txt
+++ b/Documentation/devicetree/bindings/w1/omap-hdq.txt
@@ -1,11 +1,15 @@
 * OMAP HDQ One wire bus master controller
 
 Required properties:
-- compatible : should be "ti,omap3-1w"
+- compatible : should be "ti,omap3-1w" or "ti,am4372-hdq"
 - reg : Address and length of the register set for the device
 - interrupts : interrupt line.
 - ti,hwmods : "hdq1w"
 
+Optional properties:
+- ti,mode: should be "hdq" for HDQ mode, "1w" for one-wire mode.
+  If not specified, HDQ mode is implied.
+
 Example:
 
 - From omap3.dtsi
@@ -14,4 +18,5 @@ Example:
 		reg = <0x480b2000 0x1000>;
 		interrupts = <58>;
 		ti,hwmods = "hdq1w";
+		ti,mode = "hdq";
 	};
diff --git a/Documentation/fpga/fpga-mgr.txt b/Documentation/fpga/fpga-mgr.txt
new file mode 100644
index 000000000000..ce3e84fa9023
--- /dev/null
+++ b/Documentation/fpga/fpga-mgr.txt
@@ -0,0 +1,171 @@
+FPGA Manager Core
+
+Alan Tull 2015
+
+Overview
+========
+
+The FPGA manager core exports a set of functions for programming an FPGA with
+an image. The API is manufacturer agnostic. All manufacturer specifics are
+hidden away in a low level driver which registers a set of ops with the core.
+The FPGA image data itself is very manufacturer specific, but for our purposes
+it's just binary data. The FPGA manager core won't parse it.
+
+
+API Functions:
+==============
+
+To program the FPGA from a file or from a buffer:
+-------------------------------------------------
+
+	int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
+			      const char *buf, size_t count);
+
+Load the FPGA from an image which exists as a buffer in memory.
+
+	int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+				   const char *image_name);
+
+Load the FPGA from an image which exists as a file. The image file must be on
+the firmware search path (see the firmware class documentation).
+
+For both these functions, flags == 0 for normal full reconfiguration or
+FPGA_MGR_PARTIAL_RECONFIG for partial reconfiguration. If successful, the FPGA
+ends up in operating mode. These functions return 0 on success or a negative
+error code.
+
+
+To get/put a reference to an FPGA manager:
+------------------------------------------
+
+	struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
+
+	void fpga_mgr_put(struct fpga_manager *mgr);
+
+Given a DT node, get an exclusive reference to an FPGA manager or release
+the reference.
+
+
+To register or unregister the low level FPGA-specific driver:
+-------------------------------------------------------------
+
+	int fpga_mgr_register(struct device *dev, const char *name,
+			      const struct fpga_manager_ops *mops,
+			      void *priv);
+
+	void fpga_mgr_unregister(struct device *dev);
+
+Use of these two functions is described below in "How to support a new FPGA
+device."
+
+
+How to write an image buffer to a supported FPGA
+================================================
+/* Include to get the API */
+#include <linux/fpga/fpga-mgr.h>
+
+/* device node that specifies the FPGA manager to use */
+struct device_node *mgr_node = ...
+
+/* FPGA image is in this buffer. count is the size of the buffer. */
+char *buf = ...
+int count = ...
+
+/* flags indicates whether to do full or partial reconfiguration */
+int flags = 0;
+
+int ret;
+
+/* Get exclusive control of FPGA manager */
+struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
+
+/* Load the buffer to the FPGA */
+ret = fpga_mgr_buf_load(mgr, flags, buf, count);
+
+/* Release the FPGA manager */
+fpga_mgr_put(mgr);
+
+
+How to write an image file to a supported FPGA
+==============================================
+/* Include to get the API */
+#include <linux/fpga/fpga-mgr.h>
+
+/* device node that specifies the FPGA manager to use */
+struct device_node *mgr_node = ...
+
+/* FPGA image is in this file which is in the firmware search path */
+const char *path = "fpga-image-9.rbf";
+
+/* flags indicates whether to do full or partial reconfiguration */
+int flags = 0;
+
+int ret;
+
+/* Get exclusive control of FPGA manager */
+struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
+
+/* Get the firmware image (path) and load it to the FPGA */
+ret = fpga_mgr_firmware_load(mgr, flags, path);
+
+/* Release the FPGA manager */
+fpga_mgr_put(mgr);
+
+
+How to support a new FPGA device
+================================
+To add another FPGA manager, write a driver that implements a set of ops. The
+probe function calls fpga_mgr_register(), such as:
+
+static const struct fpga_manager_ops socfpga_fpga_ops = {
+	.write_init = socfpga_fpga_ops_configure_init,
+	.write = socfpga_fpga_ops_configure_write,
+	.write_complete = socfpga_fpga_ops_configure_complete,
+	.state = socfpga_fpga_ops_state,
+};
+
+static int socfpga_fpga_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct socfpga_fpga_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/* ... do ioremaps, get interrupts, etc. and save
+	   them in priv... */
+
+	return fpga_mgr_register(dev, "Altera SOCFPGA FPGA Manager",
+				 &socfpga_fpga_ops, priv);
+}
+
+static int socfpga_fpga_remove(struct platform_device *pdev)
+{
+	fpga_mgr_unregister(&pdev->dev);
+
+	return 0;
+}
+
+
+The ops will implement whatever device specific register writes are needed to
+do the programming sequence for this particular FPGA. These ops return 0 for
+success or negative error codes otherwise.
+
+The programming sequence is:
+ 1. .write_init
+ 2. .write (may be called once or multiple times)
+ 3. .write_complete
+
+The .write_init function will prepare the FPGA to receive the image data.
+
+The .write function writes a buffer to the FPGA. The buffer may contain the
+whole FPGA image or may be a smaller chunk of an FPGA image. In the latter
+case, this function is called multiple times for successive chunks.
+
+The .write_complete function is called after all the image data has been
+written, to put the FPGA into operating mode.
+
+The ops include a .state function which will read the hardware FPGA manager
+and return a code of type enum fpga_mgr_states. It doesn't result in a change
+in hardware state.
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 8a44d44cf901..91261a32a573 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -81,6 +81,9 @@ Code Seq#(hex) Include File Comments
 0x22	all	scsi/sg.h
 '#'	00-3F	IEEE 1394 Subsystem	Block for the entire subsystem
 '$'	00-0F	linux/perf_counter.h, linux/perf_event.h
+'%'	00-0F	include/uapi/linux/stm.h
+			System Trace Module subsystem
+			<mailto:alexander.shishkin@linux.intel.com>
 '&'	00-07	drivers/firewire/nosy-user.h
 '1'	00-1F	<linux/timepps.h>	PPS kit from Ulrich Windl
 			<ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/>
diff --git a/Documentation/mic/mic_overview.txt b/Documentation/mic/mic_overview.txt
index 1a2f2c8ec59e..73f44fc3e715 100644
--- a/Documentation/mic/mic_overview.txt
+++ b/Documentation/mic/mic_overview.txt
@@ -28,6 +28,10 @@ The Symmetric Communication Interface (SCIF (pronounced as skiff)) is a
 low level communications API across PCIe currently implemented for MIC.
 More details are available at scif_overview.txt.
 
+The Coprocessor State Management (COSM) driver on the host allows for
+boot, shutdown and reset of Intel MIC devices. It communicates with a COSM
+"client" driver on the MIC cards over SCIF to perform these functions.
+
 Here is a block diagram of the various components described above. The
 virtio backends are situated on the host rather than the card given better
 single threaded performance for the host compared to MIC, the ability of
@@ -51,19 +55,20 @@ the fact that the virtio block storage backend can only be on the host.
 |           |       | Virtio over PCIe IOCTLs  |
 |           |       +--------------------------+
 +-----------+       |       |                       +-----------+
-| MIC DMA   |       |  +----------+ | +-----------+ | MIC DMA   |
-| Driver    |       |  |  SCIF    | | |   SCIF    | | Driver    |
-+-----------+       |  +----------+ | +-----------+ +-----------+
-      |             |       |       |       |             |
-+---------------+   | +-----+-----+ | +-----+-----+ +---------------+
-|MIC virtual Bus|   | |SCIF HW Bus| | |SCIF HW BUS| |MIC virtual Bus|
-+---------------+   | +-----------+ | +-----+-----+ +---------------+
-      |             |       |       |       |             |
-      |   +--------------+  |       |       |  +---------------+
-      |   |Intel MIC     |  |       |       |  |Intel MIC      |
-      +---|Card Driver   +--+       |       |  |Host Driver    |
-          +--------------+          |  +----+---------------+-----+
-                 |                  |       |
+| MIC DMA   |       |  +------+     |  +------+ +------+  | MIC DMA   |
+| Driver    |       |  | SCIF |     |  | SCIF | | COSM |  | Driver    |
++-----------+       |  +------+     |  +------+ +--+---+  +-----------+
+      |             |      |        |      |       |            |
++---------------+   |  +------+     |  +--+---+ +--+---+  +----------------+
+|MIC virtual Bus|   |  |SCIF  |     |  |SCIF  | | COSM |  |MIC virtual Bus |
++---------------+   |  |HW Bus|     |  |HW Bus| | Bus  |  +----------------+
+      |             |  +------+     |  +--+---+ +------+        |
+      |             |      |        |      |       |            |
+      |   +-----------+---+|        |      | +---------------+  |
+      |   |Intel MIC      ||        |      | |Intel MIC      |  |
+      +---|Card Driver    ||        |      | |Host Driver    |  |
+          +------------+--------+   | +----+---------------+-----+
+                 |                  |      |
 +-------------------------------------------------------------+
 |                                                             |
 |                           PCIe Bus                          |
diff --git a/Documentation/mic/mpssd/mpss b/Documentation/mic/mpssd/mpss
index 582aad4811ae..09ea90931649 100755
--- a/Documentation/mic/mpssd/mpss
+++ b/Documentation/mic/mpssd/mpss
@@ -119,10 +119,10 @@ stop()
 	# Wait for the cards to go offline
 	for f in $sysfs/*
 	do
-		while [ "`cat $f/state`" != "offline" ]
+		while [ "`cat $f/state`" != "ready" ]
 		do
 			sleep 1
-			echo -e "Waiting for "`basename $f`" to go offline"
+			echo -e "Waiting for "`basename $f`" to become ready"
 		done
 	done
 
diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c
index 3c5c379fc29d..aaeafa18d99b 100644
--- a/Documentation/mic/mpssd/mpssd.c
+++ b/Documentation/mic/mpssd/mpssd.c
@@ -43,7 +43,7 @@
 #include <linux/mic_common.h>
 #include <tools/endian.h>
 
-static void init_mic(struct mic_info *mic);
+static void *init_mic(void *arg);
 
 static FILE *logfp;
 static struct mic_info mic_list;
@@ -116,19 +116,18 @@ static struct {
 		.num = htole16(MIC_VRING_ENTRIES),
 	},
 #if GSO_ENABLED
 	.host_features = htole32(
 		1 << VIRTIO_NET_F_CSUM |
 		1 << VIRTIO_NET_F_GSO |
 		1 << VIRTIO_NET_F_GUEST_TSO4 |
 		1 << VIRTIO_NET_F_GUEST_TSO6 |
-		1 << VIRTIO_NET_F_GUEST_ECN |
-		1 << VIRTIO_NET_F_GUEST_UFO),
+		1 << VIRTIO_NET_F_GUEST_ECN),
 #else
 	.host_features = 0,
 #endif
 };
 
-static const char *mic_config_dir = "/etc/sysconfig/mic";
+static const char *mic_config_dir = "/etc/mpss";
 static const char *virtblk_backend = "VIRTBLK_BACKEND";
 static struct {
 	struct mic_device_desc dd;
@@ -192,7 +191,7 @@ tap_configure(struct mic_info *mic, char *dev)
 		return ret;
 	}
 
-	snprintf(ipaddr, IFNAMSIZ, "172.31.%d.254/24", mic->id);
+	snprintf(ipaddr, IFNAMSIZ, "172.31.%d.254/24", mic->id + 1);
 
 	pid = fork();
 	if (pid == 0) {
@@ -255,8 +254,7 @@ static int tun_alloc(struct mic_info *mic, char *dev)
 		return err;
 	}
 #if GSO_ENABLED
-	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
-		TUN_F_TSO_ECN | TUN_F_UFO;
+	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN;
 
 	err = ioctl(fd, TUNSETOFFLOAD, offload);
 	if (err < 0) {
@@ -332,7 +330,6 @@ static struct mic_device_desc *get_device_desc(struct mic_info *mic, int type)
 		return d;
 	}
 	mpsslog("%s %s %d not found\n", mic->name, __func__, type);
-	assert(0);
 	return NULL;
 }
 
@@ -415,6 +412,13 @@ mic_virtio_copy(struct mic_info *mic, int fd,
 	return ret;
 }
 
+static inline unsigned _vring_size(unsigned int num, unsigned long align)
+{
+	return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
+		+ align - 1) & ~(align - 1))
+		+ sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
+}
+
 /*
  * This initialization routine requires at least one
  * vring i.e. vr0. vr1 is optional.
@@ -426,8 +430,9 @@ init_vr(struct mic_info *mic, int fd, int type,
 	int vr_size;
 	char *va;
 
-	vr_size = PAGE_ALIGN(vring_size(MIC_VRING_ENTRIES,
-		MIC_VIRTIO_RING_ALIGN) + sizeof(struct _mic_vring_info));
+	vr_size = PAGE_ALIGN(_vring_size(MIC_VRING_ENTRIES,
+					 MIC_VIRTIO_RING_ALIGN) +
+			     sizeof(struct _mic_vring_info));
 	va = mmap(NULL, MIC_DEVICE_PAGE_END + vr_size * num_vq,
 		PROT_READ, MAP_SHARED, fd, 0);
 	if (MAP_FAILED == va) {
@@ -439,25 +444,25 @@ init_vr(struct mic_info *mic, int fd, int type,
 	set_dp(mic, type, va);
 	vr0->va = (struct mic_vring *)&va[MIC_DEVICE_PAGE_END];
 	vr0->info = vr0->va +
-		vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN);
+		_vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN);
 	vring_init(&vr0->vr,
 		   MIC_VRING_ENTRIES, vr0->va, MIC_VIRTIO_RING_ALIGN);
 	mpsslog("%s %s vr0 %p vr0->info %p vr_size 0x%x vring 0x%x ",
 		__func__, mic->name, vr0->va, vr0->info, vr_size,
-		vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
+		_vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
 	mpsslog("magic 0x%x expected 0x%x\n",
 		le32toh(vr0->info->magic), MIC_MAGIC + type);
 	assert(le32toh(vr0->info->magic) == MIC_MAGIC + type);
 	if (vr1) {
 		vr1->va = (struct mic_vring *)
 			&va[MIC_DEVICE_PAGE_END + vr_size];
-		vr1->info = vr1->va + vring_size(MIC_VRING_ENTRIES,
+		vr1->info = vr1->va + _vring_size(MIC_VRING_ENTRIES,
 			MIC_VIRTIO_RING_ALIGN);
 		vring_init(&vr1->vr,
 			   MIC_VRING_ENTRIES, vr1->va, MIC_VIRTIO_RING_ALIGN);
 		mpsslog("%s %s vr1 %p vr1->info %p vr_size 0x%x vring 0x%x ",
 			__func__, mic->name, vr1->va, vr1->info, vr_size,
-			vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
+			_vring_size(MIC_VRING_ENTRIES, MIC_VIRTIO_RING_ALIGN));
 		mpsslog("magic 0x%x expected 0x%x\n",
 			le32toh(vr1->info->magic), MIC_MAGIC + type + 1);
 		assert(le32toh(vr1->info->magic) == MIC_MAGIC + type + 1);
@@ -466,16 +471,21 @@ done:
 	return va;
 }
 
-static void
+static int
 wait_for_card_driver(struct mic_info *mic, int fd, int type)
 {
 	struct pollfd pollfd;
 	int err;
 	struct mic_device_desc *desc = get_device_desc(mic, type);
+	__u8 prev_status;
 
+	if (!desc)
+		return -ENODEV;
+	prev_status = desc->status;
 	pollfd.fd = fd;
 	mpsslog("%s %s Waiting .... desc-> type %d status 0x%x\n",
 		mic->name, __func__, type, desc->status);
+
 	while (1) {
 		pollfd.events = POLLIN;
 		pollfd.revents = 0;
@@ -487,8 +497,13 @@ wait_for_card_driver(struct mic_info *mic, int fd, int type)
 		}
 
 		if (pollfd.revents) {
-			mpsslog("%s %s Waiting... desc-> type %d status 0x%x\n",
-				mic->name, __func__, type, desc->status);
+			if (desc->status != prev_status) {
+				mpsslog("%s %s Waiting... desc-> type %d "
+					"status 0x%x\n",
+					mic->name, __func__, type,
+					desc->status);
+				prev_status = desc->status;
+			}
 			if (desc->status & VIRTIO_CONFIG_S_DRIVER_OK) {
 				mpsslog("%s %s poll.revents %d\n",
 					mic->name, __func__, pollfd.revents);
@@ -499,6 +514,7 @@ wait_for_card_driver(struct mic_info *mic, int fd, int type)
499 } 514 }
500 } 515 }
501 } 516 }
517 return 0;
502} 518}
503 519
504/* Spin till we have some descriptors */ 520/* Spin till we have some descriptors */
@@ -575,9 +591,16 @@ virtio_net(void *arg)
575 __func__, strerror(errno)); 591 __func__, strerror(errno));
576 continue; 592 continue;
577 } 593 }
578 if (!(desc->status & VIRTIO_CONFIG_S_DRIVER_OK)) 594 if (!(desc->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
579 wait_for_card_driver(mic, mic->mic_net.virtio_net_fd, 595 err = wait_for_card_driver(mic,
580 VIRTIO_ID_NET); 596 mic->mic_net.virtio_net_fd,
597 VIRTIO_ID_NET);
598 if (err) {
599 mpsslog("%s %s %d Exiting...\n",
600 mic->name, __func__, __LINE__);
601 break;
602 }
603 }
581 /* 604 /*
582 * Check if there is data to be read from TUN and write to 605 * Check if there is data to be read from TUN and write to
583 * virtio net fd if there is. 606 * virtio net fd if there is.
@@ -786,10 +809,16 @@ virtio_console(void *arg)
786 strerror(errno)); 809 strerror(errno));
787 continue; 810 continue;
788 } 811 }
789 if (!(desc->status & VIRTIO_CONFIG_S_DRIVER_OK)) 812 if (!(desc->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
790 wait_for_card_driver(mic, 813 err = wait_for_card_driver(mic,
791 mic->mic_console.virtio_console_fd, 814 mic->mic_console.virtio_console_fd,
792 VIRTIO_ID_CONSOLE); 815 VIRTIO_ID_CONSOLE);
816 if (err) {
817 mpsslog("%s %s %d Exiting...\n",
818 mic->name, __func__, __LINE__);
819 break;
820 }
821 }
793 822
794 if (console_poll[MONITOR_FD].revents & POLLIN) { 823 if (console_poll[MONITOR_FD].revents & POLLIN) {
795 copy.iov = iov0; 824 copy.iov = iov0;
@@ -1048,8 +1077,9 @@ stop_virtblk(struct mic_info *mic)
1048{ 1077{
1049 int vr_size, ret; 1078 int vr_size, ret;
1050 1079
1051 vr_size = PAGE_ALIGN(vring_size(MIC_VRING_ENTRIES, 1080 vr_size = PAGE_ALIGN(_vring_size(MIC_VRING_ENTRIES,
1052 MIC_VIRTIO_RING_ALIGN) + sizeof(struct _mic_vring_info)); 1081 MIC_VIRTIO_RING_ALIGN) +
1082 sizeof(struct _mic_vring_info));
1053 ret = munmap(mic->mic_virtblk.block_dp, 1083 ret = munmap(mic->mic_virtblk.block_dp,
1054 MIC_DEVICE_PAGE_END + vr_size * virtblk_dev_page.dd.num_vq); 1084 MIC_DEVICE_PAGE_END + vr_size * virtblk_dev_page.dd.num_vq);
1055 if (ret < 0) 1085 if (ret < 0)
@@ -1131,6 +1161,10 @@ write_status(int fd, __u8 *status)
1131 return ioctl(fd, MIC_VIRTIO_COPY_DESC, &copy); 1161 return ioctl(fd, MIC_VIRTIO_COPY_DESC, &copy);
1132} 1162}
1133 1163
1164#ifndef VIRTIO_BLK_T_GET_ID
1165#define VIRTIO_BLK_T_GET_ID 8
1166#endif
1167
1134static void * 1168static void *
1135virtio_block(void *arg) 1169virtio_block(void *arg)
1136{ 1170{
@@ -1297,12 +1331,7 @@ reset(struct mic_info *mic)
1297 mpsslog("%s: %s %d state %s\n", 1331 mpsslog("%s: %s %d state %s\n",
1298 mic->name, __func__, __LINE__, state); 1332 mic->name, __func__, __LINE__, state);
1299 1333
1300 /* 1334 if (!strcmp(state, "ready")) {
1301 * If the shutdown was initiated by OSPM, the state stays
1302 * in "suspended" which is also a valid condition for reset.
1303 */
1304 if ((!strcmp(state, "offline")) ||
1305 (!strcmp(state, "suspended"))) {
1306 free(state); 1335 free(state);
1307 break; 1336 break;
1308 } 1337 }
@@ -1331,34 +1360,50 @@ get_mic_shutdown_status(struct mic_info *mic, char *shutdown_status)
1331 assert(0); 1360 assert(0);
1332}; 1361};
1333 1362
1334static int get_mic_state(struct mic_info *mic, char *state) 1363static int get_mic_state(struct mic_info *mic)
1335{ 1364{
1336 if (!strcmp(state, "offline")) 1365 char *state = NULL;
1337 return MIC_OFFLINE; 1366 enum mic_states mic_state;
1338 if (!strcmp(state, "online")) 1367
1339 return MIC_ONLINE; 1368 while (!state) {
1340 if (!strcmp(state, "shutting_down")) 1369 state = readsysfs(mic->name, "state");
1341 return MIC_SHUTTING_DOWN; 1370 sleep(1);
1342 if (!strcmp(state, "reset_failed")) 1371 }
1343 return MIC_RESET_FAILED; 1372 mpsslog("%s: %s %d state %s\n",
1344 if (!strcmp(state, "suspending")) 1373 mic->name, __func__, __LINE__, state);
1345 return MIC_SUSPENDING; 1374
1346 if (!strcmp(state, "suspended")) 1375 if (!strcmp(state, "ready")) {
1347 return MIC_SUSPENDED; 1376 mic_state = MIC_READY;
1348 mpsslog("%s: BUG invalid state %s\n", mic->name, state); 1377 } else if (!strcmp(state, "booting")) {
1349 /* Invalid state */ 1378 mic_state = MIC_BOOTING;
1350 assert(0); 1379 } else if (!strcmp(state, "online")) {
1380 mic_state = MIC_ONLINE;
1381 } else if (!strcmp(state, "shutting_down")) {
1382 mic_state = MIC_SHUTTING_DOWN;
1383 } else if (!strcmp(state, "reset_failed")) {
1384 mic_state = MIC_RESET_FAILED;
1385 } else if (!strcmp(state, "resetting")) {
1386 mic_state = MIC_RESETTING;
1387 } else {
1388 mpsslog("%s: BUG invalid state %s\n", mic->name, state);
1389 assert(0);
1390 }
1391
1392 free(state);
1393 return mic_state;
1351}; 1394};
1352 1395
1353static void mic_handle_shutdown(struct mic_info *mic) 1396static void mic_handle_shutdown(struct mic_info *mic)
1354{ 1397{
1355#define SHUTDOWN_TIMEOUT 60 1398#define SHUTDOWN_TIMEOUT 60
1356 int i = SHUTDOWN_TIMEOUT, ret, stat = 0; 1399 int i = SHUTDOWN_TIMEOUT;
1357 char *shutdown_status; 1400 char *shutdown_status;
1358 while (i) { 1401 while (i) {
1359 shutdown_status = readsysfs(mic->name, "shutdown_status"); 1402 shutdown_status = readsysfs(mic->name, "shutdown_status");
1360 if (!shutdown_status) 1403 if (!shutdown_status) {
1404 sleep(1);
1361 continue; 1405 continue;
1406 }
1362 mpsslog("%s: %s %d shutdown_status %s\n", 1407 mpsslog("%s: %s %d shutdown_status %s\n",
1363 mic->name, __func__, __LINE__, shutdown_status); 1408 mic->name, __func__, __LINE__, shutdown_status);
1364 switch (get_mic_shutdown_status(mic, shutdown_status)) { 1409 switch (get_mic_shutdown_status(mic, shutdown_status)) {
@@ -1377,94 +1422,110 @@ static void mic_handle_shutdown(struct mic_info *mic)
1377 i--; 1422 i--;
1378 } 1423 }
1379reset: 1424reset:
1380 ret = kill(mic->pid, SIGTERM); 1425 if (!i)
1381 mpsslog("%s: %s %d kill pid %d ret %d\n", 1426 mpsslog("%s: %s %d timing out waiting for shutdown_status %s\n",
1382 mic->name, __func__, __LINE__, 1427 mic->name, __func__, __LINE__, shutdown_status);
1383 mic->pid, ret); 1428 reset(mic);
1384 if (!ret) {
1385 ret = waitpid(mic->pid, &stat,
1386 WIFSIGNALED(stat));
1387 mpsslog("%s: %s %d waitpid ret %d pid %d\n",
1388 mic->name, __func__, __LINE__,
1389 ret, mic->pid);
1390 }
1391 if (ret == mic->pid)
1392 reset(mic);
1393} 1429}
1394 1430
1395static void * 1431static int open_state_fd(struct mic_info *mic)
1396mic_config(void *arg)
1397{ 1432{
1398 struct mic_info *mic = (struct mic_info *)arg;
1399 char *state = NULL;
1400 char pathname[PATH_MAX]; 1433 char pathname[PATH_MAX];
1401 int fd, ret; 1434 int fd;
1402 struct pollfd ufds[1];
1403 char value[4096];
1404 1435
1405 snprintf(pathname, PATH_MAX - 1, "%s/%s/%s", 1436 snprintf(pathname, PATH_MAX - 1, "%s/%s/%s",
1406 MICSYSFSDIR, mic->name, "state"); 1437 MICSYSFSDIR, mic->name, "state");
1407 1438
1408 fd = open(pathname, O_RDONLY); 1439 fd = open(pathname, O_RDONLY);
1409 if (fd < 0) { 1440 if (fd < 0)
1410 mpsslog("%s: opening file %s failed %s\n", 1441 mpsslog("%s: opening file %s failed %s\n",
1411 mic->name, pathname, strerror(errno)); 1442 mic->name, pathname, strerror(errno));
1412 goto error; 1443 return fd;
1444}
1445
1446static int block_till_state_change(int fd, struct mic_info *mic)
1447{
1448 struct pollfd ufds[1];
1449 char value[PAGE_SIZE];
1450 int ret;
1451
1452 ufds[0].fd = fd;
1453 ufds[0].events = POLLERR | POLLPRI;
1454 ret = poll(ufds, 1, -1);
1455 if (ret < 0) {
1456 mpsslog("%s: %s %d poll failed %s\n",
1457 mic->name, __func__, __LINE__, strerror(errno));
1458 return ret;
1459 }
1460
1461 ret = lseek(fd, 0, SEEK_SET);
1462 if (ret < 0) {
1463 mpsslog("%s: %s %d Failed to seek to 0: %s\n",
1464 mic->name, __func__, __LINE__, strerror(errno));
1465 return ret;
1466 }
1467
1468 ret = read(fd, value, sizeof(value));
1469 if (ret < 0) {
1470 mpsslog("%s: %s %d Failed to read sysfs entry: %s\n",
1471 mic->name, __func__, __LINE__, strerror(errno));
1472 return ret;
1473 }
1474
1475 return 0;
1476}
1477
1478static void *
1479mic_config(void *arg)
1480{
1481 struct mic_info *mic = (struct mic_info *)arg;
1482 int fd, ret, stat = 0;
1483
1484 fd = open_state_fd(mic);
1485 if (fd < 0) {
1486 mpsslog("%s: %s %d open state fd failed %s\n",
1487 mic->name, __func__, __LINE__, strerror(errno));
1488 goto exit;
1413 } 1489 }
1414 1490
1415 do { 1491 do {
1416 ret = lseek(fd, 0, SEEK_SET); 1492 ret = block_till_state_change(fd, mic);
1417 if (ret < 0) { 1493 if (ret < 0) {
1418 mpsslog("%s: Failed to seek to file start '%s': %s\n", 1494 mpsslog("%s: %s %d block_till_state_change error %s\n",
1419 mic->name, pathname, strerror(errno)); 1495 mic->name, __func__, __LINE__, strerror(errno));
1420 goto close_error1; 1496 goto close_exit;
1421 } 1497 }
1422 ret = read(fd, value, sizeof(value)); 1498
1423 if (ret < 0) { 1499 switch (get_mic_state(mic)) {
1424 mpsslog("%s: Failed to read sysfs entry '%s': %s\n",
1425 mic->name, pathname, strerror(errno));
1426 goto close_error1;
1427 }
1428retry:
1429 state = readsysfs(mic->name, "state");
1430 if (!state)
1431 goto retry;
1432 mpsslog("%s: %s %d state %s\n",
1433 mic->name, __func__, __LINE__, state);
1434 switch (get_mic_state(mic, state)) {
1435 case MIC_SHUTTING_DOWN: 1500 case MIC_SHUTTING_DOWN:
1436 mic_handle_shutdown(mic); 1501 mic_handle_shutdown(mic);
1437 goto close_error; 1502 break;
1438 case MIC_SUSPENDING: 1503 case MIC_READY:
1439 mic->boot_on_resume = 1; 1504 case MIC_RESET_FAILED:
1440 setsysfs(mic->name, "state", "suspend"); 1505 ret = kill(mic->pid, SIGTERM);
1441 mic_handle_shutdown(mic); 1506 mpsslog("%s: %s %d kill pid %d ret %d\n",
1442 goto close_error; 1507 mic->name, __func__, __LINE__,
1443 case MIC_OFFLINE: 1508 mic->pid, ret);
1509 if (!ret) {
1510 ret = waitpid(mic->pid, &stat,
1511 WIFSIGNALED(stat));
1512 mpsslog("%s: %s %d waitpid ret %d pid %d\n",
1513 mic->name, __func__, __LINE__,
1514 ret, mic->pid);
1515 }
1444 if (mic->boot_on_resume) { 1516 if (mic->boot_on_resume) {
1445 setsysfs(mic->name, "state", "boot"); 1517 setsysfs(mic->name, "state", "boot");
1446 mic->boot_on_resume = 0; 1518 mic->boot_on_resume = 0;
1447 } 1519 }
1448 break; 1520 goto close_exit;
1449 default: 1521 default:
1450 break; 1522 break;
1451 } 1523 }
1452 free(state);
1453
1454 ufds[0].fd = fd;
1455 ufds[0].events = POLLERR | POLLPRI;
1456 ret = poll(ufds, 1, -1);
1457 if (ret < 0) {
1458 mpsslog("%s: poll failed %s\n",
1459 mic->name, strerror(errno));
1460 goto close_error1;
1461 }
1462 } while (1); 1524 } while (1);
1463close_error: 1525
1464 free(state); 1526close_exit:
1465close_error1:
1466 close(fd); 1527 close(fd);
1467error: 1528exit:
1468 init_mic(mic); 1529 init_mic(mic);
1469 pthread_exit(NULL); 1530 pthread_exit(NULL);
1470} 1531}
@@ -1477,15 +1538,15 @@ set_cmdline(struct mic_info *mic)
1477 1538
1478 len = snprintf(buffer, PATH_MAX, 1539 len = snprintf(buffer, PATH_MAX,
1479 "clocksource=tsc highres=off nohz=off "); 1540 "clocksource=tsc highres=off nohz=off ");
1480 len += snprintf(buffer + len, PATH_MAX - len, 1541 len += snprintf(buffer + len, PATH_MAX,
1481 "cpufreq_on;corec6_off;pc3_off;pc6_off "); 1542 "cpufreq_on;corec6_off;pc3_off;pc6_off ");
1482 len += snprintf(buffer + len, PATH_MAX - len, 1543 len += snprintf(buffer + len, PATH_MAX,
1483 "ifcfg=static;address,172.31.%d.1;netmask,255.255.255.0", 1544 "ifcfg=static;address,172.31.%d.1;netmask,255.255.255.0",
1484 mic->id); 1545 mic->id + 1);
1485 1546
1486 setsysfs(mic->name, "cmdline", buffer); 1547 setsysfs(mic->name, "cmdline", buffer);
1487 mpsslog("%s: Command line: \"%s\"\n", mic->name, buffer); 1548 mpsslog("%s: Command line: \"%s\"\n", mic->name, buffer);
1488 snprintf(buffer, PATH_MAX, "172.31.%d.1", mic->id); 1549 snprintf(buffer, PATH_MAX, "172.31.%d.1", mic->id + 1);
1489 mpsslog("%s: IPADDR: \"%s\"\n", mic->name, buffer); 1550 mpsslog("%s: IPADDR: \"%s\"\n", mic->name, buffer);
1490} 1551}
1491 1552
@@ -1541,8 +1602,6 @@ set_log_buf_info(struct mic_info *mic)
1541 close(fd); 1602 close(fd);
1542} 1603}
1543 1604
1544static void init_mic(struct mic_info *mic);
1545
1546static void 1605static void
1547change_virtblk_backend(int x, siginfo_t *siginfo, void *p) 1606change_virtblk_backend(int x, siginfo_t *siginfo, void *p)
1548{ 1607{
@@ -1553,8 +1612,16 @@ change_virtblk_backend(int x, siginfo_t *siginfo, void *p)
1553} 1612}
1554 1613
1555static void 1614static void
1556init_mic(struct mic_info *mic) 1615set_mic_boot_params(struct mic_info *mic)
1616{
1617 set_log_buf_info(mic);
1618 set_cmdline(mic);
1619}
1620
1621static void *
1622init_mic(void *arg)
1557{ 1623{
1624 struct mic_info *mic = (struct mic_info *)arg;
1558 struct sigaction ignore = { 1625 struct sigaction ignore = {
1559 .sa_flags = 0, 1626 .sa_flags = 0,
1560 .sa_handler = SIG_IGN 1627 .sa_handler = SIG_IGN
@@ -1564,7 +1631,7 @@ init_mic(struct mic_info *mic)
1564 .sa_sigaction = change_virtblk_backend, 1631 .sa_sigaction = change_virtblk_backend,
1565 }; 1632 };
1566 char buffer[PATH_MAX]; 1633 char buffer[PATH_MAX];
1567 int err; 1634 int err, fd;
1568 1635
1569 /* 1636 /*
1570 * Currently, one virtio block device is supported for each MIC card 1637 * Currently, one virtio block device is supported for each MIC card
@@ -1577,12 +1644,38 @@ init_mic(struct mic_info *mic)
1577 * the MIC daemon. 1644 * the MIC daemon.
1578 */ 1645 */
1579 sigaction(SIGUSR1, &ignore, NULL); 1646 sigaction(SIGUSR1, &ignore, NULL);
1647retry:
1648 fd = open_state_fd(mic);
1649 if (fd < 0) {
1650 mpsslog("%s: %s %d open state fd failed %s\n",
1651 mic->name, __func__, __LINE__, strerror(errno));
1652 sleep(2);
1653 goto retry;
1654 }
1655
1656 if (mic->restart) {
1657 snprintf(buffer, PATH_MAX, "boot");
1658 setsysfs(mic->name, "state", buffer);
1659 mpsslog("%s restarting mic %d\n",
1660 mic->name, mic->restart);
1661 mic->restart = 0;
1662 }
1663
1664 while (1) {
1665 while (block_till_state_change(fd, mic)) {
1666 mpsslog("%s: %s %d block_till_state_change error %s\n",
1667 mic->name, __func__, __LINE__, strerror(errno));
1668 sleep(2);
1669 continue;
1670 }
1671
1672 if (get_mic_state(mic) == MIC_BOOTING)
1673 break;
1674 }
1580 1675
1581 mic->pid = fork(); 1676 mic->pid = fork();
1582 switch (mic->pid) { 1677 switch (mic->pid) {
1583 case 0: 1678 case 0:
1584 set_log_buf_info(mic);
1585 set_cmdline(mic);
1586 add_virtio_device(mic, &virtcons_dev_page.dd); 1679 add_virtio_device(mic, &virtcons_dev_page.dd);
1587 add_virtio_device(mic, &virtnet_dev_page.dd); 1680 add_virtio_device(mic, &virtnet_dev_page.dd);
1588 err = pthread_create(&mic->mic_console.console_thread, NULL, 1681 err = pthread_create(&mic->mic_console.console_thread, NULL,
@@ -1612,24 +1705,29 @@ init_mic(struct mic_info *mic)
1612 mic->name, mic->id, errno); 1705 mic->name, mic->id, errno);
1613 break; 1706 break;
1614 default: 1707 default:
1615 if (mic->restart) { 1708 err = pthread_create(&mic->config_thread, NULL,
1616 snprintf(buffer, PATH_MAX, "boot"); 1709 mic_config, mic);
1617 setsysfs(mic->name, "state", buffer); 1710 if (err)
1618 mpsslog("%s restarting mic %d\n", 1711 mpsslog("%s mic_config pthread_create failed %s\n",
1619 mic->name, mic->restart); 1712 mic->name, strerror(err));
1620 mic->restart = 0;
1621 }
1622 pthread_create(&mic->config_thread, NULL, mic_config, mic);
1623 } 1713 }
1714
1715 return NULL;
1624} 1716}
1625 1717
1626static void 1718static void
1627start_daemon(void) 1719start_daemon(void)
1628{ 1720{
1629 struct mic_info *mic; 1721 struct mic_info *mic;
1722 int err;
1630 1723
1631 for (mic = mic_list.next; mic != NULL; mic = mic->next) 1724 for (mic = mic_list.next; mic; mic = mic->next) {
1632 init_mic(mic); 1725 set_mic_boot_params(mic);
1726 err = pthread_create(&mic->init_thread, NULL, init_mic, mic);
1727 if (err)
1728 mpsslog("%s init_mic pthread_create failed %s\n",
1729 mic->name, strerror(err));
1730 }
1633 1731
1634 while (1) 1732 while (1)
1635 sleep(60); 1733 sleep(60);
diff --git a/Documentation/mic/mpssd/mpssd.h b/Documentation/mic/mpssd/mpssd.h
index f5f18b15d9a0..8bd64944aacc 100644
--- a/Documentation/mic/mpssd/mpssd.h
+++ b/Documentation/mic/mpssd/mpssd.h
@@ -86,6 +86,7 @@ struct mic_info {
86 int id; 86 int id;
87 char *name; 87 char *name;
88 pthread_t config_thread; 88 pthread_t config_thread;
89 pthread_t init_thread;
89 pid_t pid; 90 pid_t pid;
90 struct mic_console_info mic_console; 91 struct mic_console_info mic_console;
91 struct mic_net_info mic_net; 92 struct mic_net_info mic_net;
diff --git a/Documentation/trace/intel_th.txt b/Documentation/trace/intel_th.txt
new file mode 100644
index 000000000000..f7fc5ba5df8d
--- /dev/null
+++ b/Documentation/trace/intel_th.txt
@@ -0,0 +1,99 @@
1Intel(R) Trace Hub (TH)
2=======================
3
4Overview
5--------
6
 7Intel(R) Trace Hub (TH) is a set of hardware blocks that produce,
 8switch and output trace data from multiple hardware and software
 9sources, encoded as System Trace Protocol (MIPI STPv2), over several
 10types of trace output ports; it is intended for full-system
 11debugging. For more information on the hardware, see the Intel(R)
 12Trace Hub developer's manual [1].
13
14It consists of trace sources, trace destinations (outputs) and a
15switch (Global Trace Hub, GTH). These devices are placed on a bus of
16their own ("intel_th"), where they can be discovered and configured
17via sysfs attributes.
18
19Currently, the following Intel TH subdevices (blocks) are supported:
20 - Software Trace Hub (STH), trace source, which is a System Trace
21 Module (STM) device,
22 - Memory Storage Unit (MSU), trace output, which allows storing
23 trace hub output in system memory,
24 - Parallel Trace Interface output (PTI), trace output to an external
25 debug host via a PTI port,
26 - Global Trace Hub (GTH), which is a switch and a central component
27 of Intel(R) Trace Hub architecture.
28
29Common attributes for output devices are described in
 30Documentation/ABI/testing/sysfs-bus-intel_th-output-devices; the most
 31notable of them is "active", which enables or disables trace output
32into that particular output device.
33
34GTH allows directing different STP masters into different output ports
 35via its "masters" attribute group. A more detailed description of the
 36GTH interface is at Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth.
37
 38STH registers an stm class device, through which it provides an
 39interface to userspace and kernelspace software trace sources. See
 40Documentation/trace/stm.txt for more information on that.
41
 42MSU can be configured to collect trace data into a system memory
 43buffer, which can later be read from its device nodes via the read()
 44or mmap() interfaces.
45
46On the whole, Intel(R) Trace Hub does not require any special
47userspace software to function; everything can be configured, started
 48and collected via sysfs attributes and device nodes.
49
50[1] https://software.intel.com/sites/default/files/managed/d3/3c/intel-th-developer-manual.pdf
51
52Bus and Subdevices
53------------------
54
 55For each Intel TH device in the system, a bus of its own is
 56created and assigned an id number that reflects the order in which TH
 57devices were enumerated. All TH subdevices (devices on the intel_th
 58bus) begin with this id: 0-gth, 0-msc0, 0-msc1, 0-pti, 0-sth; the id
 59is followed by the device's name and an optional index.
60
61Output devices also get a device node in /dev/intel_thN, where N is
62the Intel TH device id. For example, MSU's memory buffers, when
63allocated, are accessible via /dev/intel_th0/msc{0,1}.
64
65Quick example
66-------------
67
68# figure out which GTH port is the first memory controller:
69
70$ cat /sys/bus/intel_th/devices/0-msc0/port
710
72
73# looks like it's port 0, configure master 33 to send data to port 0:
74
75$ echo 0 > /sys/bus/intel_th/devices/0-gth/masters/33
76
 77# allocate a two-window multiblock buffer on the first memory
 78# controller, 64 pages each:
79
80$ echo multi > /sys/bus/intel_th/devices/0-msc0/mode
81$ echo 64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
82
83# enable wrapping for this controller, too:
84
85$ echo 1 > /sys/bus/intel_th/devices/0-msc0/wrap
86
87# and enable tracing into this port:
88
89$ echo 1 > /sys/bus/intel_th/devices/0-msc0/active
90
91# .. send data to master 33, see stm.txt for more details ..
92# .. wait for traces to pile up ..
93# .. and stop the trace:
94
95$ echo 0 > /sys/bus/intel_th/devices/0-msc0/active
96
97# and now you can collect the trace from the device node:
98
99$ cat /dev/intel_th0/msc0 > my_stp_trace
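
The final collection step can equally be done from a program; a
minimal C sketch (an illustration, not part of the driver) that drains
the buffer through the read() interface, using the same paths as in
the example above:

#include <stdio.h>

int main(void)
{
	/* MSU output device node, as allocated in the example above */
	FILE *in = fopen("/dev/intel_th0/msc0", "rb");
	FILE *out = fopen("my_stp_trace", "wb");
	char buf[4096];
	size_t n;

	if (!in || !out)
		return 1;
	/* copy the captured trace until the buffer is drained */
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		fwrite(buf, 1, n, out);
	fclose(in);
	fclose(out);
	return 0;
}
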
diff --git a/Documentation/trace/stm.txt b/Documentation/trace/stm.txt
new file mode 100644
index 000000000000..ea035f9dbfd7
--- /dev/null
+++ b/Documentation/trace/stm.txt
@@ -0,0 +1,80 @@
1System Trace Module
2===================
3
 4System Trace Module (STM) is a device described in the MIPI STP specs
 5as an STP trace stream generator. STP (System Trace Protocol) is a trace
6protocol multiplexing data from multiple trace sources, each one of
7which is assigned a unique pair of master and channel. While some of
8these masters and channels are statically allocated to certain
9hardware trace sources, others are available to software. Software
10trace sources are usually free to pick for themselves any
11master/channel combination from this pool.
12
13On the receiving end of this STP stream (the decoder side), trace
14sources can only be identified by master/channel combination, so in
15order for the decoder to be able to make sense of the trace that
16involves multiple trace sources, it needs to be able to map those
17master/channel pairs to the trace sources that it understands.
18
19For instance, it is helpful to know that syslog messages come on
20master 7 channel 15, while arbitrary user applications can use masters
2148 to 63 and channels 0 to 127.
22
 23To solve this mapping problem, the stm class provides a policy
 24management mechanism via configfs, which allows defining rules that
 25map string identifiers to ranges of masters and channels. If these
 26rules (the policy) are consistent with what the decoder expects, it
 27will be able to properly process the trace data.
28
 29This policy is a tree structure containing rules (policy_node) that
 30have a name (string identifier) and a range of masters and channels
 31associated with them, located in the "stp-policy" subsystem directory
 32in configfs. The topmost directory's name (the policy) is the STM
 33device name to which this policy applies and an arbitrary string
 34identifier, separated by a stop. From the example above, a rule
 35may look like this:
36
37$ ls /config/stp-policy/dummy_stm.my-policy/user
38channels masters
39$ cat /config/stp-policy/dummy_stm.my-policy/user/masters
4048 63
41$ cat /config/stp-policy/dummy_stm.my-policy/user/channels
420 127
43
 44which means that the master allocation pool for this rule consists of
 45masters 48 through 63 and the channel allocation pool has channels 0
 46through 127. Now, any producer (trace source) identifying itself
 47with the "user" identification string will be allocated a master and
 48channel from within these ranges.
49
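Such a policy can also be created programmatically; a minimal sketch,
assuming configfs is mounted at /config and a dummy_stm device exists
(policy nodes are plain configfs directories, their attributes plain
files):

#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static void set_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		write(fd, val, strlen(val));
		close(fd);
	}
}

int main(void)
{
	/* mkdir() creates policy nodes, write() sets their attributes */
	mkdir("/config/stp-policy/dummy_stm.my-policy", 0755);
	mkdir("/config/stp-policy/dummy_stm.my-policy/user", 0755);
	set_attr("/config/stp-policy/dummy_stm.my-policy/user/masters", "48 63");
	set_attr("/config/stp-policy/dummy_stm.my-policy/user/channels", "0 127");
	return 0;
}
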
 50These rules can be nested: for example, one can define a rule "dummy"
 51under the "user" directory from the example above, and this new rule
 52will be used for trace sources with the id string "user/dummy".
53
 54Trace sources have to open the stm class device's node and write their
 55trace data into its file descriptor. In order to identify themselves
 56to the policy, they need to do an STP_POLICY_ID_SET ioctl on this file
 57descriptor, providing their id string. Otherwise, they will be
 58automatically allocated a master/channel pair upon the first write to
 59this file descriptor, according to the "default" rule of the policy,
 60if one exists.
61
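As a rough illustration, a userspace trace source might look like the
sketch below. It assumes the struct stp_policy_id layout from
include/uapi/linux/stm.h added by this series, the "user" policy rule
from the example above, and a dummy_stm device node; treat those names
as assumptions rather than a reference implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/stm.h>

int main(void)
{
	const char id[] = "user";	/* must match a policy rule */
	struct stp_policy_id *pid;
	size_t sz = sizeof(*pid) + sizeof(id);
	int fd = open("/dev/dummy_stm", O_RDWR);	/* device name assumed */

	if (fd < 0)
		return 1;
	pid = calloc(1, sz);
	if (!pid)
		return 1;
	pid->size = sz;
	pid->width = 1;			/* request a single channel */
	memcpy(pid->id, id, sizeof(id));
	if (ioctl(fd, STP_POLICY_ID_SET, pid) < 0)
		return 1;
	/* on success the kernel fills in the allocated master/channel */
	printf("master %u channel %u\n",
	       (unsigned)pid->master, (unsigned)pid->channel);
	write(fd, "hello", 5);		/* emitted as STP data on that channel */
	close(fd);
	return 0;
}
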
62Some STM devices may allow direct mapping of the channel mmio regions
 63to userspace for zero-copy writing. One mappable page (in MMU terms)
 64will usually contain several channels' mmio regions, so the user will
 65need to allocate that many channels to themselves (via the
 66aforementioned ioctl() call) to be able to do this. That is, if your
 67stm device's channel mmio region is 64 bytes and the hardware page
 68size is 4096 bytes, then after a successful STP_POLICY_ID_SET ioctl() call with
69width==64, you should be able to mmap() one page on this file
70descriptor and obtain direct access to an mmio region for 64 channels.
71
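A sketch of that zero-copy path, assuming fd has already been granted
width==64 via STP_POLICY_ID_SET (as in the previous example, but with
pid->width = 64) and the 64-byte mmio / 4096-byte page sizes quoted
above:

#include <string.h>
#include <sys/mman.h>

static int stm_write_zero_copy(int fd)
{
	/* map the single page covering all 64 granted channels */
	void *mmio = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, 0);

	if (mmio == MAP_FAILED)
		return -1;
	memcpy(mmio, "hello", 5);	/* lands in the first channel's window */
	munmap(mmio, 4096);
	return 0;
}
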
 72For kernel-based trace sources, there is the "stm_source" device
 73class. Devices of this class can be connected to and disconnected from
 74stm devices at runtime via a sysfs attribute.
75
76Examples of STM devices are Intel(R) Trace Hub [1] and Coresight STM
77[2].
78
79[1] https://software.intel.com/sites/default/files/managed/d3/3c/intel-th-developer-manual.pdf
80[2] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0444b/index.html
diff --git a/Documentation/w1/masters/omap-hdq b/Documentation/w1/masters/omap-hdq
index 884dc284b215..234522709a5f 100644
--- a/Documentation/w1/masters/omap-hdq
+++ b/Documentation/w1/masters/omap-hdq
@@ -44,3 +44,9 @@ e.g:
44insmod omap_hdq.ko W1_ID=2 44insmod omap_hdq.ko W1_ID=2
 45insmod w1_bq27000.ko F_ID=2 45insmod w1_bq27000.ko F_ID=2
46 46
 47The driver also supports 1-wire mode. In this mode, there is no need
 48to pass the slave ID as a parameter; the driver will auto-detect slaves
 49connected to the bus using the SEARCH_ROM procedure. 1-wire mode can be
 50selected by setting the "ti,mode" property to "1w" in the DT (see
 51Documentation/devicetree/bindings/w1/omap-hdq.txt for more details).
 52By default the driver is in HDQ mode.
diff --git a/MAINTAINERS b/MAINTAINERS
index 68468bf0c3bf..e887dbb44431 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4355,6 +4355,13 @@ F: include/linux/fmc*.h
4355F: include/linux/ipmi-fru.h 4355F: include/linux/ipmi-fru.h
4356K: fmc_d.*register 4356K: fmc_d.*register
4357 4357
4358FPGA MANAGER FRAMEWORK
4359M: Alan Tull <atull@opensource.altera.com>
4360S: Maintained
4361F: drivers/fpga/
4362F: include/linux/fpga/fpga-mgr.h
4363W: http://www.rocketboards.org
4364
4358FPU EMULATOR 4365FPU EMULATOR
4359M: Bill Metzenthen <billm@melbpc.org.au> 4366M: Bill Metzenthen <billm@melbpc.org.au>
4360W: http://floatingpoint.sourceforge.net/emulator/index.html 4367W: http://floatingpoint.sourceforge.net/emulator/index.html
@@ -5551,6 +5558,12 @@ F: Documentation/networking/README.ipw2100
5551F: Documentation/networking/README.ipw2200 5558F: Documentation/networking/README.ipw2200
5552F: drivers/net/wireless/ipw2x00/ 5559F: drivers/net/wireless/ipw2x00/
5553 5560
5561INTEL(R) TRACE HUB
5562M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
5563S: Supported
5564F: Documentation/trace/intel_th.txt
5565F: drivers/hwtracing/intel_th/
5566
5554INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) 5567INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
5555M: Richard L Maliszewski <richard.l.maliszewski@intel.com> 5568M: Richard L Maliszewski <richard.l.maliszewski@intel.com>
5556M: Gang Wei <gang.wei@intel.com> 5569M: Gang Wei <gang.wei@intel.com>
@@ -5598,6 +5611,22 @@ F: include/linux/mei_cl_bus.h
5598F: drivers/misc/mei/* 5611F: drivers/misc/mei/*
5599F: Documentation/misc-devices/mei/* 5612F: Documentation/misc-devices/mei/*
5600 5613
5614INTEL MIC DRIVERS (mic)
5615M: Sudeep Dutt <sudeep.dutt@intel.com>
5616M: Ashutosh Dixit <ashutosh.dixit@intel.com>
5617S: Supported
5618W: https://github.com/sudeepdutt/mic
5619W: http://software.intel.com/en-us/mic-developer
5620F: include/linux/mic_bus.h
5621F: include/linux/scif.h
5622F: include/uapi/linux/mic_common.h
5623F: include/uapi/linux/mic_ioctl.h
 5624F: include/uapi/linux/scif_ioctl.h
5625F: drivers/misc/mic/
5626F: drivers/dma/mic_x100_dma.c
5627F: drivers/dma/mic_x100_dma.h
 5628F: Documentation/mic/
5629
5601INTEL PMC IPC DRIVER 5630INTEL PMC IPC DRIVER
 5602M: Zha Qipeng <qipeng.zha@intel.com> 5631M: Zha Qipeng <qipeng.zha@intel.com>
5603L: platform-driver-x86@vger.kernel.org 5632L: platform-driver-x86@vger.kernel.org
@@ -9214,6 +9243,14 @@ S: Maintained
9214F: include/linux/mmc/dw_mmc.h 9243F: include/linux/mmc/dw_mmc.h
9215F: drivers/mmc/host/dw_mmc* 9244F: drivers/mmc/host/dw_mmc*
9216 9245
9246SYSTEM TRACE MODULE CLASS
9247M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
9248S: Maintained
9249F: Documentation/trace/stm.txt
9250F: drivers/hwtracing/stm/
9251F: include/linux/stm.h
9252F: include/uapi/linux/stm.h
9253
9217THUNDERBOLT DRIVER 9254THUNDERBOLT DRIVER
9218M: Andreas Noever <andreas.noever@gmail.com> 9255M: Andreas Noever <andreas.noever@gmail.com>
9219S: Maintained 9256S: Maintained
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index dc0457e40775..1a5220e05109 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -294,6 +294,11 @@
294 devcfg: devcfg@f8007000 { 294 devcfg: devcfg@f8007000 {
295 compatible = "xlnx,zynq-devcfg-1.0"; 295 compatible = "xlnx,zynq-devcfg-1.0";
296 reg = <0xf8007000 0x100>; 296 reg = <0xf8007000 0x100>;
297 interrupt-parent = <&intc>;
298 interrupts = <0 8 4>;
299 clocks = <&clkc 12>;
300 clock-names = "ref_clk";
301 syscon = <&slcr>;
297 }; 302 };
298 303
299 global_timer: timer@f8f00200 { 304 global_timer: timer@f8f00200 {
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3a5ab4d5873d..d2ac339de85f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -192,4 +192,10 @@ source "drivers/nvdimm/Kconfig"
192 192
193source "drivers/nvmem/Kconfig" 193source "drivers/nvmem/Kconfig"
194 194
195source "drivers/hwtracing/stm/Kconfig"
196
197source "drivers/hwtracing/intel_th/Kconfig"
198
199source "drivers/fpga/Kconfig"
200
195endmenu 201endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 7f1b7c5a1cfd..73d039156ea7 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -167,5 +167,8 @@ obj-$(CONFIG_PERF_EVENTS) += perf/
167obj-$(CONFIG_RAS) += ras/ 167obj-$(CONFIG_RAS) += ras/
168obj-$(CONFIG_THUNDERBOLT) += thunderbolt/ 168obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
169obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/ 169obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
170obj-y += hwtracing/intel_th/
171obj-$(CONFIG_STM) += hwtracing/stm/
170obj-$(CONFIG_ANDROID) += android/ 172obj-$(CONFIG_ANDROID) += android/
171obj-$(CONFIG_NVMEM) += nvmem/ 173obj-$(CONFIG_NVMEM) += nvmem/
174obj-$(CONFIG_FPGA) += fpga/
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index e39e7402e623..dc62568b7dde 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -30,7 +30,6 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <linux/miscdevice.h> 32#include <linux/miscdevice.h>
33#include <linux/module.h>
34#include <linux/init.h> 33#include <linux/init.h>
35#include <linux/rtc.h> 34#include <linux/rtc.h>
36#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
@@ -395,14 +394,8 @@ efi_rtc_init(void)
395 } 394 }
396 return 0; 395 return 0;
397} 396}
397device_initcall(efi_rtc_init);
398 398
399static void __exit 399/*
400efi_rtc_exit(void)
401{
402 /* not yet used */
403}
404
405module_init(efi_rtc_init);
406module_exit(efi_rtc_exit);
407
408MODULE_LICENSE("GPL"); 400MODULE_LICENSE("GPL");
401*/
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 5c0baa9ffc64..240b6cf1d97c 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -12,7 +12,6 @@
12 */ 12 */
13 13
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
17#include <linux/types.h> 16#include <linux/types.h>
18#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
@@ -1043,24 +1042,16 @@ static int hpet_acpi_add(struct acpi_device *device)
1043 return hpet_alloc(&data); 1042 return hpet_alloc(&data);
1044} 1043}
1045 1044
1046static int hpet_acpi_remove(struct acpi_device *device)
1047{
1048 /* XXX need to unregister clocksource, dealloc mem, etc */
1049 return -EINVAL;
1050}
1051
1052static const struct acpi_device_id hpet_device_ids[] = { 1045static const struct acpi_device_id hpet_device_ids[] = {
1053 {"PNP0103", 0}, 1046 {"PNP0103", 0},
1054 {"", 0}, 1047 {"", 0},
1055}; 1048};
1056MODULE_DEVICE_TABLE(acpi, hpet_device_ids);
1057 1049
1058static struct acpi_driver hpet_acpi_driver = { 1050static struct acpi_driver hpet_acpi_driver = {
1059 .name = "hpet", 1051 .name = "hpet",
1060 .ids = hpet_device_ids, 1052 .ids = hpet_device_ids,
1061 .ops = { 1053 .ops = {
1062 .add = hpet_acpi_add, 1054 .add = hpet_acpi_add,
1063 .remove = hpet_acpi_remove,
1064 }, 1055 },
1065}; 1056};
1066 1057
@@ -1086,19 +1077,9 @@ static int __init hpet_init(void)
1086 1077
1087 return 0; 1078 return 0;
1088} 1079}
1080device_initcall(hpet_init);
1089 1081
1090static void __exit hpet_exit(void) 1082/*
1091{
1092 acpi_bus_unregister_driver(&hpet_acpi_driver);
1093
1094 if (sysctl_header)
1095 unregister_sysctl_table(sysctl_header);
1096 misc_deregister(&hpet_misc);
1097
1098 return;
1099}
1100
1101module_init(hpet_init);
1102module_exit(hpet_exit);
1103MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>"); 1083MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
1104MODULE_LICENSE("GPL"); 1084MODULE_LICENSE("GPL");
1085*/
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 877205d22046..90e624662257 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -52,6 +52,7 @@
52#include <linux/kthread.h> 52#include <linux/kthread.h>
53#include <linux/acpi.h> 53#include <linux/acpi.h>
54#include <linux/ctype.h> 54#include <linux/ctype.h>
55#include <linux/time64.h>
55 56
56#define PFX "ipmi_ssif: " 57#define PFX "ipmi_ssif: "
57#define DEVICE_NAME "ipmi_ssif" 58#define DEVICE_NAME "ipmi_ssif"
@@ -1041,12 +1042,12 @@ static void sender(void *send_info,
1041 start_next_msg(ssif_info, flags); 1042 start_next_msg(ssif_info, flags);
1042 1043
1043 if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) { 1044 if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) {
1044 struct timeval t; 1045 struct timespec64 t;
1045 1046
1046 do_gettimeofday(&t); 1047 ktime_get_real_ts64(&t);
1047 pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", 1048 pr_info("**Enqueue %02x %02x: %lld.%6.6ld\n",
1048 msg->data[0], msg->data[1], 1049 msg->data[0], msg->data[1],
1049 (long) t.tv_sec, (long) t.tv_usec); 1050 (long long) t.tv_sec, (long) t.tv_nsec / NSEC_PER_USEC);
1050 } 1051 }
1051} 1052}
1052 1053
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 8a80ead8d316..94006f9c2e43 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -19,7 +19,7 @@
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/poll.h> 21#include <linux/poll.h>
22#include <linux/module.h> 22#include <linux/init.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <asm/sn/io.h> 25#include <asm/sn/io.h>
@@ -461,5 +461,4 @@ scdrv_init(void)
461 } 461 }
462 return 0; 462 return 0;
463} 463}
464 464device_initcall(scdrv_init);
465module_init(scdrv_init);
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 74d9db05a5ad..068e920ecb68 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -193,8 +193,16 @@ static void mic_dma_prog_intr(struct mic_dma_chan *ch)
193static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src, 193static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
194 dma_addr_t dst, size_t len) 194 dma_addr_t dst, size_t len)
195{ 195{
196 if (-ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len)) 196 if (len && -ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len)) {
197 return -ENOMEM; 197 return -ENOMEM;
198 } else {
199 /* 3 is the maximum number of status descriptors */
200 int ret = mic_dma_avail_desc_ring_space(ch, 3);
201
202 if (ret < 0)
203 return ret;
204 }
205
198 /* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */ 206 /* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
199 if (flags & DMA_PREP_FENCE) { 207 if (flags & DMA_PREP_FENCE) {
200 mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0, 208 mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
@@ -270,6 +278,33 @@ allocate_tx(struct mic_dma_chan *ch)
270 return tx; 278 return tx;
271} 279}
272 280
281/* Program a status descriptor with dst as address and value to be written */
282static struct dma_async_tx_descriptor *
283mic_dma_prep_status_lock(struct dma_chan *ch, dma_addr_t dst, u64 src_val,
284 unsigned long flags)
285{
286 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
287 int result;
288
289 spin_lock(&mic_ch->prep_lock);
290 result = mic_dma_avail_desc_ring_space(mic_ch, 4);
291 if (result < 0)
292 goto error;
293 mic_dma_prep_status_desc(&mic_ch->desc_ring[mic_ch->head], src_val, dst,
294 false);
295 mic_dma_hw_ring_inc_head(mic_ch);
296 result = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
297 if (result < 0)
298 goto error;
299
300 return allocate_tx(mic_ch);
301error:
302 dev_err(mic_dma_ch_to_device(mic_ch),
303 "Error enqueueing dma status descriptor, error=%d\n", result);
304 spin_unlock(&mic_ch->prep_lock);
305 return NULL;
306}
307
273/* 308/*
274 * Prepare a memcpy descriptor to be added to the ring. 309 * Prepare a memcpy descriptor to be added to the ring.
275 * Note that the temporary descriptor adds an extra overhead of copying the 310 * Note that the temporary descriptor adds an extra overhead of copying the
@@ -587,6 +622,8 @@ static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
587 mic_dma_free_chan_resources; 622 mic_dma_free_chan_resources;
588 mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status; 623 mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
589 mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock; 624 mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
625 mic_dma_dev->dma_dev.device_prep_dma_imm_data =
626 mic_dma_prep_status_lock;
590 mic_dma_dev->dma_dev.device_prep_dma_interrupt = 627 mic_dma_dev->dma_dev.device_prep_dma_interrupt =
591 mic_dma_prep_interrupt_lock; 628 mic_dma_prep_interrupt_lock;
592 mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending; 629 mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 4b9f09cc38d8..e4890dd4fefd 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * extcon-arizona.c - Extcon driver Wolfson Arizona devices 2 * extcon-arizona.c - Extcon driver Wolfson Arizona devices
3 * 3 *
4 * Copyright (C) 2012 Wolfson Microelectronics plc 4 * Copyright (C) 2012-2014 Wolfson Microelectronics plc
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -43,11 +43,18 @@
43#define ARIZONA_MICD_CLAMP_MODE_JDL_GP5H 0x9 43#define ARIZONA_MICD_CLAMP_MODE_JDL_GP5H 0x9
44#define ARIZONA_MICD_CLAMP_MODE_JDH_GP5H 0xb 44#define ARIZONA_MICD_CLAMP_MODE_JDH_GP5H 0xb
45 45
46#define ARIZONA_TST_CAP_DEFAULT 0x3
47#define ARIZONA_TST_CAP_CLAMP 0x1
48
46#define ARIZONA_HPDET_MAX 10000 49#define ARIZONA_HPDET_MAX 10000
47 50
48#define HPDET_DEBOUNCE 500 51#define HPDET_DEBOUNCE 500
49#define DEFAULT_MICD_TIMEOUT 2000 52#define DEFAULT_MICD_TIMEOUT 2000
50 53
54#define QUICK_HEADPHONE_MAX_OHM 3
55#define MICROPHONE_MIN_OHM 1257
56#define MICROPHONE_MAX_OHM 30000
57
51#define MICD_DBTIME_TWO_READINGS 2 58#define MICD_DBTIME_TWO_READINGS 2
52#define MICD_DBTIME_FOUR_READINGS 4 59#define MICD_DBTIME_FOUR_READINGS 4
53 60
@@ -117,19 +124,22 @@ static const struct arizona_micd_range micd_default_ranges[] = {
117 { .max = 430, .key = BTN_5 }, 124 { .max = 430, .key = BTN_5 },
118}; 125};
119 126
127/* The number of levels in arizona_micd_levels valid for button thresholds */
128#define ARIZONA_NUM_MICD_BUTTON_LEVELS 64
129
120static const int arizona_micd_levels[] = { 130static const int arizona_micd_levels[] = {
121 3, 6, 8, 11, 13, 16, 18, 21, 23, 26, 28, 31, 34, 36, 39, 41, 44, 46, 131 3, 6, 8, 11, 13, 16, 18, 21, 23, 26, 28, 31, 34, 36, 39, 41, 44, 46,
122 49, 52, 54, 57, 60, 62, 65, 67, 70, 73, 75, 78, 81, 83, 89, 94, 100, 132 49, 52, 54, 57, 60, 62, 65, 67, 70, 73, 75, 78, 81, 83, 89, 94, 100,
123 105, 111, 116, 122, 127, 139, 150, 161, 173, 186, 196, 209, 220, 245, 133 105, 111, 116, 122, 127, 139, 150, 161, 173, 186, 196, 209, 220, 245,
124 270, 295, 321, 348, 375, 402, 430, 489, 550, 614, 681, 752, 903, 1071, 134 270, 295, 321, 348, 375, 402, 430, 489, 550, 614, 681, 752, 903, 1071,
125 1257, 135 1257, 30000,
126}; 136};
127 137
128static const unsigned int arizona_cable[] = { 138static const unsigned int arizona_cable[] = {
129 EXTCON_MECHANICAL, 139 EXTCON_MECHANICAL,
130 EXTCON_MICROPHONE, 140 EXTCON_JACK_MICROPHONE,
131 EXTCON_HEADPHONE, 141 EXTCON_JACK_HEADPHONE,
132 EXTCON_LINE_OUT, 142 EXTCON_JACK_LINE_OUT,
133 EXTCON_NONE, 143 EXTCON_NONE,
134}; 144};
135 145
@@ -140,17 +150,33 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
140{ 150{
141 struct arizona *arizona = info->arizona; 151 struct arizona *arizona = info->arizona;
142 unsigned int mask = 0, val = 0; 152 unsigned int mask = 0, val = 0;
153 unsigned int cap_sel = 0;
143 int ret; 154 int ret;
144 155
145 switch (arizona->type) { 156 switch (arizona->type) {
157 case WM8998:
158 case WM1814:
159 mask = 0;
160 break;
146 case WM5110: 161 case WM5110:
147 case WM8280: 162 case WM8280:
148 mask = ARIZONA_HP1L_SHRTO | ARIZONA_HP1L_FLWR | 163 mask = ARIZONA_HP1L_SHRTO | ARIZONA_HP1L_FLWR |
149 ARIZONA_HP1L_SHRTI; 164 ARIZONA_HP1L_SHRTI;
150 if (clamp) 165 if (clamp) {
151 val = ARIZONA_HP1L_SHRTO; 166 val = ARIZONA_HP1L_SHRTO;
152 else 167 cap_sel = ARIZONA_TST_CAP_CLAMP;
168 } else {
153 val = ARIZONA_HP1L_FLWR | ARIZONA_HP1L_SHRTI; 169 val = ARIZONA_HP1L_FLWR | ARIZONA_HP1L_SHRTI;
170 cap_sel = ARIZONA_TST_CAP_DEFAULT;
171 }
172
173 ret = regmap_update_bits(arizona->regmap,
174 ARIZONA_HP_TEST_CTRL_1,
175 ARIZONA_HP1_TST_CAP_SEL_MASK,
176 cap_sel);
177 if (ret != 0)
178 dev_warn(arizona->dev,
179 "Failed to set TST_CAP_SEL: %d\n", ret);
154 break; 180 break;
155 default: 181 default:
156 mask = ARIZONA_RMV_SHRT_HP1L; 182 mask = ARIZONA_RMV_SHRT_HP1L;
@@ -175,17 +201,19 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
175 ret); 201 ret);
176 } 202 }
177 203
178 ret = regmap_update_bits(arizona->regmap, ARIZONA_HP_CTRL_1L, 204 if (mask) {
179 mask, val); 205 ret = regmap_update_bits(arizona->regmap, ARIZONA_HP_CTRL_1L,
180 if (ret != 0) 206 mask, val);
181 dev_warn(arizona->dev, "Failed to do clamp: %d\n", 207 if (ret != 0)
208 dev_warn(arizona->dev, "Failed to do clamp: %d\n",
182 ret); 209 ret);
183 210
184 ret = regmap_update_bits(arizona->regmap, ARIZONA_HP_CTRL_1R, 211 ret = regmap_update_bits(arizona->regmap, ARIZONA_HP_CTRL_1R,
185 mask, val); 212 mask, val);
186 if (ret != 0) 213 if (ret != 0)
187 dev_warn(arizona->dev, "Failed to do clamp: %d\n", 214 dev_warn(arizona->dev, "Failed to do clamp: %d\n",
188 ret); 215 ret);
216 }
189 217
190 /* Restore the desired state while not doing the clamp */ 218 /* Restore the desired state while not doing the clamp */
191 if (!clamp) { 219 if (!clamp) {
@@ -270,6 +298,7 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
270 struct arizona *arizona = info->arizona; 298 struct arizona *arizona = info->arizona;
271 bool change; 299 bool change;
272 int ret; 300 int ret;
301 unsigned int mode;
273 302
274 /* Microphone detection can't use idle mode */ 303 /* Microphone detection can't use idle mode */
275 pm_runtime_get(info->dev); 304 pm_runtime_get(info->dev);
@@ -295,9 +324,14 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
295 regmap_write(arizona->regmap, 0x80, 0x0); 324 regmap_write(arizona->regmap, 0x80, 0x0);
296 } 325 }
297 326
327 if (info->detecting && arizona->pdata.micd_software_compare)
328 mode = ARIZONA_ACCDET_MODE_ADC;
329 else
330 mode = ARIZONA_ACCDET_MODE_MIC;
331
298 regmap_update_bits(arizona->regmap, 332 regmap_update_bits(arizona->regmap,
299 ARIZONA_ACCESSORY_DETECT_MODE_1, 333 ARIZONA_ACCESSORY_DETECT_MODE_1,
300 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); 334 ARIZONA_ACCDET_MODE_MASK, mode);
301 335
302 arizona_extcon_pulse_micbias(info); 336 arizona_extcon_pulse_micbias(info);
303 337
@@ -443,9 +477,6 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
443 arizona_hpdet_b_ranges[range].factor_a); 477 arizona_hpdet_b_ranges[range].factor_a);
444 break; 478 break;
445 479
446 default:
447 dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n",
448 info->hpdet_ip_version);
449 case 2: 480 case 2:
450 if (!(val & ARIZONA_HP_DONE_B)) { 481 if (!(val & ARIZONA_HP_DONE_B)) {
451 dev_err(arizona->dev, "HPDET did not complete: %x\n", 482 dev_err(arizona->dev, "HPDET did not complete: %x\n",
@@ -482,6 +513,12 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
482 arizona_hpdet_c_ranges[range].min); 513 arizona_hpdet_c_ranges[range].min);
483 val = arizona_hpdet_c_ranges[range].min; 514 val = arizona_hpdet_c_ranges[range].min;
484 } 515 }
516 break;
517
518 default:
519 dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n",
520 info->hpdet_ip_version);
521 return -EINVAL;
485 } 522 }
486 523
487 dev_dbg(arizona->dev, "HP impedance %d ohms\n", val); 524 dev_dbg(arizona->dev, "HP impedance %d ohms\n", val);
@@ -563,7 +600,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
563 struct arizona_extcon_info *info = data; 600 struct arizona_extcon_info *info = data;
564 struct arizona *arizona = info->arizona; 601 struct arizona *arizona = info->arizona;
565 int id_gpio = arizona->pdata.hpdet_id_gpio; 602 int id_gpio = arizona->pdata.hpdet_id_gpio;
566 unsigned int report = EXTCON_HEADPHONE; 603 unsigned int report = EXTCON_JACK_HEADPHONE;
567 int ret, reading; 604 int ret, reading;
568 bool mic = false; 605 bool mic = false;
569 606
@@ -608,9 +645,9 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
608 645
 609 /* Report high impedance cables as line outputs */ 646 /* Report high impedance cables as line outputs */
610 if (reading >= 5000) 647 if (reading >= 5000)
611 report = EXTCON_LINE_OUT; 648 report = EXTCON_JACK_LINE_OUT;
612 else 649 else
613 report = EXTCON_HEADPHONE; 650 report = EXTCON_JACK_HEADPHONE;
614 651
615 ret = extcon_set_cable_state_(info->edev, report, true); 652 ret = extcon_set_cable_state_(info->edev, report, true);
616 if (ret != 0) 653 if (ret != 0)
@@ -695,7 +732,7 @@ err:
695 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); 732 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
696 733
697 /* Just report headphone */ 734 /* Just report headphone */
698 ret = extcon_set_cable_state_(info->edev, EXTCON_HEADPHONE, true); 735 ret = extcon_set_cable_state_(info->edev, EXTCON_JACK_HEADPHONE, true);
699 if (ret != 0) 736 if (ret != 0)
700 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret); 737 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
701 738
@@ -752,7 +789,7 @@ err:
752 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); 789 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
753 790
754 /* Just report headphone */ 791 /* Just report headphone */
755 ret = extcon_set_cable_state_(info->edev, EXTCON_HEADPHONE, true); 792 ret = extcon_set_cable_state_(info->edev, EXTCON_JACK_HEADPHONE, true);
756 if (ret != 0) 793 if (ret != 0)
757 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret); 794 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
758 795
@@ -804,6 +841,37 @@ static void arizona_micd_detect(struct work_struct *work)
804 return; 841 return;
805 } 842 }
806 843
844 if (info->detecting && arizona->pdata.micd_software_compare) {
845 /* Must disable MICD before we read the ADCVAL */
846 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
847 ARIZONA_MICD_ENA, 0);
848 ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_4, &val);
849 if (ret != 0) {
850 dev_err(arizona->dev,
851 "Failed to read MICDET_ADCVAL: %d\n",
852 ret);
853 mutex_unlock(&info->lock);
854 return;
855 }
856
857 dev_dbg(arizona->dev, "MICDET_ADCVAL: %x\n", val);
858
859 val &= ARIZONA_MICDET_ADCVAL_MASK;
860 if (val < ARRAY_SIZE(arizona_micd_levels))
861 val = arizona_micd_levels[val];
862 else
863 val = INT_MAX;
864
865 if (val <= QUICK_HEADPHONE_MAX_OHM)
866 val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_0;
867 else if (val <= MICROPHONE_MIN_OHM)
868 val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_1;
869 else if (val <= MICROPHONE_MAX_OHM)
870 val = ARIZONA_MICD_STS | ARIZONA_MICD_LVL_8;
871 else
872 val = ARIZONA_MICD_LVL_8;
873 }
874
807 for (i = 0; i < 10 && !(val & MICD_LVL_0_TO_8); i++) { 875 for (i = 0; i < 10 && !(val & MICD_LVL_0_TO_8); i++) {
808 ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val); 876 ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
809 if (ret != 0) { 877 if (ret != 0) {
@@ -847,7 +915,7 @@ static void arizona_micd_detect(struct work_struct *work)
847 arizona_identify_headphone(info); 915 arizona_identify_headphone(info);
848 916
849 ret = extcon_set_cable_state_(info->edev, 917 ret = extcon_set_cable_state_(info->edev,
850 EXTCON_MICROPHONE, true); 918 EXTCON_JACK_MICROPHONE, true);
851 if (ret != 0) 919 if (ret != 0)
852 dev_err(arizona->dev, "Headset report failed: %d\n", 920 dev_err(arizona->dev, "Headset report failed: %d\n",
853 ret); 921 ret);
@@ -932,10 +1000,17 @@ static void arizona_micd_detect(struct work_struct *work)
932 } 1000 }
933 1001
934handled: 1002handled:
935 if (info->detecting) 1003 if (info->detecting) {
1004 if (arizona->pdata.micd_software_compare)
1005 regmap_update_bits(arizona->regmap,
1006 ARIZONA_MIC_DETECT_1,
1007 ARIZONA_MICD_ENA,
1008 ARIZONA_MICD_ENA);
1009
936 queue_delayed_work(system_power_efficient_wq, 1010 queue_delayed_work(system_power_efficient_wq,
937 &info->micd_timeout_work, 1011 &info->micd_timeout_work,
938 msecs_to_jiffies(info->micd_timeout)); 1012 msecs_to_jiffies(info->micd_timeout));
1013 }
939 1014
940 pm_runtime_mark_last_busy(info->dev); 1015 pm_runtime_mark_last_busy(info->dev);
941 mutex_unlock(&info->lock); 1016 mutex_unlock(&info->lock);
@@ -991,12 +1066,9 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
991 1066
992 mutex_lock(&info->lock); 1067 mutex_lock(&info->lock);
993 1068
994 if (arizona->pdata.jd_gpio5) { 1069 if (info->micd_clamp) {
995 mask = ARIZONA_MICD_CLAMP_STS; 1070 mask = ARIZONA_MICD_CLAMP_STS;
996 if (arizona->pdata.jd_invert) 1071 present = 0;
997 present = ARIZONA_MICD_CLAMP_STS;
998 else
999 present = 0;
1000 } else { 1072 } else {
1001 mask = ARIZONA_JD1_STS; 1073 mask = ARIZONA_JD1_STS;
1002 if (arizona->pdata.jd_invert) 1074 if (arizona->pdata.jd_invert)
@@ -1055,9 +1127,11 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
1055 msecs_to_jiffies(HPDET_DEBOUNCE)); 1127 msecs_to_jiffies(HPDET_DEBOUNCE));
1056 } 1128 }
1057 1129
1058 regmap_update_bits(arizona->regmap, 1130 if (info->micd_clamp || !arizona->pdata.jd_invert)
1059 ARIZONA_JACK_DETECT_DEBOUNCE, 1131 regmap_update_bits(arizona->regmap,
1060 ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB, 0); 1132 ARIZONA_JACK_DETECT_DEBOUNCE,
1133 ARIZONA_MICD_CLAMP_DB |
1134 ARIZONA_JD1_DB, 0);
1061 } else { 1135 } else {
1062 dev_dbg(arizona->dev, "Detected jack removal\n"); 1136 dev_dbg(arizona->dev, "Detected jack removal\n");
1063 1137
@@ -1224,6 +1298,11 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1224 break; 1298 break;
1225 } 1299 }
1226 break; 1300 break;
1301 case WM8998:
1302 case WM1814:
1303 info->micd_clamp = true;
1304 info->hpdet_ip_version = 2;
1305 break;
1227 default: 1306 default:
1228 break; 1307 break;
1229 } 1308 }
@@ -1259,6 +1338,10 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1259 info->micd_num_modes = ARRAY_SIZE(micd_default_modes); 1338 info->micd_num_modes = ARRAY_SIZE(micd_default_modes);
1260 } 1339 }
1261 1340
1341 if (arizona->pdata.gpsw > 0)
1342 regmap_update_bits(arizona->regmap, ARIZONA_GP_SWITCH_1,
1343 ARIZONA_SW1_MODE_MASK, arizona->pdata.gpsw);
1344
1262 if (arizona->pdata.micd_pol_gpio > 0) { 1345 if (arizona->pdata.micd_pol_gpio > 0) {
1263 if (info->micd_modes[0].gpio) 1346 if (info->micd_modes[0].gpio)
1264 mode = GPIOF_OUT_INIT_HIGH; 1347 mode = GPIOF_OUT_INIT_HIGH;
@@ -1335,7 +1418,8 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1335 break; 1418 break;
1336 } 1419 }
1337 1420
1338 BUILD_BUG_ON(ARRAY_SIZE(arizona_micd_levels) != 0x40); 1421 BUILD_BUG_ON(ARRAY_SIZE(arizona_micd_levels) <
1422 ARIZONA_NUM_MICD_BUTTON_LEVELS);
1339 1423
1340 if (arizona->pdata.num_micd_ranges) { 1424 if (arizona->pdata.num_micd_ranges) {
1341 info->micd_ranges = pdata->micd_ranges; 1425 info->micd_ranges = pdata->micd_ranges;
@@ -1368,11 +1452,11 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1368 1452
1369 /* Set up all the buttons the user specified */ 1453 /* Set up all the buttons the user specified */
1370 for (i = 0; i < info->num_micd_ranges; i++) { 1454 for (i = 0; i < info->num_micd_ranges; i++) {
1371 for (j = 0; j < ARRAY_SIZE(arizona_micd_levels); j++) 1455 for (j = 0; j < ARIZONA_NUM_MICD_BUTTON_LEVELS; j++)
1372 if (arizona_micd_levels[j] >= info->micd_ranges[i].max) 1456 if (arizona_micd_levels[j] >= info->micd_ranges[i].max)
1373 break; 1457 break;
1374 1458
1375 if (j == ARRAY_SIZE(arizona_micd_levels)) { 1459 if (j == ARIZONA_NUM_MICD_BUTTON_LEVELS) {
1376 dev_err(arizona->dev, "Unsupported MICD level %d\n", 1460 dev_err(arizona->dev, "Unsupported MICD level %d\n",
1377 info->micd_ranges[i].max); 1461 info->micd_ranges[i].max);
1378 ret = -EINVAL; 1462 ret = -EINVAL;
@@ -1436,7 +1520,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1436 pm_runtime_idle(&pdev->dev); 1520 pm_runtime_idle(&pdev->dev);
1437 pm_runtime_get_sync(&pdev->dev); 1521 pm_runtime_get_sync(&pdev->dev);
1438 1522
1439 if (arizona->pdata.jd_gpio5) { 1523 if (info->micd_clamp) {
1440 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE; 1524 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
1441 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL; 1525 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
1442 } else { 1526 } else {
@@ -1541,7 +1625,7 @@ static int arizona_extcon_remove(struct platform_device *pdev)
1541 ARIZONA_MICD_CLAMP_CONTROL, 1625 ARIZONA_MICD_CLAMP_CONTROL,
1542 ARIZONA_MICD_CLAMP_MODE_MASK, 0); 1626 ARIZONA_MICD_CLAMP_MODE_MASK, 0);
1543 1627
1544 if (arizona->pdata.jd_gpio5) { 1628 if (info->micd_clamp) {
1545 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE; 1629 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
1546 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL; 1630 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
1547 } else { 1631 } else {
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index ea962bc547b8..fd55c2f2080a 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -102,9 +102,9 @@ enum axp288_extcon_irq {
102}; 102};
103 103
104static const unsigned int axp288_extcon_cables[] = { 104static const unsigned int axp288_extcon_cables[] = {
105 EXTCON_SLOW_CHARGER, 105 EXTCON_CHG_USB_SDP,
106 EXTCON_CHARGE_DOWNSTREAM, 106 EXTCON_CHG_USB_CDP,
107 EXTCON_FAST_CHARGER, 107 EXTCON_CHG_USB_DCP,
108 EXTCON_NONE, 108 EXTCON_NONE,
109}; 109};
110 110
@@ -192,18 +192,18 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
 192 dev_dbg(info->dev, "sdp cable is connected\n"); 192 dev_dbg(info->dev, "sdp cable is connected\n");
193 notify_otg = true; 193 notify_otg = true;
194 notify_charger = true; 194 notify_charger = true;
195 cable = EXTCON_SLOW_CHARGER; 195 cable = EXTCON_CHG_USB_SDP;
196 break; 196 break;
197 case DET_STAT_CDP: 197 case DET_STAT_CDP:
 198 dev_dbg(info->dev, "cdp cable is connected\n"); 198 dev_dbg(info->dev, "cdp cable is connected\n");
199 notify_otg = true; 199 notify_otg = true;
200 notify_charger = true; 200 notify_charger = true;
201 cable = EXTCON_CHARGE_DOWNSTREAM; 201 cable = EXTCON_CHG_USB_CDP;
202 break; 202 break;
203 case DET_STAT_DCP: 203 case DET_STAT_DCP:
 204 dev_dbg(info->dev, "dcp cable is connected\n"); 204 dev_dbg(info->dev, "dcp cable is connected\n");
205 notify_charger = true; 205 notify_charger = true;
206 cable = EXTCON_FAST_CHARGER; 206 cable = EXTCON_CHG_USB_DCP;
207 break; 207 break;
208 default: 208 default:
209 dev_warn(info->dev, 209 dev_warn(info->dev,
@@ -309,7 +309,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
309 } 309 }
310 310
311 /* Get otg transceiver phy */ 311 /* Get otg transceiver phy */
312 info->otg = usb_get_phy(USB_PHY_TYPE_USB2); 312 info->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
313 if (IS_ERR(info->otg)) { 313 if (IS_ERR(info->otg)) {
314 dev_err(&pdev->dev, "failed to get otg transceiver\n"); 314 dev_err(&pdev->dev, "failed to get otg transceiver\n");
315 return PTR_ERR(info->otg); 315 return PTR_ERR(info->otg);
@@ -318,11 +318,11 @@ static int axp288_extcon_probe(struct platform_device *pdev)
318 /* Set up gpio control for USB Mux */ 318 /* Set up gpio control for USB Mux */
319 if (info->pdata->gpio_mux_cntl) { 319 if (info->pdata->gpio_mux_cntl) {
320 gpio = desc_to_gpio(info->pdata->gpio_mux_cntl); 320 gpio = desc_to_gpio(info->pdata->gpio_mux_cntl);
321 ret = gpio_request(gpio, "USB_MUX"); 321 ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
322 if (ret < 0) { 322 if (ret < 0) {
323 dev_err(&pdev->dev, 323 dev_err(&pdev->dev,
324 "failed to request the gpio=%d\n", gpio); 324 "failed to request the gpio=%d\n", gpio);
325 goto gpio_req_failed; 325 return ret;
326 } 326 }
327 gpiod_direction_output(info->pdata->gpio_mux_cntl, 327 gpiod_direction_output(info->pdata->gpio_mux_cntl,
328 EXTCON_GPIO_MUX_SEL_PMIC); 328 EXTCON_GPIO_MUX_SEL_PMIC);
@@ -335,7 +335,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
335 dev_err(&pdev->dev, 335 dev_err(&pdev->dev,
336 "failed to get virtual interrupt=%d\n", pirq); 336 "failed to get virtual interrupt=%d\n", pirq);
337 ret = info->irq[i]; 337 ret = info->irq[i];
338 goto gpio_req_failed; 338 return ret;
339 } 339 }
340 340
341 ret = devm_request_threaded_irq(&pdev->dev, info->irq[i], 341 ret = devm_request_threaded_irq(&pdev->dev, info->irq[i],
@@ -345,7 +345,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
345 if (ret) { 345 if (ret) {
346 dev_err(&pdev->dev, "failed to request interrupt=%d\n", 346 dev_err(&pdev->dev, "failed to request interrupt=%d\n",
347 info->irq[i]); 347 info->irq[i]);
348 goto gpio_req_failed; 348 return ret;
349 } 349 }
350 } 350 }
351 351
@@ -353,23 +353,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
353 axp288_extcon_enable_irq(info); 353 axp288_extcon_enable_irq(info);
354 354
355 return 0; 355 return 0;
356
357gpio_req_failed:
358 usb_put_phy(info->otg);
359 return ret;
360}
361
362static int axp288_extcon_remove(struct platform_device *pdev)
363{
364 struct axp288_extcon_info *info = platform_get_drvdata(pdev);
365
366 usb_put_phy(info->otg);
367 return 0;
368} 356}
369 357
370static struct platform_driver axp288_extcon_driver = { 358static struct platform_driver axp288_extcon_driver = {
371 .probe = axp288_extcon_probe, 359 .probe = axp288_extcon_probe,
372 .remove = axp288_extcon_remove,
373 .driver = { 360 .driver = {
374 .name = "axp288_extcon", 361 .name = "axp288_extcon",
375 }, 362 },
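The conversion above to devm_usb_get_phy() and devm_gpio_request() is what lets the goto-based unwind and the whole remove() callback disappear: the driver core releases managed resources automatically, in reverse order of acquisition, when probe fails or the device is unbound. A minimal sketch of the pattern, with a hypothetical driver name:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>

/* Hypothetical probe illustrating the devm pattern used by the axp288
 * change: a managed get needs no matching put on any exit path.
 */
static int example_probe(struct platform_device *pdev)
{
	struct usb_phy *phy;

	phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* core cleans up; no usb_put_phy() */

	/* further devm_* acquisitions may simply return on error too */
	return 0;
}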
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 57c24fa52edb..279ff8f6637d 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -1,7 +1,5 @@
1/* 1/*
2 * drivers/extcon/extcon_gpio.c 2 * extcon_gpio.c - Single-state GPIO extcon driver based on extcon class
3 *
4 * Single-state GPIO extcon driver based on extcon class
5 * 3 *
6 * Copyright (C) 2008 Google, Inc. 4 * Copyright (C) 2008 Google, Inc.
7 * Author: Mike Lockwood <lockwood@android.com> 5 * Author: Mike Lockwood <lockwood@android.com>
@@ -17,12 +15,12 @@
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 17 * GNU General Public License for more details.
20 * 18 */
21*/
22 19
23#include <linux/extcon.h> 20#include <linux/extcon.h>
24#include <linux/extcon/extcon-gpio.h> 21#include <linux/extcon/extcon-gpio.h>
25#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/gpio/consumer.h>
26#include <linux/init.h> 24#include <linux/init.h>
27#include <linux/interrupt.h> 25#include <linux/interrupt.h>
28#include <linux/kernel.h> 26#include <linux/kernel.h>
@@ -33,14 +31,12 @@
33 31
34struct gpio_extcon_data { 32struct gpio_extcon_data {
35 struct extcon_dev *edev; 33 struct extcon_dev *edev;
36 unsigned gpio;
37 bool gpio_active_low;
38 const char *state_on;
39 const char *state_off;
40 int irq; 34 int irq;
41 struct delayed_work work; 35 struct delayed_work work;
42 unsigned long debounce_jiffies; 36 unsigned long debounce_jiffies;
43 bool check_on_resume; 37
38 struct gpio_desc *id_gpiod;
39 struct gpio_extcon_pdata *pdata;
44}; 40};
45 41
46static void gpio_extcon_work(struct work_struct *work) 42static void gpio_extcon_work(struct work_struct *work)
@@ -50,93 +46,107 @@ static void gpio_extcon_work(struct work_struct *work)
50 container_of(to_delayed_work(work), struct gpio_extcon_data, 46 container_of(to_delayed_work(work), struct gpio_extcon_data,
51 work); 47 work);
52 48
53 state = gpio_get_value(data->gpio); 49 state = gpiod_get_value_cansleep(data->id_gpiod);
54 if (data->gpio_active_low) 50 if (data->pdata->gpio_active_low)
55 state = !state; 51 state = !state;
56 extcon_set_state(data->edev, state); 52 extcon_set_state(data->edev, state);
57} 53}
58 54
59static irqreturn_t gpio_irq_handler(int irq, void *dev_id) 55static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
60{ 56{
61 struct gpio_extcon_data *extcon_data = dev_id; 57 struct gpio_extcon_data *data = dev_id;
62 58
63 queue_delayed_work(system_power_efficient_wq, &extcon_data->work, 59 queue_delayed_work(system_power_efficient_wq, &data->work,
64 extcon_data->debounce_jiffies); 60 data->debounce_jiffies);
65 return IRQ_HANDLED; 61 return IRQ_HANDLED;
66} 62}
67 63
64static int gpio_extcon_init(struct device *dev, struct gpio_extcon_data *data)
65{
66 struct gpio_extcon_pdata *pdata = data->pdata;
67 int ret;
68
69 ret = devm_gpio_request_one(dev, pdata->gpio, GPIOF_DIR_IN,
70 dev_name(dev));
71 if (ret < 0)
72 return ret;
73
74 data->id_gpiod = gpio_to_desc(pdata->gpio);
75 if (!data->id_gpiod)
76 return -EINVAL;
77
78 if (pdata->debounce) {
79 ret = gpiod_set_debounce(data->id_gpiod,
80 pdata->debounce * 1000);
81 if (ret < 0)
82 data->debounce_jiffies =
83 msecs_to_jiffies(pdata->debounce);
84 }
85
86 data->irq = gpiod_to_irq(data->id_gpiod);
87 if (data->irq < 0)
88 return data->irq;
89
90 return 0;
91}
92
68static int gpio_extcon_probe(struct platform_device *pdev) 93static int gpio_extcon_probe(struct platform_device *pdev)
69{ 94{
70 struct gpio_extcon_platform_data *pdata = dev_get_platdata(&pdev->dev); 95 struct gpio_extcon_pdata *pdata = dev_get_platdata(&pdev->dev);
71 struct gpio_extcon_data *extcon_data; 96 struct gpio_extcon_data *data;
72 int ret; 97 int ret;
73 98
74 if (!pdata) 99 if (!pdata)
75 return -EBUSY; 100 return -EBUSY;
 76	if (!pdata->irq_flags) {	101	if (!pdata->irq_flags || pdata->extcon_id <= EXTCON_NONE)
77 dev_err(&pdev->dev, "IRQ flag is not specified.\n");
78 return -EINVAL; 102 return -EINVAL;
79 }
80 103
81 extcon_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_extcon_data), 104 data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_extcon_data),
82 GFP_KERNEL); 105 GFP_KERNEL);
83 if (!extcon_data) 106 if (!data)
84 return -ENOMEM; 107 return -ENOMEM;
108 data->pdata = pdata;
85 109
86 extcon_data->edev = devm_extcon_dev_allocate(&pdev->dev, NULL); 110 /* Initialize the gpio */
87 if (IS_ERR(extcon_data->edev)) { 111 ret = gpio_extcon_init(&pdev->dev, data);
88 dev_err(&pdev->dev, "failed to allocate extcon device\n");
89 return -ENOMEM;
90 }
91
92 extcon_data->gpio = pdata->gpio;
93 extcon_data->gpio_active_low = pdata->gpio_active_low;
94 extcon_data->state_on = pdata->state_on;
95 extcon_data->state_off = pdata->state_off;
96 extcon_data->check_on_resume = pdata->check_on_resume;
97
98 ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
99 pdev->name);
100 if (ret < 0) 112 if (ret < 0)
101 return ret; 113 return ret;
102 114
103	if (pdata->debounce) {	115	/* Allocate the memory of extcon device and register extcon device */
104 ret = gpio_set_debounce(extcon_data->gpio, 116 data->edev = devm_extcon_dev_allocate(&pdev->dev, &pdata->extcon_id);
105 pdata->debounce * 1000); 117 if (IS_ERR(data->edev)) {
106 if (ret < 0) 118 dev_err(&pdev->dev, "failed to allocate extcon device\n");
107 extcon_data->debounce_jiffies = 119 return -ENOMEM;
108 msecs_to_jiffies(pdata->debounce);
109 } 120 }
110 121
111 ret = devm_extcon_dev_register(&pdev->dev, extcon_data->edev); 122 ret = devm_extcon_dev_register(&pdev->dev, data->edev);
112 if (ret < 0) 123 if (ret < 0)
113 return ret; 124 return ret;
114 125
115 INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work); 126 INIT_DELAYED_WORK(&data->work, gpio_extcon_work);
116
117 extcon_data->irq = gpio_to_irq(extcon_data->gpio);
118 if (extcon_data->irq < 0)
119 return extcon_data->irq;
120 127
121 ret = request_any_context_irq(extcon_data->irq, gpio_irq_handler, 128 /*
122	pdata->irq_flags, pdev->name,	129	* Request the interrupt of gpio to detect whether external connector
123 extcon_data); 130 * is attached or detached.
131 */
132 ret = devm_request_any_context_irq(&pdev->dev, data->irq,
133 gpio_irq_handler, pdata->irq_flags,
134 pdev->name, data);
124 if (ret < 0) 135 if (ret < 0)
125 return ret; 136 return ret;
126 137
127 platform_set_drvdata(pdev, extcon_data); 138 platform_set_drvdata(pdev, data);
128 /* Perform initial detection */ 139 /* Perform initial detection */
129 gpio_extcon_work(&extcon_data->work.work); 140 gpio_extcon_work(&data->work.work);
130 141
131 return 0; 142 return 0;
132} 143}
133 144
134static int gpio_extcon_remove(struct platform_device *pdev) 145static int gpio_extcon_remove(struct platform_device *pdev)
135{ 146{
136 struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev); 147 struct gpio_extcon_data *data = platform_get_drvdata(pdev);
137 148
138 cancel_delayed_work_sync(&extcon_data->work); 149 cancel_delayed_work_sync(&data->work);
139 free_irq(extcon_data->irq, extcon_data);
140 150
141 return 0; 151 return 0;
142} 152}
@@ -144,12 +154,12 @@ static int gpio_extcon_remove(struct platform_device *pdev)
144#ifdef CONFIG_PM_SLEEP 154#ifdef CONFIG_PM_SLEEP
145static int gpio_extcon_resume(struct device *dev) 155static int gpio_extcon_resume(struct device *dev)
146{ 156{
147 struct gpio_extcon_data *extcon_data; 157 struct gpio_extcon_data *data;
148 158
149 extcon_data = dev_get_drvdata(dev); 159 data = dev_get_drvdata(dev);
150 if (extcon_data->check_on_resume) 160 if (data->pdata->check_on_resume)
151 queue_delayed_work(system_power_efficient_wq, 161 queue_delayed_work(system_power_efficient_wq,
152 &extcon_data->work, extcon_data->debounce_jiffies); 162 &data->work, data->debounce_jiffies);
153 163
154 return 0; 164 return 0;
155} 165}
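After the rewrite, everything board-specific lives in struct gpio_extcon_pdata and is reached through data->pdata. A sketch of the platform data a board file might supply, assuming the struct layout matches the members referenced above (declared in include/linux/extcon/extcon-gpio.h); the GPIO number and debounce value are illustrative only:

#include <linux/extcon.h>
#include <linux/extcon/extcon-gpio.h>
#include <linux/interrupt.h>

static struct gpio_extcon_pdata example_usb_detect_pdata = {
	.extcon_id	 = EXTCON_USB,
	.gpio		 = 42,		/* made-up detection pin */
	.gpio_active_low = false,
	.debounce	 = 20,		/* ms; tried in hardware first via
					 * gpiod_set_debounce(), else jiffies */
	.irq_flags	 = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
	.check_on_resume = true,	/* re-run detection after resume */
};

With check_on_resume set, the resume hook requeues gpio_extcon_work() so a cable change that happened during suspend is not missed.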
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index df0659d98e5a..601dbd996487 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -150,10 +150,10 @@ enum max14577_muic_acc_type {
150 150
151static const unsigned int max14577_extcon_cable[] = { 151static const unsigned int max14577_extcon_cable[] = {
152 EXTCON_USB, 152 EXTCON_USB,
153 EXTCON_TA, 153 EXTCON_CHG_USB_DCP,
154 EXTCON_FAST_CHARGER, 154 EXTCON_CHG_USB_FAST,
155 EXTCON_SLOW_CHARGER, 155 EXTCON_CHG_USB_SLOW,
156 EXTCON_CHARGE_DOWNSTREAM, 156 EXTCON_CHG_USB_CDP,
157 EXTCON_JIG, 157 EXTCON_JIG,
158 EXTCON_NONE, 158 EXTCON_NONE,
159}; 159};
@@ -456,18 +456,19 @@ static int max14577_muic_chg_handler(struct max14577_muic_info *info)
456 extcon_set_cable_state_(info->edev, EXTCON_USB, attached); 456 extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
457 break; 457 break;
458 case MAX14577_CHARGER_TYPE_DEDICATED_CHG: 458 case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
459 extcon_set_cable_state_(info->edev, EXTCON_TA, attached); 459 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
460 attached);
460 break; 461 break;
461 case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT: 462 case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT:
462 extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM, 463 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_CDP,
463 attached); 464 attached);
464 break; 465 break;
465 case MAX14577_CHARGER_TYPE_SPECIAL_500MA: 466 case MAX14577_CHARGER_TYPE_SPECIAL_500MA:
466 extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER, 467 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SLOW,
467 attached); 468 attached);
468 break; 469 break;
469 case MAX14577_CHARGER_TYPE_SPECIAL_1A: 470 case MAX14577_CHARGER_TYPE_SPECIAL_1A:
470 extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER, 471 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_FAST,
471 attached); 472 attached);
472 break; 473 break;
473 case MAX14577_CHARGER_TYPE_NONE: 474 case MAX14577_CHARGER_TYPE_NONE:
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 35b9e118b2fb..44c499e1beee 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -204,11 +204,11 @@ enum max77693_muic_acc_type {
204static const unsigned int max77693_extcon_cable[] = { 204static const unsigned int max77693_extcon_cable[] = {
205 EXTCON_USB, 205 EXTCON_USB,
206 EXTCON_USB_HOST, 206 EXTCON_USB_HOST,
207 EXTCON_TA, 207 EXTCON_CHG_USB_DCP,
208 EXTCON_FAST_CHARGER, 208 EXTCON_CHG_USB_FAST,
209 EXTCON_SLOW_CHARGER, 209 EXTCON_CHG_USB_SLOW,
210 EXTCON_CHARGE_DOWNSTREAM, 210 EXTCON_CHG_USB_CDP,
211 EXTCON_MHL, 211 EXTCON_DISP_MHL,
212 EXTCON_JIG, 212 EXTCON_JIG,
213 EXTCON_DOCK, 213 EXTCON_DOCK,
214 EXTCON_NONE, 214 EXTCON_NONE,
@@ -505,7 +505,7 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
505 return ret; 505 return ret;
506 506
507 extcon_set_cable_state_(info->edev, EXTCON_DOCK, attached); 507 extcon_set_cable_state_(info->edev, EXTCON_DOCK, attached);
508 extcon_set_cable_state_(info->edev, EXTCON_MHL, attached); 508 extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL, attached);
509 goto out; 509 goto out;
510 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */ 510 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
511 dock_id = EXTCON_DOCK; 511 dock_id = EXTCON_DOCK;
@@ -605,7 +605,7 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
605 case MAX77693_MUIC_GND_MHL: 605 case MAX77693_MUIC_GND_MHL:
606 case MAX77693_MUIC_GND_MHL_VB: 606 case MAX77693_MUIC_GND_MHL_VB:
607 /* MHL or MHL with USB/TA cable */ 607 /* MHL or MHL with USB/TA cable */
608 extcon_set_cable_state_(info->edev, EXTCON_MHL, attached); 608 extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL, attached);
609 break; 609 break;
610 default: 610 default:
611 dev_err(info->dev, "failed to detect %s cable of gnd type\n", 611 dev_err(info->dev, "failed to detect %s cable of gnd type\n",
@@ -801,10 +801,11 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
801 * - Support charging through micro-usb port without 801 * - Support charging through micro-usb port without
802 * data connection 802 * data connection
803 */ 803 */
804 extcon_set_cable_state_(info->edev, EXTCON_TA, attached); 804 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
805 attached);
805 if (!cable_attached) 806 if (!cable_attached)
806 extcon_set_cable_state_(info->edev, EXTCON_MHL, 807 extcon_set_cable_state_(info->edev,
807 cable_attached); 808 EXTCON_DISP_MHL, cable_attached);
808 break; 809 break;
809 } 810 }
810 811
@@ -862,7 +863,7 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
862 863
863 extcon_set_cable_state_(info->edev, EXTCON_DOCK, 864 extcon_set_cable_state_(info->edev, EXTCON_DOCK,
864 attached); 865 attached);
865 extcon_set_cable_state_(info->edev, EXTCON_MHL, 866 extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL,
866 attached); 867 attached);
867 break; 868 break;
868 } 869 }
@@ -901,20 +902,21 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
901 break; 902 break;
902 case MAX77693_CHARGER_TYPE_DEDICATED_CHG: 903 case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
903 /* Only TA cable */ 904 /* Only TA cable */
904 extcon_set_cable_state_(info->edev, EXTCON_TA, attached); 905 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
906 attached);
905 break; 907 break;
906 } 908 }
907 break; 909 break;
908 case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT: 910 case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
909 extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM, 911 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_CDP,
910 attached); 912 attached);
911 break; 913 break;
912 case MAX77693_CHARGER_TYPE_APPLE_500MA: 914 case MAX77693_CHARGER_TYPE_APPLE_500MA:
913 extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER, 915 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SLOW,
914 attached); 916 attached);
915 break; 917 break;
916 case MAX77693_CHARGER_TYPE_APPLE_1A_2A: 918 case MAX77693_CHARGER_TYPE_APPLE_1A_2A:
917 extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER, 919 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_FAST,
918 attached); 920 attached);
919 break; 921 break;
920 case MAX77693_CHARGER_TYPE_DEAD_BATTERY: 922 case MAX77693_CHARGER_TYPE_DEAD_BATTERY:
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index fdd928542c19..9f9ea334399c 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -122,11 +122,11 @@ enum max77843_muic_charger_type {
122static const unsigned int max77843_extcon_cable[] = { 122static const unsigned int max77843_extcon_cable[] = {
123 EXTCON_USB, 123 EXTCON_USB,
124 EXTCON_USB_HOST, 124 EXTCON_USB_HOST,
125 EXTCON_TA, 125 EXTCON_CHG_USB_DCP,
126 EXTCON_CHARGE_DOWNSTREAM, 126 EXTCON_CHG_USB_CDP,
127 EXTCON_FAST_CHARGER, 127 EXTCON_CHG_USB_FAST,
128 EXTCON_SLOW_CHARGER, 128 EXTCON_CHG_USB_SLOW,
129 EXTCON_MHL, 129 EXTCON_DISP_MHL,
130 EXTCON_JIG, 130 EXTCON_JIG,
131 EXTCON_NONE, 131 EXTCON_NONE,
132}; 132};
@@ -355,7 +355,7 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
355 if (ret < 0) 355 if (ret < 0)
356 return ret; 356 return ret;
357 357
358 extcon_set_cable_state_(info->edev, EXTCON_MHL, attached); 358 extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL, attached);
359 break; 359 break;
360 default: 360 default:
361 dev_err(info->dev, "failed to detect %s accessory(gnd:0x%x)\n", 361 dev_err(info->dev, "failed to detect %s accessory(gnd:0x%x)\n",
@@ -494,7 +494,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
494 if (ret < 0) 494 if (ret < 0)
495 return ret; 495 return ret;
496 496
497 extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM, 497 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_CDP,
498 attached); 498 attached);
499 break; 499 break;
500 case MAX77843_MUIC_CHG_DEDICATED: 500 case MAX77843_MUIC_CHG_DEDICATED:
@@ -504,7 +504,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
504 if (ret < 0) 504 if (ret < 0)
505 return ret; 505 return ret;
506 506
507 extcon_set_cable_state_(info->edev, EXTCON_TA, attached); 507 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
508 attached);
508 break; 509 break;
509 case MAX77843_MUIC_CHG_SPECIAL_500MA: 510 case MAX77843_MUIC_CHG_SPECIAL_500MA:
510 ret = max77843_muic_set_path(info, 511 ret = max77843_muic_set_path(info,
@@ -513,7 +514,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
513 if (ret < 0) 514 if (ret < 0)
514 return ret; 515 return ret;
515 516
516 extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER, 517 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SLOW,
517 attached); 518 attached);
518 break; 519 break;
519 case MAX77843_MUIC_CHG_SPECIAL_1A: 520 case MAX77843_MUIC_CHG_SPECIAL_1A:
@@ -523,7 +524,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
523 if (ret < 0) 524 if (ret < 0)
524 return ret; 525 return ret;
525 526
526 extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER, 527 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_FAST,
527 attached); 528 attached);
528 break; 529 break;
529 case MAX77843_MUIC_CHG_GND: 530 case MAX77843_MUIC_CHG_GND:
@@ -532,9 +533,11 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
532 533
533 /* Charger cable on MHL accessory is attach or detach */ 534 /* Charger cable on MHL accessory is attach or detach */
534 if (gnd_type == MAX77843_MUIC_GND_MHL_VB) 535 if (gnd_type == MAX77843_MUIC_GND_MHL_VB)
535 extcon_set_cable_state_(info->edev, EXTCON_TA, true); 536 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
537 true);
536 else if (gnd_type == MAX77843_MUIC_GND_MHL) 538 else if (gnd_type == MAX77843_MUIC_GND_MHL)
537 extcon_set_cable_state_(info->edev, EXTCON_TA, false); 539 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
540 false);
538 break; 541 break;
539 case MAX77843_MUIC_CHG_NONE: 542 case MAX77843_MUIC_CHG_NONE:
540 break; 543 break;
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 7b1ef200b121..b2b13b3dce14 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -148,11 +148,11 @@ struct max8997_muic_info {
148static const unsigned int max8997_extcon_cable[] = { 148static const unsigned int max8997_extcon_cable[] = {
149 EXTCON_USB, 149 EXTCON_USB,
150 EXTCON_USB_HOST, 150 EXTCON_USB_HOST,
151 EXTCON_TA, 151 EXTCON_CHG_USB_DCP,
152 EXTCON_FAST_CHARGER, 152 EXTCON_CHG_USB_FAST,
153 EXTCON_SLOW_CHARGER, 153 EXTCON_CHG_USB_SLOW,
154 EXTCON_CHARGE_DOWNSTREAM, 154 EXTCON_CHG_USB_CDP,
155 EXTCON_MHL, 155 EXTCON_DISP_MHL,
156 EXTCON_DOCK, 156 EXTCON_DOCK,
157 EXTCON_JIG, 157 EXTCON_JIG,
158 EXTCON_NONE, 158 EXTCON_NONE,
@@ -403,7 +403,7 @@ static int max8997_muic_adc_handler(struct max8997_muic_info *info)
403 return ret; 403 return ret;
404 break; 404 break;
405 case MAX8997_MUIC_ADC_MHL: 405 case MAX8997_MUIC_ADC_MHL:
406 extcon_set_cable_state_(info->edev, EXTCON_MHL, attached); 406 extcon_set_cable_state_(info->edev, EXTCON_DISP_MHL, attached);
407 break; 407 break;
408 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF: 408 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
409 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON: 409 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
@@ -486,18 +486,19 @@ static int max8997_muic_chg_handler(struct max8997_muic_info *info)
486 } 486 }
487 break; 487 break;
488 case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT: 488 case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
489 extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM, 489 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_CDP,
490 attached); 490 attached);
491 break; 491 break;
492 case MAX8997_CHARGER_TYPE_DEDICATED_CHG: 492 case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
493 extcon_set_cable_state_(info->edev, EXTCON_TA, attached); 493 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
494 attached);
494 break; 495 break;
495 case MAX8997_CHARGER_TYPE_500MA: 496 case MAX8997_CHARGER_TYPE_500MA:
496 extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER, 497 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SLOW,
497 attached); 498 attached);
498 break; 499 break;
499 case MAX8997_CHARGER_TYPE_1A: 500 case MAX8997_CHARGER_TYPE_1A:
500 extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER, 501 extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_FAST,
501 attached); 502 attached);
502 break; 503 break;
503 default: 504 default:
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 11592e980bc1..36bf1d63791c 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -93,7 +93,7 @@ static struct reg_data rt8973a_reg_data[] = {
93static const unsigned int rt8973a_extcon_cable[] = { 93static const unsigned int rt8973a_extcon_cable[] = {
94 EXTCON_USB, 94 EXTCON_USB,
95 EXTCON_USB_HOST, 95 EXTCON_USB_HOST,
96 EXTCON_TA, 96 EXTCON_CHG_USB_DCP,
97 EXTCON_JIG, 97 EXTCON_JIG,
98 EXTCON_NONE, 98 EXTCON_NONE,
99}; 99};
@@ -333,7 +333,7 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
333 con_sw = DM_DP_SWITCH_USB; 333 con_sw = DM_DP_SWITCH_USB;
334 break; 334 break;
335 case RT8973A_MUIC_ADC_TA: 335 case RT8973A_MUIC_ADC_TA:
336 id = EXTCON_TA; 336 id = EXTCON_CHG_USB_DCP;
337 con_sw = DM_DP_SWITCH_OPEN; 337 con_sw = DM_DP_SWITCH_OPEN;
338 break; 338 break;
339 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB: 339 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB:
@@ -594,7 +594,7 @@ static int rt8973a_muic_i2c_probe(struct i2c_client *i2c,
594 594
595 for (i = 0; i < info->num_muic_irqs; i++) { 595 for (i = 0; i < info->num_muic_irqs; i++) {
596 struct muic_irq *muic_irq = &info->muic_irqs[i]; 596 struct muic_irq *muic_irq = &info->muic_irqs[i];
597 unsigned int virq = 0; 597 int virq = 0;
598 598
599 virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq); 599 virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq);
600 if (virq <= 0) 600 if (virq <= 0)
@@ -658,6 +658,7 @@ static const struct of_device_id rt8973a_dt_match[] = {
658 { .compatible = "richtek,rt8973a-muic" }, 658 { .compatible = "richtek,rt8973a-muic" },
659 { }, 659 { },
660}; 660};
661MODULE_DEVICE_TABLE(of, rt8973a_dt_match);
661 662
662#ifdef CONFIG_PM_SLEEP 663#ifdef CONFIG_PM_SLEEP
663static int rt8973a_muic_suspend(struct device *dev) 664static int rt8973a_muic_suspend(struct device *dev)
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 0ffefefa2e26..7aac3cc7efd7 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -95,7 +95,7 @@ static struct reg_data sm5502_reg_data[] = {
95static const unsigned int sm5502_extcon_cable[] = { 95static const unsigned int sm5502_extcon_cable[] = {
96 EXTCON_USB, 96 EXTCON_USB,
97 EXTCON_USB_HOST, 97 EXTCON_USB_HOST,
98 EXTCON_TA, 98 EXTCON_CHG_USB_DCP,
99 EXTCON_NONE, 99 EXTCON_NONE,
100}; 100};
101 101
@@ -389,7 +389,7 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
389 vbus_sw = VBUSIN_SWITCH_VBUSOUT_WITH_USB; 389 vbus_sw = VBUSIN_SWITCH_VBUSOUT_WITH_USB;
390 break; 390 break;
391 case SM5502_MUIC_ADC_OPEN_TA: 391 case SM5502_MUIC_ADC_OPEN_TA:
392 id = EXTCON_TA; 392 id = EXTCON_CHG_USB_DCP;
393 con_sw = DM_DP_SWITCH_OPEN; 393 con_sw = DM_DP_SWITCH_OPEN;
394 vbus_sw = VBUSIN_SWITCH_VBUSOUT; 394 vbus_sw = VBUSIN_SWITCH_VBUSOUT;
395 break; 395 break;
@@ -586,7 +586,7 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
586 586
587 for (i = 0; i < info->num_muic_irqs; i++) { 587 for (i = 0; i < info->num_muic_irqs; i++) {
588 struct muic_irq *muic_irq = &info->muic_irqs[i]; 588 struct muic_irq *muic_irq = &info->muic_irqs[i];
589 unsigned int virq = 0; 589 int virq = 0;
590 590
591 virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq); 591 virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq);
592 if (virq <= 0) 592 if (virq <= 0)
@@ -650,6 +650,7 @@ static const struct of_device_id sm5502_dt_match[] = {
650 { .compatible = "siliconmitus,sm5502-muic" }, 650 { .compatible = "siliconmitus,sm5502-muic" },
651 { }, 651 { },
652}; 652};
653MODULE_DEVICE_TABLE(of, sm5502_dt_match);
653 654
654#ifdef CONFIG_PM_SLEEP 655#ifdef CONFIG_PM_SLEEP
655static int sm5502_muic_suspend(struct device *dev) 656static int sm5502_muic_suspend(struct device *dev)
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 8dd0af1d50bc..21a123cadf78 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -39,37 +39,40 @@
39#define CABLE_NAME_MAX 30 39#define CABLE_NAME_MAX 30
40 40
41static const char *extcon_name[] = { 41static const char *extcon_name[] = {
42 [EXTCON_NONE] = "NONE", 42 [EXTCON_NONE] = "NONE",
43 43
44 /* USB external connector */ 44 /* USB external connector */
45 [EXTCON_USB] = "USB", 45 [EXTCON_USB] = "USB",
46 [EXTCON_USB_HOST] = "USB-HOST", 46 [EXTCON_USB_HOST] = "USB-HOST",
47 47
48 /* Charger external connector */ 48 /* Charging external connector */
49 [EXTCON_TA] = "TA", 49 [EXTCON_CHG_USB_SDP] = "SDP",
50 [EXTCON_FAST_CHARGER] = "FAST-CHARGER", 50 [EXTCON_CHG_USB_DCP] = "DCP",
51 [EXTCON_SLOW_CHARGER] = "SLOW-CHARGER", 51 [EXTCON_CHG_USB_CDP] = "CDP",
52 [EXTCON_CHARGE_DOWNSTREAM] = "CHARGE-DOWNSTREAM", 52 [EXTCON_CHG_USB_ACA] = "ACA",
53 53 [EXTCON_CHG_USB_FAST] = "FAST-CHARGER",
54 /* Audio/Video external connector */ 54 [EXTCON_CHG_USB_SLOW] = "SLOW-CHARGER",
55 [EXTCON_LINE_IN] = "LINE-IN", 55
56 [EXTCON_LINE_OUT] = "LINE-OUT", 56 /* Jack external connector */
57 [EXTCON_MICROPHONE] = "MICROPHONE", 57 [EXTCON_JACK_MICROPHONE] = "MICROPHONE",
58 [EXTCON_HEADPHONE] = "HEADPHONE", 58 [EXTCON_JACK_HEADPHONE] = "HEADPHONE",
59 59 [EXTCON_JACK_LINE_IN] = "LINE-IN",
60 [EXTCON_HDMI] = "HDMI", 60 [EXTCON_JACK_LINE_OUT] = "LINE-OUT",
61 [EXTCON_MHL] = "MHL", 61 [EXTCON_JACK_VIDEO_IN] = "VIDEO-IN",
62 [EXTCON_DVI] = "DVI", 62 [EXTCON_JACK_VIDEO_OUT] = "VIDEO-OUT",
63 [EXTCON_VGA] = "VGA", 63 [EXTCON_JACK_SPDIF_IN] = "SPDIF-IN",
64 [EXTCON_SPDIF_IN] = "SPDIF-IN", 64 [EXTCON_JACK_SPDIF_OUT] = "SPDIF-OUT",
65 [EXTCON_SPDIF_OUT] = "SPDIF-OUT", 65
66 [EXTCON_VIDEO_IN] = "VIDEO-IN", 66 /* Display external connector */
67 [EXTCON_VIDEO_OUT] = "VIDEO-OUT", 67 [EXTCON_DISP_HDMI] = "HDMI",
68 68 [EXTCON_DISP_MHL] = "MHL",
69 /* Etc external connector */ 69 [EXTCON_DISP_DVI] = "DVI",
70 [EXTCON_DOCK] = "DOCK", 70 [EXTCON_DISP_VGA] = "VGA",
71 [EXTCON_JIG] = "JIG", 71
72 [EXTCON_MECHANICAL] = "MECHANICAL", 72 /* Miscellaneous external connector */
73 [EXTCON_DOCK] = "DOCK",
74 [EXTCON_JIG] = "JIG",
75 [EXTCON_MECHANICAL] = "MECHANICAL",
73 76
74 NULL, 77 NULL,
75}; 78};
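With the table above, USB charger cables now carry their BC1.2 names (SDP, DCP, CDP, ACA) instead of the old TA/FAST-CHARGER/SLOW-CHARGER ids, so producers and consumers share one vocabulary. A consumer-side sketch under the new naming, assuming the extcon device was obtained elsewhere (for example via extcon_get_edev_by_phandle()):

#include <linux/extcon.h>

/* Sketch: report whether a dedicated charger (DCP) is attached. This is
 * the consumer counterpart of the extcon_set_cable_state_() calls in the
 * MUIC drivers above; a return > 0 means the cable is attached.
 */
static bool example_dcp_attached(struct extcon_dev *edev)
{
	return extcon_get_cable_state_(edev, EXTCON_CHG_USB_DCP) > 0;
}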
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
new file mode 100644
index 000000000000..c9b9fdf6cfbb
--- /dev/null
+++ b/drivers/fpga/Kconfig
@@ -0,0 +1,29 @@
1#
2# FPGA framework configuration
3#
4
5menu "FPGA Configuration Support"
6
7config FPGA
8 tristate "FPGA Configuration Framework"
9 help
10 Say Y here if you want support for configuring FPGAs from the
11 kernel. The FPGA framework adds a FPGA manager class and FPGA
12 manager drivers.
13
14if FPGA
15
16config FPGA_MGR_SOCFPGA
17 tristate "Altera SOCFPGA FPGA Manager"
18 depends on ARCH_SOCFPGA
19 help
20 FPGA manager driver support for Altera SOCFPGA.
21
22config FPGA_MGR_ZYNQ_FPGA
23 tristate "Xilinx Zynq FPGA"
24 help
25 FPGA manager driver support for Xilinx Zynq FPGAs.
26
27endif # FPGA
28
29endmenu
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
new file mode 100644
index 000000000000..8d83fc6b1613
--- /dev/null
+++ b/drivers/fpga/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the fpga framework and fpga manager drivers.
3#
4
5# Core FPGA Manager Framework
6obj-$(CONFIG_FPGA) += fpga-mgr.o
7
8# FPGA Manager Drivers
9obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
10obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
new file mode 100644
index 000000000000..a24f5cb877e0
--- /dev/null
+++ b/drivers/fpga/fpga-mgr.c
@@ -0,0 +1,380 @@
1/*
2 * FPGA Manager Core
3 *
4 * Copyright (C) 2013-2015 Altera Corporation
5 *
6 * With code from the mailing list:
7 * Copyright (C) 2013 Xilinx, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21#include <linux/firmware.h>
22#include <linux/fpga/fpga-mgr.h>
23#include <linux/idr.h>
24#include <linux/module.h>
25#include <linux/of.h>
26#include <linux/mutex.h>
27#include <linux/slab.h>
28
29static DEFINE_IDA(fpga_mgr_ida);
30static struct class *fpga_mgr_class;
31
32/**
33 * fpga_mgr_buf_load - load fpga from image in buffer
34 * @mgr: fpga manager
35 * @flags: flags setting fpga configuration modes
36 * @buf: buffer containing the fpga image
37 * @count: byte count of buf
38 *
39 * Step the low level fpga manager through the device-specific steps of getting
40 * an FPGA ready to be configured, writing the image to it, then doing whatever
41 * post-configuration steps necessary. This code assumes the caller got the
42 * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
43 *
44 * Return: 0 on success, negative error code otherwise.
45 */
46int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
47 size_t count)
48{
49 struct device *dev = &mgr->dev;
50 int ret;
51
52 /*
53 * Call the low level driver's write_init function. This will do the
54 * device-specific things to get the FPGA into the state where it is
55 * ready to receive an FPGA image.
56 */
57 mgr->state = FPGA_MGR_STATE_WRITE_INIT;
58 ret = mgr->mops->write_init(mgr, flags, buf, count);
59 if (ret) {
60 dev_err(dev, "Error preparing FPGA for writing\n");
61 mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
62 return ret;
63 }
64
65 /*
66 * Write the FPGA image to the FPGA.
67 */
68 mgr->state = FPGA_MGR_STATE_WRITE;
69 ret = mgr->mops->write(mgr, buf, count);
70 if (ret) {
71 dev_err(dev, "Error while writing image data to FPGA\n");
72 mgr->state = FPGA_MGR_STATE_WRITE_ERR;
73 return ret;
74 }
75
76 /*
77 * After all the FPGA image has been written, do the device specific
78 * steps to finish and set the FPGA into operating mode.
79 */
80 mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
81 ret = mgr->mops->write_complete(mgr, flags);
82 if (ret) {
83 dev_err(dev, "Error after writing image data to FPGA\n");
84 mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
85 return ret;
86 }
87 mgr->state = FPGA_MGR_STATE_OPERATING;
88
89 return 0;
90}
91EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
92
93/**
94 * fpga_mgr_firmware_load - request firmware and load to fpga
95 * @mgr: fpga manager
96 * @flags: flags setting fpga configuration modes
97 * @image_name: name of image file on the firmware search path
98 *
99 * Request an FPGA image using the firmware class, then write out to the FPGA.
100 * Update the state before each step to provide info on what step failed if
101 * there is a failure. This code assumes the caller got the mgr pointer
102 * from of_fpga_mgr_get() and checked that it is not an error code.
103 *
104 * Return: 0 on success, negative error code otherwise.
105 */
106int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
107 const char *image_name)
108{
109 struct device *dev = &mgr->dev;
110 const struct firmware *fw;
111 int ret;
112
113 dev_info(dev, "writing %s to %s\n", image_name, mgr->name);
114
115 mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ;
116
117 ret = request_firmware(&fw, image_name, dev);
118 if (ret) {
119 mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ_ERR;
120 dev_err(dev, "Error requesting firmware %s\n", image_name);
121 return ret;
122 }
123
124	ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
125
126	/* Release the firmware on success and on error alike, so a
127	 * failed load does not leak it. */
128	release_firmware(fw);
129
130	return ret;
131}
132EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load);
133
134static const char * const state_str[] = {
135 [FPGA_MGR_STATE_UNKNOWN] = "unknown",
136 [FPGA_MGR_STATE_POWER_OFF] = "power off",
137 [FPGA_MGR_STATE_POWER_UP] = "power up",
138 [FPGA_MGR_STATE_RESET] = "reset",
139
140 /* requesting FPGA image from firmware */
141 [FPGA_MGR_STATE_FIRMWARE_REQ] = "firmware request",
142 [FPGA_MGR_STATE_FIRMWARE_REQ_ERR] = "firmware request error",
143
144 /* Preparing FPGA to receive image */
145 [FPGA_MGR_STATE_WRITE_INIT] = "write init",
146 [FPGA_MGR_STATE_WRITE_INIT_ERR] = "write init error",
147
148 /* Writing image to FPGA */
149 [FPGA_MGR_STATE_WRITE] = "write",
150 [FPGA_MGR_STATE_WRITE_ERR] = "write error",
151
152 /* Finishing configuration after image has been written */
153 [FPGA_MGR_STATE_WRITE_COMPLETE] = "write complete",
154 [FPGA_MGR_STATE_WRITE_COMPLETE_ERR] = "write complete error",
155
156 /* FPGA reports to be in normal operating mode */
157 [FPGA_MGR_STATE_OPERATING] = "operating",
158};
159
160static ssize_t name_show(struct device *dev,
161 struct device_attribute *attr, char *buf)
162{
163 struct fpga_manager *mgr = to_fpga_manager(dev);
164
165 return sprintf(buf, "%s\n", mgr->name);
166}
167
168static ssize_t state_show(struct device *dev,
169 struct device_attribute *attr, char *buf)
170{
171 struct fpga_manager *mgr = to_fpga_manager(dev);
172
173 return sprintf(buf, "%s\n", state_str[mgr->state]);
174}
175
176static DEVICE_ATTR_RO(name);
177static DEVICE_ATTR_RO(state);
178
179static struct attribute *fpga_mgr_attrs[] = {
180 &dev_attr_name.attr,
181 &dev_attr_state.attr,
182 NULL,
183};
184ATTRIBUTE_GROUPS(fpga_mgr);
185
186static int fpga_mgr_of_node_match(struct device *dev, const void *data)
187{
188 return dev->of_node == data;
189}
190
191/**
192 * of_fpga_mgr_get - get an exclusive reference to a fpga mgr
193 * @node: device node
194 *
195 * Given a device node, get an exclusive reference to a fpga mgr.
196 *
197 * Return: fpga manager struct or IS_ERR() condition containing error code.
198 */
199struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
200{
201 struct fpga_manager *mgr;
202 struct device *dev;
203 int ret = -ENODEV;
204
205 dev = class_find_device(fpga_mgr_class, NULL, node,
206 fpga_mgr_of_node_match);
207 if (!dev)
208 return ERR_PTR(-ENODEV);
209
210 mgr = to_fpga_manager(dev);
211 if (!mgr)
212 goto err_dev;
213
214 /* Get exclusive use of fpga manager */
215 if (!mutex_trylock(&mgr->ref_mutex)) {
216 ret = -EBUSY;
217 goto err_dev;
218 }
219
220 if (!try_module_get(dev->parent->driver->owner))
221 goto err_ll_mod;
222
223 return mgr;
224
225err_ll_mod:
226 mutex_unlock(&mgr->ref_mutex);
227err_dev:
228 put_device(dev);
229 return ERR_PTR(ret);
230}
231EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
232
233/**
234 * fpga_mgr_put - release a reference to a fpga manager
235 * @mgr: fpga manager structure
236 */
237void fpga_mgr_put(struct fpga_manager *mgr)
238{
239 module_put(mgr->dev.parent->driver->owner);
240 mutex_unlock(&mgr->ref_mutex);
241 put_device(&mgr->dev);
242}
243EXPORT_SYMBOL_GPL(fpga_mgr_put);
244
245/**
246 * fpga_mgr_register - register a low level fpga manager driver
247 * @dev: fpga manager device from pdev
248 * @name: fpga manager name
249 * @mops: pointer to structure of fpga manager ops
250 * @priv: fpga manager private data
251 *
252 * Return: 0 on success, negative error code otherwise.
253 */
254int fpga_mgr_register(struct device *dev, const char *name,
255 const struct fpga_manager_ops *mops,
256 void *priv)
257{
258 struct fpga_manager *mgr;
259 const char *dt_label;
260 int id, ret;
261
262 if (!mops || !mops->write_init || !mops->write ||
263 !mops->write_complete || !mops->state) {
264 dev_err(dev, "Attempt to register without fpga_manager_ops\n");
265 return -EINVAL;
266 }
267
268 if (!name || !strlen(name)) {
269 dev_err(dev, "Attempt to register with no name!\n");
270 return -EINVAL;
271 }
272
273 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
274 if (!mgr)
275 return -ENOMEM;
276
277 id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL);
278 if (id < 0) {
279 ret = id;
280 goto error_kfree;
281 }
282
283 mutex_init(&mgr->ref_mutex);
284
285 mgr->name = name;
286 mgr->mops = mops;
287 mgr->priv = priv;
288
289 /*
290 * Initialize framework state by requesting low level driver read state
291 * from device. FPGA may be in reset mode or may have been programmed
292 * by bootloader or EEPROM.
293 */
294 mgr->state = mgr->mops->state(mgr);
295
296 device_initialize(&mgr->dev);
297 mgr->dev.class = fpga_mgr_class;
298 mgr->dev.parent = dev;
299 mgr->dev.of_node = dev->of_node;
300 mgr->dev.id = id;
301 dev_set_drvdata(dev, mgr);
302
303 dt_label = of_get_property(mgr->dev.of_node, "label", NULL);
304 if (dt_label)
305 ret = dev_set_name(&mgr->dev, "%s", dt_label);
306 else
307 ret = dev_set_name(&mgr->dev, "fpga%d", id);
308
309 ret = device_add(&mgr->dev);
310 if (ret)
311 goto error_device;
312
313 dev_info(&mgr->dev, "%s registered\n", mgr->name);
314
315 return 0;
316
317error_device:
318 ida_simple_remove(&fpga_mgr_ida, id);
319error_kfree:
320 kfree(mgr);
321
322 return ret;
323}
324EXPORT_SYMBOL_GPL(fpga_mgr_register);
325
326/**
327 * fpga_mgr_unregister - unregister a low level fpga manager driver
328 * @dev: fpga manager device from pdev
329 */
330void fpga_mgr_unregister(struct device *dev)
331{
332 struct fpga_manager *mgr = dev_get_drvdata(dev);
333
334 dev_info(&mgr->dev, "%s %s\n", __func__, mgr->name);
335
336 /*
337 * If the low level driver provides a method for putting fpga into
338 * a desired state upon unregister, do it.
339 */
340 if (mgr->mops->fpga_remove)
341 mgr->mops->fpga_remove(mgr);
342
343 device_unregister(&mgr->dev);
344}
345EXPORT_SYMBOL_GPL(fpga_mgr_unregister);
346
347static void fpga_mgr_dev_release(struct device *dev)
348{
349 struct fpga_manager *mgr = to_fpga_manager(dev);
350
351 ida_simple_remove(&fpga_mgr_ida, mgr->dev.id);
352 kfree(mgr);
353}
354
355static int __init fpga_mgr_class_init(void)
356{
357 pr_info("FPGA manager framework\n");
358
359 fpga_mgr_class = class_create(THIS_MODULE, "fpga_manager");
360 if (IS_ERR(fpga_mgr_class))
361 return PTR_ERR(fpga_mgr_class);
362
363 fpga_mgr_class->dev_groups = fpga_mgr_groups;
364 fpga_mgr_class->dev_release = fpga_mgr_dev_release;
365
366 return 0;
367}
368
369static void __exit fpga_mgr_class_exit(void)
370{
371 class_destroy(fpga_mgr_class);
372 ida_destroy(&fpga_mgr_ida);
373}
374
375MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
376MODULE_DESCRIPTION("FPGA manager framework");
377MODULE_LICENSE("GPL v2");
378
379subsys_initcall(fpga_mgr_class_init);
380module_exit(fpga_mgr_class_exit);
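The exported trio of_fpga_mgr_get(), fpga_mgr_firmware_load() and fpga_mgr_put() is the whole in-kernel consumer API added by this file. A sketch of a caller, with a made-up image name; flags of 0 requests a full (non-partial) reconfiguration:

#include <linux/err.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/of.h>

/* Sketch: program an FPGA from an image on the firmware search path.
 * "node" is the manager's device-tree node; the image name is invented.
 */
static int example_program_fpga(struct device_node *node)
{
	struct fpga_manager *mgr;
	int ret;

	mgr = of_fpga_mgr_get(node);	/* exclusive reference, may be -EBUSY */
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	ret = fpga_mgr_firmware_load(mgr, 0, "example-image.rbf");

	fpga_mgr_put(mgr);		/* drop the exclusive reference */
	return ret;
}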
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
new file mode 100644
index 000000000000..27d2ff28132c
--- /dev/null
+++ b/drivers/fpga/socfpga.c
@@ -0,0 +1,616 @@
1/*
2 * FPGA Manager Driver for Altera SOCFPGA
3 *
4 * Copyright (C) 2013-2015 Altera Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/fpga/fpga-mgr.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/of_address.h>
25#include <linux/of_irq.h>
26#include <linux/pm.h>
27
28/* Register offsets */
29#define SOCFPGA_FPGMGR_STAT_OFST 0x0
30#define SOCFPGA_FPGMGR_CTL_OFST 0x4
31#define SOCFPGA_FPGMGR_DCLKCNT_OFST 0x8
32#define SOCFPGA_FPGMGR_DCLKSTAT_OFST 0xc
33#define SOCFPGA_FPGMGR_GPIO_INTEN_OFST 0x830
34#define SOCFPGA_FPGMGR_GPIO_INTMSK_OFST 0x834
35#define SOCFPGA_FPGMGR_GPIO_INTTYPE_LEVEL_OFST 0x838
36#define SOCFPGA_FPGMGR_GPIO_INT_POL_OFST 0x83c
37#define SOCFPGA_FPGMGR_GPIO_INTSTAT_OFST 0x840
38#define SOCFPGA_FPGMGR_GPIO_RAW_INTSTAT_OFST 0x844
39#define SOCFPGA_FPGMGR_GPIO_PORTA_EOI_OFST 0x84c
40#define SOCFPGA_FPGMGR_GPIO_EXT_PORTA_OFST 0x850
41
42/* Register bit defines */
43/* SOCFPGA_FPGMGR_STAT register mode field values */
44#define SOCFPGA_FPGMGR_STAT_POWER_UP 0x0 /*ramping*/
45#define SOCFPGA_FPGMGR_STAT_RESET 0x1
46#define SOCFPGA_FPGMGR_STAT_CFG 0x2
47#define SOCFPGA_FPGMGR_STAT_INIT 0x3
48#define SOCFPGA_FPGMGR_STAT_USER_MODE 0x4
49#define SOCFPGA_FPGMGR_STAT_UNKNOWN 0x5
50#define SOCFPGA_FPGMGR_STAT_STATE_MASK 0x7
51/* This is a flag value that doesn't really happen in this register field */
52#define SOCFPGA_FPGMGR_STAT_POWER_OFF 0x0
53
54#define MSEL_PP16_FAST_NOAES_NODC 0x0
55#define MSEL_PP16_FAST_AES_NODC 0x1
56#define MSEL_PP16_FAST_AESOPT_DC 0x2
57#define MSEL_PP16_SLOW_NOAES_NODC 0x4
58#define MSEL_PP16_SLOW_AES_NODC 0x5
59#define MSEL_PP16_SLOW_AESOPT_DC 0x6
60#define MSEL_PP32_FAST_NOAES_NODC 0x8
61#define MSEL_PP32_FAST_AES_NODC 0x9
62#define MSEL_PP32_FAST_AESOPT_DC 0xa
63#define MSEL_PP32_SLOW_NOAES_NODC 0xc
64#define MSEL_PP32_SLOW_AES_NODC 0xd
65#define MSEL_PP32_SLOW_AESOPT_DC 0xe
66#define SOCFPGA_FPGMGR_STAT_MSEL_MASK 0x000000f8
67#define SOCFPGA_FPGMGR_STAT_MSEL_SHIFT 3
68
69/* SOCFPGA_FPGMGR_CTL register */
70#define SOCFPGA_FPGMGR_CTL_EN 0x00000001
71#define SOCFPGA_FPGMGR_CTL_NCE 0x00000002
72#define SOCFPGA_FPGMGR_CTL_NCFGPULL 0x00000004
73
74#define CDRATIO_X1 0x00000000
75#define CDRATIO_X2 0x00000040
76#define CDRATIO_X4 0x00000080
77#define CDRATIO_X8 0x000000c0
78#define SOCFPGA_FPGMGR_CTL_CDRATIO_MASK 0x000000c0
79
80#define SOCFPGA_FPGMGR_CTL_AXICFGEN 0x00000100
81
82#define CFGWDTH_16 0x00000000
83#define CFGWDTH_32 0x00000200
84#define SOCFPGA_FPGMGR_CTL_CFGWDTH_MASK 0x00000200
85
86/* SOCFPGA_FPGMGR_DCLKSTAT register */
87#define SOCFPGA_FPGMGR_DCLKSTAT_DCNTDONE_E_DONE 0x1
88
89/* SOCFPGA_FPGMGR_GPIO_* registers share the same bit positions */
90#define SOCFPGA_FPGMGR_MON_NSTATUS 0x0001
91#define SOCFPGA_FPGMGR_MON_CONF_DONE 0x0002
92#define SOCFPGA_FPGMGR_MON_INIT_DONE 0x0004
93#define SOCFPGA_FPGMGR_MON_CRC_ERROR 0x0008
94#define SOCFPGA_FPGMGR_MON_CVP_CONF_DONE 0x0010
95#define SOCFPGA_FPGMGR_MON_PR_READY 0x0020
96#define SOCFPGA_FPGMGR_MON_PR_ERROR 0x0040
97#define SOCFPGA_FPGMGR_MON_PR_DONE 0x0080
98#define SOCFPGA_FPGMGR_MON_NCONFIG_PIN 0x0100
99#define SOCFPGA_FPGMGR_MON_NSTATUS_PIN 0x0200
100#define SOCFPGA_FPGMGR_MON_CONF_DONE_PIN 0x0400
101#define SOCFPGA_FPGMGR_MON_FPGA_POWER_ON 0x0800
102#define SOCFPGA_FPGMGR_MON_STATUS_MASK 0x0fff
103
104#define SOCFPGA_FPGMGR_NUM_SUPPLIES 3
105#define SOCFPGA_RESUME_TIMEOUT 3
106
107/* In power-up order. Reverse for power-down. */
108static const char *supply_names[SOCFPGA_FPGMGR_NUM_SUPPLIES] __maybe_unused = {
109 "FPGA-1.5V",
110 "FPGA-1.1V",
111 "FPGA-2.5V",
112};
113
114struct socfpga_fpga_priv {
115 void __iomem *fpga_base_addr;
116 void __iomem *fpga_data_addr;
117 struct completion status_complete;
118 int irq;
119};
120
121struct cfgmgr_mode {
122 /* Values to set in the CTRL register */
123 u32 ctrl;
124
125 /* flag that this table entry is a valid mode */
126 bool valid;
127};
128
129/* For SOCFPGA_FPGMGR_STAT_MSEL field */
130static struct cfgmgr_mode cfgmgr_modes[] = {
131 [MSEL_PP16_FAST_NOAES_NODC] = { CFGWDTH_16 | CDRATIO_X1, 1 },
132 [MSEL_PP16_FAST_AES_NODC] = { CFGWDTH_16 | CDRATIO_X2, 1 },
133 [MSEL_PP16_FAST_AESOPT_DC] = { CFGWDTH_16 | CDRATIO_X4, 1 },
134 [MSEL_PP16_SLOW_NOAES_NODC] = { CFGWDTH_16 | CDRATIO_X1, 1 },
135 [MSEL_PP16_SLOW_AES_NODC] = { CFGWDTH_16 | CDRATIO_X2, 1 },
136 [MSEL_PP16_SLOW_AESOPT_DC] = { CFGWDTH_16 | CDRATIO_X4, 1 },
137 [MSEL_PP32_FAST_NOAES_NODC] = { CFGWDTH_32 | CDRATIO_X1, 1 },
138 [MSEL_PP32_FAST_AES_NODC] = { CFGWDTH_32 | CDRATIO_X4, 1 },
139 [MSEL_PP32_FAST_AESOPT_DC] = { CFGWDTH_32 | CDRATIO_X8, 1 },
140 [MSEL_PP32_SLOW_NOAES_NODC] = { CFGWDTH_32 | CDRATIO_X1, 1 },
141 [MSEL_PP32_SLOW_AES_NODC] = { CFGWDTH_32 | CDRATIO_X4, 1 },
142 [MSEL_PP32_SLOW_AESOPT_DC] = { CFGWDTH_32 | CDRATIO_X8, 1 },
143};
144
145static u32 socfpga_fpga_readl(struct socfpga_fpga_priv *priv, u32 reg_offset)
146{
147 return readl(priv->fpga_base_addr + reg_offset);
148}
149
150static void socfpga_fpga_writel(struct socfpga_fpga_priv *priv, u32 reg_offset,
151 u32 value)
152{
153 writel(value, priv->fpga_base_addr + reg_offset);
154}
155
156static u32 socfpga_fpga_raw_readl(struct socfpga_fpga_priv *priv,
157 u32 reg_offset)
158{
159 return __raw_readl(priv->fpga_base_addr + reg_offset);
160}
161
162static void socfpga_fpga_raw_writel(struct socfpga_fpga_priv *priv,
163 u32 reg_offset, u32 value)
164{
165 __raw_writel(value, priv->fpga_base_addr + reg_offset);
166}
167
168static void socfpga_fpga_data_writel(struct socfpga_fpga_priv *priv, u32 value)
169{
170 writel(value, priv->fpga_data_addr);
171}
172
173static inline void socfpga_fpga_set_bitsl(struct socfpga_fpga_priv *priv,
174 u32 offset, u32 bits)
175{
176 u32 val;
177
178 val = socfpga_fpga_readl(priv, offset);
179 val |= bits;
180 socfpga_fpga_writel(priv, offset, val);
181}
182
183static inline void socfpga_fpga_clr_bitsl(struct socfpga_fpga_priv *priv,
184 u32 offset, u32 bits)
185{
186 u32 val;
187
188 val = socfpga_fpga_readl(priv, offset);
189 val &= ~bits;
190 socfpga_fpga_writel(priv, offset, val);
191}
192
193static u32 socfpga_fpga_mon_status_get(struct socfpga_fpga_priv *priv)
194{
195 return socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_GPIO_EXT_PORTA_OFST) &
196 SOCFPGA_FPGMGR_MON_STATUS_MASK;
197}
198
199static u32 socfpga_fpga_state_get(struct socfpga_fpga_priv *priv)
200{
201 u32 status = socfpga_fpga_mon_status_get(priv);
202
203 if ((status & SOCFPGA_FPGMGR_MON_FPGA_POWER_ON) == 0)
204 return SOCFPGA_FPGMGR_STAT_POWER_OFF;
205
206 return socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_STAT_OFST) &
207 SOCFPGA_FPGMGR_STAT_STATE_MASK;
208}
209
210static void socfpga_fpga_clear_done_status(struct socfpga_fpga_priv *priv)
211{
212 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_DCLKSTAT_OFST,
213 SOCFPGA_FPGMGR_DCLKSTAT_DCNTDONE_E_DONE);
214}
215
216/*
217 * Set the DCLKCNT, wait for DCLKSTAT to report the count completed, and clear
218 * the complete status.
219 */
220static int socfpga_fpga_dclk_set_and_wait_clear(struct socfpga_fpga_priv *priv,
221 u32 count)
222{
223 int timeout = 2;
224 u32 done;
225
226 /* Clear any existing DONE status. */
227 if (socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_DCLKSTAT_OFST))
228 socfpga_fpga_clear_done_status(priv);
229
230 /* Issue the DCLK count. */
231 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_DCLKCNT_OFST, count);
232
233 /* Poll DCLKSTAT to see if it completed in the timeout period. */
234 do {
235 done = socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_DCLKSTAT_OFST);
236 if (done == SOCFPGA_FPGMGR_DCLKSTAT_DCNTDONE_E_DONE) {
237 socfpga_fpga_clear_done_status(priv);
238 return 0;
239 }
240 udelay(1);
241 } while (timeout--);
242
243 return -ETIMEDOUT;
244}
245
246static int socfpga_fpga_wait_for_state(struct socfpga_fpga_priv *priv,
247 u32 state)
248{
249 int timeout = 2;
250
251 /*
252 * HW doesn't support an interrupt for changes in state, so poll to see
253 * if it matches the requested state within the timeout period.
254 */
255 do {
256 if ((socfpga_fpga_state_get(priv) & state) != 0)
257 return 0;
258 msleep(20);
259 } while (timeout--);
260
261 return -ETIMEDOUT;
262}
263
264static void socfpga_fpga_enable_irqs(struct socfpga_fpga_priv *priv, u32 irqs)
265{
266 /* set irqs to level sensitive */
267 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INTTYPE_LEVEL_OFST, 0);
268
269 /* set interrupt polarity */
270 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INT_POL_OFST, irqs);
271
272 /* clear irqs */
273 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_PORTA_EOI_OFST, irqs);
274
275 /* unmask interrupts */
276 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INTMSK_OFST, 0);
277
278 /* enable interrupts */
279 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INTEN_OFST, irqs);
280}
281
282static void socfpga_fpga_disable_irqs(struct socfpga_fpga_priv *priv)
283{
284 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_INTEN_OFST, 0);
285}
286
287static irqreturn_t socfpga_fpga_isr(int irq, void *dev_id)
288{
289 struct socfpga_fpga_priv *priv = dev_id;
290 u32 irqs, st;
291 bool conf_done, nstatus;
292
293 /* clear irqs */
294 irqs = socfpga_fpga_raw_readl(priv, SOCFPGA_FPGMGR_GPIO_INTSTAT_OFST);
295
296 socfpga_fpga_raw_writel(priv, SOCFPGA_FPGMGR_GPIO_PORTA_EOI_OFST, irqs);
297
298 st = socfpga_fpga_raw_readl(priv, SOCFPGA_FPGMGR_GPIO_EXT_PORTA_OFST);
299 conf_done = (st & SOCFPGA_FPGMGR_MON_CONF_DONE) != 0;
300 nstatus = (st & SOCFPGA_FPGMGR_MON_NSTATUS) != 0;
301
302 /* success */
303 if (conf_done && nstatus) {
304 /* disable irqs */
305 socfpga_fpga_raw_writel(priv,
306 SOCFPGA_FPGMGR_GPIO_INTEN_OFST, 0);
307 complete(&priv->status_complete);
308 }
309
310 return IRQ_HANDLED;
311}
312
313static int socfpga_fpga_wait_for_config_done(struct socfpga_fpga_priv *priv)
314{
315 int timeout, ret = 0;
316
317 socfpga_fpga_disable_irqs(priv);
318 init_completion(&priv->status_complete);
319 socfpga_fpga_enable_irqs(priv, SOCFPGA_FPGMGR_MON_CONF_DONE);
320
321 timeout = wait_for_completion_interruptible_timeout(
322 &priv->status_complete,
323 msecs_to_jiffies(10));
324 if (timeout == 0)
325 ret = -ETIMEDOUT;
326
327 socfpga_fpga_disable_irqs(priv);
328 return ret;
329}
330
331static int socfpga_fpga_cfg_mode_get(struct socfpga_fpga_priv *priv)
332{
333 u32 msel;
334
335 msel = socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_STAT_OFST);
336 msel &= SOCFPGA_FPGMGR_STAT_MSEL_MASK;
337 msel >>= SOCFPGA_FPGMGR_STAT_MSEL_SHIFT;
338
339 /* Check that this MSEL setting is supported */
340 if ((msel >= ARRAY_SIZE(cfgmgr_modes)) || !cfgmgr_modes[msel].valid)
341 return -EINVAL;
342
343 return msel;
344}
345
346static int socfpga_fpga_cfg_mode_set(struct socfpga_fpga_priv *priv)
347{
348 u32 ctrl_reg;
349 int mode;
350
351 /* get value from MSEL pins */
352 mode = socfpga_fpga_cfg_mode_get(priv);
353 if (mode < 0)
354 return mode;
355
356 /* Adjust CTRL for the CDRATIO */
357 ctrl_reg = socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_CTL_OFST);
358 ctrl_reg &= ~SOCFPGA_FPGMGR_CTL_CDRATIO_MASK;
359 ctrl_reg &= ~SOCFPGA_FPGMGR_CTL_CFGWDTH_MASK;
360 ctrl_reg |= cfgmgr_modes[mode].ctrl;
361
362 /* Set NCE to 0. */
363 ctrl_reg &= ~SOCFPGA_FPGMGR_CTL_NCE;
364 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_CTL_OFST, ctrl_reg);
365
366 return 0;
367}
368
369static int socfpga_fpga_reset(struct fpga_manager *mgr)
370{
371 struct socfpga_fpga_priv *priv = mgr->priv;
372 u32 ctrl_reg, status;
373 int ret;
374
375 /*
376 * Step 1:
377 * - Set CTRL.CFGWDTH, CTRL.CDRATIO to match cfg mode
378 * - Set CTRL.NCE to 0
379 */
380 ret = socfpga_fpga_cfg_mode_set(priv);
381 if (ret)
382 return ret;
383
384 /* Step 2: Set CTRL.EN to 1 */
385 socfpga_fpga_set_bitsl(priv, SOCFPGA_FPGMGR_CTL_OFST,
386 SOCFPGA_FPGMGR_CTL_EN);
387
388 /* Step 3: Set CTRL.NCONFIGPULL to 1 to put FPGA in reset */
389 ctrl_reg = socfpga_fpga_readl(priv, SOCFPGA_FPGMGR_CTL_OFST);
390 ctrl_reg |= SOCFPGA_FPGMGR_CTL_NCFGPULL;
391 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_CTL_OFST, ctrl_reg);
392
393 /* Step 4: Wait for STATUS.MODE to report FPGA is in reset phase */
394 status = socfpga_fpga_wait_for_state(priv, SOCFPGA_FPGMGR_STAT_RESET);
395
396 /* Step 5: Set CONTROL.NCONFIGPULL to 0 to release FPGA from reset */
397 ctrl_reg &= ~SOCFPGA_FPGMGR_CTL_NCFGPULL;
398 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_CTL_OFST, ctrl_reg);
399
400 /* Timeout waiting for reset */
401 if (status)
402 return -ETIMEDOUT;
403
404 return 0;
405}
406
407/*
408 * Prepare the FPGA to receive the configuration data.
409 */
410static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr, u32 flags,
411 const char *buf, size_t count)
412{
413 struct socfpga_fpga_priv *priv = mgr->priv;
414 int ret;
415
416 if (flags & FPGA_MGR_PARTIAL_RECONFIG) {
417 dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
418 return -EINVAL;
419 }
420 /* Steps 1 - 5: Reset the FPGA */
421 ret = socfpga_fpga_reset(mgr);
422 if (ret)
423 return ret;
424
425 /* Step 6: Wait for FPGA to enter configuration phase */
426 if (socfpga_fpga_wait_for_state(priv, SOCFPGA_FPGMGR_STAT_CFG))
427 return -ETIMEDOUT;
428
429 /* Step 7: Clear nSTATUS interrupt */
430 socfpga_fpga_writel(priv, SOCFPGA_FPGMGR_GPIO_PORTA_EOI_OFST,
431 SOCFPGA_FPGMGR_MON_NSTATUS);
432
433 /* Step 8: Set CTRL.AXICFGEN to 1 to enable transfer of config data */
434 socfpga_fpga_set_bitsl(priv, SOCFPGA_FPGMGR_CTL_OFST,
435 SOCFPGA_FPGMGR_CTL_AXICFGEN);
436
437 return 0;
438}
439
440/*
441 * Step 9: write data to the FPGA data register
442 */
443static int socfpga_fpga_ops_configure_write(struct fpga_manager *mgr,
444 const char *buf, size_t count)
445{
446 struct socfpga_fpga_priv *priv = mgr->priv;
447 u32 *buffer_32 = (u32 *)buf;
448 size_t i = 0;
449
450 if (count <= 0)
451 return -EINVAL;
452
453 /* Write out the complete 32-bit chunks. */
454 while (count >= sizeof(u32)) {
455 socfpga_fpga_data_writel(priv, buffer_32[i++]);
456 count -= sizeof(u32);
457 }
458
459 /* Write out remaining non 32-bit chunks. */
460 switch (count) {
461 case 3:
462 socfpga_fpga_data_writel(priv, buffer_32[i++] & 0x00ffffff);
463 break;
464 case 2:
465 socfpga_fpga_data_writel(priv, buffer_32[i++] & 0x0000ffff);
466 break;
467 case 1:
468 socfpga_fpga_data_writel(priv, buffer_32[i++] & 0x000000ff);
469 break;
470 case 0:
471 break;
472 default:
473 /* This will never happen. */
474 return -EFAULT;
475 }
476
477 return 0;
478}
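
The tail handling above zero-masks the bytes past the end of the bitstream before the last 32-bit register write. A standalone sketch of the same masking on a little-endian buffer; the payload bytes and the generic mask computation are illustrative (the driver uses explicit per-case masks, and reads a whole u32 where this sketch copies only the remaining bytes):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 5 payload bytes: one full word plus a 1-byte tail */
	const uint8_t bits[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	size_t count = sizeof(bits), i = 0;
	uint32_t word;

	while (count >= sizeof(word)) {			/* full words */
		memcpy(&word, bits + i, sizeof(word));
		printf("write 0x%08x\n", word);		/* 0x44332211 on LE */
		i += sizeof(word);
		count -= sizeof(word);
	}
	if (count) {					/* masked tail */
		uint32_t mask = (1u << (count * 8)) - 1;

		word = 0;
		memcpy(&word, bits + i, count);		/* no over-read */
		printf("write 0x%08x\n", word & mask);	/* 0x00000055 */
	}
	return 0;
}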
479
480static int socfpga_fpga_ops_configure_complete(struct fpga_manager *mgr,
481 u32 flags)
482{
483 struct socfpga_fpga_priv *priv = mgr->priv;
484 u32 status;
485
486 /*
487 * Step 10:
488 * - Observe CONF_DONE and nSTATUS (active low)
489 * - if CONF_DONE = 1 and nSTATUS = 1, configuration was successful
490 * - if CONF_DONE = 0 and nSTATUS = 0, configuration failed
491 */
492 status = socfpga_fpga_wait_for_config_done(priv);
493 if (status)
494 return status;
495
496 /* Step 11: Clear CTRL.AXICFGEN to disable transfer of config data */
497 socfpga_fpga_clr_bitsl(priv, SOCFPGA_FPGMGR_CTL_OFST,
498 SOCFPGA_FPGMGR_CTL_AXICFGEN);
499
500 /*
501 * Step 12:
502 * - Write 4 to DCLKCNT
503 * - Wait for STATUS.DCNTDONE = 1
504 * - Clear W1C bit in STATUS.DCNTDONE
505 */
506 if (socfpga_fpga_dclk_set_and_wait_clear(priv, 4))
507 return -ETIMEDOUT;
508
509 /* Step 13: Wait for STATUS.MODE to report USER MODE */
510 if (socfpga_fpga_wait_for_state(priv, SOCFPGA_FPGMGR_STAT_USER_MODE))
511 return -ETIMEDOUT;
512
513 /* Step 14: Set CTRL.EN to 0 */
514 socfpga_fpga_clr_bitsl(priv, SOCFPGA_FPGMGR_CTL_OFST,
515 SOCFPGA_FPGMGR_CTL_EN);
516
517 return 0;
518}
519
520/* Translate state register values to FPGA framework state */
521static const enum fpga_mgr_states socfpga_state_to_framework_state[] = {
522 [SOCFPGA_FPGMGR_STAT_POWER_OFF] = FPGA_MGR_STATE_POWER_OFF,
523 [SOCFPGA_FPGMGR_STAT_RESET] = FPGA_MGR_STATE_RESET,
524 [SOCFPGA_FPGMGR_STAT_CFG] = FPGA_MGR_STATE_WRITE_INIT,
525 [SOCFPGA_FPGMGR_STAT_INIT] = FPGA_MGR_STATE_WRITE_INIT,
526 [SOCFPGA_FPGMGR_STAT_USER_MODE] = FPGA_MGR_STATE_OPERATING,
527 [SOCFPGA_FPGMGR_STAT_UNKNOWN] = FPGA_MGR_STATE_UNKNOWN,
528};
529
530static enum fpga_mgr_states socfpga_fpga_ops_state(struct fpga_manager *mgr)
531{
532 struct socfpga_fpga_priv *priv = mgr->priv;
533 enum fpga_mgr_states ret;
534 u32 state;
535
536 state = socfpga_fpga_state_get(priv);
537
538 if (state < ARRAY_SIZE(socfpga_state_to_framework_state))
539 ret = socfpga_state_to_framework_state[state];
540 else
541 ret = FPGA_MGR_STATE_UNKNOWN;
542
543 return ret;
544}
545
546static const struct fpga_manager_ops socfpga_fpga_ops = {
547 .state = socfpga_fpga_ops_state,
548 .write_init = socfpga_fpga_ops_configure_init,
549 .write = socfpga_fpga_ops_configure_write,
550 .write_complete = socfpga_fpga_ops_configure_complete,
551};
552
553static int socfpga_fpga_probe(struct platform_device *pdev)
554{
555 struct device *dev = &pdev->dev;
556 struct socfpga_fpga_priv *priv;
557 struct resource *res;
558 int ret;
559
560 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
561 if (!priv)
562 return -ENOMEM;
563
564 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
565 priv->fpga_base_addr = devm_ioremap_resource(dev, res);
566 if (IS_ERR(priv->fpga_base_addr))
567 return PTR_ERR(priv->fpga_base_addr);
568
569 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
570 priv->fpga_data_addr = devm_ioremap_resource(dev, res);
571 if (IS_ERR(priv->fpga_data_addr))
572 return PTR_ERR(priv->fpga_data_addr);
573
574 priv->irq = platform_get_irq(pdev, 0);
575 if (priv->irq < 0)
576 return priv->irq;
577
578 ret = devm_request_irq(dev, priv->irq, socfpga_fpga_isr, 0,
579 dev_name(dev), priv);
580 if (ret)
581 return ret;
582
583 return fpga_mgr_register(dev, "Altera SOCFPGA FPGA Manager",
584 &socfpga_fpga_ops, priv);
585}
586
587static int socfpga_fpga_remove(struct platform_device *pdev)
588{
589 fpga_mgr_unregister(&pdev->dev);
590
591 return 0;
592}
593
594#ifdef CONFIG_OF
595static const struct of_device_id socfpga_fpga_of_match[] = {
596 { .compatible = "altr,socfpga-fpga-mgr", },
597 {},
598};
599
600MODULE_DEVICE_TABLE(of, socfpga_fpga_of_match);
601#endif
602
603static struct platform_driver socfpga_fpga_driver = {
604 .probe = socfpga_fpga_probe,
605 .remove = socfpga_fpga_remove,
606 .driver = {
607 .name = "socfpga_fpga_manager",
608 .of_match_table = of_match_ptr(socfpga_fpga_of_match),
609 },
610};
611
612module_platform_driver(socfpga_fpga_driver);
613
614MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
615MODULE_DESCRIPTION("Altera SOCFPGA FPGA Manager");
616MODULE_LICENSE("GPL v2");
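
Once registered, the manager is driven through the fpga-mgr core rather than called directly: the core invokes write_init, then write, then write_complete. A minimal in-kernel consumer sketch, assuming the of_fpga_mgr_get()/fpga_mgr_firmware_load()/fpga_mgr_put() interface from <linux/fpga/fpga-mgr.h>; the firmware name and the caller-supplied device-tree node are hypothetical:

#include <linux/fpga/fpga-mgr.h>
#include <linux/of.h>

static int program_image(struct device_node *mgr_node)
{
	struct fpga_manager *mgr;
	int ret;

	/* takes a reference on the manager device */
	mgr = of_fpga_mgr_get(mgr_node);
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	/* flags = 0: full (not partial) reconfiguration */
	ret = fpga_mgr_firmware_load(mgr, 0, "soc_system.rbf");

	fpga_mgr_put(mgr);
	return ret;
}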
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
new file mode 100644
index 000000000000..c2fb4120bd62
--- /dev/null
+++ b/drivers/fpga/zynq-fpga.c
@@ -0,0 +1,514 @@
1/*
2 * Copyright (c) 2011-2015 Xilinx Inc.
3 * Copyright (c) 2015, National Instruments Corp.
4 *
5 * FPGA Manager Driver for Xilinx Zynq, heavily based on xdevcfg driver
6 * in their vendor tree.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/clk.h>
19#include <linux/completion.h>
20#include <linux/delay.h>
21#include <linux/dma-mapping.h>
22#include <linux/fpga/fpga-mgr.h>
23#include <linux/interrupt.h>
24#include <linux/io.h>
25#include <linux/iopoll.h>
26#include <linux/module.h>
27#include <linux/mfd/syscon.h>
28#include <linux/of_address.h>
29#include <linux/of_irq.h>
30#include <linux/pm.h>
31#include <linux/regmap.h>
32#include <linux/string.h>
33
34/* Offsets into SLCR regmap */
35
36/* FPGA Software Reset Control */
37#define SLCR_FPGA_RST_CTRL_OFFSET 0x240
38/* Level Shifters Enable */
39#define SLCR_LVL_SHFTR_EN_OFFSET 0x900
40
41/* Constant Definitions */
42
43/* Control Register */
44#define CTRL_OFFSET 0x00
45/* Lock Register */
46#define LOCK_OFFSET 0x04
47/* Interrupt Status Register */
48#define INT_STS_OFFSET 0x0c
49/* Interrupt Mask Register */
50#define INT_MASK_OFFSET 0x10
51/* Status Register */
52#define STATUS_OFFSET 0x14
53/* DMA Source Address Register */
54#define DMA_SRC_ADDR_OFFSET 0x18
55/* DMA Destination Address Register */
56#define DMA_DST_ADDR_OFFSET 0x1c
57/* DMA Source Transfer Length */
58#define DMA_SRC_LEN_OFFSET 0x20
59/* DMA Destination Transfer Length */
60#define DMA_DEST_LEN_OFFSET 0x24
61/* Unlock Register */
62#define UNLOCK_OFFSET 0x34
63/* Misc. Control Register */
64#define MCTRL_OFFSET 0x80
65
66/* Control Register Bit definitions */
67
68/* Signal to reset FPGA */
69#define CTRL_PCFG_PROG_B_MASK BIT(30)
70/* Enable PCAP for PR */
71#define CTRL_PCAP_PR_MASK BIT(27)
72/* Enable PCAP */
73#define CTRL_PCAP_MODE_MASK BIT(26)
74
75/* Miscellaneous Control Register bit definitions */
76/* Internal PCAP loopback */
77#define MCTRL_PCAP_LPBK_MASK BIT(4)
78
79/* Status register bit definitions */
80
81/* FPGA init status */
82#define STATUS_DMA_Q_F BIT(31)
83#define STATUS_PCFG_INIT_MASK BIT(4)
84
85/* Interrupt Status/Mask Register Bit definitions */
86/* DMA command done */
87#define IXR_DMA_DONE_MASK BIT(13)
88/* DMA and PCAP cmd done */
89#define IXR_D_P_DONE_MASK BIT(12)
90/* FPGA programmed */
91#define IXR_PCFG_DONE_MASK BIT(2)
92#define IXR_ERROR_FLAGS_MASK 0x00F0F860
93#define IXR_ALL_MASK 0xF8F7F87F
94
95/* Miscellaneous constant values */
96
97/* Invalid DMA addr */
98#define DMA_INVALID_ADDRESS GENMASK(31, 0)
99/* Used to unlock the dev */
100#define UNLOCK_MASK 0x757bdf0d
101/* Timeout for DMA to complete */
102#define DMA_DONE_TIMEOUT msecs_to_jiffies(1000)
103/* Timeout (in us) for polling reset bits */
104#define INIT_POLL_TIMEOUT 2500000
105/* Delay (in us) between polls of reset bits */
106#define INIT_POLL_DELAY 20
107
108/* Masks for controlling fields in the SLCR */
109/* Disable all Level shifters */
110#define LVL_SHFTR_DISABLE_ALL_MASK 0x0
111/* Enable Level shifters from PS to PL */
112#define LVL_SHFTR_ENABLE_PS_TO_PL 0xa
113/* Enable Level shifters from PL to PS */
114#define LVL_SHFTR_ENABLE_PL_TO_PS 0xf
115/* Enable global resets */
116#define FPGA_RST_ALL_MASK 0xf
117/* Disable global resets */
118#define FPGA_RST_NONE_MASK 0x0
119
120struct zynq_fpga_priv {
121 struct device *dev;
122 int irq;
123 struct clk *clk;
124
125 void __iomem *io_base;
126 struct regmap *slcr;
127
128 struct completion dma_done;
129};
130
131static inline void zynq_fpga_write(struct zynq_fpga_priv *priv, u32 offset,
132 u32 val)
133{
134 writel(val, priv->io_base + offset);
135}
136
137static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
138 u32 offset)
139{
140 return readl(priv->io_base + offset);
141}
142
143#define zynq_fpga_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
144 readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
145 timeout_us)
146
147static void zynq_fpga_mask_irqs(struct zynq_fpga_priv *priv)
148{
149 u32 intr_mask;
150
151 intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET);
152 zynq_fpga_write(priv, INT_MASK_OFFSET,
153 intr_mask | IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
154}
155
156static void zynq_fpga_unmask_irqs(struct zynq_fpga_priv *priv)
157{
158 u32 intr_mask;
159
160 intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET);
161 zynq_fpga_write(priv, INT_MASK_OFFSET,
162 intr_mask
163 & ~(IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK));
164}
165
166static irqreturn_t zynq_fpga_isr(int irq, void *data)
167{
168 struct zynq_fpga_priv *priv = data;
169
170 /* disable DMA and error IRQs */
171 zynq_fpga_mask_irqs(priv);
172
173 complete(&priv->dma_done);
174
175 return IRQ_HANDLED;
176}
177
178static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
179 const char *buf, size_t count)
180{
181 struct zynq_fpga_priv *priv;
182 u32 ctrl, status;
183 int err;
184
185 priv = mgr->priv;
186
187 err = clk_enable(priv->clk);
188 if (err)
189 return err;
190
191 /* don't globally reset PL if we're doing partial reconfig */
192 if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
193 /* assert AXI interface resets */
194 regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
195 FPGA_RST_ALL_MASK);
196
197 /* disable all level shifters */
198 regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
199 LVL_SHFTR_DISABLE_ALL_MASK);
200 /* enable level shifters from PS to PL */
201 regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
202 LVL_SHFTR_ENABLE_PS_TO_PL);
203
204 /* create a rising edge on PCFG_INIT. PCFG_INIT follows
205 * PCFG_PROG_B, so we need to poll it after setting PCFG_PROG_B
206 * to make sure the rising edge actually happens.
207	 * Note: PCFG_PROG_B is active low; the sequence is as described
208	 * in UG585 v1.10, page 211.
209 */
210 ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
211 ctrl |= CTRL_PCFG_PROG_B_MASK;
212
213 zynq_fpga_write(priv, CTRL_OFFSET, ctrl);
214
215 err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
216 status & STATUS_PCFG_INIT_MASK,
217 INIT_POLL_DELAY,
218 INIT_POLL_TIMEOUT);
219 if (err) {
220 dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
221 goto out_err;
222 }
223
224 ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
225 ctrl &= ~CTRL_PCFG_PROG_B_MASK;
226
227 zynq_fpga_write(priv, CTRL_OFFSET, ctrl);
228
229 err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
230 !(status & STATUS_PCFG_INIT_MASK),
231 INIT_POLL_DELAY,
232 INIT_POLL_TIMEOUT);
233 if (err) {
234 dev_err(priv->dev, "Timeout waiting for !PCFG_INIT");
235 goto out_err;
236 }
237
238 ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
239 ctrl |= CTRL_PCFG_PROG_B_MASK;
240
241 zynq_fpga_write(priv, CTRL_OFFSET, ctrl);
242
243 err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
244 status & STATUS_PCFG_INIT_MASK,
245 INIT_POLL_DELAY,
246 INIT_POLL_TIMEOUT);
247 if (err) {
248 dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
249 goto out_err;
250 }
251 }
252
253	/* set the configuration register with the following options:
254 * - enable PCAP interface
255 * - set throughput for maximum speed
256 * - set CPU in user mode
257 */
258 ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
259 zynq_fpga_write(priv, CTRL_OFFSET,
260 (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl));
261
262 /* check that we have room in the command queue */
263 status = zynq_fpga_read(priv, STATUS_OFFSET);
264 if (status & STATUS_DMA_Q_F) {
265 dev_err(priv->dev, "DMA command queue full");
266 err = -EBUSY;
267 goto out_err;
268 }
269
270 /* ensure internal PCAP loopback is disabled */
271 ctrl = zynq_fpga_read(priv, MCTRL_OFFSET);
272 zynq_fpga_write(priv, MCTRL_OFFSET, (~MCTRL_PCAP_LPBK_MASK & ctrl));
273
274 clk_disable(priv->clk);
275
276 return 0;
277
278out_err:
279 clk_disable(priv->clk);
280
281 return err;
282}
283
284static int zynq_fpga_ops_write(struct fpga_manager *mgr,
285 const char *buf, size_t count)
286{
287 struct zynq_fpga_priv *priv;
288 int err;
289 char *kbuf;
290 size_t in_count;
291 dma_addr_t dma_addr;
292 u32 transfer_length;
293 u32 intr_status;
294
295 in_count = count;
296 priv = mgr->priv;
297
298 kbuf = dma_alloc_coherent(priv->dev, count, &dma_addr, GFP_KERNEL);
299 if (!kbuf)
300 return -ENOMEM;
301
302 memcpy(kbuf, buf, count);
303
304 /* enable clock */
305 err = clk_enable(priv->clk);
306 if (err)
307 goto out_free;
308
309 zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
310
311 reinit_completion(&priv->dma_done);
312
313 /* enable DMA and error IRQs */
314 zynq_fpga_unmask_irqs(priv);
315
316	/* the +1 in the src addr tells the controller to hold off the
317	 * DMA_DONE IRQ until both the AXI and PCAP transfers are done
318 */
319 zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, (u32)(dma_addr) + 1);
320 zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, (u32)DMA_INVALID_ADDRESS);
321
322	/* convert #bytes to #words, rounding up */
323 transfer_length = (count + 3) / 4;
324
325 zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, transfer_length);
326 zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
327
328 wait_for_completion(&priv->dma_done);
329
330 intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
331 zynq_fpga_write(priv, INT_STS_OFFSET, intr_status);
332
333	if ((intr_status & IXR_D_P_DONE_MASK) != IXR_D_P_DONE_MASK) {
334 dev_err(priv->dev, "Error configuring FPGA");
335 err = -EFAULT;
336 }
337
338 clk_disable(priv->clk);
339
340out_free:
341 dma_free_coherent(priv->dev, in_count, kbuf, dma_addr);
342
343 return err;
344}
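
The DMA handshake above is the standard completion pattern: re-arm the completion, unmask the interrupt, kick the hardware, then sleep until the ISR signals. Distilled to its skeleton; the hw_* helpers are placeholders named only for illustration:

#include <linux/completion.h>
#include <linux/interrupt.h>

/* hypothetical hardware helpers, named for illustration only */
void hw_mask_irqs(void *hw);
void hw_unmask_irqs(void *hw);
void hw_start_dma(void *hw);

static DECLARE_COMPLETION(xfer_done);

static irqreturn_t xfer_isr(int irq, void *data)
{
	hw_mask_irqs(data);			/* quiesce before signalling */
	complete(&xfer_done);
	return IRQ_HANDLED;
}

static void start_and_wait(void *hw)
{
	reinit_completion(&xfer_done);		/* re-arm before unmasking */
	hw_unmask_irqs(hw);
	hw_start_dma(hw);
	wait_for_completion(&xfer_done);	/* sleeps until the ISR fires */
}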
345
346static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags)
347{
348 struct zynq_fpga_priv *priv = mgr->priv;
349 int err;
350 u32 intr_status;
351
352 err = clk_enable(priv->clk);
353 if (err)
354 return err;
355
356 err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
357 intr_status & IXR_PCFG_DONE_MASK,
358 INIT_POLL_DELAY,
359 INIT_POLL_TIMEOUT);
360
361 clk_disable(priv->clk);
362
363 if (err)
364 return err;
365
366 /* for the partial reconfig case we didn't touch the level shifters */
367 if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
368 /* enable level shifters from PL to PS */
369 regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
370 LVL_SHFTR_ENABLE_PL_TO_PS);
371
372 /* deassert AXI interface resets */
373 regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
374 FPGA_RST_NONE_MASK);
375 }
376
377 return 0;
378}
379
380static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
381{
382 int err;
383 u32 intr_status;
384 struct zynq_fpga_priv *priv;
385
386 priv = mgr->priv;
387
388 err = clk_enable(priv->clk);
389 if (err)
390 return FPGA_MGR_STATE_UNKNOWN;
391
392 intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
393 clk_disable(priv->clk);
394
395 if (intr_status & IXR_PCFG_DONE_MASK)
396 return FPGA_MGR_STATE_OPERATING;
397
398 return FPGA_MGR_STATE_UNKNOWN;
399}
400
401static const struct fpga_manager_ops zynq_fpga_ops = {
402 .state = zynq_fpga_ops_state,
403 .write_init = zynq_fpga_ops_write_init,
404 .write = zynq_fpga_ops_write,
405 .write_complete = zynq_fpga_ops_write_complete,
406};
407
408static int zynq_fpga_probe(struct platform_device *pdev)
409{
410 struct device *dev = &pdev->dev;
411 struct zynq_fpga_priv *priv;
412 struct resource *res;
413 int err;
414
415 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
416 if (!priv)
417 return -ENOMEM;
418
419 priv->dev = dev;
420
421 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
422 priv->io_base = devm_ioremap_resource(dev, res);
423 if (IS_ERR(priv->io_base))
424 return PTR_ERR(priv->io_base);
425
426 priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
427 "syscon");
428 if (IS_ERR(priv->slcr)) {
429 dev_err(dev, "unable to get zynq-slcr regmap");
430 return PTR_ERR(priv->slcr);
431 }
432
433 init_completion(&priv->dma_done);
434
435 priv->irq = platform_get_irq(pdev, 0);
436 if (priv->irq < 0) {
437 dev_err(dev, "No IRQ available");
438 return priv->irq;
439 }
440
441 err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0,
442 dev_name(dev), priv);
443 if (err) {
444 dev_err(dev, "unable to request IRQ");
445 return err;
446 }
447
448 priv->clk = devm_clk_get(dev, "ref_clk");
449 if (IS_ERR(priv->clk)) {
450 dev_err(dev, "input clock not found");
451 return PTR_ERR(priv->clk);
452 }
453
454 err = clk_prepare_enable(priv->clk);
455 if (err) {
456 dev_err(dev, "unable to enable clock");
457 return err;
458 }
459
460 /* unlock the device */
461 zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
462
463 clk_disable(priv->clk);
464
465 err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
466 &zynq_fpga_ops, priv);
467 if (err) {
468 dev_err(dev, "unable to register FPGA manager");
469 clk_unprepare(priv->clk);
470 return err;
471 }
472
473 return 0;
474}
475
476static int zynq_fpga_remove(struct platform_device *pdev)
477{
478 struct zynq_fpga_priv *priv;
479 struct fpga_manager *mgr;
480
481 mgr = platform_get_drvdata(pdev);
482 priv = mgr->priv;
483
484 fpga_mgr_unregister(&pdev->dev);
485
486 clk_unprepare(priv->clk);
487
488 return 0;
489}
490
491#ifdef CONFIG_OF
492static const struct of_device_id zynq_fpga_of_match[] = {
493 { .compatible = "xlnx,zynq-devcfg-1.0", },
494 {},
495};
496
497MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
498#endif
499
500static struct platform_driver zynq_fpga_driver = {
501 .probe = zynq_fpga_probe,
502 .remove = zynq_fpga_remove,
503 .driver = {
504 .name = "zynq_fpga_manager",
505 .of_match_table = of_match_ptr(zynq_fpga_of_match),
506 },
507};
508
509module_platform_driver(zynq_fpga_driver);
510
511MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
512MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>");
513MODULE_DESCRIPTION("Xilinx Zynq FPGA Manager");
514MODULE_LICENSE("GPL v2");
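
Note the clock discipline shared by probe() and the ops above: the clock is prepared once for the driver's lifetime and only enabled around register access, since clk_enable()/clk_disable() are usable in atomic context while prepare/unprepare may sleep. A minimal sketch of that shape, with hypothetical function names:

#include <linux/clk.h>

static int sketch_probe_clk(struct clk *clk)
{
	int err;

	err = clk_prepare_enable(clk);	/* may sleep: do it at probe */
	if (err)
		return err;
	/* ... one-time register setup ... */
	clk_disable(clk);		/* clock stays prepared */
	return 0;
}

static int sketch_op(struct clk *clk)
{
	int err;

	err = clk_enable(clk);		/* atomic-safe fast path */
	if (err)
		return err;
	/* ... MMIO accesses ... */
	clk_disable(clk);
	return 0;
}

static void sketch_remove_clk(struct clk *clk)
{
	clk_unprepare(clk);		/* balance the probe-time prepare */
}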
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index bf2476ed9356..d630b7ece735 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -191,7 +191,8 @@ static void etm_set_prog(struct etm_drvdata *drvdata)
191 isb(); 191 isb();
192 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) { 192 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
193 dev_err(drvdata->dev, 193 dev_err(drvdata->dev,
194 "timeout observed when probing at offset %#x\n", ETMSR); 194 "%s: timeout observed when probing at offset %#x\n",
195 __func__, ETMSR);
195 } 196 }
196} 197}
197 198
@@ -209,7 +210,8 @@ static void etm_clr_prog(struct etm_drvdata *drvdata)
209 isb(); 210 isb();
210 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) { 211 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
211 dev_err(drvdata->dev, 212 dev_err(drvdata->dev,
212 "timeout observed when probing at offset %#x\n", ETMSR); 213 "%s: timeout observed when probing at offset %#x\n",
214 __func__, ETMSR);
213 } 215 }
214} 216}
215 217
@@ -313,14 +315,6 @@ static void etm_enable_hw(void *info)
313 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); 315 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
314} 316}
315 317
316static int etm_trace_id_simple(struct etm_drvdata *drvdata)
317{
318 if (!drvdata->enable)
319 return drvdata->traceid;
320
321 return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
322}
323
324static int etm_trace_id(struct coresight_device *csdev) 318static int etm_trace_id(struct coresight_device *csdev)
325{ 319{
326 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 320 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -1506,44 +1500,17 @@ static ssize_t timestamp_event_store(struct device *dev,
1506} 1500}
1507static DEVICE_ATTR_RW(timestamp_event); 1501static DEVICE_ATTR_RW(timestamp_event);
1508 1502
1509static ssize_t status_show(struct device *dev, 1503static ssize_t cpu_show(struct device *dev,
1510 struct device_attribute *attr, char *buf) 1504 struct device_attribute *attr, char *buf)
1511{ 1505{
1512 int ret; 1506 int val;
1513 unsigned long flags;
1514 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); 1507 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1515 1508
1516 pm_runtime_get_sync(drvdata->dev); 1509 val = drvdata->cpu;
1517 spin_lock_irqsave(&drvdata->spinlock, flags); 1510 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
1518
1519 CS_UNLOCK(drvdata->base);
1520 ret = sprintf(buf,
1521 "ETMCCR: 0x%08x\n"
1522 "ETMCCER: 0x%08x\n"
1523 "ETMSCR: 0x%08x\n"
1524 "ETMIDR: 0x%08x\n"
1525 "ETMCR: 0x%08x\n"
1526 "ETMTRACEIDR: 0x%08x\n"
1527 "Enable event: 0x%08x\n"
1528 "Enable start/stop: 0x%08x\n"
1529 "Enable control: CR1 0x%08x CR2 0x%08x\n"
1530 "CPU affinity: %d\n",
1531 drvdata->etmccr, drvdata->etmccer,
1532 etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
1533 etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
1534 etm_readl(drvdata, ETMTEEVR),
1535 etm_readl(drvdata, ETMTSSCR),
1536 etm_readl(drvdata, ETMTECR1),
1537 etm_readl(drvdata, ETMTECR2),
1538 drvdata->cpu);
1539 CS_LOCK(drvdata->base);
1540
1541 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1542 pm_runtime_put(drvdata->dev);
1543 1511
1544 return ret;
1545} 1512}
1546static DEVICE_ATTR_RO(status); 1513static DEVICE_ATTR_RO(cpu);
1547 1514
1548static ssize_t traceid_show(struct device *dev, 1515static ssize_t traceid_show(struct device *dev,
1549 struct device_attribute *attr, char *buf) 1516 struct device_attribute *attr, char *buf)
@@ -1619,11 +1586,61 @@ static struct attribute *coresight_etm_attrs[] = {
1619 &dev_attr_ctxid_mask.attr, 1586 &dev_attr_ctxid_mask.attr,
1620 &dev_attr_sync_freq.attr, 1587 &dev_attr_sync_freq.attr,
1621 &dev_attr_timestamp_event.attr, 1588 &dev_attr_timestamp_event.attr,
1622 &dev_attr_status.attr,
1623 &dev_attr_traceid.attr, 1589 &dev_attr_traceid.attr,
1590 &dev_attr_cpu.attr,
1591 NULL,
1592};
1593
1594#define coresight_simple_func(name, offset) \
1595static ssize_t name##_show(struct device *_dev, \
1596 struct device_attribute *attr, char *buf) \
1597{ \
1598 struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
1599 return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
1600 readl_relaxed(drvdata->base + offset)); \
1601} \
1602DEVICE_ATTR_RO(name)
1603
1604coresight_simple_func(etmccr, ETMCCR);
1605coresight_simple_func(etmccer, ETMCCER);
1606coresight_simple_func(etmscr, ETMSCR);
1607coresight_simple_func(etmidr, ETMIDR);
1608coresight_simple_func(etmcr, ETMCR);
1609coresight_simple_func(etmtraceidr, ETMTRACEIDR);
1610coresight_simple_func(etmteevr, ETMTEEVR);
1611coresight_simple_func(etmtssvr, ETMTSSCR);
1612coresight_simple_func(etmtecr1, ETMTECR1);
1613coresight_simple_func(etmtecr2, ETMTECR2);
1614
1615static struct attribute *coresight_etm_mgmt_attrs[] = {
1616 &dev_attr_etmccr.attr,
1617 &dev_attr_etmccer.attr,
1618 &dev_attr_etmscr.attr,
1619 &dev_attr_etmidr.attr,
1620 &dev_attr_etmcr.attr,
1621 &dev_attr_etmtraceidr.attr,
1622 &dev_attr_etmteevr.attr,
1623 &dev_attr_etmtssvr.attr,
1624 &dev_attr_etmtecr1.attr,
1625 &dev_attr_etmtecr2.attr,
1626 NULL,
1627};
1628
1629static const struct attribute_group coresight_etm_group = {
1630 .attrs = coresight_etm_attrs,
1631};
1632
1633
1634static const struct attribute_group coresight_etm_mgmt_group = {
1635 .attrs = coresight_etm_mgmt_attrs,
1636 .name = "mgmt",
1637};
1638
1639static const struct attribute_group *coresight_etm_groups[] = {
1640 &coresight_etm_group,
1641 &coresight_etm_mgmt_group,
1624 NULL, 1642 NULL,
1625}; 1643};
1626ATTRIBUTE_GROUPS(coresight_etm);
1627 1644
1628static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, 1645static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
1629 void *hcpu) 1646 void *hcpu)
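
For reference, each of the new mgmt attributes is generated by the coresight_simple_func() macro added above; expanding coresight_simple_func(etmccr, ETMCCR) by hand gives:

static ssize_t etmccr_show(struct device *_dev,
			   struct device_attribute *attr, char *buf)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",
			 readl_relaxed(drvdata->base + ETMCCR));
}
DEVICE_ATTR_RO(etmccr);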
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 254a81a4e6f4..a6707642bb23 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -136,7 +136,9 @@ static void etm4_enable_hw(void *info)
136 writel_relaxed(drvdata->cntr_val[i], 136 writel_relaxed(drvdata->cntr_val[i],
137 drvdata->base + TRCCNTVRn(i)); 137 drvdata->base + TRCCNTVRn(i));
138 } 138 }
139 for (i = 0; i < drvdata->nr_resource; i++) 139
140 /* Resource selector pair 0 is always implemented and reserved */
141 for (i = 2; i < drvdata->nr_resource * 2; i++)
140 writel_relaxed(drvdata->res_ctrl[i], 142 writel_relaxed(drvdata->res_ctrl[i],
141 drvdata->base + TRCRSCTLRn(i)); 143 drvdata->base + TRCRSCTLRn(i));
142 144
@@ -489,8 +491,9 @@ static ssize_t reset_store(struct device *dev,
489 drvdata->cntr_val[i] = 0x0; 491 drvdata->cntr_val[i] = 0x0;
490 } 492 }
491 493
492 drvdata->res_idx = 0x0; 494 /* Resource selector pair 0 is always implemented and reserved */
493 for (i = 0; i < drvdata->nr_resource; i++) 495 drvdata->res_idx = 0x2;
496 for (i = 2; i < drvdata->nr_resource * 2; i++)
494 drvdata->res_ctrl[i] = 0x0; 497 drvdata->res_ctrl[i] = 0x0;
495 498
496 for (i = 0; i < drvdata->nr_ss_cmp; i++) { 499 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
@@ -1732,7 +1735,7 @@ static ssize_t res_idx_store(struct device *dev,
1732 if (kstrtoul(buf, 16, &val)) 1735 if (kstrtoul(buf, 16, &val))
1733 return -EINVAL; 1736 return -EINVAL;
1734 /* Resource selector pair 0 is always implemented and reserved */ 1737 /* Resource selector pair 0 is always implemented and reserved */
1735 if ((val == 0) || (val >= drvdata->nr_resource)) 1738 if (val < 2 || val >= drvdata->nr_resource * 2)
1736 return -EINVAL; 1739 return -EINVAL;
1737 1740
1738 /* 1741 /*
@@ -2416,8 +2419,13 @@ static void etm4_init_arch_data(void *info)
2416 drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3); 2419 drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
2417 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */ 2420 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
2418 drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15); 2421 drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
2419 /* NUMRSPAIR, bits[19:16] the number of resource pairs for tracing */ 2422 /*
2420 drvdata->nr_resource = BMVAL(etmidr4, 16, 19); 2423 * NUMRSPAIR, bits[19:16]
2424 * The number of resource pairs conveyed by the HW starts at 0, i.e a
2425 * value of 0x0 indicate 1 resource pair, 0x1 indicate two and so on.
2426 * As such add 1 to the value of NUMRSPAIR for a better representation.
2427 */
2428 drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
2421 /* 2429 /*
2422 * NUMSSCC, bits[23:20] the number of single-shot 2430 * NUMSSCC, bits[23:20] the number of single-shot
2423 * comparator control for tracing 2431 * comparator control for tracing
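
A worked example of the new NUMRSPAIR accounting (the register value is hypothetical):

/* Hypothetical example: TRCIDR4.NUMRSPAIR (bits[19:16]) reads 0x3 */
unsigned int numrspair = 0x3;
unsigned int nr_resource = numrspair + 1;	/* 4 selector pairs	*/
unsigned int nr_selectors = nr_resource * 2;	/* selectors 0..7	*/
/*
 * Pair 0 (selectors 0 and 1) is reserved, so the loops above program
 * selectors 2..7 only, matching res_idx's new lower bound of 0x2.
 */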
@@ -2504,6 +2512,8 @@ static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
2504 drvdata->cntr_val[i] = 0x0; 2512 drvdata->cntr_val[i] = 0x0;
2505 } 2513 }
2506 2514
2515 /* Resource selector pair 0 is always implemented and reserved */
2516 drvdata->res_idx = 0x2;
2507 for (i = 2; i < drvdata->nr_resource * 2; i++) 2517 for (i = 2; i < drvdata->nr_resource * 2; i++)
2508 drvdata->res_ctrl[i] = 0x0; 2518 drvdata->res_ctrl[i] = 0x0;
2509 2519
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 894531d315b8..e25492137d8b 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -240,6 +240,11 @@ static int coresight_enable_path(struct list_head *path)
240 int ret = 0; 240 int ret = 0;
241 struct coresight_device *cd; 241 struct coresight_device *cd;
242 242
243 /*
244 * At this point we have a full @path, from source to sink. The
245 * sink is the first entry and the source the last one. Go through
246 * all the components and enable them one by one.
247 */
243 list_for_each_entry(cd, path, path_link) { 248 list_for_each_entry(cd, path, path_link) {
244 if (cd == list_first_entry(path, struct coresight_device, 249 if (cd == list_first_entry(path, struct coresight_device,
245 path_link)) { 250 path_link)) {
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig
new file mode 100644
index 000000000000..b7a9073d968b
--- /dev/null
+++ b/drivers/hwtracing/intel_th/Kconfig
@@ -0,0 +1,72 @@
1config INTEL_TH
2 tristate "Intel(R) Trace Hub controller"
3 help
4	  Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices)
5	  that produce, switch and output trace data from multiple hardware
6	  and software sources over several types of trace output ports,
7	  encoded in the System Trace Protocol (MIPI STPv2). It is intended
8	  for full-system debugging.
9
10	  This option enables the intel_th bus and the common code that TH
11	  subdevices use to interact with each other and with the hardware,
12	  and that platform glue layers use to drive Intel TH devices.
13
14 Say Y here to enable Intel(R) Trace Hub controller support.
15
16if INTEL_TH
17
18config INTEL_TH_PCI
19 tristate "Intel(R) Trace Hub PCI controller"
20 depends on PCI
21 help
22 Intel(R) Trace Hub may exist as a PCI device. This option enables
23	  the glue layer for PCI-based Intel TH.
24
25 Say Y here to enable PCI Intel TH support.
26
27config INTEL_TH_GTH
28 tristate "Intel(R) Trace Hub Global Trace Hub"
29 help
30 Global Trace Hub (GTH) is the central component of the
31 Intel TH infrastructure and acts as a switch for source
32 and output devices. This driver is required for other
33 Intel TH subdevices to initialize.
34
35 Say Y here to enable GTH subdevice of Intel(R) Trace Hub.
36
37config INTEL_TH_STH
38 tristate "Intel(R) Trace Hub Software Trace Hub support"
39 depends on STM
40 help
41 Software Trace Hub (STH) enables trace data from software
42 trace sources to be sent out via Intel(R) Trace Hub. It
43	  uses an stm class device to interface with its sources.
44
45 Say Y here to enable STH subdevice of Intel(R) Trace Hub.
46
47config INTEL_TH_MSU
48 tristate "Intel(R) Trace Hub Memory Storage Unit"
49 help
50	  The Memory Storage Unit (MSU) is a trace output device that
51	  stores STP traces in system memory. It supports single
52 and multiblock modes of operation and provides read()
53 and mmap() access to the collected data.
54
55 Say Y here to enable MSU output device for Intel TH.
56
57config INTEL_TH_PTI
58 tristate "Intel(R) Trace Hub PTI output"
59 help
60	  The Parallel Trace Interface unit (PTI) is a trace output device
61	  in the Intel TH architecture that sends STP trace output through
62	  a PTI port.
63
64 Say Y to enable PTI output of Intel TH data.
65
66config INTEL_TH_DEBUG
67 bool "Intel(R) Trace Hub debugging"
68 depends on DEBUG_FS
69 help
70 Say Y here to enable debugging.
71
72endif
diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile
new file mode 100644
index 000000000000..81d42fe918f7
--- /dev/null
+++ b/drivers/hwtracing/intel_th/Makefile
@@ -0,0 +1,18 @@
1obj-$(CONFIG_INTEL_TH) += intel_th.o
2intel_th-y := core.o
3intel_th-$(CONFIG_INTEL_TH_DEBUG) += debug.o
4
5obj-$(CONFIG_INTEL_TH_PCI) += intel_th_pci.o
6intel_th_pci-y := pci.o
7
8obj-$(CONFIG_INTEL_TH_GTH) += intel_th_gth.o
9intel_th_gth-y := gth.o
10
11obj-$(CONFIG_INTEL_TH_STH) += intel_th_sth.o
12intel_th_sth-y := sth.o
13
14obj-$(CONFIG_INTEL_TH_MSU) += intel_th_msu.o
15intel_th_msu-y := msu.o
16
17obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o
18intel_th_pti-y := pti.o
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
new file mode 100644
index 000000000000..165d3001c301
--- /dev/null
+++ b/drivers/hwtracing/intel_th/core.c
@@ -0,0 +1,692 @@
1/*
2 * Intel(R) Trace Hub driver core
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/types.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/sysfs.h>
22#include <linux/kdev_t.h>
23#include <linux/debugfs.h>
24#include <linux/idr.h>
25#include <linux/pci.h>
26#include <linux/dma-mapping.h>
27
28#include "intel_th.h"
29#include "debug.h"
30
31static DEFINE_IDA(intel_th_ida);
32
33static int intel_th_match(struct device *dev, struct device_driver *driver)
34{
35 struct intel_th_driver *thdrv = to_intel_th_driver(driver);
36 struct intel_th_device *thdev = to_intel_th_device(dev);
37
38 if (thdev->type == INTEL_TH_SWITCH &&
39 (!thdrv->enable || !thdrv->disable))
40 return 0;
41
42 return !strcmp(thdev->name, driver->name);
43}
44
45static int intel_th_child_remove(struct device *dev, void *data)
46{
47 device_release_driver(dev);
48
49 return 0;
50}
51
52static int intel_th_probe(struct device *dev)
53{
54 struct intel_th_driver *thdrv = to_intel_th_driver(dev->driver);
55 struct intel_th_device *thdev = to_intel_th_device(dev);
56 struct intel_th_driver *hubdrv;
57 struct intel_th_device *hub = NULL;
58 int ret;
59
60 if (thdev->type == INTEL_TH_SWITCH)
61 hub = thdev;
62 else if (dev->parent)
63 hub = to_intel_th_device(dev->parent);
64
65 if (!hub || !hub->dev.driver)
66 return -EPROBE_DEFER;
67
68 hubdrv = to_intel_th_driver(hub->dev.driver);
69
70 ret = thdrv->probe(to_intel_th_device(dev));
71 if (ret)
72 return ret;
73
74 if (thdev->type == INTEL_TH_OUTPUT &&
75 !intel_th_output_assigned(thdev))
76 ret = hubdrv->assign(hub, thdev);
77
78 return ret;
79}
80
81static int intel_th_remove(struct device *dev)
82{
83 struct intel_th_driver *thdrv = to_intel_th_driver(dev->driver);
84 struct intel_th_device *thdev = to_intel_th_device(dev);
85 struct intel_th_device *hub = to_intel_th_device(dev->parent);
86 int err;
87
88 if (thdev->type == INTEL_TH_SWITCH) {
89 err = device_for_each_child(dev, thdev, intel_th_child_remove);
90 if (err)
91 return err;
92 }
93
94 thdrv->remove(thdev);
95
96 if (intel_th_output_assigned(thdev)) {
97 struct intel_th_driver *hubdrv =
98 to_intel_th_driver(dev->parent->driver);
99
100 if (hub->dev.driver)
101 hubdrv->unassign(hub, thdev);
102 }
103
104 return 0;
105}
106
107static struct bus_type intel_th_bus = {
108 .name = "intel_th",
109 .dev_attrs = NULL,
110 .match = intel_th_match,
111 .probe = intel_th_probe,
112 .remove = intel_th_remove,
113};
114
115static void intel_th_device_free(struct intel_th_device *thdev);
116
117static void intel_th_device_release(struct device *dev)
118{
119 intel_th_device_free(to_intel_th_device(dev));
120}
121
122static struct device_type intel_th_source_device_type = {
123 .name = "intel_th_source_device",
124 .release = intel_th_device_release,
125};
126
127static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
128 kuid_t *uid, kgid_t *gid)
129{
130 struct intel_th_device *thdev = to_intel_th_device(dev);
131 char *node;
132
133 if (thdev->id >= 0)
134 node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", 0, thdev->name,
135 thdev->id);
136 else
137 node = kasprintf(GFP_KERNEL, "intel_th%d/%s", 0, thdev->name);
138
139 return node;
140}
141
142static ssize_t port_show(struct device *dev, struct device_attribute *attr,
143 char *buf)
144{
145 struct intel_th_device *thdev = to_intel_th_device(dev);
146
147 if (thdev->output.port >= 0)
148 return scnprintf(buf, PAGE_SIZE, "%u\n", thdev->output.port);
149
150 return scnprintf(buf, PAGE_SIZE, "unassigned\n");
151}
152
153static DEVICE_ATTR_RO(port);
154
155static int intel_th_output_activate(struct intel_th_device *thdev)
156{
157 struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver);
158
159 if (thdrv->activate)
160 return thdrv->activate(thdev);
161
162 intel_th_trace_enable(thdev);
163
164 return 0;
165}
166
167static void intel_th_output_deactivate(struct intel_th_device *thdev)
168{
169 struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver);
170
171 if (thdrv->deactivate)
172 thdrv->deactivate(thdev);
173 else
174 intel_th_trace_disable(thdev);
175}
176
177static ssize_t active_show(struct device *dev, struct device_attribute *attr,
178 char *buf)
179{
180 struct intel_th_device *thdev = to_intel_th_device(dev);
181
182 return scnprintf(buf, PAGE_SIZE, "%d\n", thdev->output.active);
183}
184
185static ssize_t active_store(struct device *dev, struct device_attribute *attr,
186 const char *buf, size_t size)
187{
188 struct intel_th_device *thdev = to_intel_th_device(dev);
189 unsigned long val;
190 int ret;
191
192 ret = kstrtoul(buf, 10, &val);
193 if (ret)
194 return ret;
195
196 if (!!val != thdev->output.active) {
197 if (val)
198 ret = intel_th_output_activate(thdev);
199 else
200 intel_th_output_deactivate(thdev);
201 }
202
203 return ret ? ret : size;
204}
205
206static DEVICE_ATTR_RW(active);
207
208static struct attribute *intel_th_output_attrs[] = {
209 &dev_attr_port.attr,
210 &dev_attr_active.attr,
211 NULL,
212};
213
214ATTRIBUTE_GROUPS(intel_th_output);
215
216static struct device_type intel_th_output_device_type = {
217 .name = "intel_th_output_device",
218 .groups = intel_th_output_groups,
219 .release = intel_th_device_release,
220 .devnode = intel_th_output_devnode,
221};
222
223static struct device_type intel_th_switch_device_type = {
224 .name = "intel_th_switch_device",
225 .release = intel_th_device_release,
226};
227
228static struct device_type *intel_th_device_type[] = {
229 [INTEL_TH_SOURCE] = &intel_th_source_device_type,
230 [INTEL_TH_OUTPUT] = &intel_th_output_device_type,
231 [INTEL_TH_SWITCH] = &intel_th_switch_device_type,
232};
233
234int intel_th_driver_register(struct intel_th_driver *thdrv)
235{
236 if (!thdrv->probe || !thdrv->remove)
237 return -EINVAL;
238
239 thdrv->driver.bus = &intel_th_bus;
240
241 return driver_register(&thdrv->driver);
242}
243EXPORT_SYMBOL_GPL(intel_th_driver_register);
244
245void intel_th_driver_unregister(struct intel_th_driver *thdrv)
246{
247 driver_unregister(&thdrv->driver);
248}
249EXPORT_SYMBOL_GPL(intel_th_driver_unregister);
250
251static struct intel_th_device *
252intel_th_device_alloc(struct intel_th *th, unsigned int type, const char *name,
253 int id)
254{
255 struct device *parent;
256 struct intel_th_device *thdev;
257
258 if (type == INTEL_TH_SWITCH)
259 parent = th->dev;
260 else
261 parent = &th->hub->dev;
262
263 thdev = kzalloc(sizeof(*thdev) + strlen(name) + 1, GFP_KERNEL);
264 if (!thdev)
265 return NULL;
266
267 thdev->id = id;
268 thdev->type = type;
269
270 strcpy(thdev->name, name);
271 device_initialize(&thdev->dev);
272 thdev->dev.bus = &intel_th_bus;
273 thdev->dev.type = intel_th_device_type[type];
274 thdev->dev.parent = parent;
275 thdev->dev.dma_mask = parent->dma_mask;
276 thdev->dev.dma_parms = parent->dma_parms;
277 dma_set_coherent_mask(&thdev->dev, parent->coherent_dma_mask);
278 if (id >= 0)
279 dev_set_name(&thdev->dev, "%d-%s%d", th->id, name, id);
280 else
281 dev_set_name(&thdev->dev, "%d-%s", th->id, name);
282
283 return thdev;
284}
285
286static int intel_th_device_add_resources(struct intel_th_device *thdev,
287 struct resource *res, int nres)
288{
289 struct resource *r;
290
291 r = kmemdup(res, sizeof(*res) * nres, GFP_KERNEL);
292 if (!r)
293 return -ENOMEM;
294
295 thdev->resource = r;
296 thdev->num_resources = nres;
297
298 return 0;
299}
300
301static void intel_th_device_remove(struct intel_th_device *thdev)
302{
303 device_del(&thdev->dev);
304 put_device(&thdev->dev);
305}
306
307static void intel_th_device_free(struct intel_th_device *thdev)
308{
309 kfree(thdev->resource);
310 kfree(thdev);
311}
312
313/*
314 * Intel(R) Trace Hub subdevices
315 */
316static struct intel_th_subdevice {
317 const char *name;
318 struct resource res[3];
319 unsigned nres;
320 unsigned type;
321 unsigned otype;
322 int id;
323} intel_th_subdevices[TH_SUBDEVICE_MAX] = {
324 {
325 .nres = 1,
326 .res = {
327 {
328 .start = REG_GTH_OFFSET,
329 .end = REG_GTH_OFFSET + REG_GTH_LENGTH - 1,
330 .flags = IORESOURCE_MEM,
331 },
332 },
333 .name = "gth",
334 .type = INTEL_TH_SWITCH,
335 .id = -1,
336 },
337 {
338 .nres = 2,
339 .res = {
340 {
341 .start = REG_MSU_OFFSET,
342 .end = REG_MSU_OFFSET + REG_MSU_LENGTH - 1,
343 .flags = IORESOURCE_MEM,
344 },
345 {
346 .start = BUF_MSU_OFFSET,
347 .end = BUF_MSU_OFFSET + BUF_MSU_LENGTH - 1,
348 .flags = IORESOURCE_MEM,
349 },
350 },
351 .name = "msc",
352 .id = 0,
353 .type = INTEL_TH_OUTPUT,
354 .otype = GTH_MSU,
355 },
356 {
357 .nres = 2,
358 .res = {
359 {
360 .start = REG_MSU_OFFSET,
361 .end = REG_MSU_OFFSET + REG_MSU_LENGTH - 1,
362 .flags = IORESOURCE_MEM,
363 },
364 {
365 .start = BUF_MSU_OFFSET,
366 .end = BUF_MSU_OFFSET + BUF_MSU_LENGTH - 1,
367 .flags = IORESOURCE_MEM,
368 },
369 },
370 .name = "msc",
371 .id = 1,
372 .type = INTEL_TH_OUTPUT,
373 .otype = GTH_MSU,
374 },
375 {
376 .nres = 2,
377 .res = {
378 {
379 .start = REG_STH_OFFSET,
380 .end = REG_STH_OFFSET + REG_STH_LENGTH - 1,
381 .flags = IORESOURCE_MEM,
382 },
383 {
384 .start = TH_MMIO_SW,
385 .end = 0,
386 .flags = IORESOURCE_MEM,
387 },
388 },
389 .id = -1,
390 .name = "sth",
391 .type = INTEL_TH_SOURCE,
392 },
393 {
394 .nres = 1,
395 .res = {
396 {
397 .start = REG_PTI_OFFSET,
398 .end = REG_PTI_OFFSET + REG_PTI_LENGTH - 1,
399 .flags = IORESOURCE_MEM,
400 },
401 },
402 .id = -1,
403 .name = "pti",
404 .type = INTEL_TH_OUTPUT,
405 .otype = GTH_PTI,
406 },
407 {
408 .nres = 1,
409 .res = {
410 {
411 .start = REG_DCIH_OFFSET,
412 .end = REG_DCIH_OFFSET + REG_DCIH_LENGTH - 1,
413 .flags = IORESOURCE_MEM,
414 },
415 },
416 .id = -1,
417 .name = "dcih",
418 .type = INTEL_TH_OUTPUT,
419 },
420};
421
422static int intel_th_populate(struct intel_th *th, struct resource *devres,
423 unsigned int ndevres, int irq)
424{
425 struct resource res[3];
426 unsigned int req = 0;
427 int i, err;
428
429 /* create devices for each intel_th_subdevice */
430 for (i = 0; i < ARRAY_SIZE(intel_th_subdevices); i++) {
431 struct intel_th_subdevice *subdev = &intel_th_subdevices[i];
432 struct intel_th_device *thdev;
433 int r;
434
435 thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
436 subdev->id);
437 if (!thdev) {
438 err = -ENOMEM;
439 goto kill_subdevs;
440 }
441
442 memcpy(res, subdev->res,
443 sizeof(struct resource) * subdev->nres);
444
445 for (r = 0; r < subdev->nres; r++) {
446 int bar = TH_MMIO_CONFIG;
447
448 /*
449 * Take .end == 0 to mean 'take the whole bar',
450 * .start then tells us which bar it is. Default to
451 * TH_MMIO_CONFIG.
452 */
453 if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
454 bar = res[r].start;
455 res[r].start = 0;
456 res[r].end = resource_size(&devres[bar]) - 1;
457 }
458
459 if (res[r].flags & IORESOURCE_MEM) {
460 res[r].start += devres[bar].start;
461 res[r].end += devres[bar].start;
462
463 dev_dbg(th->dev, "%s:%d @ %pR\n",
464 subdev->name, r, &res[r]);
465 } else if (res[r].flags & IORESOURCE_IRQ) {
466 res[r].start = irq;
467 }
468 }
469
470 err = intel_th_device_add_resources(thdev, res, subdev->nres);
471 if (err) {
472 put_device(&thdev->dev);
473 goto kill_subdevs;
474 }
475
476 if (subdev->type == INTEL_TH_OUTPUT) {
477 thdev->dev.devt = MKDEV(th->major, i);
478 thdev->output.type = subdev->otype;
479 thdev->output.port = -1;
480 }
481
482 err = device_add(&thdev->dev);
483 if (err) {
484 put_device(&thdev->dev);
485 goto kill_subdevs;
486 }
487
488 /* need switch driver to be loaded to enumerate the rest */
489 if (subdev->type == INTEL_TH_SWITCH && !req) {
490 th->hub = thdev;
491 err = request_module("intel_th_%s", subdev->name);
492 if (!err)
493 req++;
494 }
495
496 th->thdev[i] = thdev;
497 }
498
499 return 0;
500
501kill_subdevs:
502	for (i--; i >= 0; i--)
503 intel_th_device_remove(th->thdev[i]);
504
505 return err;
506}
507
508static int match_devt(struct device *dev, void *data)
509{
510 dev_t devt = (dev_t)(unsigned long)data;
511
512 return dev->devt == devt;
513}
514
515static int intel_th_output_open(struct inode *inode, struct file *file)
516{
517 const struct file_operations *fops;
518 struct intel_th_driver *thdrv;
519 struct device *dev;
520 int err;
521
522 dev = bus_find_device(&intel_th_bus, NULL,
523 (void *)(unsigned long)inode->i_rdev,
524 match_devt);
525 if (!dev || !dev->driver)
526 return -ENODEV;
527
528 thdrv = to_intel_th_driver(dev->driver);
529 fops = fops_get(thdrv->fops);
530 if (!fops)
531 return -ENODEV;
532
533 replace_fops(file, fops);
534
535 file->private_data = to_intel_th_device(dev);
536
537 if (file->f_op->open) {
538 err = file->f_op->open(inode, file);
539 return err;
540 }
541
542 return 0;
543}
544
545static const struct file_operations intel_th_output_fops = {
546 .open = intel_th_output_open,
547 .llseek = noop_llseek,
548};
549
550/**
551 * intel_th_alloc() - allocate a new Intel TH device and its subdevices
552 * @dev: parent device
553 * @devres: parent's resources
554 * @ndevres: number of resources
555 * @irq: irq number
556 */
557struct intel_th *
558intel_th_alloc(struct device *dev, struct resource *devres,
559 unsigned int ndevres, int irq)
560{
561 struct intel_th *th;
562 int err;
563
564 th = kzalloc(sizeof(*th), GFP_KERNEL);
565 if (!th)
566 return ERR_PTR(-ENOMEM);
567
568 th->id = ida_simple_get(&intel_th_ida, 0, 0, GFP_KERNEL);
569 if (th->id < 0) {
570 err = th->id;
571 goto err_alloc;
572 }
573
574 th->major = __register_chrdev(0, 0, TH_POSSIBLE_OUTPUTS,
575 "intel_th/output", &intel_th_output_fops);
576 if (th->major < 0) {
577 err = th->major;
578 goto err_ida;
579 }
580 th->dev = dev;
581
582 err = intel_th_populate(th, devres, ndevres, irq);
583 if (err)
584 goto err_chrdev;
585
586 return th;
587
588err_chrdev:
589 __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
590 "intel_th/output");
591
592err_ida:
593 ida_simple_remove(&intel_th_ida, th->id);
594
595err_alloc:
596 kfree(th);
597
598 return ERR_PTR(err);
599}
600EXPORT_SYMBOL_GPL(intel_th_alloc);
601
602void intel_th_free(struct intel_th *th)
603{
604 int i;
605
606 for (i = 0; i < TH_SUBDEVICE_MAX; i++)
607 if (th->thdev[i] != th->hub)
608 intel_th_device_remove(th->thdev[i]);
609
610 intel_th_device_remove(th->hub);
611
612 __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
613 "intel_th/output");
614
615 ida_simple_remove(&intel_th_ida, th->id);
616
617 kfree(th);
618}
619EXPORT_SYMBOL_GPL(intel_th_free);
620
621/**
622 * intel_th_trace_enable() - enable tracing for an output device
623 * @thdev: output device that requests tracing be enabled
624 */
625int intel_th_trace_enable(struct intel_th_device *thdev)
626{
627 struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
628 struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
629
630 if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH))
631 return -EINVAL;
632
633 if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
634 return -EINVAL;
635
636 hubdrv->enable(hub, &thdev->output);
637
638 return 0;
639}
640EXPORT_SYMBOL_GPL(intel_th_trace_enable);
641
642/**
643 * intel_th_trace_disable() - disable tracing for an output device
644 * @thdev: output device that requests tracing be disabled
645 */
646int intel_th_trace_disable(struct intel_th_device *thdev)
647{
648 struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
649 struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
650
651 WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH);
652 if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
653 return -EINVAL;
654
655 hubdrv->disable(hub, &thdev->output);
656
657 return 0;
658}
659EXPORT_SYMBOL_GPL(intel_th_trace_disable);
660
661int intel_th_set_output(struct intel_th_device *thdev,
662 unsigned int master)
663{
664 struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
665 struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
666
667 if (!hubdrv->set_output)
668 return -ENOTSUPP;
669
670 return hubdrv->set_output(hub, master);
671}
672EXPORT_SYMBOL_GPL(intel_th_set_output);
673
674static int __init intel_th_init(void)
675{
676 intel_th_debug_init();
677
678 return bus_register(&intel_th_bus);
679}
680subsys_initcall(intel_th_init);
681
682static void __exit intel_th_exit(void)
683{
684 intel_th_debug_done();
685
686 bus_unregister(&intel_th_bus);
687}
688module_exit(intel_th_exit);
689
690MODULE_LICENSE("GPL v2");
691MODULE_DESCRIPTION("Intel(R) Trace Hub controller driver");
692MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
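
From a subdevice driver's point of view, the bus above is used by filling in a struct intel_th_driver whose driver.name matches the subdevice name ("gth", "msc", "sth", "pti", "dcih") and registering it. A minimal sketch, assuming the callback signatures the core above relies on (intel_th_driver_register() requires both probe and remove); the driver itself is hypothetical:

static int dummy_probe(struct intel_th_device *thdev)
{
	/* map thdev->resource[], set up drvdata, etc. */
	return 0;
}

static void dummy_remove(struct intel_th_device *thdev)
{
}

static struct intel_th_driver dummy_driver = {
	.probe	= dummy_probe,
	.remove	= dummy_remove,
	.driver	= {
		.name	= "pti",	/* must match the subdevice name */
		.owner	= THIS_MODULE,
	},
};

module_driver(dummy_driver, intel_th_driver_register,
	      intel_th_driver_unregister);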
diff --git a/drivers/hwtracing/intel_th/debug.c b/drivers/hwtracing/intel_th/debug.c
new file mode 100644
index 000000000000..788a1f0a97ad
--- /dev/null
+++ b/drivers/hwtracing/intel_th/debug.c
@@ -0,0 +1,36 @@
1/*
2 * Intel(R) Trace Hub driver debugging
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/debugfs.h>
19
20#include "intel_th.h"
21#include "debug.h"
22
23struct dentry *intel_th_dbg;
24
25void intel_th_debug_init(void)
26{
27 intel_th_dbg = debugfs_create_dir("intel_th", NULL);
28 if (IS_ERR(intel_th_dbg))
29 intel_th_dbg = NULL;
30}
31
32void intel_th_debug_done(void)
33{
34 debugfs_remove(intel_th_dbg);
35 intel_th_dbg = NULL;
36}
diff --git a/drivers/hwtracing/intel_th/debug.h b/drivers/hwtracing/intel_th/debug.h
new file mode 100644
index 000000000000..88311bad3ba4
--- /dev/null
+++ b/drivers/hwtracing/intel_th/debug.h
@@ -0,0 +1,34 @@
1/*
2 * Intel(R) Trace Hub driver debugging
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __INTEL_TH_DEBUG_H__
17#define __INTEL_TH_DEBUG_H__
18
19#ifdef CONFIG_INTEL_TH_DEBUG
20extern struct dentry *intel_th_dbg;
21
22void intel_th_debug_init(void);
23void intel_th_debug_done(void);
24#else
25static inline void intel_th_debug_init(void)
26{
27}
28
29static inline void intel_th_debug_done(void)
30{
31}
32#endif
33
34#endif /* __INTEL_TH_DEBUG_H__ */
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
new file mode 100644
index 000000000000..2dc5378ccd3a
--- /dev/null
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -0,0 +1,706 @@
1/*
2 * Intel(R) Trace Hub Global Trace Hub
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/types.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/io.h>
22#include <linux/mm.h>
23#include <linux/slab.h>
24#include <linux/bitmap.h>
25
26#include "intel_th.h"
27#include "gth.h"
28
29struct gth_device;
30
31/**
32 * struct gth_output - GTH view on an output port
33 * @gth: backlink to the GTH device
34 * @output: link to output device's output descriptor
35 * @index: output port number
36 * @port_type: one of GTH_* port type values
37 * @master: bitmap of masters configured for this output
38 */
39struct gth_output {
40 struct gth_device *gth;
41 struct intel_th_output *output;
42 unsigned int index;
43 unsigned int port_type;
44 DECLARE_BITMAP(master, TH_CONFIGURABLE_MASTERS + 1);
45};
46
47/**
48 * struct gth_device - GTH device
49 * @dev: driver core's device
50 * @base: register window base address
51 * @output_group: attributes describing output ports
52 * @master_group: attributes describing master assignments
53 * @output: output ports
54 * @master: master/output port assignments
55 * @gth_lock: serializes accesses to GTH bits
56 */
57struct gth_device {
58 struct device *dev;
59 void __iomem *base;
60
61 struct attribute_group output_group;
62 struct attribute_group master_group;
63 struct gth_output output[TH_POSSIBLE_OUTPUTS];
64 signed char master[TH_CONFIGURABLE_MASTERS + 1];
65 spinlock_t gth_lock;
66};
67
68static void gth_output_set(struct gth_device *gth, int port,
69 unsigned int config)
70{
71 unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
72 u32 val;
73 int shift = (port & 3) * 8;
74
75 val = ioread32(gth->base + reg);
76 val &= ~(0xff << shift);
77 val |= config << shift;
78 iowrite32(val, gth->base + reg);
79}
80
81static unsigned int gth_output_get(struct gth_device *gth, int port)
82{
83 unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
84 u32 val;
85 int shift = (port & 3) * 8;
86
87 val = ioread32(gth->base + reg);
88 val &= 0xff << shift;
89 val >>= shift;
90
91 return val;
92}
93
94static void gth_smcfreq_set(struct gth_device *gth, int port,
95 unsigned int freq)
96{
97 unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
98 int shift = (port & 1) * 16;
99 u32 val;
100
101 val = ioread32(gth->base + reg);
102 val &= ~(0xffff << shift);
103 val |= freq << shift;
104 iowrite32(val, gth->base + reg);
105}
106
107static unsigned int gth_smcfreq_get(struct gth_device *gth, int port)
108{
109 unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
110 int shift = (port & 1) * 16;
111 u32 val;
112
113 val = ioread32(gth->base + reg);
114 val &= 0xffff << shift;
115 val >>= shift;
116
117 return val;
118}
119
120/*
121 * "masters" attribute group
122 */
123
124struct master_attribute {
125 struct device_attribute attr;
126 struct gth_device *gth;
127 unsigned int master;
128};
129
130static void
131gth_master_set(struct gth_device *gth, unsigned int master, int port)
132{
133 unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
134 unsigned int shift = (master & 0x7) * 4;
135 u32 val;
136
137 if (master >= 256) {
138 reg = REG_GTH_GSWTDEST;
139 shift = 0;
140 }
141
142 val = ioread32(gth->base + reg);
143 val &= ~(0xf << shift);
144 if (port >= 0)
145 val |= (0x8 | port) << shift;
146 iowrite32(val, gth->base + reg);
147}
148
149/*static int gth_master_get(struct gth_device *gth, unsigned int master)
150{
151 unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
152 unsigned int shift = (master & 0x7) * 4;
153 u32 val;
154
155 if (master >= 256) {
156 reg = REG_GTH_GSWTDEST;
157 shift = 0;
158 }
159
160 val = ioread32(gth->base + reg);
161 val &= (0xf << shift);
162 val >>= shift;
163
164 return val ? val & 0x7 : -1;
165 }*/
166
167static ssize_t master_attr_show(struct device *dev,
168 struct device_attribute *attr,
169 char *buf)
170{
171 struct master_attribute *ma =
172 container_of(attr, struct master_attribute, attr);
173 struct gth_device *gth = ma->gth;
174 size_t count;
175 int port;
176
177 spin_lock(&gth->gth_lock);
178 port = gth->master[ma->master];
179 spin_unlock(&gth->gth_lock);
180
181 if (port >= 0)
182 count = snprintf(buf, PAGE_SIZE, "%x\n", port);
183 else
184 count = snprintf(buf, PAGE_SIZE, "disabled\n");
185
186 return count;
187}
188
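/*
 * Writing to a "masters/<n>" attribute re-routes trace master <n>: e.g.
 * "echo 2 > masters/33" sends master 33 to output port 2 and
 * "echo -1 > masters/33" disconnects it; the hardware is only
 * reprogrammed if the target port's output driver is currently active.
 */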
189static ssize_t master_attr_store(struct device *dev,
190 struct device_attribute *attr,
191 const char *buf, size_t count)
192{
193 struct master_attribute *ma =
194 container_of(attr, struct master_attribute, attr);
195 struct gth_device *gth = ma->gth;
196 int old_port, port;
197
198 if (kstrtoint(buf, 10, &port) < 0)
199 return -EINVAL;
200
201 if (port >= TH_POSSIBLE_OUTPUTS || port < -1)
202 return -EINVAL;
203
204 spin_lock(&gth->gth_lock);
205
206 /* disconnect from the previous output port, if any */
207 old_port = gth->master[ma->master];
208 if (old_port >= 0) {
209 gth->master[ma->master] = -1;
210 clear_bit(ma->master, gth->output[old_port].master);
211 if (gth->output[old_port].output->active)
212 gth_master_set(gth, ma->master, -1);
213 }
214
215 /* connect to the new output port, if any */
216 if (port >= 0) {
217 /* check if there's a driver for this port */
218 if (!gth->output[port].output) {
219 count = -ENODEV;
220 goto unlock;
221 }
222
223 set_bit(ma->master, gth->output[port].master);
224
225 /* if the port is active, program this setting */
226 if (gth->output[port].output->active)
227 gth_master_set(gth, ma->master, port);
228 }
229
230 gth->master[ma->master] = port;
231
232unlock:
233 spin_unlock(&gth->gth_lock);
234
235 return count;
236}
237
238struct output_attribute {
239 struct device_attribute attr;
240 struct gth_device *gth;
241 unsigned int port;
242 unsigned int parm;
243};
244
245#define OUTPUT_PARM(_name, _mask, _r, _w, _what) \
246 [TH_OUTPUT_PARM(_name)] = { .name = __stringify(_name), \
247 .get = gth_ ## _what ## _get, \
248 .set = gth_ ## _what ## _set, \
249 .mask = (_mask), \
250 .readable = (_r), \
251 .writable = (_w) }
252
253static const struct output_parm {
254 const char *name;
255 unsigned int (*get)(struct gth_device *gth, int port);
256 void (*set)(struct gth_device *gth, int port,
257 unsigned int val);
258 unsigned int mask;
259 unsigned int readable : 1,
260 writable : 1;
261} output_parms[] = {
262 OUTPUT_PARM(port, 0x7, 1, 0, output),
263 OUTPUT_PARM(null, BIT(3), 1, 1, output),
264 OUTPUT_PARM(drop, BIT(4), 1, 1, output),
265 OUTPUT_PARM(reset, BIT(5), 1, 0, output),
266 OUTPUT_PARM(flush, BIT(7), 0, 1, output),
267 OUTPUT_PARM(smcfreq, 0xffff, 1, 1, smcfreq),
268};
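/*
 * Each entry above becomes one sysfs file per output port in the
 * "outputs" attribute group, named "<port>_<parm>" (e.g. "0_null");
 * .mask selects the bitfield that the file reads and/or writes.
 */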
269
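/*
 * Read-modify-write of a single output parameter: for example,
 * gth_output_parm_set(gth, 0, TH_OUTPUT_PARM(null), 1) sets bit 3
 * (the "null" bit) in output port 0's configuration byte.
 */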
270static void
271gth_output_parm_set(struct gth_device *gth, int port, unsigned int parm,
272 unsigned int val)
273{
274 unsigned int config = output_parms[parm].get(gth, port);
275 unsigned int mask = output_parms[parm].mask;
276 unsigned int shift = __ffs(mask);
277
278 config &= ~mask;
279 config |= (val << shift) & mask;
280 output_parms[parm].set(gth, port, config);
281}
282
283static unsigned int
284gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm)
285{
286 unsigned int config = output_parms[parm].get(gth, port);
287 unsigned int mask = output_parms[parm].mask;
288 unsigned int shift = __ffs(mask);
289
290 config &= mask;
291 config >>= shift;
292 return config;
293}
294
295/*
296 * Reset outputs and sources
297 */
298static int intel_th_gth_reset(struct gth_device *gth)
299{
300 u32 scratchpad;
301 int port, i;
302
303 scratchpad = ioread32(gth->base + REG_GTH_SCRPD0);
304 if (scratchpad & SCRPD_DEBUGGER_IN_USE)
305 return -EBUSY;
306
307 /* output ports */
308	for (port = 0; port < TH_POSSIBLE_OUTPUTS; port++) {
309 if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
310 GTH_NONE)
311 continue;
312
313 gth_output_set(gth, port, 0);
314 gth_smcfreq_set(gth, port, 16);
315 }
316 /* disable overrides */
317 iowrite32(0, gth->base + REG_GTH_DESTOVR);
318
319 /* masters swdest_0~31 and gswdest */
320 for (i = 0; i < 33; i++)
321 iowrite32(0, gth->base + REG_GTH_SWDEST0 + i * 4);
322
323 /* sources */
324 iowrite32(0, gth->base + REG_GTH_SCR);
325 iowrite32(0xfc, gth->base + REG_GTH_SCR2);
326
327 return 0;
328}
329
330/*
331 * "outputs" attribute group
332 */
333
334static ssize_t output_attr_show(struct device *dev,
335 struct device_attribute *attr,
336 char *buf)
337{
338 struct output_attribute *oa =
339 container_of(attr, struct output_attribute, attr);
340 struct gth_device *gth = oa->gth;
341 size_t count;
342
343 spin_lock(&gth->gth_lock);
344 count = snprintf(buf, PAGE_SIZE, "%x\n",
345 gth_output_parm_get(gth, oa->port, oa->parm));
346 spin_unlock(&gth->gth_lock);
347
348 return count;
349}
350
351static ssize_t output_attr_store(struct device *dev,
352 struct device_attribute *attr,
353 const char *buf, size_t count)
354{
355 struct output_attribute *oa =
356 container_of(attr, struct output_attribute, attr);
357 struct gth_device *gth = oa->gth;
358 unsigned int config;
359
360 if (kstrtouint(buf, 16, &config) < 0)
361 return -EINVAL;
362
363 spin_lock(&gth->gth_lock);
364 gth_output_parm_set(gth, oa->port, oa->parm, config);
365 spin_unlock(&gth->gth_lock);
366
367 return count;
368}
369
370static int intel_th_master_attributes(struct gth_device *gth)
371{
372 struct master_attribute *master_attrs;
373 struct attribute **attrs;
374 int i, nattrs = TH_CONFIGURABLE_MASTERS + 2;
375
376 attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
377 if (!attrs)
378 return -ENOMEM;
379
380 master_attrs = devm_kcalloc(gth->dev, nattrs,
381 sizeof(struct master_attribute),
382 GFP_KERNEL);
383 if (!master_attrs)
384 return -ENOMEM;
385
386 for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) {
387 char *name;
388
389 name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d%s", i,
390 i == TH_CONFIGURABLE_MASTERS ? "+" : "");
391 if (!name)
392 return -ENOMEM;
393
394 master_attrs[i].attr.attr.name = name;
395 master_attrs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
396 master_attrs[i].attr.show = master_attr_show;
397 master_attrs[i].attr.store = master_attr_store;
398
399 sysfs_attr_init(&master_attrs[i].attr.attr);
400 attrs[i] = &master_attrs[i].attr.attr;
401
402 master_attrs[i].gth = gth;
403 master_attrs[i].master = i;
404 }
405
406 gth->master_group.name = "masters";
407 gth->master_group.attrs = attrs;
408
409 return sysfs_create_group(&gth->dev->kobj, &gth->master_group);
410}
411
412static int intel_th_output_attributes(struct gth_device *gth)
413{
414 struct output_attribute *out_attrs;
415 struct attribute **attrs;
416 int i, j, nouts = TH_POSSIBLE_OUTPUTS;
417 int nparms = ARRAY_SIZE(output_parms);
418 int nattrs = nouts * nparms + 1;
419
420 attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
421 if (!attrs)
422 return -ENOMEM;
423
424 out_attrs = devm_kcalloc(gth->dev, nattrs,
425 sizeof(struct output_attribute),
426 GFP_KERNEL);
427 if (!out_attrs)
428 return -ENOMEM;
429
430 for (i = 0; i < nouts; i++) {
431 for (j = 0; j < nparms; j++) {
432 unsigned int idx = i * nparms + j;
433 char *name;
434
435 name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d_%s", i,
436 output_parms[j].name);
437 if (!name)
438 return -ENOMEM;
439
440 out_attrs[idx].attr.attr.name = name;
441
442 if (output_parms[j].readable) {
443 out_attrs[idx].attr.attr.mode |= S_IRUGO;
444 out_attrs[idx].attr.show = output_attr_show;
445 }
446
447 if (output_parms[j].writable) {
448 out_attrs[idx].attr.attr.mode |= S_IWUSR;
449 out_attrs[idx].attr.store = output_attr_store;
450 }
451
452 sysfs_attr_init(&out_attrs[idx].attr.attr);
453 attrs[idx] = &out_attrs[idx].attr.attr;
454
455 out_attrs[idx].gth = gth;
456 out_attrs[idx].port = i;
457 out_attrs[idx].parm = j;
458 }
459 }
460
461 gth->output_group.name = "outputs";
462 gth->output_group.attrs = attrs;
463
464 return sysfs_create_group(&gth->dev->kobj, &gth->output_group);
465}
466
467/**
468 * intel_th_gth_disable() - disable tracing to an output device
469 * @thdev: GTH device
470 * @output: output device's descriptor
471 *
472 * This will deconfigure all masters set to output to this device,
473 * disable tracing using the force storeEn off signal and wait for the
474 * "pipeline empty" bit of the corresponding output port.
475 */
476static void intel_th_gth_disable(struct intel_th_device *thdev,
477 struct intel_th_output *output)
478{
479 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
480 unsigned long count;
481 int master;
482 u32 reg;
483
484 spin_lock(&gth->gth_lock);
485 output->active = false;
486
487	for_each_set_bit(master, gth->output[output->port].master,
488			 TH_CONFIGURABLE_MASTERS + 1) {
489 gth_master_set(gth, master, -1);
490 }
491 spin_unlock(&gth->gth_lock);
492
493 iowrite32(0, gth->base + REG_GTH_SCR);
494 iowrite32(0xfd, gth->base + REG_GTH_SCR2);
495
496 /* wait on pipeline empty for the given port */
497 for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
498 count && !(reg & BIT(output->port)); count--) {
499 reg = ioread32(gth->base + REG_GTH_STAT);
500 cpu_relax();
501 }
502
503 /* clear force capture done for next captures */
504 iowrite32(0xfc, gth->base + REG_GTH_SCR2);
505
506 if (!count)
507 dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
508 output->port);
509}
510
511/**
512 * intel_th_gth_enable() - enable tracing to an output device
513 * @thdev: GTH device
514 * @output: output device's descriptor
515 *
516 * This will configure all masters set to output to this device and
517 * enable tracing using the force storeEn signal.
518 */
519static void intel_th_gth_enable(struct intel_th_device *thdev,
520 struct intel_th_output *output)
521{
522 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
523 u32 scr = 0xfc0000;
524 int master;
525
526 spin_lock(&gth->gth_lock);
527 for_each_set_bit(master, gth->output[output->port].master,
528 TH_CONFIGURABLE_MASTERS + 1) {
529 gth_master_set(gth, master, output->port);
530 }
531
532 if (output->multiblock)
533 scr |= 0xff;
534
535 output->active = true;
536 spin_unlock(&gth->gth_lock);
537
538 iowrite32(scr, gth->base + REG_GTH_SCR);
539 iowrite32(0, gth->base + REG_GTH_SCR2);
540}
541
542/**
543 * intel_th_gth_assign() - assign output device to a GTH output port
544 * @thdev: GTH device
545 * @othdev: output device
546 *
547 * This will match a given output device's parameters against the present
548 * output ports on the GTH and fill out the relevant bits in the output
549 * device's descriptor.
550 *
551 * Return: 0 on success, -errno on error.
552 */
553static int intel_th_gth_assign(struct intel_th_device *thdev,
554 struct intel_th_device *othdev)
555{
556 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
557 int i, id;
558
559 if (othdev->type != INTEL_TH_OUTPUT)
560 return -EINVAL;
561
562 for (i = 0, id = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
563 if (gth->output[i].port_type != othdev->output.type)
564 continue;
565
566 if (othdev->id == -1 || othdev->id == id)
567 goto found;
568
569 id++;
570 }
571
572 return -ENOENT;
573
574found:
575 spin_lock(&gth->gth_lock);
576 othdev->output.port = i;
577 othdev->output.active = false;
578 gth->output[i].output = &othdev->output;
579 spin_unlock(&gth->gth_lock);
580
581 return 0;
582}
583
584/**
585 * intel_th_gth_unassign() - deassociate an output device from its output port
586 * @thdev: GTH device
587 * @othdev: output device
588 */
589static void intel_th_gth_unassign(struct intel_th_device *thdev,
590 struct intel_th_device *othdev)
591{
592 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
593 int port = othdev->output.port;
594
595 spin_lock(&gth->gth_lock);
596 othdev->output.port = -1;
597 othdev->output.active = false;
598 gth->output[port].output = NULL;
599 spin_unlock(&gth->gth_lock);
600}
601
602static int
603intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master)
604{
605 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
606 int port = 0; /* FIXME: make default output configurable */
607
608 /*
609 * everything above TH_CONFIGURABLE_MASTERS is controlled by the
610 * same register
611 */
612 if (master > TH_CONFIGURABLE_MASTERS)
613 master = TH_CONFIGURABLE_MASTERS;
614
615 spin_lock(&gth->gth_lock);
616 if (gth->master[master] == -1) {
617 set_bit(master, gth->output[port].master);
618 gth->master[master] = port;
619 }
620 spin_unlock(&gth->gth_lock);
621
622 return 0;
623}
624
625static int intel_th_gth_probe(struct intel_th_device *thdev)
626{
627 struct device *dev = &thdev->dev;
628 struct gth_device *gth;
629 struct resource *res;
630 void __iomem *base;
631 int i, ret;
632
633 res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
634 if (!res)
635 return -ENODEV;
636
637 base = devm_ioremap(dev, res->start, resource_size(res));
638 if (!base)
639 return -ENOMEM;
640
641 gth = devm_kzalloc(dev, sizeof(*gth), GFP_KERNEL);
642 if (!gth)
643 return -ENOMEM;
644
645 gth->dev = dev;
646 gth->base = base;
647 spin_lock_init(&gth->gth_lock);
648
649 ret = intel_th_gth_reset(gth);
650 if (ret)
651 return ret;
652
653 for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
654 gth->master[i] = -1;
655
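	/* read each output port's fixed type back from the hardware */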
656 for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
657 gth->output[i].gth = gth;
658 gth->output[i].index = i;
659 gth->output[i].port_type =
660 gth_output_parm_get(gth, i, TH_OUTPUT_PARM(port));
661 }
662
663 if (intel_th_output_attributes(gth) ||
664 intel_th_master_attributes(gth)) {
665 pr_warn("Can't initialize sysfs attributes\n");
666
667 if (gth->output_group.attrs)
668 sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
669 return -ENOMEM;
670 }
671
672 dev_set_drvdata(dev, gth);
673
674 return 0;
675}
676
677static void intel_th_gth_remove(struct intel_th_device *thdev)
678{
679 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
680
681 sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
682 sysfs_remove_group(&gth->dev->kobj, &gth->master_group);
683}
684
685static struct intel_th_driver intel_th_gth_driver = {
686 .probe = intel_th_gth_probe,
687 .remove = intel_th_gth_remove,
688 .assign = intel_th_gth_assign,
689 .unassign = intel_th_gth_unassign,
690 .set_output = intel_th_gth_set_output,
691 .enable = intel_th_gth_enable,
692 .disable = intel_th_gth_disable,
693 .driver = {
694 .name = "gth",
695 .owner = THIS_MODULE,
696 },
697};
698
699module_driver(intel_th_gth_driver,
700 intel_th_driver_register,
701 intel_th_driver_unregister);
702
703MODULE_ALIAS("intel_th_switch");
704MODULE_LICENSE("GPL v2");
705MODULE_DESCRIPTION("Intel(R) Trace Hub Global Trace Hub driver");
706MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
new file mode 100644
index 000000000000..3b714b7a61db
--- /dev/null
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -0,0 +1,66 @@
1/*
2 * Intel(R) Trace Hub Global Trace Hub (GTH) data structures
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __INTEL_TH_GTH_H__
17#define __INTEL_TH_GTH_H__
18
19/* Map output port parameter bits to symbolic names */
20#define TH_OUTPUT_PARM(name) \
21 TH_OUTPUT_ ## name
22
23enum intel_th_output_parm {
24 /* output port type */
25 TH_OUTPUT_PARM(port),
26 /* generate NULL packet */
27 TH_OUTPUT_PARM(null),
28 /* packet drop */
29 TH_OUTPUT_PARM(drop),
30 /* port in reset state */
31 TH_OUTPUT_PARM(reset),
32 /* flush out data */
33 TH_OUTPUT_PARM(flush),
34	/* maintenance packet frequency */
35 TH_OUTPUT_PARM(smcfreq),
36};
37
38/*
39 * Register offsets
40 */
41enum {
42 REG_GTH_GTHOPT0 = 0x00, /* Output ports 0..3 config */
43 REG_GTH_GTHOPT1 = 0x04, /* Output ports 4..7 config */
44 REG_GTH_SWDEST0 = 0x08, /* Switching destination masters 0..7 */
45 REG_GTH_GSWTDEST = 0x88, /* Global sw trace destination */
46	REG_GTH_SMCR0	= 0x9c, /* STP maintenance for ports 0/1 */
47	REG_GTH_SMCR1	= 0xa0, /* STP maintenance for ports 2/3 */
48	REG_GTH_SMCR2	= 0xa4, /* STP maintenance for ports 4/5 */
49	REG_GTH_SMCR3	= 0xa8, /* STP maintenance for ports 6/7 */
50 REG_GTH_SCR = 0xc8, /* Source control (storeEn override) */
51 REG_GTH_STAT = 0xd4, /* GTH status */
52 REG_GTH_SCR2 = 0xd8, /* Source control (force storeEn off) */
53 REG_GTH_DESTOVR = 0xdc, /* Destination override */
54 REG_GTH_SCRPD0 = 0xe0, /* ScratchPad[0] */
55 REG_GTH_SCRPD1 = 0xe4, /* ScratchPad[1] */
56 REG_GTH_SCRPD2 = 0xe8, /* ScratchPad[2] */
57 REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */
58};
59
60/* External debugger is using Intel TH */
61#define SCRPD_DEBUGGER_IN_USE BIT(24)
62
63/* waiting for Pipeline Empty bit(s) to assert for GTH */
64#define GTH_PLE_WAITLOOP_DEPTH 10000
65
66#endif /* __INTEL_TH_GTH_H__ */
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
new file mode 100644
index 000000000000..57fd72b20fae
--- /dev/null
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -0,0 +1,244 @@
1/*
2 * Intel(R) Trace Hub data structures
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __INTEL_TH_H__
17#define __INTEL_TH_H__
18
19/* intel_th_device device types */
20enum {
21 /* Devices that generate trace data */
22 INTEL_TH_SOURCE = 0,
23 /* Output ports (MSC, PTI) */
24 INTEL_TH_OUTPUT,
25 /* Switch, the Global Trace Hub (GTH) */
26 INTEL_TH_SWITCH,
27};
28
29/**
30 * struct intel_th_output - descriptor for INTEL_TH_OUTPUT type devices
31 * @port: output port number, assigned by the switch
32 * @type: GTH_{MSU,CTP,PTI}
33 * @multiblock: true for multiblock output configuration
34 * @active: true when this output is enabled
35 *
36 * Output port descriptor, used by switch driver to tell which output
37 * port this output device corresponds to. Filled in at output device's
38 * probe time by switch::assign(). Passed from output device driver to
39 * switch related code to enable/disable its port.
40 */
41struct intel_th_output {
42 int port;
43 unsigned int type;
44 bool multiblock;
45 bool active;
46};
47
48/**
49 * struct intel_th_device - device on the intel_th bus
50 * @dev: device
51 * @resource: array of resources available to this device
52 * @num_resources: number of resources in @resource array
53 * @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH}
54 * @id: device instance or -1
55 * @output: output descriptor for INTEL_TH_OUTPUT devices
56 * @name: device name to match the driver
57 */
58struct intel_th_device {
59 struct device dev;
60 struct resource *resource;
61 unsigned int num_resources;
62 unsigned int type;
63 int id;
64
65 /* INTEL_TH_OUTPUT specific */
66 struct intel_th_output output;
67
68 char name[];
69};
70
71#define to_intel_th_device(_d) \
72 container_of((_d), struct intel_th_device, dev)
73
74/**
75 * intel_th_device_get_resource() - obtain @num'th resource of type @type
76 * @thdev: the device to search the resource for
77 * @type: resource type
78 * @num: number of the resource
79 */
80static inline struct resource *
81intel_th_device_get_resource(struct intel_th_device *thdev, unsigned int type,
82 unsigned int num)
83{
84 int i;
85
86 for (i = 0; i < thdev->num_resources; i++)
87 if (resource_type(&thdev->resource[i]) == type && !num--)
88 return &thdev->resource[i];
89
90 return NULL;
91}
92
93/**
94 * intel_th_output_assigned() - check if an output device is assigned to a switch port
95 * @thdev: the output device
96 *
97 * Return: true if the device is INTEL_TH_OUTPUT *and* is assigned a port
98 */
99static inline bool
100intel_th_output_assigned(struct intel_th_device *thdev)
101{
102 return thdev->type == INTEL_TH_OUTPUT &&
103 thdev->output.port >= 0;
104}
105
106/**
107 * struct intel_th_driver - driver for an intel_th_device device
108 * @driver: generic driver
109 * @probe: probe method
110 * @remove: remove method
111 * @assign: match a given output type device against available outputs
112 * @unassign: deassociate an output type device from an output port
113 * @enable: enable tracing for a given output device
114 * @disable: disable tracing for a given output device
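 * @irq:	interrupt callback, output device specific
 * @activate:	start using an output device for tracing
 * @deactivate:	stop using an output device for tracing
 * @set_output:	set up trace routing to an output for a given master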
115 * @fops: file operations for device nodes
116 *
117 * Callbacks @probe and @remove are required for all device types.
118 * Switch device driver needs to fill in @assign, @enable and @disable
119 * callbacks.
120 */
121struct intel_th_driver {
122 struct device_driver driver;
123 int (*probe)(struct intel_th_device *thdev);
124 void (*remove)(struct intel_th_device *thdev);
125 /* switch (GTH) ops */
126 int (*assign)(struct intel_th_device *thdev,
127 struct intel_th_device *othdev);
128 void (*unassign)(struct intel_th_device *thdev,
129 struct intel_th_device *othdev);
130 void (*enable)(struct intel_th_device *thdev,
131 struct intel_th_output *output);
132 void (*disable)(struct intel_th_device *thdev,
133 struct intel_th_output *output);
134 /* output ops */
135 void (*irq)(struct intel_th_device *thdev);
136 int (*activate)(struct intel_th_device *thdev);
137 void (*deactivate)(struct intel_th_device *thdev);
138 /* file_operations for those who want a device node */
139 const struct file_operations *fops;
140
141 /* source ops */
142 int (*set_output)(struct intel_th_device *thdev,
143 unsigned int master);
144};
145
146#define to_intel_th_driver(_d) \
147 container_of((_d), struct intel_th_driver, driver)
148
149static inline struct intel_th_device *
150to_intel_th_hub(struct intel_th_device *thdev)
151{
152 struct device *parent = thdev->dev.parent;
153
154 if (!parent)
155 return NULL;
156
157 return to_intel_th_device(parent);
158}
159
160struct intel_th *
161intel_th_alloc(struct device *dev, struct resource *devres,
162 unsigned int ndevres, int irq);
163void intel_th_free(struct intel_th *th);
164
165int intel_th_driver_register(struct intel_th_driver *thdrv);
166void intel_th_driver_unregister(struct intel_th_driver *thdrv);
167
168int intel_th_trace_enable(struct intel_th_device *thdev);
169int intel_th_trace_disable(struct intel_th_device *thdev);
170int intel_th_set_output(struct intel_th_device *thdev,
171 unsigned int master);
172
173enum {
174 TH_MMIO_CONFIG = 0,
175 TH_MMIO_SW = 2,
176 TH_MMIO_END,
177};
178
179#define TH_SUBDEVICE_MAX 6
180#define TH_POSSIBLE_OUTPUTS 8
181#define TH_CONFIGURABLE_MASTERS 256
182#define TH_MSC_MAX 2
183
184/**
185 * struct intel_th - Intel TH controller
186 * @dev: driver core's device
187 * @thdev: subdevices
188 * @hub: "switch" subdevice (GTH)
189 * @id: this Intel TH controller's device ID in the system
190 * @major: device node major for output devices
191 */
192struct intel_th {
193 struct device *dev;
194
195 struct intel_th_device *thdev[TH_SUBDEVICE_MAX];
196 struct intel_th_device *hub;
197
198 int id;
199 int major;
200#ifdef CONFIG_INTEL_TH_DEBUG
201 struct dentry *dbg;
202#endif
203};
204
205/*
206 * Register windows
207 */
208enum {
209 /* Global Trace Hub (GTH) */
210 REG_GTH_OFFSET = 0x0000,
211 REG_GTH_LENGTH = 0x2000,
212
213 /* Software Trace Hub (STH) [0x4000..0x4fff] */
214 REG_STH_OFFSET = 0x4000,
215 REG_STH_LENGTH = 0x2000,
216
217 /* Memory Storage Unit (MSU) [0xa0000..0xa1fff] */
218 REG_MSU_OFFSET = 0xa0000,
219 REG_MSU_LENGTH = 0x02000,
220
221 /* Internal MSU trace buffer [0x80000..0x9ffff] */
222 BUF_MSU_OFFSET = 0x80000,
223 BUF_MSU_LENGTH = 0x20000,
224
225 /* PTI output == same window as GTH */
226 REG_PTI_OFFSET = REG_GTH_OFFSET,
227 REG_PTI_LENGTH = REG_GTH_LENGTH,
228
229	/* DCI Handler (DCIH) == same window as MSU */
230 REG_DCIH_OFFSET = REG_MSU_OFFSET,
231 REG_DCIH_LENGTH = REG_MSU_LENGTH,
232};
233
234/*
235 * GTH, output ports configuration
236 */
237enum {
238 GTH_NONE = 0,
239 GTH_MSU, /* memory/usb */
240 GTH_CTP, /* Common Trace Port */
241 GTH_PTI = 4, /* MIPI-PTI */
242};
243
244#endif /* __INTEL_TH_H__ */
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
new file mode 100644
index 000000000000..70ca27e45602
--- /dev/null
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -0,0 +1,1509 @@
1/*
2 * Intel(R) Trace Hub Memory Storage Unit
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/types.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/uaccess.h>
22#include <linux/sizes.h>
23#include <linux/printk.h>
24#include <linux/slab.h>
25#include <linux/mm.h>
26#include <linux/fs.h>
27#include <linux/io.h>
28#include <linux/dma-mapping.h>
29
30#include <asm/cacheflush.h>
31
32#include "intel_th.h"
33#include "msu.h"
34
35#define msc_dev(x) (&(x)->thdev->dev)
36
37/**
38 * struct msc_block - multiblock mode block descriptor
39 * @bdesc: pointer to hardware descriptor (beginning of the block)
40 * @addr: physical address of the block
41 */
42struct msc_block {
43 struct msc_block_desc *bdesc;
44 dma_addr_t addr;
45};
46
47/**
48 * struct msc_window - multiblock mode window descriptor
49 * @entry: window list linkage (msc::win_list)
50 * @pgoff: page offset into the buffer that this window starts at
51 * @nr_blocks: number of blocks (pages) in this window
52 * @block: array of block descriptors
53 */
54struct msc_window {
55 struct list_head entry;
56 unsigned long pgoff;
57 unsigned int nr_blocks;
58 struct msc *msc;
59 struct msc_block block[0];
60};
61
62/**
63 * struct msc_iter - iterator for msc buffer
64 * @entry: msc::iter_list linkage
65 * @msc: pointer to the MSC device
66 * @start_win: oldest window
67 * @win: current window
68 * @offset: current logical offset into the buffer
69 * @start_block: oldest block in the window
70 * @block: block number in the window
71 * @block_off: offset into current block
72 * @wrap_count: block wrapping handling
73 * @eof: end of buffer reached
74 */
75struct msc_iter {
76 struct list_head entry;
77 struct msc *msc;
78 struct msc_window *start_win;
79 struct msc_window *win;
80 unsigned long offset;
81 int start_block;
82 int block;
83 unsigned int block_off;
84 unsigned int wrap_count;
85 unsigned int eof;
86};
87
88/**
89 * struct msc - MSC device representation
90 * @reg_base: register window base address
91 * @thdev: intel_th_device pointer
92 * @win_list: list of windows in multiblock mode
93 * @nr_pages: total number of pages allocated for this buffer
94 * @single_sz: amount of data in single mode
95 * @single_wrap: single mode wrap occurred
96 * @base: buffer's base pointer
97 * @base_addr: buffer's base address
98 * @user_count: number of users of the buffer
99 * @mmap_count: number of mappings
100 * @buf_mutex: mutex to serialize access to buffer-related bits
101 *
102 * @enabled: MSC is enabled
103 * @wrap: wrapping is enabled
104 * @mode: MSC operating mode
105 * @burst_len: write burst length
106 * @index: number of this MSC in the MSU
107 */
108struct msc {
109 void __iomem *reg_base;
110 struct intel_th_device *thdev;
111
112 struct list_head win_list;
113 unsigned long nr_pages;
114 unsigned long single_sz;
115 unsigned int single_wrap : 1;
116 void *base;
117 dma_addr_t base_addr;
118
119 /* <0: no buffer, 0: no users, >0: active users */
120 atomic_t user_count;
121
122 atomic_t mmap_count;
123 struct mutex buf_mutex;
124
125 struct mutex iter_mutex;
126 struct list_head iter_list;
127
128 /* config */
129 unsigned int enabled : 1,
130 wrap : 1;
131 unsigned int mode;
132 unsigned int burst_len;
133 unsigned int index;
134};
135
136static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
137{
138 /* header hasn't been written */
139 if (!bdesc->valid_dw)
140 return true;
141
142 /* valid_dw includes the header */
143 if (!msc_data_sz(bdesc))
144 return true;
145
146 return false;
147}
148
149/**
150 * msc_oldest_window() - locate the window with oldest data
151 * @msc: MSC device
152 *
153 * This should only be used in multiblock mode. Caller should hold the
154 * msc::user_count reference.
155 *
156 * Return: the oldest window with valid data
157 */
158static struct msc_window *msc_oldest_window(struct msc *msc)
159{
160 struct msc_window *win;
161 u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
162 unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
163 unsigned int found = 0;
164
165 if (list_empty(&msc->win_list))
166 return NULL;
167
168 /*
169 * we might need a radix tree for this, depending on how
170 * many windows a typical user would allocate; ideally it's
171 * something like 2, in which case we're good
172 */
173 list_for_each_entry(win, &msc->win_list, entry) {
174 if (win->block[0].addr == win_addr)
175 found++;
176
177 /* skip the empty ones */
178 if (msc_block_is_empty(win->block[0].bdesc))
179 continue;
180
181 if (found)
182 return win;
183 }
184
185 return list_entry(msc->win_list.next, struct msc_window, entry);
186}
187
188/**
189 * msc_win_oldest_block() - locate the oldest block in a given window
190 * @win: window to look at
191 *
192 * Return: index of the block with the oldest data
193 */
194static unsigned int msc_win_oldest_block(struct msc_window *win)
195{
196 unsigned int blk;
197 struct msc_block_desc *bdesc = win->block[0].bdesc;
198
199 /* without wrapping, first block is the oldest */
200 if (!msc_block_wrapped(bdesc))
201 return 0;
202
203 /*
204 * with wrapping, last written block contains both the newest and the
205 * oldest data for this window.
206 */
207 for (blk = 0; blk < win->nr_blocks; blk++) {
208 bdesc = win->block[blk].bdesc;
209
210 if (msc_block_last_written(bdesc))
211 return blk;
212 }
213
214 return 0;
215}
216
217/**
218 * msc_is_last_win() - check if a window is the last one for a given MSC
219 * @win: window
220 * Return: true if @win is the last window in MSC's multiblock buffer
221 */
222static inline bool msc_is_last_win(struct msc_window *win)
223{
224 return win->entry.next == &win->msc->win_list;
225}
226
227/**
228 * msc_next_window() - return next window in the multiblock buffer
229 * @win: current window
230 *
231 * Return: window following the current one
232 */
233static struct msc_window *msc_next_window(struct msc_window *win)
234{
235 if (msc_is_last_win(win))
236 return list_entry(win->msc->win_list.next, struct msc_window,
237 entry);
238
239 return list_entry(win->entry.next, struct msc_window, entry);
240}
241
242static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
243{
244 return iter->win->block[iter->block].bdesc;
245}
246
247static void msc_iter_init(struct msc_iter *iter)
248{
249 memset(iter, 0, sizeof(*iter));
250 iter->start_block = -1;
251 iter->block = -1;
252}
253
254static struct msc_iter *msc_iter_install(struct msc *msc)
255{
256 struct msc_iter *iter;
257
258 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
259 if (!iter)
260 return NULL;
261
262 msc_iter_init(iter);
263 iter->msc = msc;
264
265 mutex_lock(&msc->iter_mutex);
266 list_add_tail(&iter->entry, &msc->iter_list);
267 mutex_unlock(&msc->iter_mutex);
268
269 return iter;
270}
271
272static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
273{
274 mutex_lock(&msc->iter_mutex);
275 list_del(&iter->entry);
276 mutex_unlock(&msc->iter_mutex);
277
278 kfree(iter);
279}
280
281static void msc_iter_block_start(struct msc_iter *iter)
282{
283 if (iter->start_block != -1)
284 return;
285
286 iter->start_block = msc_win_oldest_block(iter->win);
287 iter->block = iter->start_block;
288 iter->wrap_count = 0;
289
290 /*
291 * start with the block with oldest data; if data has wrapped
292 * in this window, it should be in this block
293 */
294 if (msc_block_wrapped(msc_iter_bdesc(iter)))
295 iter->wrap_count = 2;
296
297}
298
299static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
300{
301 /* already started, nothing to do */
302 if (iter->start_win)
303 return 0;
304
305 iter->start_win = msc_oldest_window(msc);
306 if (!iter->start_win)
307 return -EINVAL;
308
309 iter->win = iter->start_win;
310 iter->start_block = -1;
311
312 msc_iter_block_start(iter);
313
314 return 0;
315}
316
317static int msc_iter_win_advance(struct msc_iter *iter)
318{
319 iter->win = msc_next_window(iter->win);
320 iter->start_block = -1;
321
322 if (iter->win == iter->start_win) {
323 iter->eof++;
324 return 1;
325 }
326
327 msc_iter_block_start(iter);
328
329 return 0;
330}
331
332static int msc_iter_block_advance(struct msc_iter *iter)
333{
334 iter->block_off = 0;
335
336 /* wrapping */
337 if (iter->wrap_count && iter->block == iter->start_block) {
338 iter->wrap_count--;
339 if (!iter->wrap_count)
340 /* copied newest data from the wrapped block */
341 return msc_iter_win_advance(iter);
342 }
343
344 /* no wrapping, check for last written block */
345 if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
346 /* copied newest data for the window */
347 return msc_iter_win_advance(iter);
348
349 /* block advance */
350 if (++iter->block == iter->win->nr_blocks)
351 iter->block = 0;
352
353 /* no wrapping, sanity check in case there is no last written block */
354 if (!iter->wrap_count && iter->block == iter->start_block)
355 return msc_iter_win_advance(iter);
356
357 return 0;
358}
359
360/**
361 * msc_buffer_iterate() - go through multiblock buffer's data
362 * @iter: iterator structure
363 * @size: amount of data to scan
364 * @data: callback's private data
365 * @fn: iterator callback
366 *
367 * This will start at the window which will be written to next (containing
368 * the oldest data) and work its way to the current window, calling @fn
369 * for each chunk of data as it goes.
370 *
371 * Caller should have msc::user_count reference to make sure the buffer
372 * doesn't disappear from under us.
373 *
374 * Return: amount of data actually scanned.
375 */
376static ssize_t
377msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
378 unsigned long (*fn)(void *, void *, size_t))
379{
380 struct msc *msc = iter->msc;
381 size_t len = size;
382 unsigned int advance;
383
384 if (iter->eof)
385 return 0;
386
387 /* start with the oldest window */
388 if (msc_iter_win_start(iter, msc))
389 return 0;
390
391 do {
392 unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
393 void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
394 size_t tocopy = data_bytes, copied = 0;
395 size_t remaining = 0;
396
397 advance = 1;
398
399 /*
400 * If block wrapping happened, we need to visit the last block
401 * twice, because it contains both the oldest and the newest
402 * data in this window.
403 *
404 * First time (wrap_count==2), in the very beginning, to collect
405 * the oldest data, which is in the range
406 * (data_bytes..DATA_IN_PAGE).
407 *
408 * Second time (wrap_count==1), it's just like any other block,
409 * containing data in the range of [MSC_BDESC..data_bytes].
410 */
411 if (iter->block == iter->start_block && iter->wrap_count) {
412 tocopy = DATA_IN_PAGE - data_bytes;
413 src += data_bytes;
414 }
415
416 if (!tocopy)
417 goto next_block;
418
419 tocopy -= iter->block_off;
420 src += iter->block_off;
421
422 if (len < tocopy) {
423 tocopy = len;
424 advance = 0;
425 }
426
427 remaining = fn(data, src, tocopy);
428
429 if (remaining)
430 advance = 0;
431
432 copied = tocopy - remaining;
433 len -= copied;
434 iter->block_off += copied;
435 iter->offset += copied;
436
437 if (!advance)
438 break;
439
440next_block:
441 if (msc_iter_block_advance(iter))
442 break;
443
444 } while (len);
445
446 return size - len;
447}
448
449/**
450 * msc_buffer_clear_hw_header() - clear hw header for multiblock
451 * @msc: MSC device
452 */
453static void msc_buffer_clear_hw_header(struct msc *msc)
454{
455 struct msc_window *win;
456
457 mutex_lock(&msc->buf_mutex);
458 list_for_each_entry(win, &msc->win_list, entry) {
459 unsigned int blk;
460 size_t hw_sz = sizeof(struct msc_block_desc) -
461 offsetof(struct msc_block_desc, hw_tag);
462
463 for (blk = 0; blk < win->nr_blocks; blk++) {
464 struct msc_block_desc *bdesc = win->block[blk].bdesc;
465
466 memset(&bdesc->hw_tag, 0, hw_sz);
467 }
468 }
469 mutex_unlock(&msc->buf_mutex);
470}
471
472/**
473 * msc_configure() - set up MSC hardware
474 * @msc: the MSC device to configure
475 *
476 * Program storage mode, wrapping, burst length and trace buffer address
477 * into a given MSC. If msc::enabled is set, enable the trace, too.
478 */
479static int msc_configure(struct msc *msc)
480{
481 u32 reg;
482
483 if (msc->mode > MSC_MODE_MULTI)
484 return -ENOTSUPP;
485
486 if (msc->mode == MSC_MODE_MULTI)
487 msc_buffer_clear_hw_header(msc);
488
489 reg = msc->base_addr >> PAGE_SHIFT;
490 iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);
491
492 if (msc->mode == MSC_MODE_SINGLE) {
493 reg = msc->nr_pages;
494 iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
495 }
496
497 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
498 reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);
499
500 reg |= msc->mode << __ffs(MSC_MODE);
501 reg |= msc->burst_len << __ffs(MSC_LEN);
502 /*if (msc->mode == MSC_MODE_MULTI)
503 reg |= MSC_RD_HDR_OVRD; */
504 if (msc->wrap)
505 reg |= MSC_WRAPEN;
506 if (msc->enabled)
507 reg |= MSC_EN;
508
509 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
510
511 if (msc->enabled) {
512 msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
513 intel_th_trace_enable(msc->thdev);
514 }
515
516 return 0;
517}
518
519/**
520 * msc_disable() - disable MSC hardware
521 * @msc: MSC device to disable
522 *
523 * If @msc is enabled, disable tracing on the switch and then disable MSC
524 * storage.
525 */
526static void msc_disable(struct msc *msc)
527{
528 unsigned long count;
529 u32 reg;
530
531 if (!msc->enabled)
532 return;
533
534 intel_th_trace_disable(msc->thdev);
535
536 for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
537 count && !(reg & MSCSTS_PLE); count--) {
538 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
539 cpu_relax();
540 }
541
542 if (!count)
543 dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
544
545 if (msc->mode == MSC_MODE_SINGLE) {
546 msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);
547
548 reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
549 msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
550 dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
551 reg, msc->single_sz, msc->single_wrap);
552 }
553
554 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
555 reg &= ~MSC_EN;
556 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
557 msc->enabled = 0;
558
559 iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
560 iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);
561
562 dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
563 ioread32(msc->reg_base + REG_MSU_MSC0NWSA));
564
565 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
566 dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
567}
568
569static int intel_th_msc_activate(struct intel_th_device *thdev)
570{
571 struct msc *msc = dev_get_drvdata(&thdev->dev);
572 int ret = 0;
573
574 if (!atomic_inc_unless_negative(&msc->user_count))
575 return -ENODEV;
576
577 mutex_lock(&msc->iter_mutex);
578 if (!list_empty(&msc->iter_list))
579 ret = -EBUSY;
580 mutex_unlock(&msc->iter_mutex);
581
582 if (ret) {
583 atomic_dec(&msc->user_count);
584 return ret;
585 }
586
587 msc->enabled = 1;
588
589 return msc_configure(msc);
590}
591
592static void intel_th_msc_deactivate(struct intel_th_device *thdev)
593{
594 struct msc *msc = dev_get_drvdata(&thdev->dev);
595
596 msc_disable(msc);
597
598 atomic_dec(&msc->user_count);
599}
600
601/**
602 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
603 * @msc: MSC device
604 * @size: allocation size in bytes
605 *
606 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
607 * caller is expected to hold it.
608 *
609 * Return: 0 on success, -errno otherwise.
610 */
611static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
612{
613 unsigned int order = get_order(size);
614 struct page *page;
615
616 if (!size)
617 return 0;
618
619 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
620 if (!page)
621 return -ENOMEM;
622
623 split_page(page, order);
624 msc->nr_pages = size >> PAGE_SHIFT;
625 msc->base = page_address(page);
626 msc->base_addr = page_to_phys(page);
627
628 return 0;
629}
630
631/**
632 * msc_buffer_contig_free() - free a contiguous buffer
633 * @msc: MSC configured in SINGLE mode
634 */
635static void msc_buffer_contig_free(struct msc *msc)
636{
637 unsigned long off;
638
639 for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
640 struct page *page = virt_to_page(msc->base + off);
641
642 page->mapping = NULL;
643 __free_page(page);
644 }
645
646 msc->nr_pages = 0;
647}
648
649/**
650 * msc_buffer_contig_get_page() - find a page at a given offset
651 * @msc: MSC configured in SINGLE mode
652 * @pgoff: page offset
653 *
654 * Return: page, if @pgoff is within the range, NULL otherwise.
655 */
656static struct page *msc_buffer_contig_get_page(struct msc *msc,
657 unsigned long pgoff)
658{
659 if (pgoff >= msc->nr_pages)
660 return NULL;
661
662 return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
663}
664
665/**
666 * msc_buffer_win_alloc() - allocate a window for multiblock mode
667 * @msc: MSC device
668 * @nr_blocks: number of pages in this window
669 *
670 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
671 * to serialize, so the caller is expected to hold it.
672 *
673 * Return: 0 on success, -errno otherwise.
674 */
675static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
676{
677 struct msc_window *win;
678 unsigned long size = PAGE_SIZE;
679 int i, ret = -ENOMEM;
680
681 if (!nr_blocks)
682 return 0;
683
684 win = kzalloc(offsetof(struct msc_window, block[nr_blocks]),
685 GFP_KERNEL);
686 if (!win)
687 return -ENOMEM;
688
689 if (!list_empty(&msc->win_list)) {
690 struct msc_window *prev = list_entry(msc->win_list.prev,
691 struct msc_window, entry);
692
693 win->pgoff = prev->pgoff + prev->nr_blocks;
694 }
695
696 for (i = 0; i < nr_blocks; i++) {
697 win->block[i].bdesc = dma_alloc_coherent(msc_dev(msc), size,
698 &win->block[i].addr,
699 GFP_KERNEL);
700
701		if (!win->block[i].bdesc)
702			goto err_nomem;
703
704#ifdef CONFIG_X86
705		/* Set the page as uncached */
706		set_memory_uc((unsigned long)win->block[i].bdesc, 1);
707#endif
708 }
709
710 win->msc = msc;
711 win->nr_blocks = nr_blocks;
712
713 if (list_empty(&msc->win_list)) {
714 msc->base = win->block[0].bdesc;
715 msc->base_addr = win->block[0].addr;
716 }
717
718 list_add_tail(&win->entry, &msc->win_list);
719 msc->nr_pages += nr_blocks;
720
721 return 0;
722
723err_nomem:
724 for (i--; i >= 0; i--) {
725#ifdef CONFIG_X86
726 /* Reset the page to write-back before releasing */
727 set_memory_wb((unsigned long)win->block[i].bdesc, 1);
728#endif
729 dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
730 win->block[i].addr);
731 }
732 kfree(win);
733
734 return ret;
735}
736
737/**
738 * msc_buffer_win_free() - free a window from MSC's window list
739 * @msc: MSC device
740 * @win: window to free
741 *
742 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
743 * to serialize, so the caller is expected to hold it.
744 */
745static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
746{
747 int i;
748
749 msc->nr_pages -= win->nr_blocks;
750
751 list_del(&win->entry);
752 if (list_empty(&msc->win_list)) {
753 msc->base = NULL;
754 msc->base_addr = 0;
755 }
756
757 for (i = 0; i < win->nr_blocks; i++) {
758 struct page *page = virt_to_page(win->block[i].bdesc);
759
760 page->mapping = NULL;
761#ifdef CONFIG_X86
762 /* Reset the page to write-back before releasing */
763 set_memory_wb((unsigned long)win->block[i].bdesc, 1);
764#endif
765 dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
766 win->block[i].bdesc, win->block[i].addr);
767 }
768
769 kfree(win);
770}
771
772/**
773 * msc_buffer_relink() - set up block descriptors for multiblock mode
774 * @msc: MSC device
775 *
776 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
777 * so the caller is expected to hold it.
778 */
779static void msc_buffer_relink(struct msc *msc)
780{
781 struct msc_window *win, *next_win;
782
783 /* call with msc::mutex locked */
784 list_for_each_entry(win, &msc->win_list, entry) {
785 unsigned int blk;
786 u32 sw_tag = 0;
787
788 /*
789 * Last window's next_win should point to the first window
790 * and MSC_SW_TAG_LASTWIN should be set.
791 */
792 if (msc_is_last_win(win)) {
793 sw_tag |= MSC_SW_TAG_LASTWIN;
794 next_win = list_entry(msc->win_list.next,
795 struct msc_window, entry);
796 } else {
797 next_win = list_entry(win->entry.next,
798 struct msc_window, entry);
799 }
800
801 for (blk = 0; blk < win->nr_blocks; blk++) {
802 struct msc_block_desc *bdesc = win->block[blk].bdesc;
803
804 memset(bdesc, 0, sizeof(*bdesc));
805
806 bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT;
807
808 /*
809 * Similarly to last window, last block should point
810 * to the first one.
811 */
812 if (blk == win->nr_blocks - 1) {
813 sw_tag |= MSC_SW_TAG_LASTBLK;
814 bdesc->next_blk =
815 win->block[0].addr >> PAGE_SHIFT;
816 } else {
817 bdesc->next_blk =
818 win->block[blk + 1].addr >> PAGE_SHIFT;
819 }
820
821 bdesc->sw_tag = sw_tag;
822 bdesc->block_sz = PAGE_SIZE / 64;
823 }
824 }
825
826 /*
827 * Make the above writes globally visible before tracing is
828 * enabled to make sure hardware sees them coherently.
829 */
830 wmb();
831}
832
833static void msc_buffer_multi_free(struct msc *msc)
834{
835 struct msc_window *win, *iter;
836
837 list_for_each_entry_safe(win, iter, &msc->win_list, entry)
838 msc_buffer_win_free(msc, win);
839}
840
841static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
842 unsigned int nr_wins)
843{
844 int ret, i;
845
846 for (i = 0; i < nr_wins; i++) {
847 ret = msc_buffer_win_alloc(msc, nr_pages[i]);
848 if (ret) {
849 msc_buffer_multi_free(msc);
850 return ret;
851 }
852 }
853
854 msc_buffer_relink(msc);
855
856 return 0;
857}
858
859/**
860 * msc_buffer_free() - free buffers for MSC
861 * @msc: MSC device
862 *
863 * Free MSC's storage buffers.
864 *
865 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
866 * serialize, so the caller is expected to hold it.
867 */
868static void msc_buffer_free(struct msc *msc)
869{
870 if (msc->mode == MSC_MODE_SINGLE)
871 msc_buffer_contig_free(msc);
872 else if (msc->mode == MSC_MODE_MULTI)
873 msc_buffer_multi_free(msc);
874}
875
876/**
877 * msc_buffer_alloc() - allocate a buffer for MSC
878 * @msc: MSC device
879 * @nr_pages:	array of window sizes, in pages
880 * @nr_wins:	number of windows in @nr_pages
881 *
882 * Allocate a storage buffer for MSC. Depending on msc::mode, this is done
883 * either via msc_buffer_contig_alloc() for SINGLE operation mode, or via
884 * msc_buffer_multi_alloc(), which allocates one window per entry of
885 * @nr_pages.
886 *
887 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
888 * to serialize, so the caller is expected to hold it.
889 *
890 * Return: 0 on success, -errno otherwise.
891 */
892static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
893 unsigned int nr_wins)
894{
895 int ret;
896
897 /* -1: buffer not allocated */
898 if (atomic_read(&msc->user_count) != -1)
899 return -EBUSY;
900
901 if (msc->mode == MSC_MODE_SINGLE) {
902 if (nr_wins != 1)
903 return -EINVAL;
904
905 ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
906 } else if (msc->mode == MSC_MODE_MULTI) {
907 ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
908 } else {
909 ret = -ENOTSUPP;
910 }
911
912 if (!ret) {
913 /* allocation should be visible before the counter goes to 0 */
914 smp_mb__before_atomic();
915
916 if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
917 return -EINVAL;
918 }
919
920 return ret;
921}
922
923/**
924 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
925 * @msc: MSC device
926 *
927 * This will free the MSC buffer unless it is in use or there is no
928 * allocated buffer.
929 * Caller needs to hold msc::buf_mutex.
930 *
931 * Return: 0 on successful deallocation or if there was no buffer to
932 * deallocate, -EBUSY if there are active users.
933 */
934static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
935{
936 int count, ret = 0;
937
938 count = atomic_cmpxchg(&msc->user_count, 0, -1);
939
940 /* > 0: buffer is allocated and has users */
941 if (count > 0)
942 ret = -EBUSY;
943 /* 0: buffer is allocated, no users */
944 else if (!count)
945 msc_buffer_free(msc);
946 /* < 0: no buffer, nothing to do */
947
948 return ret;
949}
950
951/**
952 * msc_buffer_free_unless_used() - free a buffer unless it's in use
953 * @msc: MSC device
954 *
955 * This is a locked version of msc_buffer_unlocked_free_unless_used().
956 */
957static int msc_buffer_free_unless_used(struct msc *msc)
958{
959 int ret;
960
961 mutex_lock(&msc->buf_mutex);
962 ret = msc_buffer_unlocked_free_unless_used(msc);
963 mutex_unlock(&msc->buf_mutex);
964
965 return ret;
966}
967
968/**
969 * msc_buffer_get_page() - get MSC buffer page at a given offset
970 * @msc: MSC device
971 * @pgoff: page offset into the storage buffer
972 *
973 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
974 * the caller.
975 *
976 * Return: page if @pgoff corresponds to a valid buffer page or NULL.
977 */
978static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
979{
980 struct msc_window *win;
981
982 if (msc->mode == MSC_MODE_SINGLE)
983 return msc_buffer_contig_get_page(msc, pgoff);
984
985 list_for_each_entry(win, &msc->win_list, entry)
986 if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
987 goto found;
988
989 return NULL;
990
991found:
992 pgoff -= win->pgoff;
993 return virt_to_page(win->block[pgoff].bdesc);
994}
995
996/**
997 * struct msc_win_to_user_struct - data for copy_to_user() callback
998 * @buf: userspace buffer to copy data to
999 * @offset: running offset
1000 */
1001struct msc_win_to_user_struct {
1002 char __user *buf;
1003 unsigned long offset;
1004};
1005
1006/**
1007 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
1008 * @data: callback's private data
1009 * @src: source buffer
1010 * @len: amount of data to copy from the source buffer
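 *
 * Return: number of bytes that could not be copied, as with copy_to_user().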
1011 */
1012static unsigned long msc_win_to_user(void *data, void *src, size_t len)
1013{
1014 struct msc_win_to_user_struct *u = data;
1015 unsigned long ret;
1016
1017 ret = copy_to_user(u->buf + u->offset, src, len);
1018 u->offset += len - ret;
1019
1020 return ret;
1021}
1022
1023
1024/*
1025 * file operations' callbacks
1026 */
1027
1028static int intel_th_msc_open(struct inode *inode, struct file *file)
1029{
1030 struct intel_th_device *thdev = file->private_data;
1031 struct msc *msc = dev_get_drvdata(&thdev->dev);
1032 struct msc_iter *iter;
1033
1034 if (!capable(CAP_SYS_RAWIO))
1035 return -EPERM;
1036
1037 iter = msc_iter_install(msc);
1038 if (!iter)
1039 return -ENOMEM;
1040
1041 file->private_data = iter;
1042
1043 return nonseekable_open(inode, file);
1044}
1045
1046static int intel_th_msc_release(struct inode *inode, struct file *file)
1047{
1048 struct msc_iter *iter = file->private_data;
1049 struct msc *msc = iter->msc;
1050
1051 msc_iter_remove(iter, msc);
1052
1053 return 0;
1054}
1055
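/*
 * In SINGLE mode the hardware's write pointer stops at single_sz; if the
 * buffer has wrapped, the oldest data begins right there, so copy
 * [single_sz..size) first and then [0..single_sz) to give userspace one
 * linear stream.
 */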
1056static ssize_t
1057msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
1058{
1059 unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
1060 unsigned long start = off, tocopy = 0;
1061
1062 if (msc->single_wrap) {
1063 start += msc->single_sz;
1064 if (start < size) {
1065 tocopy = min(rem, size - start);
1066 if (copy_to_user(buf, msc->base + start, tocopy))
1067 return -EFAULT;
1068
1069 buf += tocopy;
1070 rem -= tocopy;
1071 start += tocopy;
1072 }
1073
1074 start &= size - 1;
1075 if (rem) {
1076 tocopy = min(rem, msc->single_sz - start);
1077 if (copy_to_user(buf, msc->base + start, tocopy))
1078 return -EFAULT;
1079
1080 rem -= tocopy;
1081 }
1082
1083 return len - rem;
1084 }
1085
1086 if (copy_to_user(buf, msc->base + start, rem))
1087 return -EFAULT;
1088
1089 return len;
1090}
1091
1092static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
1093 size_t len, loff_t *ppos)
1094{
1095 struct msc_iter *iter = file->private_data;
1096 struct msc *msc = iter->msc;
1097 size_t size;
1098 loff_t off = *ppos;
1099 ssize_t ret = 0;
1100
1101 if (!atomic_inc_unless_negative(&msc->user_count))
1102 return 0;
1103
1104 if (msc->enabled) {
1105 ret = -EBUSY;
1106 goto put_count;
1107 }
1108
1109 if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
1110 size = msc->single_sz;
1111 else
1112 size = msc->nr_pages << PAGE_SHIFT;
1113
1114	if (!size)
1115		goto put_count;
1116
1117 if (off >= size) {
1118 len = 0;
1119 goto put_count;
1120 }
1121 if (off + len >= size)
1122 len = size - off;
1123
1124 if (msc->mode == MSC_MODE_SINGLE) {
1125 ret = msc_single_to_user(msc, buf, off, len);
1126 if (ret >= 0)
1127 *ppos += ret;
1128 } else if (msc->mode == MSC_MODE_MULTI) {
1129 struct msc_win_to_user_struct u = {
1130 .buf = buf,
1131 .offset = 0,
1132 };
1133
1134 ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
1135 if (ret >= 0)
1136 *ppos = iter->offset;
1137 } else {
1138 ret = -ENOTSUPP;
1139 }
1140
1141put_count:
1142 atomic_dec(&msc->user_count);
1143
1144 return ret;
1145}
1146
1147/*
1148 * vm operations callbacks (vm_ops)
1149 */
1150
1151static void msc_mmap_open(struct vm_area_struct *vma)
1152{
1153 struct msc_iter *iter = vma->vm_file->private_data;
1154 struct msc *msc = iter->msc;
1155
1156 atomic_inc(&msc->mmap_count);
1157}
1158
1159static void msc_mmap_close(struct vm_area_struct *vma)
1160{
1161 struct msc_iter *iter = vma->vm_file->private_data;
1162 struct msc *msc = iter->msc;
1163 unsigned long pg;
1164
1165 if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
1166 return;
1167
1168 /* drop page _counts */
1169 for (pg = 0; pg < msc->nr_pages; pg++) {
1170 struct page *page = msc_buffer_get_page(msc, pg);
1171
1172 if (WARN_ON_ONCE(!page))
1173 continue;
1174
1175 if (page->mapping)
1176 page->mapping = NULL;
1177 }
1178
1179 /* last mapping -- drop user_count */
1180 atomic_dec(&msc->user_count);
1181 mutex_unlock(&msc->buf_mutex);
1182}
1183
1184static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1185{
1186 struct msc_iter *iter = vma->vm_file->private_data;
1187 struct msc *msc = iter->msc;
1188
1189 vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
1190 if (!vmf->page)
1191 return VM_FAULT_SIGBUS;
1192
1193 get_page(vmf->page);
1194 vmf->page->mapping = vma->vm_file->f_mapping;
1195 vmf->page->index = vmf->pgoff;
1196
1197 return 0;
1198}
1199
1200static const struct vm_operations_struct msc_mmap_ops = {
1201 .open = msc_mmap_open,
1202 .close = msc_mmap_close,
1203 .fault = msc_mmap_fault,
1204};
1205
1206static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
1207{
1208 unsigned long size = vma->vm_end - vma->vm_start;
1209 struct msc_iter *iter = vma->vm_file->private_data;
1210 struct msc *msc = iter->msc;
1211 int ret = -EINVAL;
1212
1213 if (!size || offset_in_page(size))
1214 return -EINVAL;
1215
1216 if (vma->vm_pgoff)
1217 return -EINVAL;
1218
1219 /* grab user_count once per mmap; drop in msc_mmap_close() */
1220 if (!atomic_inc_unless_negative(&msc->user_count))
1221 return -EINVAL;
1222
1223 if (msc->mode != MSC_MODE_SINGLE &&
1224 msc->mode != MSC_MODE_MULTI)
1225 goto out;
1226
1227 if (size >> PAGE_SHIFT != msc->nr_pages)
1228 goto out;
1229
1230 atomic_set(&msc->mmap_count, 1);
1231 ret = 0;
1232
1233out:
1234 if (ret)
1235 atomic_dec(&msc->user_count);
1236
1237 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1238 vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
1239 vma->vm_ops = &msc_mmap_ops;
1240 return ret;
1241}
1242
1243static const struct file_operations intel_th_msc_fops = {
1244 .open = intel_th_msc_open,
1245 .release = intel_th_msc_release,
1246 .read = intel_th_msc_read,
1247 .mmap = intel_th_msc_mmap,
1248 .llseek = no_llseek,
1249};
1250
1251static int intel_th_msc_init(struct msc *msc)
1252{
1253 atomic_set(&msc->user_count, -1);
1254
1255 msc->mode = MSC_MODE_MULTI;
1256 mutex_init(&msc->buf_mutex);
1257 INIT_LIST_HEAD(&msc->win_list);
1258
1259 mutex_init(&msc->iter_mutex);
1260 INIT_LIST_HEAD(&msc->iter_list);
1261
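	/*
	 * Cache the burst length that firmware programmed into this MSC:
	 * mask out the MSC_LEN field (bits 8..10 of MSCnCTL) and shift it
	 * down to bit 0; __ffs(MSC_LEN) is the field's lowest set bit.
	 */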
1262 msc->burst_len =
1263 (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
1264 __ffs(MSC_LEN);
1265
1266 return 0;
1267}
1268
1269static const char * const msc_mode[] = {
1270 [MSC_MODE_SINGLE] = "single",
1271 [MSC_MODE_MULTI] = "multi",
1272 [MSC_MODE_EXI] = "ExI",
1273 [MSC_MODE_DEBUG] = "debug",
1274};
1275
1276static ssize_t
1277wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
1278{
1279 struct msc *msc = dev_get_drvdata(dev);
1280
1281 return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
1282}
1283
1284static ssize_t
1285wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
1286 size_t size)
1287{
1288 struct msc *msc = dev_get_drvdata(dev);
1289 unsigned long val;
1290 int ret;
1291
1292 ret = kstrtoul(buf, 10, &val);
1293 if (ret)
1294 return ret;
1295
1296 msc->wrap = !!val;
1297
1298 return size;
1299}
1300
1301static DEVICE_ATTR_RW(wrap);
1302
1303static ssize_t
1304mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1305{
1306 struct msc *msc = dev_get_drvdata(dev);
1307
1308 return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
1309}
1310
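/*
 * Usage sketch (sysfs, illustrative): "echo multi > mode" selects
 * multi-window mode, provided the caller has CAP_SYS_RAWIO and the
 * buffer is not currently in use.
 */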
1311static ssize_t
1312mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
1313 size_t size)
1314{
1315 struct msc *msc = dev_get_drvdata(dev);
1316 size_t len = size;
1317 char *cp;
1318 int i, ret;
1319
1320 if (!capable(CAP_SYS_RAWIO))
1321 return -EPERM;
1322
1323 cp = memchr(buf, '\n', len);
1324 if (cp)
1325 len = cp - buf;
1326
1327	for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
1328		if (len == strlen(msc_mode[i]) &&
		    !strncmp(msc_mode[i], buf, len))
1329 goto found;
1330
1331 return -EINVAL;
1332
1333found:
1334 mutex_lock(&msc->buf_mutex);
1335 ret = msc_buffer_unlocked_free_unless_used(msc);
1336 if (!ret)
1337 msc->mode = i;
1338 mutex_unlock(&msc->buf_mutex);
1339
1340 return ret ? ret : size;
1341}
1342
1343static DEVICE_ATTR_RW(mode);
1344
1345static ssize_t
1346nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
1347{
1348 struct msc *msc = dev_get_drvdata(dev);
1349 struct msc_window *win;
1350 size_t count = 0;
1351
1352 mutex_lock(&msc->buf_mutex);
1353
1354 if (msc->mode == MSC_MODE_SINGLE)
1355		count = scnprintf(buf, PAGE_SIZE, "%lu\n", msc->nr_pages);
1356	else if (msc->mode == MSC_MODE_MULTI) {
1357		list_for_each_entry(win, &msc->win_list, entry) {
1358			count += scnprintf(buf + count, PAGE_SIZE - count,
1359					   "%u%c", win->nr_blocks,
1360 msc_is_last_win(win) ? '\n' : ',');
1361 }
1362 } else {
1363 count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
1364 }
1365
1366 mutex_unlock(&msc->buf_mutex);
1367
1368 return count;
1369}
1370
1371static ssize_t
1372nr_pages_store(struct device *dev, struct device_attribute *attr,
1373 const char *buf, size_t size)
1374{
1375 struct msc *msc = dev_get_drvdata(dev);
1376 unsigned long val, *win = NULL, *rewin;
1377 size_t len = size;
1378 const char *p = buf;
1379 char *end, *s;
1380 int ret, nr_wins = 0;
1381
1382 if (!capable(CAP_SYS_RAWIO))
1383 return -EPERM;
1384
1385 ret = msc_buffer_free_unless_used(msc);
1386 if (ret)
1387 return ret;
1388
1389 /* scan the comma-separated list of allocation sizes */
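	/*
	 * Example (illustrative sizes): writing "64,32,32" requests three
	 * windows of 64, 32 and 32 pages in multi-window mode; single mode
	 * accepts exactly one size.
	 */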
1390 end = memchr(buf, '\n', len);
1391 if (end)
1392 len = end - buf;
1393
1394 do {
1395 end = memchr(p, ',', len);
1396		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}
1397		ret = kstrtoul(s, 10, &val);
1398		kfree(s);
1399
1400 if (ret || !val)
1401 goto free_win;
1402
1403 if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
1404 ret = -EINVAL;
1405 goto free_win;
1406 }
1407
1408 nr_wins++;
1409 rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
1410 if (!rewin) {
1411 kfree(win);
1412 return -ENOMEM;
1413 }
1414
1415 win = rewin;
1416 win[nr_wins - 1] = val;
1417
1418 if (!end)
1419 break;
1420
		/* consume the number and the following comma, hence +1 */
1421		len -= end - p + 1;
1422 p = end + 1;
1423 } while (len);
1424
1425 mutex_lock(&msc->buf_mutex);
1426 ret = msc_buffer_alloc(msc, win, nr_wins);
1427 mutex_unlock(&msc->buf_mutex);
1428
1429free_win:
1430 kfree(win);
1431
1432 return ret ? ret : size;
1433}
1434
1435static DEVICE_ATTR_RW(nr_pages);
1436
1437static struct attribute *msc_output_attrs[] = {
1438 &dev_attr_wrap.attr,
1439 &dev_attr_mode.attr,
1440 &dev_attr_nr_pages.attr,
1441 NULL,
1442};
1443
1444static struct attribute_group msc_output_group = {
1445 .attrs = msc_output_attrs,
1446};
1447
1448static int intel_th_msc_probe(struct intel_th_device *thdev)
1449{
1450 struct device *dev = &thdev->dev;
1451 struct resource *res;
1452 struct msc *msc;
1453 void __iomem *base;
1454 int err;
1455
1456 res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
1457 if (!res)
1458 return -ENODEV;
1459
1460 base = devm_ioremap(dev, res->start, resource_size(res));
1461 if (!base)
1462 return -ENOMEM;
1463
1464 msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
1465 if (!msc)
1466 return -ENOMEM;
1467
1468 msc->index = thdev->id;
1469
1470 msc->thdev = thdev;
1471 msc->reg_base = base + msc->index * 0x100;
1472
1473 err = intel_th_msc_init(msc);
1474 if (err)
1475 return err;
1476
1477 err = sysfs_create_group(&dev->kobj, &msc_output_group);
1478 if (err)
1479 return err;
1480
1481 dev_set_drvdata(dev, msc);
1482
1483 return 0;
1484}
1485
1486static void intel_th_msc_remove(struct intel_th_device *thdev)
1487{
1488 sysfs_remove_group(&thdev->dev.kobj, &msc_output_group);
1489}
1490
1491static struct intel_th_driver intel_th_msc_driver = {
1492 .probe = intel_th_msc_probe,
1493 .remove = intel_th_msc_remove,
1494 .activate = intel_th_msc_activate,
1495 .deactivate = intel_th_msc_deactivate,
1496 .fops = &intel_th_msc_fops,
1497 .driver = {
1498 .name = "msc",
1499 .owner = THIS_MODULE,
1500 },
1501};
1502
1503module_driver(intel_th_msc_driver,
1504 intel_th_driver_register,
1505 intel_th_driver_unregister);
1506
1507MODULE_LICENSE("GPL v2");
1508MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
1509MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
new file mode 100644
index 000000000000..9b710e4aa98a
--- /dev/null
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -0,0 +1,116 @@
1/*
2 * Intel(R) Trace Hub Memory Storage Unit (MSU) data structures
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __INTEL_TH_MSU_H__
17#define __INTEL_TH_MSU_H__
18
19enum {
20 REG_MSU_MSUPARAMS = 0x0000,
21 REG_MSU_MSUSTS = 0x0008,
22 REG_MSU_MSC0CTL = 0x0100, /* MSC0 control */
23 REG_MSU_MSC0STS = 0x0104, /* MSC0 status */
24 REG_MSU_MSC0BAR = 0x0108, /* MSC0 output base address */
25 REG_MSU_MSC0SIZE = 0x010c, /* MSC0 output size */
26 REG_MSU_MSC0MWP = 0x0110, /* MSC0 write pointer */
27 REG_MSU_MSC0NWSA = 0x011c, /* MSC0 next window start address */
28
29 REG_MSU_MSC1CTL = 0x0200, /* MSC1 control */
30 REG_MSU_MSC1STS = 0x0204, /* MSC1 status */
31 REG_MSU_MSC1BAR = 0x0208, /* MSC1 output base address */
32 REG_MSU_MSC1SIZE = 0x020c, /* MSC1 output size */
33 REG_MSU_MSC1MWP = 0x0210, /* MSC1 write pointer */
34 REG_MSU_MSC1NWSA = 0x021c, /* MSC1 next window start address */
35};
36
37/* MSUSTS bits */
38#define MSUSTS_MSU_INT BIT(0)
39
40/* MSCnCTL bits */
41#define MSC_EN BIT(0)
42#define MSC_WRAPEN BIT(1)
43#define MSC_RD_HDR_OVRD BIT(2)
44#define MSC_MODE (BIT(4) | BIT(5))
45#define MSC_LEN (BIT(8) | BIT(9) | BIT(10))
46
47/* MSC operating modes (MSC_MODE) */
48enum {
49 MSC_MODE_SINGLE = 0,
50 MSC_MODE_MULTI,
51 MSC_MODE_EXI,
52 MSC_MODE_DEBUG,
53};
54
55/* MSCnSTS bits */
56#define MSCSTS_WRAPSTAT BIT(1) /* Wrap occurred */
57#define MSCSTS_PLE BIT(2) /* Pipeline Empty */
58
59/*
60 * Multiblock/multiwindow block descriptor
61 */
62struct msc_block_desc {
63 u32 sw_tag;
64 u32 block_sz;
65 u32 next_blk;
66 u32 next_win;
67 u32 res0[4];
68 u32 hw_tag;
69 u32 valid_dw;
70 u32 ts_low;
71 u32 ts_high;
72 u32 res1[4];
73} __packed;
74
75#define MSC_BDESC sizeof(struct msc_block_desc)
76#define DATA_IN_PAGE (PAGE_SIZE - MSC_BDESC)
77
78/* MSC multiblock sw tag bits */
79#define MSC_SW_TAG_LASTBLK BIT(0)
80#define MSC_SW_TAG_LASTWIN BIT(1)
81
82/* MSC multiblock hw tag bits */
83#define MSC_HW_TAG_TRIGGER BIT(0)
84#define MSC_HW_TAG_BLOCKWRAP BIT(1)
85#define MSC_HW_TAG_WINWRAP BIT(2)
86#define MSC_HW_TAG_ENDBIT BIT(3)
87
88static inline unsigned long msc_data_sz(struct msc_block_desc *bdesc)
89{
90 if (!bdesc->valid_dw)
91 return 0;
92
93 return bdesc->valid_dw * 4 - MSC_BDESC;
94}
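
/*
 * Example: valid_dw counts 32-bit words in the block *including* the
 * 64-byte descriptor, so a fully written 4k block (assuming 4k pages)
 * has valid_dw == 1024 and carries 1024 * 4 - 64 == 4032 payload bytes,
 * matching DATA_IN_PAGE above.
 */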
95
96static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
97{
98 if (bdesc->hw_tag & MSC_HW_TAG_BLOCKWRAP)
99 return true;
100
101 return false;
102}
103
104static inline bool msc_block_last_written(struct msc_block_desc *bdesc)
105{
106 if ((bdesc->hw_tag & MSC_HW_TAG_ENDBIT) ||
107 (msc_data_sz(bdesc) != DATA_IN_PAGE))
108 return true;
109
110 return false;
111}
112
113/* waiting for Pipeline Empty bit(s) to assert for MSC */
114#define MSC_PLE_WAITLOOP_DEPTH 10000
115
116#endif /* __INTEL_TH_MSU_H__ */
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
new file mode 100644
index 000000000000..641e87936064
--- /dev/null
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -0,0 +1,86 @@
1/*
2 * Intel(R) Trace Hub pci driver
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/types.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/sysfs.h>
22#include <linux/pci.h>
23
24#include "intel_th.h"
25
26#define DRIVER_NAME "intel_th_pci"
27
28#define BAR_MASK (BIT(TH_MMIO_CONFIG) | BIT(TH_MMIO_SW))
29
30static int intel_th_pci_probe(struct pci_dev *pdev,
31 const struct pci_device_id *id)
32{
33 struct intel_th *th;
34 int err;
35
36 err = pcim_enable_device(pdev);
37 if (err)
38 return err;
39
40 err = pcim_iomap_regions_request_all(pdev, BAR_MASK, DRIVER_NAME);
41 if (err)
42 return err;
43
44 th = intel_th_alloc(&pdev->dev, pdev->resource,
45 DEVICE_COUNT_RESOURCE, pdev->irq);
46 if (IS_ERR(th))
47 return PTR_ERR(th);
48
49 pci_set_drvdata(pdev, th);
50
51 return 0;
52}
53
54static void intel_th_pci_remove(struct pci_dev *pdev)
55{
56 struct intel_th *th = pci_get_drvdata(pdev);
57
58 intel_th_free(th);
59}
60
61static const struct pci_device_id intel_th_pci_id_table[] = {
62 {
63 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9d26),
64 .driver_data = (kernel_ulong_t)0,
65 },
66 {
67 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126),
68 .driver_data = (kernel_ulong_t)0,
69 },
70 { 0 },
71};
72
73MODULE_DEVICE_TABLE(pci, intel_th_pci_id_table);
74
75static struct pci_driver intel_th_pci_driver = {
76 .name = DRIVER_NAME,
77 .id_table = intel_th_pci_id_table,
78 .probe = intel_th_pci_probe,
79 .remove = intel_th_pci_remove,
80};
81
82module_pci_driver(intel_th_pci_driver);
83
84MODULE_LICENSE("GPL v2");
85MODULE_DESCRIPTION("Intel(R) Trace Hub PCI controller driver");
86MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@intel.com>");
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
new file mode 100644
index 000000000000..57cbfdcc7ef0
--- /dev/null
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -0,0 +1,252 @@
1/*
2 * Intel(R) Trace Hub PTI output driver
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/types.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/sizes.h>
22#include <linux/printk.h>
23#include <linux/slab.h>
24#include <linux/mm.h>
25#include <linux/io.h>
26
27#include "intel_th.h"
28#include "pti.h"
29
30struct pti_device {
31 void __iomem *base;
32 struct intel_th_device *thdev;
33 unsigned int mode;
34 unsigned int freeclk;
35 unsigned int clkdiv;
36 unsigned int patgen;
37};
38
39/* map PTI widths to MODE settings of PTI_CTL register */
40static const unsigned int pti_mode[] = {
41 0, 4, 8, 0, 12, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0,
42};
43
44static int pti_width_mode(unsigned int width)
45{
46 int i;
47
48 for (i = 0; i < ARRAY_SIZE(pti_mode); i++)
49 if (pti_mode[i] == width)
50 return i;
51
52 return -EINVAL;
53}
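
/*
 * Example: pti_width_mode(16) returns 8 because pti_mode[8] == 16;
 * widths that don't appear in pti_mode[] yield -EINVAL.
 */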
54
55static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
56 char *buf)
57{
58 struct pti_device *pti = dev_get_drvdata(dev);
59
60 return scnprintf(buf, PAGE_SIZE, "%d\n", pti_mode[pti->mode]);
61}
62
63static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
64 const char *buf, size_t size)
65{
66 struct pti_device *pti = dev_get_drvdata(dev);
67 unsigned long val;
68 int ret;
69
70 ret = kstrtoul(buf, 10, &val);
71 if (ret)
72 return ret;
73
74 ret = pti_width_mode(val);
75 if (ret < 0)
76 return ret;
77
78 pti->mode = ret;
79
80 return size;
81}
82
83static DEVICE_ATTR_RW(mode);
84
85static ssize_t
86freerunning_clock_show(struct device *dev, struct device_attribute *attr,
87 char *buf)
88{
89 struct pti_device *pti = dev_get_drvdata(dev);
90
91 return scnprintf(buf, PAGE_SIZE, "%d\n", pti->freeclk);
92}
93
94static ssize_t
95freerunning_clock_store(struct device *dev, struct device_attribute *attr,
96 const char *buf, size_t size)
97{
98 struct pti_device *pti = dev_get_drvdata(dev);
99 unsigned long val;
100 int ret;
101
102 ret = kstrtoul(buf, 10, &val);
103 if (ret)
104 return ret;
105
106 pti->freeclk = !!val;
107
108 return size;
109}
110
111static DEVICE_ATTR_RW(freerunning_clock);
112
113static ssize_t
114clock_divider_show(struct device *dev, struct device_attribute *attr,
115 char *buf)
116{
117 struct pti_device *pti = dev_get_drvdata(dev);
118
119 return scnprintf(buf, PAGE_SIZE, "%d\n", 1u << pti->clkdiv);
120}
121
122static ssize_t
123clock_divider_store(struct device *dev, struct device_attribute *attr,
124 const char *buf, size_t size)
125{
126 struct pti_device *pti = dev_get_drvdata(dev);
127 unsigned long val;
128 int ret;
129
130 ret = kstrtoul(buf, 10, &val);
131 if (ret)
132 return ret;
133
134	if (!is_power_of_2(val) || val > 8)
135		return -EINVAL;
136
	/* store the exponent, so clock_divider_show()'s 1 << clkdiv matches */
137	pti->clkdiv = __ffs(val);
138
139 return size;
140}
141
142static DEVICE_ATTR_RW(clock_divider);
143
144static struct attribute *pti_output_attrs[] = {
145 &dev_attr_mode.attr,
146 &dev_attr_freerunning_clock.attr,
147 &dev_attr_clock_divider.attr,
148 NULL,
149};
150
151static struct attribute_group pti_output_group = {
152 .attrs = pti_output_attrs,
153};
154
155static int intel_th_pti_activate(struct intel_th_device *thdev)
156{
157 struct pti_device *pti = dev_get_drvdata(&thdev->dev);
158 u32 ctl = PTI_EN;
159
160 if (pti->patgen)
161 ctl |= pti->patgen << __ffs(PTI_PATGENMODE);
162 if (pti->freeclk)
163 ctl |= PTI_FCEN;
164 ctl |= pti->mode << __ffs(PTI_MODE);
165 ctl |= pti->clkdiv << __ffs(PTI_CLKDIV);
166
167 iowrite32(ctl, pti->base + REG_PTI_CTL);
168
169 intel_th_trace_enable(thdev);
170
171 return 0;
172}
173
174static void intel_th_pti_deactivate(struct intel_th_device *thdev)
175{
176 struct pti_device *pti = dev_get_drvdata(&thdev->dev);
177
178 intel_th_trace_disable(thdev);
179
180 iowrite32(0, pti->base + REG_PTI_CTL);
181}
182
183static void read_hw_config(struct pti_device *pti)
184{
185 u32 ctl = ioread32(pti->base + REG_PTI_CTL);
186
187 pti->mode = (ctl & PTI_MODE) >> __ffs(PTI_MODE);
188 pti->clkdiv = (ctl & PTI_CLKDIV) >> __ffs(PTI_CLKDIV);
189 pti->freeclk = !!(ctl & PTI_FCEN);
190
191 if (!pti_mode[pti->mode])
192 pti->mode = pti_width_mode(4);
193 if (!pti->clkdiv)
194 pti->clkdiv = 1;
195}
196
197static int intel_th_pti_probe(struct intel_th_device *thdev)
198{
199 struct device *dev = &thdev->dev;
200 struct resource *res;
201 struct pti_device *pti;
202 void __iomem *base;
203 int ret;
204
205 res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
206 if (!res)
207 return -ENODEV;
208
209 base = devm_ioremap(dev, res->start, resource_size(res));
210 if (!base)
211 return -ENOMEM;
212
213 pti = devm_kzalloc(dev, sizeof(*pti), GFP_KERNEL);
214 if (!pti)
215 return -ENOMEM;
216
217 pti->thdev = thdev;
218 pti->base = base;
219
220 read_hw_config(pti);
221
222 ret = sysfs_create_group(&dev->kobj, &pti_output_group);
223 if (ret)
224 return ret;
225
226 dev_set_drvdata(dev, pti);
227
228 return 0;
229}
230
231static void intel_th_pti_remove(struct intel_th_device *thdev)
232{
	/* tear down the sysfs group created in intel_th_pti_probe() */
	sysfs_remove_group(&thdev->dev.kobj, &pti_output_group);
233}
234
235static struct intel_th_driver intel_th_pti_driver = {
236 .probe = intel_th_pti_probe,
237 .remove = intel_th_pti_remove,
238 .activate = intel_th_pti_activate,
239 .deactivate = intel_th_pti_deactivate,
240 .driver = {
241 .name = "pti",
242 .owner = THIS_MODULE,
243 },
244};
245
246module_driver(intel_th_pti_driver,
247 intel_th_driver_register,
248 intel_th_driver_unregister);
249
250MODULE_LICENSE("GPL v2");
251MODULE_DESCRIPTION("Intel(R) Trace Hub PTI output driver");
252MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h
new file mode 100644
index 000000000000..20883f5628cf
--- /dev/null
+++ b/drivers/hwtracing/intel_th/pti.h
@@ -0,0 +1,29 @@
1/*
2 * Intel(R) Trace Hub PTI output data structures
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __INTEL_TH_PTI_H__
17#define __INTEL_TH_PTI_H__
18
19enum {
20 REG_PTI_CTL = 0x1c00,
21};
22
23#define PTI_EN BIT(0)
24#define PTI_FCEN BIT(1)
25#define PTI_MODE 0xf0
26#define PTI_CLKDIV 0x000f0000
27#define PTI_PATGENMODE 0x00f00000
28
29#endif /* __INTEL_TH_PTI_H__ */
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
new file mode 100644
index 000000000000..56101c33e10f
--- /dev/null
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -0,0 +1,259 @@
1/*
2 * Intel(R) Trace Hub Software Trace Hub support
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/types.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/io.h>
22#include <linux/mm.h>
23#include <linux/slab.h>
24#include <linux/stm.h>
25
26#include "intel_th.h"
27#include "sth.h"
28
29struct sth_device {
30 void __iomem *base;
31 void __iomem *channels;
32 phys_addr_t channels_phys;
33 struct device *dev;
34 struct stm_data stm;
35 unsigned int sw_nmasters;
36};
37
38static struct intel_th_channel __iomem *
39sth_channel(struct sth_device *sth, unsigned int master, unsigned int channel)
40{
41 struct intel_th_channel __iomem *sw_map = sth->channels;
42
43 return &sw_map[(master - sth->stm.sw_start) * sth->stm.sw_nchannels +
44 channel];
45}
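
/*
 * Example (illustrative numbers): with sw_start == 256 and
 * sw_nchannels == 128, master 257 channel 3 maps to
 * sw_map[1 * 128 + 3], each entry being the 64-byte
 * struct intel_th_channel in the channel MMIO region.
 */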
46
47static void sth_iowrite(void __iomem *dest, const unsigned char *payload,
48 unsigned int size)
49{
50 switch (size) {
51#ifdef CONFIG_64BIT
52 case 8:
53 writeq_relaxed(*(u64 *)payload, dest);
54 break;
55#endif
56 case 4:
57 writel_relaxed(*(u32 *)payload, dest);
58 break;
59 case 2:
60 writew_relaxed(*(u16 *)payload, dest);
61 break;
62 case 1:
63 writeb_relaxed(*(u8 *)payload, dest);
64 break;
65 default:
66 break;
67 }
68}
69
70static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
71 unsigned int channel, unsigned int packet,
72 unsigned int flags, unsigned int size,
73 const unsigned char *payload)
74{
75 struct sth_device *sth = container_of(stm_data, struct sth_device, stm);
76 struct intel_th_channel __iomem *out =
77 sth_channel(sth, master, channel);
78 u64 __iomem *outp = &out->Dn;
79 unsigned long reg = REG_STH_TRIG;
80
81#ifndef CONFIG_64BIT
82 if (size > 4)
83 size = 4;
84#endif
85
86 size = rounddown_pow_of_two(size);
87
88 switch (packet) {
89 /* Global packets (GERR, XSYNC, TRIG) are sent with register writes */
90 case STP_PACKET_GERR:
91 reg += 4;
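		/* fall through */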
92 case STP_PACKET_XSYNC:
93 reg += 8;
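		/* fall through */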
94 case STP_PACKET_TRIG:
95 if (flags & STP_PACKET_TIMESTAMPED)
96 reg += 4;
97 iowrite8(*payload, sth->base + reg);
98 break;
99
100 case STP_PACKET_MERR:
101 sth_iowrite(&out->MERR, payload, size);
102 break;
103
104 case STP_PACKET_FLAG:
105 if (flags & STP_PACKET_TIMESTAMPED)
106 outp = (u64 __iomem *)&out->FLAG_TS;
107 else
108 outp = (u64 __iomem *)&out->FLAG;
109
110 size = 1;
111 sth_iowrite(outp, payload, size);
112 break;
113
114 case STP_PACKET_USER:
115 if (flags & STP_PACKET_TIMESTAMPED)
116 outp = &out->USER_TS;
117 else
118 outp = &out->USER;
119 sth_iowrite(outp, payload, size);
120 break;
121
122 case STP_PACKET_DATA:
123 outp = &out->Dn;
124
125 if (flags & STP_PACKET_TIMESTAMPED)
126 outp += 2;
127 if (flags & STP_PACKET_MARKED)
128 outp++;
129
130 sth_iowrite(outp, payload, size);
131 break;
132 }
133
134 return size;
135}
136
137static phys_addr_t
138sth_stm_mmio_addr(struct stm_data *stm_data, unsigned int master,
139 unsigned int channel, unsigned int nr_chans)
140{
141 struct sth_device *sth = container_of(stm_data, struct sth_device, stm);
142 phys_addr_t addr;
143
144 master -= sth->stm.sw_start;
145 addr = sth->channels_phys + (master * sth->stm.sw_nchannels + channel) *
146 sizeof(struct intel_th_channel);
147
148 if (offset_in_page(addr) ||
149 offset_in_page(nr_chans * sizeof(struct intel_th_channel)))
150 return 0;
151
152 return addr;
153}
154
155static int sth_stm_link(struct stm_data *stm_data, unsigned int master,
156 unsigned int channel)
157{
158 struct sth_device *sth = container_of(stm_data, struct sth_device, stm);
159
160 intel_th_set_output(to_intel_th_device(sth->dev), master);
161
162 return 0;
163}
164
165static int intel_th_sw_init(struct sth_device *sth)
166{
167 u32 reg;
168
169 reg = ioread32(sth->base + REG_STH_STHCAP1);
170 sth->stm.sw_nchannels = reg & 0xff;
171
172 reg = ioread32(sth->base + REG_STH_STHCAP0);
173 sth->stm.sw_start = reg & 0xffff;
174 sth->stm.sw_end = reg >> 16;
175
176 sth->sw_nmasters = sth->stm.sw_end - sth->stm.sw_start;
177 dev_dbg(sth->dev, "sw_start: %x sw_end: %x masters: %x nchannels: %x\n",
178 sth->stm.sw_start, sth->stm.sw_end, sth->sw_nmasters,
179 sth->stm.sw_nchannels);
180
181 return 0;
182}
183
184static int intel_th_sth_probe(struct intel_th_device *thdev)
185{
186 struct device *dev = &thdev->dev;
187 struct sth_device *sth;
188 struct resource *res;
189 void __iomem *base, *channels;
190 int err;
191
192 res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
193 if (!res)
194 return -ENODEV;
195
196 base = devm_ioremap(dev, res->start, resource_size(res));
197 if (!base)
198 return -ENOMEM;
199
200 res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 1);
201 if (!res)
202 return -ENODEV;
203
204 channels = devm_ioremap(dev, res->start, resource_size(res));
205 if (!channels)
206 return -ENOMEM;
207
208 sth = devm_kzalloc(dev, sizeof(*sth), GFP_KERNEL);
209 if (!sth)
210 return -ENOMEM;
211
212 sth->dev = dev;
213 sth->base = base;
214 sth->channels = channels;
215 sth->channels_phys = res->start;
216 sth->stm.name = dev_name(dev);
217 sth->stm.packet = sth_stm_packet;
218 sth->stm.mmio_addr = sth_stm_mmio_addr;
219 sth->stm.sw_mmiosz = sizeof(struct intel_th_channel);
220 sth->stm.link = sth_stm_link;
221
222 err = intel_th_sw_init(sth);
223 if (err)
224 return err;
225
226 err = stm_register_device(dev, &sth->stm, THIS_MODULE);
227 if (err) {
228 dev_err(dev, "stm_register_device failed\n");
229 return err;
230 }
231
232 dev_set_drvdata(dev, sth);
233
234 return 0;
235}
236
237static void intel_th_sth_remove(struct intel_th_device *thdev)
238{
239 struct sth_device *sth = dev_get_drvdata(&thdev->dev);
240
241 stm_unregister_device(&sth->stm);
242}
243
244static struct intel_th_driver intel_th_sth_driver = {
245 .probe = intel_th_sth_probe,
246 .remove = intel_th_sth_remove,
247 .driver = {
248 .name = "sth",
249 .owner = THIS_MODULE,
250 },
251};
252
253module_driver(intel_th_sth_driver,
254 intel_th_driver_register,
255 intel_th_driver_unregister);
256
257MODULE_LICENSE("GPL v2");
258MODULE_DESCRIPTION("Intel(R) Trace Hub Software Trace Hub driver");
259MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@intel.com>");
diff --git a/drivers/hwtracing/intel_th/sth.h b/drivers/hwtracing/intel_th/sth.h
new file mode 100644
index 000000000000..f1390cd4f2ed
--- /dev/null
+++ b/drivers/hwtracing/intel_th/sth.h
@@ -0,0 +1,42 @@
1/*
2 * Intel(R) Trace Hub Software Trace Hub (STH) data structures
3 *
4 * Copyright (C) 2014-2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef __INTEL_TH_STH_H__
17#define __INTEL_TH_STH_H__
18
19enum {
20 REG_STH_STHCAP0 = 0x0000, /* capabilities pt1 */
21 REG_STH_STHCAP1 = 0x0004, /* capabilities pt2 */
22 REG_STH_TRIG = 0x0008, /* TRIG packet payload */
23 REG_STH_TRIG_TS = 0x000c, /* TRIG_TS packet payload */
24 REG_STH_XSYNC = 0x0010, /* XSYNC packet payload */
25 REG_STH_XSYNC_TS = 0x0014, /* XSYNC_TS packet payload */
26 REG_STH_GERR = 0x0018, /* GERR packet payload */
27};
28
29struct intel_th_channel {
30 u64 Dn;
31 u64 DnM;
32 u64 DnTS;
33 u64 DnMTS;
34 u64 USER;
35 u64 USER_TS;
36 u32 FLAG;
37 u32 FLAG_TS;
38 u32 MERR;
39 u32 __unused;
40} __packed;
41
42#endif /* __INTEL_TH_STH_H__ */
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
new file mode 100644
index 000000000000..83e9f591a54b
--- /dev/null
+++ b/drivers/hwtracing/stm/Kconfig
@@ -0,0 +1,26 @@
1config STM
2 tristate "System Trace Module devices"
3 select CONFIGFS_FS
4 help
5 A System Trace Module (STM) is a device exporting data in System
6 Trace Protocol (STP) format as defined by MIPI STP standards.
7 Examples of such devices are Intel(R) Trace Hub and Coresight STM.
8
9 Say Y here to enable System Trace Module device support.
10
11config STM_DUMMY
12 tristate "Dummy STM driver"
13 help
14 This is a simple dummy device that pretends to be an stm device
15 and discards your data. Use for stm class testing.
16
17 If you don't know what this is, say N.
18
19config STM_SOURCE_CONSOLE
20 tristate "Kernel console over STM devices"
21 help
22 This is a kernel space trace source that sends kernel log
23 messages to trace hosts over STM devices.
24
25 If you want to send kernel console messages over STM devices,
26 say Y.
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
new file mode 100644
index 000000000000..f9312c38dd7a
--- /dev/null
+++ b/drivers/hwtracing/stm/Makefile
@@ -0,0 +1,9 @@
1obj-$(CONFIG_STM) += stm_core.o
2
3stm_core-y := core.o policy.o
4
5obj-$(CONFIG_STM_DUMMY) += dummy_stm.o
6
7obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o
8
9stm_console-y := console.o
diff --git a/drivers/hwtracing/stm/console.c b/drivers/hwtracing/stm/console.c
new file mode 100644
index 000000000000..c9d9a8d2ff52
--- /dev/null
+++ b/drivers/hwtracing/stm/console.c
@@ -0,0 +1,80 @@
1/*
2 * Simple kernel console driver for STM devices
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * STM console will send kernel messages over STM devices to a trace host.
15 */
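
/*
 * Usage sketch (stm device name is illustrative): link this source to
 * a registered stm device via sysfs; printk output is then mirrored
 * over that device:
 *
 *   echo dummy_stm > /sys/class/stm_source/console/stm_source_link
 */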
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/console.h>
20#include <linux/slab.h>
21#include <linux/stm.h>
22
23static int stm_console_link(struct stm_source_data *data);
24static void stm_console_unlink(struct stm_source_data *data);
25
26static struct stm_console {
27 struct stm_source_data data;
28 struct console console;
29} stm_console = {
30 .data = {
31 .name = "console",
32 .nr_chans = 1,
33 .link = stm_console_link,
34 .unlink = stm_console_unlink,
35 },
36};
37
38static void
39stm_console_write(struct console *con, const char *buf, unsigned len)
40{
41 struct stm_console *sc = container_of(con, struct stm_console, console);
42
43 stm_source_write(&sc->data, 0, buf, len);
44}
45
46static int stm_console_link(struct stm_source_data *data)
47{
48 struct stm_console *sc = container_of(data, struct stm_console, data);
49
50 strcpy(sc->console.name, "stm_console");
51 sc->console.write = stm_console_write;
52 sc->console.flags = CON_ENABLED | CON_PRINTBUFFER;
53 register_console(&sc->console);
54
55 return 0;
56}
57
58static void stm_console_unlink(struct stm_source_data *data)
59{
60 struct stm_console *sc = container_of(data, struct stm_console, data);
61
62 unregister_console(&sc->console);
63}
64
65static int stm_console_init(void)
66{
67 return stm_source_register_device(NULL, &stm_console.data);
68}
69
70static void stm_console_exit(void)
71{
72 stm_source_unregister_device(&stm_console.data);
73}
74
75module_init(stm_console_init);
76module_exit(stm_console_exit);
77
78MODULE_LICENSE("GPL v2");
79MODULE_DESCRIPTION("stm_console driver");
80MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
new file mode 100644
index 000000000000..b6445d9e5453
--- /dev/null
+++ b/drivers/hwtracing/stm/core.c
@@ -0,0 +1,1032 @@
1/*
2 * System Trace Module (STM) infrastructure
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * STM class implements generic infrastructure for System Trace Module devices
15 * as defined in MIPI STPv2 specification.
16 */
17
18#include <linux/uaccess.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/device.h>
22#include <linux/compat.h>
23#include <linux/kdev_t.h>
24#include <linux/srcu.h>
25#include <linux/slab.h>
26#include <linux/stm.h>
27#include <linux/fs.h>
28#include <linux/mm.h>
29#include "stm.h"
30
31#include <uapi/linux/stm.h>
32
33static unsigned int stm_core_up;
34
35/*
36 * The SRCU here makes sure that STM device doesn't disappear from under a
37 * stm_source_write() caller, which may want to have as little overhead as
38 * possible.
39 */
40static struct srcu_struct stm_source_srcu;
41
42static ssize_t masters_show(struct device *dev,
43 struct device_attribute *attr,
44 char *buf)
45{
46 struct stm_device *stm = to_stm_device(dev);
47 int ret;
48
49 ret = sprintf(buf, "%u %u\n", stm->data->sw_start, stm->data->sw_end);
50
51 return ret;
52}
53
54static DEVICE_ATTR_RO(masters);
55
56static ssize_t channels_show(struct device *dev,
57 struct device_attribute *attr,
58 char *buf)
59{
60 struct stm_device *stm = to_stm_device(dev);
61 int ret;
62
63 ret = sprintf(buf, "%u\n", stm->data->sw_nchannels);
64
65 return ret;
66}
67
68static DEVICE_ATTR_RO(channels);
69
70static struct attribute *stm_attrs[] = {
71 &dev_attr_masters.attr,
72 &dev_attr_channels.attr,
73 NULL,
74};
75
76ATTRIBUTE_GROUPS(stm);
77
78static struct class stm_class = {
79 .name = "stm",
80 .dev_groups = stm_groups,
81};
82
83static int stm_dev_match(struct device *dev, const void *data)
84{
85 const char *name = data;
86
87 return sysfs_streq(name, dev_name(dev));
88}
89
90/**
91 * stm_find_device() - find stm device by name
92 * @buf: character buffer containing the name
93 *
94 * This is called when either policy gets assigned to an stm device or an
95 * stm_source device gets linked to an stm device.
96 *
97 * This grabs device's reference (get_device()) and module reference, both
98 * of which the calling path needs to make sure to drop with stm_put_device().
99 *
100 * Return: stm device pointer or null if lookup failed.
101 */
102struct stm_device *stm_find_device(const char *buf)
103{
104 struct stm_device *stm;
105 struct device *dev;
106
107 if (!stm_core_up)
108 return NULL;
109
110 dev = class_find_device(&stm_class, NULL, buf, stm_dev_match);
111 if (!dev)
112 return NULL;
113
114 stm = to_stm_device(dev);
115 if (!try_module_get(stm->owner)) {
116 put_device(dev);
117 return NULL;
118 }
119
120 return stm;
121}
122
123/**
124 * stm_put_device() - drop references on the stm device
125 * @stm: stm device, previously acquired by stm_find_device()
126 *
127 * This drops the module reference and device reference taken by
128 * stm_find_device().
129 */
130void stm_put_device(struct stm_device *stm)
131{
132 module_put(stm->owner);
133 put_device(&stm->dev);
134}
135
136/*
137 * Internally we only care about software-writable masters here, that is the
138 * ones in the range [stm_data->sw_start..stm_data->sw_end], however we need
139 * original master numbers to be visible externally, since they are the ones
140 * that will appear in the STP stream. Thus, the internal bookkeeping uses
141 * $master - stm_data->sw_start to reference master descriptors and such.
142 */
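
/*
 * For example (illustrative numbers): with sw_start == 256,
 * __stm_master(stm, 256) is stm->masters[0] and __stm_master(stm, 300)
 * is stm->masters[44], while the STP stream still carries master IDs
 * 256 and 300.
 */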
143
144#define __stm_master(_s, _m) \
145 ((_s)->masters[(_m) - (_s)->data->sw_start])
146
147static inline struct stp_master *
148stm_master(struct stm_device *stm, unsigned int idx)
149{
150 if (idx < stm->data->sw_start || idx > stm->data->sw_end)
151 return NULL;
152
153 return __stm_master(stm, idx);
154}
155
156static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
157{
158 struct stp_master *master;
159 size_t size;
160
	/* size the channel bitmap in whole longs, as the bitmap ops require */
161	size = BITS_TO_LONGS(stm->data->sw_nchannels) * sizeof(long);
162	size += sizeof(struct stp_master);
163 master = kzalloc(size, GFP_ATOMIC);
164 if (!master)
165 return -ENOMEM;
166
167 master->nr_free = stm->data->sw_nchannels;
168 __stm_master(stm, idx) = master;
169
170 return 0;
171}
172
173static void stp_master_free(struct stm_device *stm, unsigned int idx)
174{
175 struct stp_master *master = stm_master(stm, idx);
176
177 if (!master)
178 return;
179
180 __stm_master(stm, idx) = NULL;
181 kfree(master);
182}
183
184static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
185{
186 struct stp_master *master = stm_master(stm, output->master);
187
188 if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
189 return;
190
191 bitmap_allocate_region(&master->chan_map[0], output->channel,
192 ilog2(output->nr_chans));
193
194 master->nr_free -= output->nr_chans;
195}
196
197static void
198stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
199{
200 struct stp_master *master = stm_master(stm, output->master);
201
202 bitmap_release_region(&master->chan_map[0], output->channel,
203 ilog2(output->nr_chans));
204
205	master->nr_free += output->nr_chans;
206	output->nr_chans = 0;
207}
208
209/*
210 * This is like bitmap_find_free_region(), except it can ignore @start bits
211 * at the beginning.
212 */
213static int find_free_channels(unsigned long *bitmap, unsigned int start,
214 unsigned int end, unsigned int width)
215{
216 unsigned int pos;
217 int i;
218
219 for (pos = start; pos < end + 1; pos = ALIGN(pos, width)) {
220 pos = find_next_zero_bit(bitmap, end + 1, pos);
221 if (pos + width > end + 1)
222 break;
223
224 if (pos & (width - 1))
225 continue;
226
227 for (i = 1; i < width && !test_bit(pos + i, bitmap); i++)
228 ;
229		if (i == width)
230			return pos;

		/* skip over what we've just examined, or we'd spin forever */
		pos += i;
231 }
232
233 return -1;
234}
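
/*
 * Example: with width == 4 only 4-aligned positions are considered, so
 * a 4-channel request can be placed at channel 0, 4, 8, ... but never
 * at channel 2.
 */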
235
236static int
237stm_find_master_chan(struct stm_device *stm, unsigned int width,
238 unsigned int *mstart, unsigned int mend,
239 unsigned int *cstart, unsigned int cend)
240{
241 struct stp_master *master;
242 unsigned int midx;
243 int pos, err;
244
245 for (midx = *mstart; midx <= mend; midx++) {
246 if (!stm_master(stm, midx)) {
247 err = stp_master_alloc(stm, midx);
248 if (err)
249 return err;
250 }
251
252 master = stm_master(stm, midx);
253
254 if (!master->nr_free)
255 continue;
256
257 pos = find_free_channels(master->chan_map, *cstart, cend,
258 width);
259 if (pos < 0)
260 continue;
261
262 *mstart = midx;
263 *cstart = pos;
264 return 0;
265 }
266
267 return -ENOSPC;
268}
269
270static int stm_output_assign(struct stm_device *stm, unsigned int width,
271 struct stp_policy_node *policy_node,
272 struct stm_output *output)
273{
274 unsigned int midx, cidx, mend, cend;
275 int ret = -EINVAL;
276
277 if (width > stm->data->sw_nchannels)
278 return -EINVAL;
279
280 if (policy_node) {
281 stp_policy_node_get_ranges(policy_node,
282 &midx, &mend, &cidx, &cend);
283 } else {
284 midx = stm->data->sw_start;
285 cidx = 0;
286 mend = stm->data->sw_end;
287 cend = stm->data->sw_nchannels - 1;
288 }
289
290 spin_lock(&stm->mc_lock);
291 /* output is already assigned -- shouldn't happen */
292 if (WARN_ON_ONCE(output->nr_chans))
293 goto unlock;
294
295 ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
296 if (ret)
297 goto unlock;
298
299 output->master = midx;
300 output->channel = cidx;
301 output->nr_chans = width;
302 stm_output_claim(stm, output);
303 dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width);
304
305 ret = 0;
306unlock:
307 spin_unlock(&stm->mc_lock);
308
309 return ret;
310}
311
312static void stm_output_free(struct stm_device *stm, struct stm_output *output)
313{
314 spin_lock(&stm->mc_lock);
315 if (output->nr_chans)
316 stm_output_disclaim(stm, output);
317 spin_unlock(&stm->mc_lock);
318}
319
320static int major_match(struct device *dev, const void *data)
321{
322 unsigned int major = *(unsigned int *)data;
323
324 return MAJOR(dev->devt) == major;
325}
326
327static int stm_char_open(struct inode *inode, struct file *file)
328{
329 struct stm_file *stmf;
330 struct device *dev;
331 unsigned int major = imajor(inode);
332 int err = -ENODEV;
333
334 dev = class_find_device(&stm_class, NULL, &major, major_match);
335 if (!dev)
336 return -ENODEV;
337
338 stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
339 if (!stmf)
340 return -ENOMEM;
341
342 stmf->stm = to_stm_device(dev);
343
344 if (!try_module_get(stmf->stm->owner))
345 goto err_free;
346
347 file->private_data = stmf;
348
349 return nonseekable_open(inode, file);
350
351err_free:
	/* matches class_find_device() above */
	put_device(dev);
352	kfree(stmf);
353
354	return err;
355}
356
357static int stm_char_release(struct inode *inode, struct file *file)
358{
359 struct stm_file *stmf = file->private_data;
360
361 stm_output_free(stmf->stm, &stmf->output);
362 stm_put_device(stmf->stm);
363 kfree(stmf);
364
365 return 0;
366}
367
368static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
369{
370 struct stm_device *stm = stmf->stm;
371 int ret;
372
373 stmf->policy_node = stp_policy_node_lookup(stm, id);
374
375 ret = stm_output_assign(stm, width, stmf->policy_node, &stmf->output);
376
377 if (stmf->policy_node)
378 stp_policy_node_put(stmf->policy_node);
379
380 return ret;
381}
382
383static void stm_write(struct stm_data *data, unsigned int master,
384 unsigned int channel, const char *buf, size_t count)
385{
386 unsigned int flags = STP_PACKET_TIMESTAMPED;
387 const unsigned char *p = buf, nil = 0;
388 size_t pos;
389 ssize_t sz;
390
391 for (pos = 0, p = buf; count > pos; pos += sz, p += sz) {
392 sz = min_t(unsigned int, count - pos, 8);
393 sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
394 sz, p);
395 flags = 0;
396 }
397
398 data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
399}
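
/*
 * Example: assuming the packet() callback consumes each chunk in full,
 * a 20-byte write goes out as DATA packets of 8, 8 and 4 bytes (only
 * the first one timestamped), closed by a zero-length FLAG packet.
 */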
400
401static ssize_t stm_char_write(struct file *file, const char __user *buf,
402 size_t count, loff_t *ppos)
403{
404 struct stm_file *stmf = file->private_data;
405 struct stm_device *stm = stmf->stm;
406 char *kbuf;
407 int err;
408
409 /*
410 * if no m/c have been assigned to this writer up to this
411 * point, use "default" policy entry
412 */
413 if (!stmf->output.nr_chans) {
414 err = stm_file_assign(stmf, "default", 1);
415 /*
416 * EBUSY means that somebody else just assigned this
417 * output, which is just fine for write()
418 */
419 if (err && err != -EBUSY)
420 return err;
421 }
422
	/* bound the bounce buffer; anything longer becomes a short write */
	if (count + 1 > PAGE_SIZE)
		count = PAGE_SIZE - 1;

423	kbuf = kmalloc(count + 1, GFP_KERNEL);
424 if (!kbuf)
425 return -ENOMEM;
426
427 err = copy_from_user(kbuf, buf, count);
428 if (err) {
429 kfree(kbuf);
430 return -EFAULT;
431 }
432
433 stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf,
434 count);
435
436 kfree(kbuf);
437
438 return count;
439}
440
441static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
442{
443 struct stm_file *stmf = file->private_data;
444 struct stm_device *stm = stmf->stm;
445 unsigned long size, phys;
446
447 if (!stm->data->mmio_addr)
448 return -EOPNOTSUPP;
449
450 if (vma->vm_pgoff)
451 return -EINVAL;
452
453 size = vma->vm_end - vma->vm_start;
454
455 if (stmf->output.nr_chans * stm->data->sw_mmiosz != size)
456 return -EINVAL;
457
458 phys = stm->data->mmio_addr(stm->data, stmf->output.master,
459 stmf->output.channel,
460 stmf->output.nr_chans);
461
462 if (!phys)
463 return -EINVAL;
464
465 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
466 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
467 vm_iomap_memory(vma, phys, size);
468
469 return 0;
470}
471
472static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
473{
474 struct stm_device *stm = stmf->stm;
475 struct stp_policy_id *id;
476 int ret = -EINVAL;
477 u32 size;
478
479 if (stmf->output.nr_chans)
480 return -EBUSY;
481
482 if (copy_from_user(&size, arg, sizeof(size)))
483 return -EFAULT;
484
485	if (size < sizeof(*id) || size >= PATH_MAX + sizeof(*id))
486		return -EINVAL;
487
488 /*
489 * size + 1 to make sure the .id string at the bottom is terminated,
490 * which is also why memdup_user() is not useful here
491 */
492 id = kzalloc(size + 1, GFP_KERNEL);
493 if (!id)
494 return -ENOMEM;
495
496 if (copy_from_user(id, arg, size)) {
497 ret = -EFAULT;
498 goto err_free;
499 }
500
501 if (id->__reserved_0 || id->__reserved_1)
502 goto err_free;
503
504 if (id->width < 1 ||
505 id->width > PAGE_SIZE / stm->data->sw_mmiosz)
506 goto err_free;
507
508 ret = stm_file_assign(stmf, id->id, id->width);
509 if (ret)
510 goto err_free;
511
512 ret = 0;
513
514 if (stm->data->link)
515 ret = stm->data->link(stm->data, stmf->output.master,
516 stmf->output.channel);
517
518 if (ret) {
519 stm_output_free(stmf->stm, &stmf->output);
520 stm_put_device(stmf->stm);
521 }
522
523err_free:
524 kfree(id);
525
526 return ret;
527}
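
/*
 * Userspace sketch (identifier string is illustrative): fill a
 * struct stp_policy_id with .size covering the trailing id string and
 * .width set to the number of channels wanted, put the policy node
 * name in .id, then issue ioctl(fd, STP_POLICY_ID_SET, &id) before
 * the first write() or mmap().
 */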
528
529static int stm_char_policy_get_ioctl(struct stm_file *stmf, void __user *arg)
530{
531 struct stp_policy_id id = {
532 .size = sizeof(id),
533 .master = stmf->output.master,
534 .channel = stmf->output.channel,
535 .width = stmf->output.nr_chans,
536 .__reserved_0 = 0,
537 .__reserved_1 = 0,
538 };
539
540 return copy_to_user(arg, &id, id.size) ? -EFAULT : 0;
541}
542
543static long
544stm_char_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
545{
546 struct stm_file *stmf = file->private_data;
547 struct stm_data *stm_data = stmf->stm->data;
548 int err = -ENOTTY;
549 u64 options;
550
551 switch (cmd) {
552 case STP_POLICY_ID_SET:
553 err = stm_char_policy_set_ioctl(stmf, (void __user *)arg);
554 if (err)
555 return err;
556
557 return stm_char_policy_get_ioctl(stmf, (void __user *)arg);
558
559 case STP_POLICY_ID_GET:
560 return stm_char_policy_get_ioctl(stmf, (void __user *)arg);
561
562 case STP_SET_OPTIONS:
563 if (copy_from_user(&options, (u64 __user *)arg, sizeof(u64)))
564 return -EFAULT;
565
566 if (stm_data->set_options)
567 err = stm_data->set_options(stm_data,
568 stmf->output.master,
569 stmf->output.channel,
570 stmf->output.nr_chans,
571 options);
572
573 break;
574 default:
575 break;
576 }
577
578 return err;
579}
580
581#ifdef CONFIG_COMPAT
582static long
583stm_char_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
584{
585 return stm_char_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
586}
587#else
588#define stm_char_compat_ioctl NULL
589#endif
590
591static const struct file_operations stm_fops = {
592 .open = stm_char_open,
593 .release = stm_char_release,
594 .write = stm_char_write,
595 .mmap = stm_char_mmap,
596 .unlocked_ioctl = stm_char_ioctl,
597 .compat_ioctl = stm_char_compat_ioctl,
598 .llseek = no_llseek,
599};
600
601static void stm_device_release(struct device *dev)
602{
603 struct stm_device *stm = to_stm_device(dev);
604
605 kfree(stm);
606}
607
608int stm_register_device(struct device *parent, struct stm_data *stm_data,
609 struct module *owner)
610{
611 struct stm_device *stm;
612 unsigned int nmasters;
613 int err = -ENOMEM;
614
615 if (!stm_core_up)
616 return -EPROBE_DEFER;
617
618 if (!stm_data->packet || !stm_data->sw_nchannels)
619 return -EINVAL;
620
	/* sw_end is inclusive, see stm_master() */
621	nmasters = stm_data->sw_end - stm_data->sw_start + 1;
622 stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
623 if (!stm)
624 return -ENOMEM;
625
626 stm->major = register_chrdev(0, stm_data->name, &stm_fops);
627	if (stm->major < 0) {
		err = stm->major;	/* propagate the chrdev error, not -ENOMEM */
628		goto err_free;
	}
629
630 device_initialize(&stm->dev);
631 stm->dev.devt = MKDEV(stm->major, 0);
632 stm->dev.class = &stm_class;
633 stm->dev.parent = parent;
634 stm->dev.release = stm_device_release;
635
636 err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
637 if (err)
638 goto err_device;
639
640 err = device_add(&stm->dev);
641 if (err)
642 goto err_device;
643
644 spin_lock_init(&stm->link_lock);
645 INIT_LIST_HEAD(&stm->link_list);
646
647 spin_lock_init(&stm->mc_lock);
648 mutex_init(&stm->policy_mutex);
649 stm->sw_nmasters = nmasters;
650 stm->owner = owner;
651 stm->data = stm_data;
652 stm_data->stm = stm;
653
654 return 0;
655
656err_device:
	unregister_chrdev(stm->major, stm_data->name);

	/* put_device() reaches stm_device_release(), which frees stm */
657	put_device(&stm->dev);

	return err;

658err_free:
659	kfree(stm);
660
661	return err;
662}
663EXPORT_SYMBOL_GPL(stm_register_device);
664
665static void __stm_source_link_drop(struct stm_source_device *src,
666 struct stm_device *stm);
667
668void stm_unregister_device(struct stm_data *stm_data)
669{
670 struct stm_device *stm = stm_data->stm;
671 struct stm_source_device *src, *iter;
672 int i;
673
674 spin_lock(&stm->link_lock);
675 list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
676 __stm_source_link_drop(src, stm);
677 }
678 spin_unlock(&stm->link_lock);
679
680 synchronize_srcu(&stm_source_srcu);
681
682 unregister_chrdev(stm->major, stm_data->name);
683
684 mutex_lock(&stm->policy_mutex);
685 if (stm->policy)
686 stp_policy_unbind(stm->policy);
687 mutex_unlock(&stm->policy_mutex);
688
	/* stp_master_free() takes STP master numbers, not array indices */
689	for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
690		stp_master_free(stm, i);
691
692 device_unregister(&stm->dev);
693 stm_data->stm = NULL;
694}
695EXPORT_SYMBOL_GPL(stm_unregister_device);
696
697/**
698 * stm_source_link_add() - connect an stm_source device to an stm device
699 * @src: stm_source device
700 * @stm: stm device
701 *
702 * This function establishes a link from stm_source to an stm device so that
703 * the former can send out trace data to the latter.
704 *
705 * Return: 0 on success, -errno otherwise.
706 */
707static int stm_source_link_add(struct stm_source_device *src,
708 struct stm_device *stm)
709{
710 char *id;
711 int err;
712
713 spin_lock(&stm->link_lock);
714 spin_lock(&src->link_lock);
715
716 /* src->link is dereferenced under stm_source_srcu but not the list */
717 rcu_assign_pointer(src->link, stm);
718 list_add_tail(&src->link_entry, &stm->link_list);
719
720 spin_unlock(&src->link_lock);
721 spin_unlock(&stm->link_lock);
722
723 id = kstrdup(src->data->name, GFP_KERNEL);
724 if (id) {
725 src->policy_node =
726 stp_policy_node_lookup(stm, id);
727
728 kfree(id);
729 }
730
731 err = stm_output_assign(stm, src->data->nr_chans,
732 src->policy_node, &src->output);
733
734 if (src->policy_node)
735 stp_policy_node_put(src->policy_node);
736
737 if (err)
738 goto fail_detach;
739
740 /* this is to notify the STM device that a new link has been made */
741 if (stm->data->link)
742 err = stm->data->link(stm->data, src->output.master,
743 src->output.channel);
744
745 if (err)
746 goto fail_free_output;
747
748 /* this is to let the source carry out all necessary preparations */
749 if (src->data->link)
750 src->data->link(src->data);
751
752 return 0;
753
754fail_free_output:
755 stm_output_free(stm, &src->output);
756 stm_put_device(stm);
757
758fail_detach:
759 spin_lock(&stm->link_lock);
760 spin_lock(&src->link_lock);
761
762 rcu_assign_pointer(src->link, NULL);
763 list_del_init(&src->link_entry);
764
765 spin_unlock(&src->link_lock);
766 spin_unlock(&stm->link_lock);
767
768 return err;
769}
770
771/**
772 * __stm_source_link_drop() - detach stm_source from an stm device
773 * @src: stm_source device
774 * @stm: stm device
775 *
776 * If @stm is @src::link, disconnect them from one another and put the
777 * reference on the @stm device.
778 *
779 * Caller must hold stm::link_lock.
780 */
781static void __stm_source_link_drop(struct stm_source_device *src,
782 struct stm_device *stm)
783{
784 struct stm_device *link;
785
786 spin_lock(&src->link_lock);
787 link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
788 if (WARN_ON_ONCE(link != stm)) {
789 spin_unlock(&src->link_lock);
790 return;
791 }
792
793 stm_output_free(link, &src->output);
794 /* caller must hold stm::link_lock */
795 list_del_init(&src->link_entry);
796 /* matches stm_find_device() from stm_source_link_store() */
797 stm_put_device(link);
798 rcu_assign_pointer(src->link, NULL);
799
800 spin_unlock(&src->link_lock);
801}
802
803/**
804 * stm_source_link_drop() - detach stm_source from its stm device
805 * @src: stm_source device
806 *
807 * Unlinking means disconnecting from source's STM device; after this
808 * writes will be unsuccessful until it is linked to a new STM device.
809 *
810 * This will happen on "stm_source_link" sysfs attribute write to undo
811 * the existing link (if any), or on linked STM device's de-registration.
812 */
813static void stm_source_link_drop(struct stm_source_device *src)
814{
815 struct stm_device *stm;
816 int idx;
817
818 idx = srcu_read_lock(&stm_source_srcu);
819 stm = srcu_dereference(src->link, &stm_source_srcu);
820
821 if (stm) {
822 if (src->data->unlink)
823 src->data->unlink(src->data);
824
825 spin_lock(&stm->link_lock);
826 __stm_source_link_drop(src, stm);
827 spin_unlock(&stm->link_lock);
828 }
829
830 srcu_read_unlock(&stm_source_srcu, idx);
831}
832
833static ssize_t stm_source_link_show(struct device *dev,
834 struct device_attribute *attr,
835 char *buf)
836{
837 struct stm_source_device *src = to_stm_source_device(dev);
838 struct stm_device *stm;
839 int idx, ret;
840
841 idx = srcu_read_lock(&stm_source_srcu);
842 stm = srcu_dereference(src->link, &stm_source_srcu);
843 ret = sprintf(buf, "%s\n",
844 stm ? dev_name(&stm->dev) : "<none>");
845 srcu_read_unlock(&stm_source_srcu, idx);
846
847 return ret;
848}
849
850static ssize_t stm_source_link_store(struct device *dev,
851 struct device_attribute *attr,
852 const char *buf, size_t count)
853{
854 struct stm_source_device *src = to_stm_source_device(dev);
855 struct stm_device *link;
856 int err;
857
858 stm_source_link_drop(src);
859
860 link = stm_find_device(buf);
861 if (!link)
862 return -EINVAL;
863
864 err = stm_source_link_add(src, link);
865 if (err)
866 stm_put_device(link);
867
868 return err ? : count;
869}
870
871static DEVICE_ATTR_RW(stm_source_link);
872
873static struct attribute *stm_source_attrs[] = {
874 &dev_attr_stm_source_link.attr,
875 NULL,
876};
877
878ATTRIBUTE_GROUPS(stm_source);
879
880static struct class stm_source_class = {
881 .name = "stm_source",
882 .dev_groups = stm_source_groups,
883};
884
885static void stm_source_device_release(struct device *dev)
886{
887 struct stm_source_device *src = to_stm_source_device(dev);
888
889 kfree(src);
890}
891
892/**
893 * stm_source_register_device() - register an stm_source device
894 * @parent: parent device
895 * @data: device description structure
896 *
897 * This will create a device of stm_source class that can write
898 * data to an stm device once linked.
899 *
900 * Return: 0 on success, -errno otherwise.
901 */
902int stm_source_register_device(struct device *parent,
903 struct stm_source_data *data)
904{
905 struct stm_source_device *src;
906 int err;
907
908 if (!stm_core_up)
909 return -EPROBE_DEFER;
910
911 src = kzalloc(sizeof(*src), GFP_KERNEL);
912 if (!src)
913 return -ENOMEM;
914
915 device_initialize(&src->dev);
916 src->dev.class = &stm_source_class;
917 src->dev.parent = parent;
918 src->dev.release = stm_source_device_release;
919
920 err = kobject_set_name(&src->dev.kobj, "%s", data->name);
921 if (err)
922 goto err;
923
924 err = device_add(&src->dev);
925 if (err)
926 goto err;
927
928 spin_lock_init(&src->link_lock);
929 INIT_LIST_HEAD(&src->link_entry);
930 src->data = data;
931 data->src = src;
932
933 return 0;
934
935err:
	/* put_device() ends in stm_source_device_release(), which frees src */
936	put_device(&src->dev);
938
939 return err;
940}
941EXPORT_SYMBOL_GPL(stm_source_register_device);
942
943/**
944 * stm_source_unregister_device() - unregister an stm_source device
945 * @data: device description that was used to register the device
946 *
947 * This will remove a previously created stm_source device from the system.
948 */
949void stm_source_unregister_device(struct stm_source_data *data)
950{
951 struct stm_source_device *src = data->src;
952
953 stm_source_link_drop(src);
954
	/* every stm_source device has devt == 0, so a devt lookup can pick the
	 * wrong device; undo device_add()/device_initialize() directly
	 */
955	device_unregister(&src->dev);
956}
957EXPORT_SYMBOL_GPL(stm_source_unregister_device);
958
959int stm_source_write(struct stm_source_data *data, unsigned int chan,
960 const char *buf, size_t count)
961{
962 struct stm_source_device *src = data->src;
963 struct stm_device *stm;
964 int idx;
965
966 if (!src->output.nr_chans)
967 return -ENODEV;
968
969 if (chan >= src->output.nr_chans)
970 return -EINVAL;
971
972 idx = srcu_read_lock(&stm_source_srcu);
973
974 stm = srcu_dereference(src->link, &stm_source_srcu);
975 if (stm)
976 stm_write(stm->data, src->output.master,
977 src->output.channel + chan,
978 buf, count);
979 else
980 count = -ENODEV;
981
982 srcu_read_unlock(&stm_source_srcu, idx);
983
984 return count;
985}
986EXPORT_SYMBOL_GPL(stm_source_write);
987
988static int __init stm_core_init(void)
989{
990 int err;
991
992 err = class_register(&stm_class);
993 if (err)
994 return err;
995
996 err = class_register(&stm_source_class);
997 if (err)
998 goto err_stm;
999
1000 err = stp_configfs_init();
1001 if (err)
1002 goto err_src;
1003
1004 init_srcu_struct(&stm_source_srcu);
1005
1006 stm_core_up++;
1007
1008 return 0;
1009
1010err_src:
1011 class_unregister(&stm_source_class);
1012err_stm:
1013 class_unregister(&stm_class);
1014
1015 return err;
1016}
1017
1018module_init(stm_core_init);
1019
1020static void __exit stm_core_exit(void)
1021{
1022 cleanup_srcu_struct(&stm_source_srcu);
1023 class_unregister(&stm_source_class);
1024 class_unregister(&stm_class);
1025 stp_configfs_exit();
1026}
1027
1028module_exit(stm_core_exit);
1029
1030MODULE_LICENSE("GPL v2");
1031MODULE_DESCRIPTION("System Trace Module device class");
1032MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
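
For reference, a minimal stm_source client of the interface above might look like the sketch below. Illustrative only: everything named example_* is hypothetical, and the stm_source_data fields used (name, nr_chans) are assumed to match include/linux/stm.h as introduced by this series.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stm.h>

static struct stm_source_data example_source = {
	.name		= "example_source",
	.nr_chans	= 1,
};

static int __init example_source_init(void)
{
	/* may legitimately return -EPROBE_DEFER until the STM core is up */
	return stm_source_register_device(NULL, &example_source);
}

static void __exit example_source_exit(void)
{
	/* stm_source_write() returns -ENODEV while the source is unlinked */
	stm_source_write(&example_source, 0, "bye", 3);

	stm_source_unregister_device(&example_source);
}

module_init(example_source_init);
module_exit(example_source_exit);
MODULE_LICENSE("GPL v2");

Once registered, the source is linked to a concrete stm device by writing that device's name into its stm_source_link sysfs attribute, which is exactly the path stm_source_link_store() above implements.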
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
new file mode 100644
index 000000000000..3709bef0b21f
--- /dev/null
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -0,0 +1,66 @@
1/*
2 * A dummy STM device for stm/stm_source class testing.
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * STM class implements generic infrastructure for System Trace Module devices
15 * as defined in the MIPI STPv2 specification.
16 */
17
18#undef DEBUG
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/stm.h>
23
24static ssize_t
25dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
26 unsigned int channel, unsigned int packet, unsigned int flags,
27 unsigned int size, const unsigned char *payload)
28{
29#ifdef DEBUG
30 u64 pl = 0;
31
32 if (payload)
33 pl = *(u64 *)payload;
34
35 if (size < 8)
36 pl &= (1ull << (size * 8)) - 1;
37 trace_printk("[%u:%u] [pkt: %x/%x] (%llx)\n", master, channel,
38 packet, size, pl);
39#endif
40 return size;
41}
42
43static struct stm_data dummy_stm = {
44 .name = "dummy_stm",
45 .sw_start = 0x0000,
46 .sw_end = 0xffff,
47 .sw_nchannels = 0xffff,
48 .packet = dummy_stm_packet,
49};
50
51static int dummy_stm_init(void)
52{
53 return stm_register_device(NULL, &dummy_stm, THIS_MODULE);
54}
55
56static void dummy_stm_exit(void)
57{
58 stm_unregister_device(&dummy_stm);
59}
60
61module_init(dummy_stm_init);
62module_exit(dummy_stm_exit);
63
64MODULE_LICENSE("GPL v2");
65MODULE_DESCRIPTION("dummy_stm device");
66MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
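
A quick way to exercise the dummy device from userspace is to write to the character node the stm core creates for it, assumed here to be /dev/dummy_stm, after an stp-policy for it has been set up (see the sketch following policy.c below). With DEBUG defined above, each write surfaces through the packet callback via trace_printk(). A hedged sketch:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/dummy_stm", O_WRONLY);

	if (fd < 0)
		return 1;

	/* master/channel are assigned from the matching policy node */
	write(fd, "hello", 5);
	close(fd);

	return 0;
}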
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
new file mode 100644
index 000000000000..6498a9dbb7bd
--- /dev/null
+++ b/drivers/hwtracing/stm/policy.c
@@ -0,0 +1,529 @@
1/*
2 * System Trace Module (STM) master/channel allocation policy management
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * A master/channel allocation policy allows mapping string identifiers to
15 * master and channel ranges, where allocation can be done.
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/types.h>
21#include <linux/module.h>
22#include <linux/device.h>
23#include <linux/configfs.h>
24#include <linux/slab.h>
25#include <linux/stm.h>
26#include "stm.h"
27
28/*
29 * STP Master/Channel allocation policy configfs layout.
30 */
31
32struct stp_policy {
33 struct config_group group;
34 struct stm_device *stm;
35};
36
37struct stp_policy_node {
38 struct config_group group;
39 struct stp_policy *policy;
40 unsigned int first_master;
41 unsigned int last_master;
42 unsigned int first_channel;
43 unsigned int last_channel;
44};
45
46static struct configfs_subsystem stp_policy_subsys;
47
48void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
49 unsigned int *mstart, unsigned int *mend,
50 unsigned int *cstart, unsigned int *cend)
51{
52 *mstart = policy_node->first_master;
53 *mend = policy_node->last_master;
54 *cstart = policy_node->first_channel;
55 *cend = policy_node->last_channel;
56}
57
58static inline char *stp_policy_node_name(struct stp_policy_node *policy_node)
59{
60 return policy_node->group.cg_item.ci_name ? : "<none>";
61}
62
63static inline struct stp_policy *to_stp_policy(struct config_item *item)
64{
65 return item ?
66 container_of(to_config_group(item), struct stp_policy, group) :
67 NULL;
68}
69
70static inline struct stp_policy_node *
71to_stp_policy_node(struct config_item *item)
72{
73 return item ?
74 container_of(to_config_group(item), struct stp_policy_node,
75 group) :
76 NULL;
77}
78
79static ssize_t stp_policy_node_masters_show(struct stp_policy_node *policy_node,
80 char *page)
81{
82 ssize_t count;
83
84 count = sprintf(page, "%u %u\n", policy_node->first_master,
85 policy_node->last_master);
86
87 return count;
88}
89
90static ssize_t
91stp_policy_node_masters_store(struct stp_policy_node *policy_node,
92 const char *page, size_t count)
93{
94 unsigned int first, last;
95 struct stm_device *stm;
96 char *p = (char *)page;
97 ssize_t ret = -ENODEV;
98
99 if (sscanf(p, "%u %u", &first, &last) != 2)
100 return -EINVAL;
101
102 mutex_lock(&stp_policy_subsys.su_mutex);
103 stm = policy_node->policy->stm;
104 if (!stm)
105 goto unlock;
106
107 /* must be within [sw_start..sw_end], which is an inclusive range */
108 if (first > INT_MAX || last > INT_MAX || first > last ||
109 first < stm->data->sw_start ||
110 last > stm->data->sw_end) {
111 ret = -ERANGE;
112 goto unlock;
113 }
114
115 ret = count;
116 policy_node->first_master = first;
117 policy_node->last_master = last;
118
119unlock:
120 mutex_unlock(&stp_policy_subsys.su_mutex);
121
122 return ret;
123}
124
125static ssize_t
126stp_policy_node_channels_show(struct stp_policy_node *policy_node, char *page)
127{
128 ssize_t count;
129
130 count = sprintf(page, "%u %u\n", policy_node->first_channel,
131 policy_node->last_channel);
132
133 return count;
134}
135
136static ssize_t
137stp_policy_node_channels_store(struct stp_policy_node *policy_node,
138 const char *page, size_t count)
139{
140 unsigned int first, last;
141 struct stm_device *stm;
142 char *p = (char *)page;
143 ssize_t ret = -ENODEV;
144
145 if (sscanf(p, "%u %u", &first, &last) != 2)
146 return -EINVAL;
147
148 mutex_lock(&stp_policy_subsys.su_mutex);
149 stm = policy_node->policy->stm;
150 if (!stm)
151 goto unlock;
152
153 if (first > INT_MAX || last > INT_MAX || first > last ||
154 last >= stm->data->sw_nchannels) {
155 ret = -ERANGE;
156 goto unlock;
157 }
158
159 ret = count;
160 policy_node->first_channel = first;
161 policy_node->last_channel = last;
162
163unlock:
164 mutex_unlock(&stp_policy_subsys.su_mutex);
165
166 return ret;
167}
168
169static void stp_policy_node_release(struct config_item *item)
170{
171 kfree(to_stp_policy_node(item));
172}
173
174struct stp_policy_node_attribute {
175 struct configfs_attribute attr;
176 ssize_t (*show)(struct stp_policy_node *, char *);
177 ssize_t (*store)(struct stp_policy_node *, const char *, size_t);
178};
179
180static ssize_t stp_policy_node_attr_show(struct config_item *item,
181 struct configfs_attribute *attr,
182 char *page)
183{
184 struct stp_policy_node *policy_node = to_stp_policy_node(item);
185 struct stp_policy_node_attribute *pn_attr =
186 container_of(attr, struct stp_policy_node_attribute, attr);
187 ssize_t count = 0;
188
189 if (pn_attr->show)
190 count = pn_attr->show(policy_node, page);
191
192 return count;
193}
194
195static ssize_t stp_policy_node_attr_store(struct config_item *item,
196 struct configfs_attribute *attr,
197 const char *page, size_t len)
198{
199 struct stp_policy_node *policy_node = to_stp_policy_node(item);
200 struct stp_policy_node_attribute *pn_attr =
201 container_of(attr, struct stp_policy_node_attribute, attr);
202 ssize_t count = -EINVAL;
203
204 if (pn_attr->store)
205 count = pn_attr->store(policy_node, page, len);
206
207 return count;
208}
209
210static struct configfs_item_operations stp_policy_node_item_ops = {
211 .release = stp_policy_node_release,
212 .show_attribute = stp_policy_node_attr_show,
213 .store_attribute = stp_policy_node_attr_store,
214};
215
216static struct stp_policy_node_attribute stp_policy_node_attr_range = {
217 .attr = {
218 .ca_owner = THIS_MODULE,
219 .ca_name = "masters",
220 .ca_mode = S_IRUGO | S_IWUSR,
221 },
222 .show = stp_policy_node_masters_show,
223 .store = stp_policy_node_masters_store,
224};
225
226static struct stp_policy_node_attribute stp_policy_node_attr_channels = {
227 .attr = {
228 .ca_owner = THIS_MODULE,
229 .ca_name = "channels",
230 .ca_mode = S_IRUGO | S_IWUSR,
231 },
232 .show = stp_policy_node_channels_show,
233 .store = stp_policy_node_channels_store,
234};
235
236static struct configfs_attribute *stp_policy_node_attrs[] = {
237 &stp_policy_node_attr_range.attr,
238 &stp_policy_node_attr_channels.attr,
239 NULL,
240};
241
242static struct config_item_type stp_policy_type;
243static struct config_item_type stp_policy_node_type;
244
245static struct config_group *
246stp_policy_node_make(struct config_group *group, const char *name)
247{
248 struct stp_policy_node *policy_node, *parent_node;
249 struct stp_policy *policy;
250
251 if (group->cg_item.ci_type == &stp_policy_type) {
252 policy = container_of(group, struct stp_policy, group);
253 } else {
254 parent_node = container_of(group, struct stp_policy_node,
255 group);
256 policy = parent_node->policy;
257 }
258
259 if (!policy->stm)
260 return ERR_PTR(-ENODEV);
261
262 policy_node = kzalloc(sizeof(struct stp_policy_node), GFP_KERNEL);
263 if (!policy_node)
264 return ERR_PTR(-ENOMEM);
265
266 config_group_init_type_name(&policy_node->group, name,
267 &stp_policy_node_type);
268
269 policy_node->policy = policy;
270
271 /* default values for the attributes */
272 policy_node->first_master = policy->stm->data->sw_start;
273 policy_node->last_master = policy->stm->data->sw_end;
274 policy_node->first_channel = 0;
275 policy_node->last_channel = policy->stm->data->sw_nchannels - 1;
276
277 return &policy_node->group;
278}
279
280static void
281stp_policy_node_drop(struct config_group *group, struct config_item *item)
282{
283 config_item_put(item);
284}
285
286static struct configfs_group_operations stp_policy_node_group_ops = {
287 .make_group = stp_policy_node_make,
288 .drop_item = stp_policy_node_drop,
289};
290
291static struct config_item_type stp_policy_node_type = {
292 .ct_item_ops = &stp_policy_node_item_ops,
293 .ct_group_ops = &stp_policy_node_group_ops,
294 .ct_attrs = stp_policy_node_attrs,
295 .ct_owner = THIS_MODULE,
296};
297
298/*
299 * Root group: policies.
300 */
301static struct configfs_attribute stp_policy_attr_device = {
302 .ca_owner = THIS_MODULE,
303 .ca_name = "device",
304 .ca_mode = S_IRUGO,
305};
306
307static struct configfs_attribute *stp_policy_attrs[] = {
308 &stp_policy_attr_device,
309 NULL,
310};
311
312static ssize_t stp_policy_attr_show(struct config_item *item,
313 struct configfs_attribute *attr,
314 char *page)
315{
316 struct stp_policy *policy = to_stp_policy(item);
317 ssize_t count;
318
319 count = sprintf(page, "%s\n",
320 (policy && policy->stm) ?
321 policy->stm->data->name :
322 "<none>");
323
324 return count;
325}
326
327void stp_policy_unbind(struct stp_policy *policy)
328{
329 struct stm_device *stm = policy->stm;
330
331 if (WARN_ON_ONCE(!policy->stm))
332 return;
333
334 mutex_lock(&stm->policy_mutex);
335 stm->policy = NULL;
336 mutex_unlock(&stm->policy_mutex);
337
338 policy->stm = NULL;
339
340 stm_put_device(stm);
341}
342
343static void stp_policy_release(struct config_item *item)
344{
345 struct stp_policy *policy = to_stp_policy(item);
346
347 stp_policy_unbind(policy);
348 kfree(policy);
349}
350
351static struct configfs_item_operations stp_policy_item_ops = {
352 .release = stp_policy_release,
353 .show_attribute = stp_policy_attr_show,
354};
355
356static struct configfs_group_operations stp_policy_group_ops = {
357 .make_group = stp_policy_node_make,
358};
359
360static struct config_item_type stp_policy_type = {
361 .ct_item_ops = &stp_policy_item_ops,
362 .ct_group_ops = &stp_policy_group_ops,
363 .ct_attrs = stp_policy_attrs,
364 .ct_owner = THIS_MODULE,
365};
366
367static struct config_group *
368stp_policies_make(struct config_group *group, const char *name)
369{
370 struct config_group *ret;
371 struct stm_device *stm;
372 char *devname, *p;
373
374 devname = kasprintf(GFP_KERNEL, "%s", name);
375 if (!devname)
376 return ERR_PTR(-ENOMEM);
377
378 /*
379 * node must look like <device_name>.<policy_name>, where
380 * <device_name> is the name of an existing stm device and
381 * <policy_name> is an arbitrary string
382 */
383 p = strchr(devname, '.');
384 if (!p) {
385 kfree(devname);
386 return ERR_PTR(-EINVAL);
387 }
388
389 *p++ = '\0';
390
391 stm = stm_find_device(devname);
392 kfree(devname);
393
394 if (!stm)
395 return ERR_PTR(-ENODEV);
396
397 mutex_lock(&stm->policy_mutex);
398 if (stm->policy) {
399 ret = ERR_PTR(-EBUSY);
400 goto unlock_policy;
401 }
402
403 stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL);
404 if (!stm->policy) {
405 ret = ERR_PTR(-ENOMEM);
406 goto unlock_policy;
407 }
408
409 config_group_init_type_name(&stm->policy->group, name,
410 &stp_policy_type);
411 stm->policy->stm = stm;
412
413 ret = &stm->policy->group;
414
415unlock_policy:
416 mutex_unlock(&stm->policy_mutex);
417
418 if (IS_ERR(ret))
419 stm_put_device(stm);
420
421 return ret;
422}
423
424static struct configfs_group_operations stp_policies_group_ops = {
425 .make_group = stp_policies_make,
426};
427
428static struct config_item_type stp_policies_type = {
429 .ct_group_ops = &stp_policies_group_ops,
430 .ct_owner = THIS_MODULE,
431};
432
433static struct configfs_subsystem stp_policy_subsys = {
434 .su_group = {
435 .cg_item = {
436 .ci_namebuf = "stp-policy",
437 .ci_type = &stp_policies_type,
438 },
439 },
440};
441
442/*
443 * The caller must hold the policy mutex
444 */
445static struct stp_policy_node *
446__stp_policy_node_lookup(struct stp_policy *policy, char *s)
447{
448 struct stp_policy_node *policy_node, *ret;
449 struct list_head *head = &policy->group.cg_children;
450 struct config_item *item;
451 char *start, *end = s;
452
453 if (list_empty(head))
454 return NULL;
455
456 /* return the first entry if everything else fails */
457 item = list_entry(head->next, struct config_item, ci_entry);
458 ret = to_stp_policy_node(item);
459
460next:
461 for (;;) {
462 start = strsep(&end, "/");
463 if (!start)
464 break;
465
466 if (!*start)
467 continue;
468
469 list_for_each_entry(item, head, ci_entry) {
470 policy_node = to_stp_policy_node(item);
471
472 if (!strcmp(start,
473 policy_node->group.cg_item.ci_name)) {
474 ret = policy_node;
475
476 if (!end)
477 goto out;
478
479 head = &policy_node->group.cg_children;
480 goto next;
481 }
482 }
483 break;
484 }
485
486out:
487 return ret;
488}
489
490
491struct stp_policy_node *
492stp_policy_node_lookup(struct stm_device *stm, char *s)
493{
494 struct stp_policy_node *policy_node = NULL;
495
496 mutex_lock(&stp_policy_subsys.su_mutex);
497
498 mutex_lock(&stm->policy_mutex);
499 if (stm->policy)
500 policy_node = __stp_policy_node_lookup(stm->policy, s);
501 mutex_unlock(&stm->policy_mutex);
502
503 if (policy_node)
504 config_item_get(&policy_node->group.cg_item);
505 mutex_unlock(&stp_policy_subsys.su_mutex);
506
507 return policy_node;
508}
509
510void stp_policy_node_put(struct stp_policy_node *policy_node)
511{
512 config_item_put(&policy_node->group.cg_item);
513}
514
515int __init stp_configfs_init(void)
516{
517 int err;
518
519 config_group_init(&stp_policy_subsys.su_group);
520 mutex_init(&stp_policy_subsys.su_mutex);
521 err = configfs_register_subsystem(&stp_policy_subsys);
522
523 return err;
524}
525
526void __exit stp_configfs_exit(void)
527{
528 configfs_unregister_subsystem(&stp_policy_subsys);
529}
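
The configfs subsystem registered above is driven entirely from userspace: creating a "<device_name>.<policy_name>" group binds a policy to an stm device, and child groups carve out master/channel ranges. A minimal sketch, assuming configfs is mounted at /sys/kernel/config and the dummy_stm device from the previous file:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	FILE *f;

	/* bind a policy named "test" to the dummy_stm device */
	mkdir("/sys/kernel/config/stp-policy/dummy_stm.test", 0755);

	/* one allocation node; its name is matched by stp_policy_node_lookup() */
	mkdir("/sys/kernel/config/stp-policy/dummy_stm.test/default", 0755);

	f = fopen("/sys/kernel/config/stp-policy/dummy_stm.test/default/masters", "w");
	if (f) {
		fprintf(f, "0 63\n");	/* inclusive, checked against sw_start/sw_end */
		fclose(f);
	}

	f = fopen("/sys/kernel/config/stp-policy/dummy_stm.test/default/channels", "w");
	if (f) {
		fprintf(f, "0 15\n");	/* inclusive, checked against sw_nchannels */
		fclose(f);
	}

	return 0;
}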
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
new file mode 100644
index 000000000000..95ece0292c99
--- /dev/null
+++ b/drivers/hwtracing/stm/stm.h
@@ -0,0 +1,87 @@
1/*
2 * System Trace Module (STM) infrastructure
3 * Copyright (c) 2014, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * STM class implements generic infrastructure for System Trace Module devices
15 * as defined in the MIPI STPv2 specification.
16 */
17
18#ifndef _STM_STM_H_
19#define _STM_STM_H_
20
21struct stp_policy;
22struct stp_policy_node;
23
24struct stp_policy_node *
25stp_policy_node_lookup(struct stm_device *stm, char *s);
26void stp_policy_node_put(struct stp_policy_node *policy_node);
27void stp_policy_unbind(struct stp_policy *policy);
28
29void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
30 unsigned int *mstart, unsigned int *mend,
31 unsigned int *cstart, unsigned int *cend);
32int stp_configfs_init(void);
33void stp_configfs_exit(void);
34
35struct stp_master {
36 unsigned int nr_free;
37 unsigned long chan_map[0];
38};
39
40struct stm_device {
41 struct device dev;
42 struct module *owner;
43 struct stp_policy *policy;
44 struct mutex policy_mutex;
45 int major;
46 unsigned int sw_nmasters;
47 struct stm_data *data;
48 spinlock_t link_lock;
49 struct list_head link_list;
50 /* master allocation */
51 spinlock_t mc_lock;
52 struct stp_master *masters[0];
53};
54
55#define to_stm_device(_d) \
56 container_of((_d), struct stm_device, dev)
57
58struct stm_output {
59 unsigned int master;
60 unsigned int channel;
61 unsigned int nr_chans;
62};
63
64struct stm_file {
65 struct stm_device *stm;
66 struct stp_policy_node *policy_node;
67 struct stm_output output;
68};
69
70struct stm_device *stm_find_device(const char *name);
71void stm_put_device(struct stm_device *stm);
72
73struct stm_source_device {
74 struct device dev;
75 struct stm_source_data *data;
76 spinlock_t link_lock;
77 struct stm_device __rcu *link;
78 struct list_head link_entry;
79 /* one output per stm_source device */
80 struct stp_policy_node *policy_node;
81 struct stm_output output;
82};
83
84#define to_stm_source_device(_d) \
85 container_of((_d), struct stm_source_device, dev)
86
87#endif /* _STM_STM_H_ */
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index 9018ab83517a..a4be451074e5 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -409,6 +409,7 @@ static int mcb_init(void)
409 409
410static void mcb_exit(void) 410static void mcb_exit(void)
411{ 411{
412 ida_destroy(&mcb_ida);
412 bus_unregister(&mcb_bus_type); 413 bus_unregister(&mcb_bus_type);
413} 414}
414 415
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 051645498b53..67d5e7d08df6 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -51,6 +51,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
51 priv->mapbase = pci_resource_start(pdev, 0); 51 priv->mapbase = pci_resource_start(pdev, 0);
52 if (!priv->mapbase) { 52 if (!priv->mapbase) {
53 dev_err(&pdev->dev, "No PCI resource\n"); 53 dev_err(&pdev->dev, "No PCI resource\n");
54 ret = -ENODEV;
54 goto out_disable; 55 goto out_disable;
55 } 56 }
56 57
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
index d708ded5457b..662d050243be 100644
--- a/drivers/memory/fsl-corenet-cf.c
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -61,6 +61,7 @@ static const struct of_device_id ccf_matches[] = {
61 }, 61 },
62 {} 62 {}
63}; 63};
64MODULE_DEVICE_TABLE(of, ccf_matches);
64 65
65struct ccf_err_regs { 66struct ccf_err_regs {
66 u32 errdet; /* 0x00 Error Detect Register */ 67 u32 errdet; /* 0x00 Error Detect Register */
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index ca7d97a9a9ba..a579a0f25840 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -324,6 +324,7 @@ static const struct of_device_id aemif_of_match[] = {
324 { .compatible = "ti,da850-aemif", }, 324 { .compatible = "ti,da850-aemif", },
325 {}, 325 {},
326}; 326};
327MODULE_DEVICE_TABLE(of, aemif_of_match);
327 328
328static int aemif_probe(struct platform_device *pdev) 329static int aemif_probe(struct platform_device *pdev)
329{ 330{
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index c4b9374efd76..28f2ae30507a 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -1481,6 +1481,7 @@ static const struct reg_default wm5110_reg_default[] = {
1481 { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */ 1481 { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */
1482 { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */ 1482 { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
1483 { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */ 1483 { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
1484 { 0x00000C18, 0x0000 }, /* R3096 - GP Switch 1 */
1484 { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */ 1485 { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
1485 { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */ 1486 { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
1486 { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */ 1487 { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
@@ -1811,6 +1812,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
1811 case ARIZONA_MIC_DETECT_1: 1812 case ARIZONA_MIC_DETECT_1:
1812 case ARIZONA_MIC_DETECT_2: 1813 case ARIZONA_MIC_DETECT_2:
1813 case ARIZONA_MIC_DETECT_3: 1814 case ARIZONA_MIC_DETECT_3:
1815 case ARIZONA_MIC_DETECT_4:
1814 case ARIZONA_MIC_DETECT_LEVEL_1: 1816 case ARIZONA_MIC_DETECT_LEVEL_1:
1815 case ARIZONA_MIC_DETECT_LEVEL_2: 1817 case ARIZONA_MIC_DETECT_LEVEL_2:
1816 case ARIZONA_MIC_DETECT_LEVEL_3: 1818 case ARIZONA_MIC_DETECT_LEVEL_3:
@@ -1910,6 +1912,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
1910 case ARIZONA_HP1_SHORT_CIRCUIT_CTRL: 1912 case ARIZONA_HP1_SHORT_CIRCUIT_CTRL:
1911 case ARIZONA_HP2_SHORT_CIRCUIT_CTRL: 1913 case ARIZONA_HP2_SHORT_CIRCUIT_CTRL:
1912 case ARIZONA_HP3_SHORT_CIRCUIT_CTRL: 1914 case ARIZONA_HP3_SHORT_CIRCUIT_CTRL:
1915 case ARIZONA_HP_TEST_CTRL_1:
1913 case ARIZONA_AIF1_BCLK_CTRL: 1916 case ARIZONA_AIF1_BCLK_CTRL:
1914 case ARIZONA_AIF1_TX_PIN_CTRL: 1917 case ARIZONA_AIF1_TX_PIN_CTRL:
1915 case ARIZONA_AIF1_RX_PIN_CTRL: 1918 case ARIZONA_AIF1_RX_PIN_CTRL:
@@ -2527,6 +2530,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
2527 case ARIZONA_GPIO5_CTRL: 2530 case ARIZONA_GPIO5_CTRL:
2528 case ARIZONA_IRQ_CTRL_1: 2531 case ARIZONA_IRQ_CTRL_1:
2529 case ARIZONA_GPIO_DEBOUNCE_CONFIG: 2532 case ARIZONA_GPIO_DEBOUNCE_CONFIG:
2533 case ARIZONA_GP_SWITCH_1:
2530 case ARIZONA_MISC_PAD_CTRL_1: 2534 case ARIZONA_MISC_PAD_CTRL_1:
2531 case ARIZONA_MISC_PAD_CTRL_2: 2535 case ARIZONA_MISC_PAD_CTRL_2:
2532 case ARIZONA_MISC_PAD_CTRL_3: 2536 case ARIZONA_MISC_PAD_CTRL_3:
@@ -2847,12 +2851,14 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
2847 case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS: 2851 case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
2848 case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS: 2852 case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
2849 case ARIZONA_MIC_DETECT_3: 2853 case ARIZONA_MIC_DETECT_3:
2854 case ARIZONA_MIC_DETECT_4:
2850 case ARIZONA_HP_CTRL_1L: 2855 case ARIZONA_HP_CTRL_1L:
2851 case ARIZONA_HP_CTRL_1R: 2856 case ARIZONA_HP_CTRL_1R:
2852 case ARIZONA_HEADPHONE_DETECT_2: 2857 case ARIZONA_HEADPHONE_DETECT_2:
2853 case ARIZONA_INPUT_ENABLES_STATUS: 2858 case ARIZONA_INPUT_ENABLES_STATUS:
2854 case ARIZONA_OUTPUT_STATUS_1: 2859 case ARIZONA_OUTPUT_STATUS_1:
2855 case ARIZONA_RAW_OUTPUT_STATUS_1: 2860 case ARIZONA_RAW_OUTPUT_STATUS_1:
2861 case ARIZONA_HP_TEST_CTRL_1:
2856 case ARIZONA_SLIMBUS_RX_PORT_STATUS: 2862 case ARIZONA_SLIMBUS_RX_PORT_STATUS:
2857 case ARIZONA_SLIMBUS_TX_PORT_STATUS: 2863 case ARIZONA_SLIMBUS_TX_PORT_STATUS:
2858 case ARIZONA_INTERRUPT_STATUS_1: 2864 case ARIZONA_INTERRUPT_STATUS_1:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index ccccc2943f2f..22892c701c63 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -414,7 +414,7 @@ config TI_DAC7512
414 414
415config VMWARE_BALLOON 415config VMWARE_BALLOON
416 tristate "VMware Balloon Driver" 416 tristate "VMware Balloon Driver"
417 depends on X86 && HYPERVISOR_GUEST 417 depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST
418 help 418 help
419 This is VMware physical memory management driver which acts 419 This is VMware physical memory management driver which acts
420 like a "balloon" that can be inflated to reclaim physical pages 420 like a "balloon" that can be inflated to reclaim physical pages
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index d11187d36ddd..4f832002d116 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -117,4 +117,3 @@ module_i2c_driver(ad_dpot_i2c_driver);
117MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 117MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
118MODULE_DESCRIPTION("digital potentiometer I2C bus driver"); 118MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
119MODULE_LICENSE("GPL"); 119MODULE_LICENSE("GPL");
120MODULE_ALIAS("i2c:ad_dpot");
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index e7353449874b..cb851c14ca4b 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -514,7 +514,7 @@ int __genwqe_execute_ddcb(struct genwqe_dev *cd,
514/** 514/**
515 * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation 515 * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
516 * 516 *
517 * This version will not do address translation or any modifcation of 517 * This version will not do address translation or any modification of
518 * the DDCB data. It is used e.g. for the MoveFlash DDCB which is 518 * the DDCB data. It is used e.g. for the MoveFlash DDCB which is
519 * entirely prepared by the driver itself. That means the appropriate 519 * entirely prepared by the driver itself. That means the appropriate
520 * DMA addresses are already in the DDCB and do not need any 520 * DMA addresses are already in the DDCB and do not need any
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 6d51e5f08664..353ee0cc733d 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -203,7 +203,7 @@ struct genwqe_ddcb_cmd *ddcb_requ_alloc(void)
203{ 203{
204 struct ddcb_requ *req; 204 struct ddcb_requ *req;
205 205
206 req = kzalloc(sizeof(*req), GFP_ATOMIC); 206 req = kzalloc(sizeof(*req), GFP_KERNEL);
207 if (!req) 207 if (!req)
208 return NULL; 208 return NULL;
209 209
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 70e62d6a3231..7f1b282d7d96 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -449,7 +449,7 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
449 if (get_order(vsize) > MAX_ORDER) 449 if (get_order(vsize) > MAX_ORDER)
450 return -ENOMEM; 450 return -ENOMEM;
451 451
452 dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC); 452 dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
453 if (dma_map == NULL) 453 if (dma_map == NULL)
454 return -ENOMEM; 454 return -ENOMEM;
455 455
@@ -785,7 +785,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
785 map_addr = (m->addr & PAGE_MASK); 785 map_addr = (m->addr & PAGE_MASK);
786 map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE); 786 map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
787 787
788 dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC); 788 dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
789 if (dma_map == NULL) 789 if (dma_map == NULL)
790 return -ENOMEM; 790 return -ENOMEM;
791 791
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 1ca94e6fa8fb..222367cc8c81 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -220,7 +220,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
220 if (get_order(size) > MAX_ORDER) 220 if (get_order(size) > MAX_ORDER)
221 return NULL; 221 return NULL;
222 222
223 return pci_alloc_consistent(cd->pci_dev, size, dma_handle); 223 return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
224 GFP_KERNEL);
224} 225}
225 226
226void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, 227void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
@@ -229,7 +230,7 @@ void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
229 if (vaddr == NULL) 230 if (vaddr == NULL)
230 return; 231 return;
231 232
232 pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle); 233 dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
233} 234}
234 235
235static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list, 236static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index b83e3ca12a41..d6a901cd4222 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -2,7 +2,7 @@
2 * Driver for the HP iLO management processor. 2 * Driver for the HP iLO management processor.
3 * 3 *
4 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. 4 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
5 * David Altobelli <david.altobelli@hp.com> 5 * David Altobelli <david.altobelli@hpe.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -902,11 +902,11 @@ static void __exit ilo_exit(void)
902MODULE_VERSION("1.4.1"); 902MODULE_VERSION("1.4.1");
903MODULE_ALIAS(ILO_NAME); 903MODULE_ALIAS(ILO_NAME);
904MODULE_DESCRIPTION(ILO_NAME); 904MODULE_DESCRIPTION(ILO_NAME);
905MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>"); 905MODULE_AUTHOR("David Altobelli <david.altobelli@hpe.com>");
906MODULE_LICENSE("GPL v2"); 906MODULE_LICENSE("GPL v2");
907 907
908module_param(max_ccb, uint, 0444); 908module_param(max_ccb, uint, 0444);
909MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (16)"); 909MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8-24)(default=16)");
910 910
911module_init(ilo_init); 911module_init(ilo_init);
912module_exit(ilo_exit); 912module_exit(ilo_exit);
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 9a60bd4d3c49..99635dd9dbac 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -1112,6 +1112,7 @@ static int __init init_kgdbts(void)
1112 1112
1113 return configure_kgdbts(); 1113 return configure_kgdbts();
1114} 1114}
1115device_initcall(init_kgdbts);
1115 1116
1116static int kgdbts_get_char(void) 1117static int kgdbts_get_char(void)
1117{ 1118{
@@ -1180,10 +1181,9 @@ static struct kgdb_io kgdbts_io_ops = {
1180 .post_exception = kgdbts_post_exp_handler, 1181 .post_exception = kgdbts_post_exp_handler,
1181}; 1182};
1182 1183
1183module_init(init_kgdbts); 1184/*
1185 * not really modular, but the easiest way to keep compat with existing
1186 * bootargs behaviour is to continue using module_param here.
1187 */
1184module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644); 1188module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644);
1185MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]"); 1189MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#]");
1186MODULE_DESCRIPTION("KGDB Test Suite");
1187MODULE_LICENSE("GPL");
1188MODULE_AUTHOR("Wind River Systems, Inc.");
1189
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index b5abe34120b8..11fdadc68e53 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -472,7 +472,7 @@ static void lkdtm_do_action(enum ctype which)
472 break; 472 break;
473 } 473 }
474 case CT_ACCESS_USERSPACE: { 474 case CT_ACCESS_USERSPACE: {
475 unsigned long user_addr, tmp; 475 unsigned long user_addr, tmp = 0;
476 unsigned long *ptr; 476 unsigned long *ptr;
477 477
478 user_addr = vm_mmap(NULL, 0, PAGE_SIZE, 478 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
@@ -483,6 +483,12 @@ static void lkdtm_do_action(enum ctype which)
483 return; 483 return;
484 } 484 }
485 485
486 if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
487 pr_warn("copy_to_user failed\n");
488 vm_munmap(user_addr, PAGE_SIZE);
489 return;
490 }
491
486 ptr = (unsigned long *)user_addr; 492 ptr = (unsigned long *)user_addr;
487 493
488 pr_info("attempting bad read at %p\n", ptr); 494 pr_info("attempting bad read at %p\n", ptr);
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 1e42781592d8..cd0403f09267 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -458,7 +458,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
458 return; 458 return;
459 } 459 }
460 460
461 if (dev->iamthif_canceled != 1) { 461 if (!dev->iamthif_canceled) {
462 dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE; 462 dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
463 dev->iamthif_stall_timer = 0; 463 dev->iamthif_stall_timer = 0;
464 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); 464 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 3e536ca85f7d..020de5919c21 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -285,11 +285,11 @@ static struct mei_fixup {
285}; 285};
286 286
287/** 287/**
288 * mei_cl_dev_fixup - run fixup handlers 288 * mei_cl_bus_dev_fixup - run fixup handlers
289 * 289 *
290 * @cldev: me client device 290 * @cldev: me client device
291 */ 291 */
292void mei_cl_dev_fixup(struct mei_cl_device *cldev) 292void mei_cl_bus_dev_fixup(struct mei_cl_device *cldev)
293{ 293{
294 struct mei_fixup *f; 294 struct mei_fixup *f;
295 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 295 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index eef1c6b46ad8..0b05aa938799 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -91,7 +91,7 @@ out:
91 * __mei_cl_recv - internal client receive (read) 91 * __mei_cl_recv - internal client receive (read)
92 * 92 *
93 * @cl: host client 93 * @cl: host client
94 * @buf: buffer to send 94 * @buf: buffer to receive
95 * @length: buffer length 95 * @length: buffer length
96 * 96 *
97 * Return: read size in bytes or < 0 on error 97 * Return: read size in bytes or < 0 on error
@@ -165,7 +165,7 @@ out:
165} 165}
166 166
167/** 167/**
168 * mei_cl_send - me device send (write) 168 * mei_cldev_send - me device send (write)
169 * 169 *
170 * @cldev: me client device 170 * @cldev: me client device
171 * @buf: buffer to send 171 * @buf: buffer to send
@@ -173,7 +173,7 @@ out:
173 * 173 *
174 * Return: written size in bytes or < 0 on error 174 * Return: written size in bytes or < 0 on error
175 */ 175 */
176ssize_t mei_cl_send(struct mei_cl_device *cldev, u8 *buf, size_t length) 176ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
177{ 177{
178 struct mei_cl *cl = cldev->cl; 178 struct mei_cl *cl = cldev->cl;
179 179
@@ -182,18 +182,18 @@ ssize_t mei_cl_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
182 182
183 return __mei_cl_send(cl, buf, length, 1); 183 return __mei_cl_send(cl, buf, length, 1);
184} 184}
185EXPORT_SYMBOL_GPL(mei_cl_send); 185EXPORT_SYMBOL_GPL(mei_cldev_send);
186 186
187/** 187/**
188 * mei_cl_recv - client receive (read) 188 * mei_cldev_recv - client receive (read)
189 * 189 *
190 * @cldev: me client device 190 * @cldev: me client device
191 * @buf: buffer to send 191 * @buf: buffer to receive
192 * @length: buffer length 192 * @length: buffer length
193 * 193 *
194 * Return: read size in bytes or < 0 on error 194 * Return: read size in bytes or < 0 on error
195 */ 195 */
196ssize_t mei_cl_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) 196ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
197{ 197{
198 struct mei_cl *cl = cldev->cl; 198 struct mei_cl *cl = cldev->cl;
199 199
@@ -202,15 +202,15 @@ ssize_t mei_cl_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
202 202
203 return __mei_cl_recv(cl, buf, length); 203 return __mei_cl_recv(cl, buf, length);
204} 204}
205EXPORT_SYMBOL_GPL(mei_cl_recv); 205EXPORT_SYMBOL_GPL(mei_cldev_recv);
206 206
207/** 207/**
208 * mei_bus_event_work - dispatch rx event for a bus device 208 * mei_cl_bus_event_work - dispatch rx event for a bus device
209 * and schedule new work 209 * and schedule new work
210 * 210 *
211 * @work: work 211 * @work: work
212 */ 212 */
213static void mei_bus_event_work(struct work_struct *work) 213static void mei_cl_bus_event_work(struct work_struct *work)
214{ 214{
215 struct mei_cl_device *cldev; 215 struct mei_cl_device *cldev;
216 216
@@ -272,7 +272,7 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
272} 272}
273 273
274/** 274/**
275 * mei_cl_register_event_cb - register event callback 275 * mei_cldev_register_event_cb - register event callback
276 * 276 *
277 * @cldev: me client device 277 * @cldev: me client device
278 * @event_cb: callback function 278 * @event_cb: callback function
@@ -283,9 +283,9 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
283 * -EALREADY if a callback is already registered 283 * -EALREADY if a callback is already registered
284 * <0 on other errors 284 * <0 on other errors
285 */ 285 */
286int mei_cl_register_event_cb(struct mei_cl_device *cldev, 286int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
287 unsigned long events_mask, 287 unsigned long events_mask,
288 mei_cl_event_cb_t event_cb, void *context) 288 mei_cldev_event_cb_t event_cb, void *context)
289{ 289{
290 int ret; 290 int ret;
291 291
@@ -296,7 +296,7 @@ int mei_cl_register_event_cb(struct mei_cl_device *cldev,
296 cldev->events_mask = events_mask; 296 cldev->events_mask = events_mask;
297 cldev->event_cb = event_cb; 297 cldev->event_cb = event_cb;
298 cldev->event_context = context; 298 cldev->event_context = context;
299 INIT_WORK(&cldev->event_work, mei_bus_event_work); 299 INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
300 300
301 if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { 301 if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
302 ret = mei_cl_read_start(cldev->cl, 0, NULL); 302 ret = mei_cl_read_start(cldev->cl, 0, NULL);
@@ -314,42 +314,81 @@ int mei_cl_register_event_cb(struct mei_cl_device *cldev,
314 314
315 return 0; 315 return 0;
316} 316}
317EXPORT_SYMBOL_GPL(mei_cl_register_event_cb); 317EXPORT_SYMBOL_GPL(mei_cldev_register_event_cb);
318 318
319/** 319/**
320 * mei_cl_get_drvdata - driver data getter 320 * mei_cldev_get_drvdata - driver data getter
321 * 321 *
322 * @cldev: mei client device 322 * @cldev: mei client device
323 * 323 *
324 * Return: driver private data 324 * Return: driver private data
325 */ 325 */
326void *mei_cl_get_drvdata(const struct mei_cl_device *cldev) 326void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
327{ 327{
328 return dev_get_drvdata(&cldev->dev); 328 return dev_get_drvdata(&cldev->dev);
329} 329}
330EXPORT_SYMBOL_GPL(mei_cl_get_drvdata); 330EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
331 331
332/** 332/**
333 * mei_cl_set_drvdata - driver data setter 333 * mei_cldev_set_drvdata - driver data setter
334 * 334 *
335 * @cldev: mei client device 335 * @cldev: mei client device
336 * @data: data to store 336 * @data: data to store
337 */ 337 */
338void mei_cl_set_drvdata(struct mei_cl_device *cldev, void *data) 338void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
339{ 339{
340 dev_set_drvdata(&cldev->dev, data); 340 dev_set_drvdata(&cldev->dev, data);
341} 341}
342EXPORT_SYMBOL_GPL(mei_cl_set_drvdata); 342EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
343
344/**
345 * mei_cldev_uuid - return uuid of the underlying me client
346 *
347 * @cldev: mei client device
348 *
349 * Return: me client uuid
350 */
351const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
352{
353 return mei_me_cl_uuid(cldev->me_cl);
354}
355EXPORT_SYMBOL_GPL(mei_cldev_uuid);
356
357/**
358 * mei_cldev_ver - return protocol version of the underlying me client
359 *
360 * @cldev: mei client device
361 *
362 * Return: me client protocol version
363 */
364u8 mei_cldev_ver(const struct mei_cl_device *cldev)
365{
366 return mei_me_cl_ver(cldev->me_cl);
367}
368EXPORT_SYMBOL_GPL(mei_cldev_ver);
369
370/**
371 * mei_cldev_enabled - check whether the device is enabled
372 *
373 * @cldev: mei client device
374 *
375 * Return: true if me client is initialized and connected
376 */
377bool mei_cldev_enabled(struct mei_cl_device *cldev)
378{
379 return cldev->cl && mei_cl_is_connected(cldev->cl);
380}
381EXPORT_SYMBOL_GPL(mei_cldev_enabled);
343 382
344/** 383/**
345 * mei_cl_enable_device - enable me client device 384 * mei_cldev_enable - enable me client device
346 * create connection with me client 385 * create connection with me client
347 * 386 *
348 * @cldev: me client device 387 * @cldev: me client device
349 * 388 *
350 * Return: 0 on success and < 0 on error 389 * Return: 0 on success and < 0 on error
351 */ 390 */
352int mei_cl_enable_device(struct mei_cl_device *cldev) 391int mei_cldev_enable(struct mei_cl_device *cldev)
353{ 392{
354 struct mei_device *bus = cldev->bus; 393 struct mei_device *bus = cldev->bus;
355 struct mei_cl *cl; 394 struct mei_cl *cl;
@@ -389,17 +428,17 @@ out:
389 428
390 return ret; 429 return ret;
391} 430}
392EXPORT_SYMBOL_GPL(mei_cl_enable_device); 431EXPORT_SYMBOL_GPL(mei_cldev_enable);
393 432
394/** 433/**
395 * mei_cl_disable_device - disable me client device 434 * mei_cldev_disable - disable me client device
396 * disconnect from the me client 435 * disconnect from the me client
397 * 436 *
398 * @cldev: me client device 437 * @cldev: me client device
399 * 438 *
400 * Return: 0 on success and < 0 on error 439 * Return: 0 on success and < 0 on error
401 */ 440 */
402int mei_cl_disable_device(struct mei_cl_device *cldev) 441int mei_cldev_disable(struct mei_cl_device *cldev)
403{ 442{
404 struct mei_device *bus; 443 struct mei_device *bus;
405 struct mei_cl *cl; 444 struct mei_cl *cl;
@@ -437,7 +476,7 @@ out:
437 mutex_unlock(&bus->device_lock); 476 mutex_unlock(&bus->device_lock);
438 return err; 477 return err;
439} 478}
440EXPORT_SYMBOL_GPL(mei_cl_disable_device); 479EXPORT_SYMBOL_GPL(mei_cldev_disable);
441 480
442/** 481/**
443 * mei_cl_device_find - find matching entry in the driver id table 482 * mei_cl_device_find - find matching entry in the driver id table
@@ -453,17 +492,26 @@ struct mei_cl_device_id *mei_cl_device_find(struct mei_cl_device *cldev,
453{ 492{
454 const struct mei_cl_device_id *id; 493 const struct mei_cl_device_id *id;
455 const uuid_le *uuid; 494 const uuid_le *uuid;
495 u8 version;
496 bool match;
456 497
457 uuid = mei_me_cl_uuid(cldev->me_cl); 498 uuid = mei_me_cl_uuid(cldev->me_cl);
499 version = mei_me_cl_ver(cldev->me_cl);
458 500
459 id = cldrv->id_table; 501 id = cldrv->id_table;
460 while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) { 502 while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
461 if (!uuid_le_cmp(*uuid, id->uuid)) { 503 if (!uuid_le_cmp(*uuid, id->uuid)) {
504 match = true;
462 505
463 if (!cldev->name[0]) 506 if (cldev->name[0])
464 return id; 507 if (strncmp(cldev->name, id->name,
508 sizeof(id->name)))
509 match = false;
465 510
466 if (!strncmp(cldev->name, id->name, sizeof(id->name))) 511 if (id->version != MEI_CL_VERSION_ANY)
512 if (id->version != version)
513 match = false;
514 if (match)
467 return id; 515 return id;
468 } 516 }
469 517
@@ -590,6 +638,19 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
590} 638}
591static DEVICE_ATTR_RO(uuid); 639static DEVICE_ATTR_RO(uuid);
592 640
641static ssize_t version_show(struct device *dev, struct device_attribute *a,
642 char *buf)
643{
644 struct mei_cl_device *cldev = to_mei_cl_device(dev);
645 u8 version = mei_me_cl_ver(cldev->me_cl);
646 size_t len;
647
648 len = snprintf(buf, PAGE_SIZE, "%02X", version);
649
650 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
651}
652static DEVICE_ATTR_RO(version);
653
593static ssize_t modalias_show(struct device *dev, struct device_attribute *a, 654static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
594 char *buf) 655 char *buf)
595{ 656{
@@ -597,20 +658,19 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
597 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 658 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
598 size_t len; 659 size_t len;
599 660
600 len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":", 661 len = snprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
601 cldev->name, MEI_CL_UUID_ARGS(uuid->b));
602
603 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; 662 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
604} 663}
605static DEVICE_ATTR_RO(modalias); 664static DEVICE_ATTR_RO(modalias);
606 665
607static struct attribute *mei_cl_dev_attrs[] = { 666static struct attribute *mei_cldev_attrs[] = {
608 &dev_attr_name.attr, 667 &dev_attr_name.attr,
609 &dev_attr_uuid.attr, 668 &dev_attr_uuid.attr,
669 &dev_attr_version.attr,
610 &dev_attr_modalias.attr, 670 &dev_attr_modalias.attr,
611 NULL, 671 NULL,
612}; 672};
613ATTRIBUTE_GROUPS(mei_cl_dev); 673ATTRIBUTE_GROUPS(mei_cldev);
614 674
615/** 675/**
616 * mei_cl_device_uevent - me client bus uevent handler 676 * mei_cl_device_uevent - me client bus uevent handler
@@ -624,6 +684,10 @@ static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
624{ 684{
625 struct mei_cl_device *cldev = to_mei_cl_device(dev); 685 struct mei_cl_device *cldev = to_mei_cl_device(dev);
626 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 686 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
687 u8 version = mei_me_cl_ver(cldev->me_cl);
688
689 if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
690 return -ENOMEM;
627 691
628 if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid)) 692 if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
629 return -ENOMEM; 693 return -ENOMEM;
@@ -631,8 +695,8 @@ static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
631 if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name)) 695 if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
632 return -ENOMEM; 696 return -ENOMEM;
633 697
634 if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":", 698 if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
635 cldev->name, MEI_CL_UUID_ARGS(uuid->b))) 699 cldev->name, uuid, version))
636 return -ENOMEM; 700 return -ENOMEM;
637 701
638 return 0; 702 return 0;
@@ -640,7 +704,7 @@ static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
640 704
641static struct bus_type mei_cl_bus_type = { 705static struct bus_type mei_cl_bus_type = {
642 .name = "mei", 706 .name = "mei",
643 .dev_groups = mei_cl_dev_groups, 707 .dev_groups = mei_cldev_groups,
644 .match = mei_cl_device_match, 708 .match = mei_cl_device_match,
645 .probe = mei_cl_device_probe, 709 .probe = mei_cl_device_probe,
646 .remove = mei_cl_device_remove, 710 .remove = mei_cl_device_remove,
@@ -661,7 +725,7 @@ static void mei_dev_bus_put(struct mei_device *bus)
661 put_device(bus->dev); 725 put_device(bus->dev);
662} 726}
663 727
664static void mei_cl_dev_release(struct device *dev) 728static void mei_cl_bus_dev_release(struct device *dev)
665{ 729{
666 struct mei_cl_device *cldev = to_mei_cl_device(dev); 730 struct mei_cl_device *cldev = to_mei_cl_device(dev);
667 731
@@ -674,19 +738,32 @@ static void mei_cl_dev_release(struct device *dev)
674} 738}
675 739
676static struct device_type mei_cl_device_type = { 740static struct device_type mei_cl_device_type = {
677 .release = mei_cl_dev_release, 741 .release = mei_cl_bus_dev_release,
678}; 742};
679 743
680/** 744/**
681 * mei_cl_dev_alloc - initialize and allocate mei client device 745 * mei_cl_bus_set_name - set device name for me client device
746 *
747 * @cldev: me client device
748 */
749static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
750{
751 dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
752 cldev->name,
753 mei_me_cl_uuid(cldev->me_cl),
754 mei_me_cl_ver(cldev->me_cl));
755}
756
757/**
758 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
682 * 759 *
683 * @bus: mei device 760 * @bus: mei device
684 * @me_cl: me client 761 * @me_cl: me client
685 * 762 *
686 * Return: allocated device structure or NULL on allocation failure 763 * Return: allocated device structure or NULL on allocation failure
687 */ 764 */
688static struct mei_cl_device *mei_cl_dev_alloc(struct mei_device *bus, 765static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
689 struct mei_me_client *me_cl) 766 struct mei_me_client *me_cl)
690{ 767{
691 struct mei_cl_device *cldev; 768 struct mei_cl_device *cldev;
692 769
@@ -700,6 +777,7 @@ static struct mei_cl_device *mei_cl_dev_alloc(struct mei_device *bus,
700 cldev->dev.type = &mei_cl_device_type; 777 cldev->dev.type = &mei_cl_device_type;
701 cldev->bus = mei_dev_bus_get(bus); 778 cldev->bus = mei_dev_bus_get(bus);
702 cldev->me_cl = mei_me_cl_get(me_cl); 779 cldev->me_cl = mei_me_cl_get(me_cl);
780 mei_cl_bus_set_name(cldev);
703 cldev->is_added = 0; 781 cldev->is_added = 0;
704 INIT_LIST_HEAD(&cldev->bus_list); 782 INIT_LIST_HEAD(&cldev->bus_list);
705 783
@@ -715,15 +793,15 @@ static struct mei_cl_device *mei_cl_dev_alloc(struct mei_device *bus,
715 * 793 *
716 * Return: true if the device is eligible for enumeration 794 * Return: true if the device is eligible for enumeration
717 */ 795 */
718static bool mei_cl_dev_setup(struct mei_device *bus, 796static bool mei_cl_bus_dev_setup(struct mei_device *bus,
719 struct mei_cl_device *cldev) 797 struct mei_cl_device *cldev)
720{ 798{
721 cldev->do_match = 1; 799 cldev->do_match = 1;
722 mei_cl_dev_fixup(cldev); 800 mei_cl_bus_dev_fixup(cldev);
723 801
802 /* the device name can change during fix up */
724 if (cldev->do_match) 803 if (cldev->do_match)
725 dev_set_name(&cldev->dev, "mei:%s:%pUl", 804 mei_cl_bus_set_name(cldev);
726 cldev->name, mei_me_cl_uuid(cldev->me_cl));
727 805
728 return cldev->do_match == 1; 806 return cldev->do_match == 1;
729} 807}
@@ -739,7 +817,9 @@ static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
739{ 817{
740 int ret; 818 int ret;
741 819
742 dev_dbg(cldev->bus->dev, "adding %pUL\n", mei_me_cl_uuid(cldev->me_cl)); 820 dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
821 mei_me_cl_uuid(cldev->me_cl),
822 mei_me_cl_ver(cldev->me_cl));
743 ret = device_add(&cldev->dev); 823 ret = device_add(&cldev->dev);
744 if (!ret) 824 if (!ret)
745 cldev->is_added = 1; 825 cldev->is_added = 1;
@@ -762,17 +842,20 @@ static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
762 * mei_cl_bus_dev_destroy - destroy me client devices object 842 * mei_cl_bus_dev_destroy - destroy me client devices object
763 * 843 *
764 * @cldev: me client device 844 * @cldev: me client device
845 *
846 * Locking: called under "dev->cl_bus_lock" lock
765 */ 847 */
766static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev) 848static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
767{ 849{
850
851 WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));
852
768 if (!cldev->is_added) 853 if (!cldev->is_added)
769 return; 854 return;
770 855
771 device_del(&cldev->dev); 856 device_del(&cldev->dev);
772 857
773 mutex_lock(&cldev->bus->cl_bus_lock);
774 list_del_init(&cldev->bus_list); 858 list_del_init(&cldev->bus_list);
775 mutex_unlock(&cldev->bus->cl_bus_lock);
776 859
777 cldev->is_added = 0; 860 cldev->is_added = 0;
778 put_device(&cldev->dev); 861 put_device(&cldev->dev);
@@ -798,35 +881,40 @@ void mei_cl_bus_remove_devices(struct mei_device *bus)
798{ 881{
799 struct mei_cl_device *cldev, *next; 882 struct mei_cl_device *cldev, *next;
800 883
884 mutex_lock(&bus->cl_bus_lock);
801 list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list) 885 list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
802 mei_cl_bus_remove_device(cldev); 886 mei_cl_bus_remove_device(cldev);
887 mutex_unlock(&bus->cl_bus_lock);
803} 888}
804 889
805 890
806/** 891/**
807 * mei_cl_dev_init - allocates and initializes a mei client device 892 * mei_cl_bus_dev_init - allocates and initializes a mei client device
808 * based on the me client 893 * based on the me client
809 * 894 *
810 * @bus: mei device 895 * @bus: mei device
811 * @me_cl: me client 896 * @me_cl: me client
897 *
898 * Locking: called under "dev->cl_bus_lock" lock
812 */ 899 */
813static void mei_cl_dev_init(struct mei_device *bus, struct mei_me_client *me_cl) 900static void mei_cl_bus_dev_init(struct mei_device *bus,
901 struct mei_me_client *me_cl)
814{ 902{
815 struct mei_cl_device *cldev; 903 struct mei_cl_device *cldev;
816 904
905 WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
906
817 dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl)); 907 dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
818 908
819 if (me_cl->bus_added) 909 if (me_cl->bus_added)
820 return; 910 return;
821 911
822 cldev = mei_cl_dev_alloc(bus, me_cl); 912 cldev = mei_cl_bus_dev_alloc(bus, me_cl);
823 if (!cldev) 913 if (!cldev)
824 return; 914 return;
825 915
826 mutex_lock(&cldev->bus->cl_bus_lock);
827 me_cl->bus_added = true; 916 me_cl->bus_added = true;
828 list_add_tail(&cldev->bus_list, &bus->device_list); 917 list_add_tail(&cldev->bus_list, &bus->device_list);
829 mutex_unlock(&cldev->bus->cl_bus_lock);
830 918
831} 919}
832 920
@@ -841,12 +929,13 @@ void mei_cl_bus_rescan(struct mei_device *bus)
841 struct mei_cl_device *cldev, *n; 929 struct mei_cl_device *cldev, *n;
842 struct mei_me_client *me_cl; 930 struct mei_me_client *me_cl;
843 931
932 mutex_lock(&bus->cl_bus_lock);
933
844 down_read(&bus->me_clients_rwsem); 934 down_read(&bus->me_clients_rwsem);
845 list_for_each_entry(me_cl, &bus->me_clients, list) 935 list_for_each_entry(me_cl, &bus->me_clients, list)
846 mei_cl_dev_init(bus, me_cl); 936 mei_cl_bus_dev_init(bus, me_cl);
847 up_read(&bus->me_clients_rwsem); 937 up_read(&bus->me_clients_rwsem);
848 938
849 mutex_lock(&bus->cl_bus_lock);
850 list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) { 939 list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
851 940
852 if (!mei_me_cl_is_active(cldev->me_cl)) { 941 if (!mei_me_cl_is_active(cldev->me_cl)) {
@@ -857,7 +946,7 @@ void mei_cl_bus_rescan(struct mei_device *bus)
857 if (cldev->is_added) 946 if (cldev->is_added)
858 continue; 947 continue;
859 948
860 if (mei_cl_dev_setup(bus, cldev)) 949 if (mei_cl_bus_dev_setup(bus, cldev))
861 mei_cl_bus_dev_add(cldev); 950 mei_cl_bus_dev_add(cldev);
862 else { 951 else {
863 list_del_init(&cldev->bus_list); 952 list_del_init(&cldev->bus_list);
@@ -869,7 +958,8 @@ void mei_cl_bus_rescan(struct mei_device *bus)
869 dev_dbg(bus->dev, "rescan end"); 958 dev_dbg(bus->dev, "rescan end");
870} 959}
871 960
872int __mei_cl_driver_register(struct mei_cl_driver *cldrv, struct module *owner) 961int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
962 struct module *owner)
873{ 963{
874 int err; 964 int err;
875 965
@@ -885,15 +975,15 @@ int __mei_cl_driver_register(struct mei_cl_driver *cldrv, struct module *owner)
885 975
886 return 0; 976 return 0;
887} 977}
888EXPORT_SYMBOL_GPL(__mei_cl_driver_register); 978EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
889 979
890void mei_cl_driver_unregister(struct mei_cl_driver *cldrv) 980void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
891{ 981{
892 driver_unregister(&cldrv->driver); 982 driver_unregister(&cldrv->driver);
893 983
894 pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name); 984 pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
895} 985}
896EXPORT_SYMBOL_GPL(mei_cl_driver_unregister); 986EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
897 987
898 988
899int __init mei_cl_bus_init(void) 989int __init mei_cl_bus_init(void)
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 1c7cad07d731..04e1aa39243f 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -68,6 +68,18 @@ static inline const uuid_le *mei_me_cl_uuid(const struct mei_me_client *me_cl)
68 return &me_cl->props.protocol_name; 68 return &me_cl->props.protocol_name;
69} 69}
70 70
71/**
72 * mei_me_cl_ver - return me client protocol version
73 *
74 * @me_cl: me client
75 *
76 * Return: me client protocol version
77 */
78static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
79{
80 return me_cl->props.protocol_version;
81}
82
71/* 83/*
72 * MEI IO Functions 84 * MEI IO Functions
73 */ 85 */
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 8504dbeacd3b..a138d8a27ab5 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -215,7 +215,7 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
215 f = debugfs_create_file("active", S_IRUSR, dir, 215 f = debugfs_create_file("active", S_IRUSR, dir,
216 dev, &mei_dbgfs_fops_active); 216 dev, &mei_dbgfs_fops_active);
217 if (!f) { 217 if (!f) {
218 dev_err(dev->dev, "meclients: registration failed\n"); 218 dev_err(dev->dev, "active: registration failed\n");
219 goto err; 219 goto err;
220 } 220 }
221 f = debugfs_create_file("devstate", S_IRUSR, dir, 221 f = debugfs_create_file("devstate", S_IRUSR, dir,
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 6d7c188fb65c..e7b7aad0999b 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -281,7 +281,7 @@ int mei_hbm_start_req(struct mei_device *dev)
281 return 0; 281 return 0;
282} 282}
283 283
284/* 284/**
285 * mei_hbm_enum_clients_req - sends enumeration client request message. 285 * mei_hbm_enum_clients_req - sends enumeration client request message.
286 * 286 *
287 * @dev: the device structure 287 * @dev: the device structure
@@ -314,7 +314,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
314 return 0; 314 return 0;
315} 315}
316 316
317/* 317/**
318 * mei_hbm_me_cl_add - add new me client to the list 318 * mei_hbm_me_cl_add - add new me client to the list
319 * 319 *
320 * @dev: the device structure 320 * @dev: the device structure
@@ -569,7 +569,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
569 return 0; 569 return 0;
570} 570}
571 571
572/* 572/**
573 * mei_hbm_pg - sends pg command 573 * mei_hbm_pg - sends pg command
574 * 574 *
575 * @dev: the device structure 575 * @dev: the device structure
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 65511d39d89b..25b1997a62cb 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -150,7 +150,7 @@ static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
150 u32 reg; 150 u32 reg;
151 151
152 reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C); 152 reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
153 trace_mei_reg_read(dev->dev, "H_D0I3C", H_CSR, reg); 153 trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
154 154
155 return reg; 155 return reg;
156} 156}
@@ -163,7 +163,7 @@ static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
163 */ 163 */
164static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg) 164static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
165{ 165{
166 trace_mei_reg_write(dev->dev, "H_D0I3C", H_CSR, reg); 166 trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
167 mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg); 167 mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
168} 168}
169 169
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index e374661652cd..3edafc8d3ad4 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -329,10 +329,10 @@ void mei_stop(struct mei_device *dev)
329{ 329{
330 dev_dbg(dev->dev, "stopping the device.\n"); 330 dev_dbg(dev->dev, "stopping the device.\n");
331 331
332 mei_cancel_work(dev);
333
334 mei_cl_bus_remove_devices(dev); 332 mei_cl_bus_remove_devices(dev);
335 333
334 mei_cancel_work(dev);
335
336 mutex_lock(&dev->device_lock); 336 mutex_lock(&dev->device_lock);
337 337
338 mei_wd_stop(dev); 338 mei_wd_stop(dev);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index c418d7888994..64b568a0268d 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -21,6 +21,7 @@
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/pm_runtime.h>
24 25
25#include <linux/mei.h> 26#include <linux/mei.h>
26 27
@@ -147,6 +148,9 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
147 cb->read_time = jiffies; 148 cb->read_time = jiffies;
148 cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx); 149 cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx);
149 list_move_tail(&cb->list, &complete_list->list); 150 list_move_tail(&cb->list, &complete_list->list);
151 } else {
152 pm_runtime_mark_last_busy(dev->dev);
153 pm_request_autosuspend(dev->dev);
150 } 154 }
151 155
152out: 156out:
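The new else branch above lets the device drift back to low power when a receive completes with no further queued work: pm_runtime_mark_last_busy() refreshes the idle timestamp and pm_request_autosuspend() queues an asynchronous suspend that only fires once the autosuspend delay expires. A minimal sketch of the runtime-PM autosuspend idiom, assuming a generic struct device:

    #include <linux/pm_runtime.h>

    /* Probe-time setup: opt in to autosuspend with a 500 ms idle window. */
    static void my_rpm_setup(struct device *dev)
    {
        pm_runtime_set_autosuspend_delay(dev, 500);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
    }

    /* Hot path (interrupt context is fine): hint that we are idle. */
    static void my_rpm_idle_hint(struct device *dev)
    {
        pm_runtime_mark_last_busy(dev);
        pm_request_autosuspend(dev); /* async, does not sleep */
    }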
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index d74b6aa8ae27..4250555d5e72 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -275,32 +275,33 @@ struct mei_cl {
275 struct mei_cl_device *cldev; 275 struct mei_cl_device *cldev;
276}; 276};
277 277
278/** struct mei_hw_ops 278/**
279 * struct mei_hw_ops - hw specific ops
279 * 280 *
280 * @host_is_ready : query for host readiness 281 * @host_is_ready : query for host readiness
281 282 *
282 * @hw_is_ready : query if hw is ready 283 * @hw_is_ready : query if hw is ready
283 * @hw_reset : reset hw 284 * @hw_reset : reset hw
284 * @hw_start : start hw after reset 285 * @hw_start : start hw after reset
285 * @hw_config : configure hw 286 * @hw_config : configure hw
286 287 *
287 * @fw_status : get fw status registers 288 * @fw_status : get fw status registers
288 * @pg_state : power gating state of the device 289 * @pg_state : power gating state of the device
289 * @pg_in_transition : is device now in pg transition 290 * @pg_in_transition : is device now in pg transition
290 * @pg_is_enabled : is power gating enabled 291 * @pg_is_enabled : is power gating enabled
291 292 *
292 * @intr_clear : clear pending interrupts 293 * @intr_clear : clear pending interrupts
293 * @intr_enable : enable interrupts 294 * @intr_enable : enable interrupts
294 * @intr_disable : disable interrupts 295 * @intr_disable : disable interrupts
295 296 *
296 * @hbuf_free_slots : query for write buffer empty slots 297 * @hbuf_free_slots : query for write buffer empty slots
297 * @hbuf_is_ready : query if write buffer is empty 298 * @hbuf_is_ready : query if write buffer is empty
298 * @hbuf_max_len : query for write buffer max len 299 * @hbuf_max_len : query for write buffer max len
299 300 *
300 * @write : write a message to FW 301 * @write : write a message to FW
301 302 *
302 * @rdbuf_full_slots : query how many slots are filled 303 * @rdbuf_full_slots : query how many slots are filled
303 304 *
304 * @read_hdr : get first 4 bytes (header) 305 * @read_hdr : get first 4 bytes (header)
305 * @read : read a buffer from the FW 306 * @read : read a buffer from the FW
306 */ 307 */
@@ -340,7 +341,7 @@ struct mei_hw_ops {
340 341
341/* MEI bus API*/ 342/* MEI bus API*/
342void mei_cl_bus_rescan(struct mei_device *bus); 343void mei_cl_bus_rescan(struct mei_device *bus);
343void mei_cl_dev_fixup(struct mei_cl_device *dev); 344void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
344ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, 345ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
345 bool blocking); 346 bool blocking);
346ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); 347ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index e9f2f56c370d..40677df7f996 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -36,7 +36,7 @@ comment "Intel MIC Host Driver"
36 36
37config INTEL_MIC_HOST 37config INTEL_MIC_HOST
38 tristate "Intel MIC Host Driver" 38 tristate "Intel MIC Host Driver"
39 depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS 39 depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
40 select VHOST_RING 40 select VHOST_RING
41 help 41 help
42 This enables Host Driver support for the Intel Many Integrated 42 This enables Host Driver support for the Intel Many Integrated
@@ -56,7 +56,7 @@ comment "Intel MIC Card Driver"
56 56
57config INTEL_MIC_CARD 57config INTEL_MIC_CARD
58 tristate "Intel MIC Card Driver" 58 tristate "Intel MIC Card Driver"
59 depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS 59 depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
60 select VIRTIO 60 select VIRTIO
61 help 61 help
62 This enables card driver support for the Intel Many Integrated 62 This enables card driver support for the Intel Many Integrated
@@ -74,7 +74,8 @@ comment "SCIF Driver"
74 74
75config SCIF 75config SCIF
76 tristate "SCIF Driver" 76 tristate "SCIF Driver"
77 depends on 64BIT && PCI && X86 && SCIF_BUS 77 depends on 64BIT && PCI && X86 && SCIF_BUS && IOMMU_SUPPORT
78 select IOMMU_IOVA
78 help 79 help
79 This enables SCIF Driver support for the Intel Many Integrated 80 This enables SCIF Driver support for the Intel Many Integrated
80 Core (MIC) family of PCIe form factor coprocessor devices that 81 Core (MIC) family of PCIe form factor coprocessor devices that
@@ -88,3 +89,21 @@ config SCIF
88 More information about the Intel MIC family as well as the Linux 89 More information about the Intel MIC family as well as the Linux
89 OS and tools for MIC to use with this driver are available from 90 OS and tools for MIC to use with this driver are available from
90 <http://software.intel.com/en-us/mic-developer>. 91 <http://software.intel.com/en-us/mic-developer>.
92
93comment "Intel MIC Coprocessor State Management (COSM) Drivers"
94
95config MIC_COSM
96 tristate "Intel MIC Coprocessor State Management (COSM) Drivers"
97 depends on 64BIT && PCI && X86 && SCIF
98 help
99 This enables COSM driver support for the Intel Many
100 Integrated Core (MIC) family of PCIe form factor coprocessor
101 devices. COSM drivers implement functions such as boot,
102 shutdown, reset and reboot of MIC devices.
103
104 If you are building a host kernel with an Intel MIC device then
105 say M (recommended) or Y, else say N. If unsure say N.
106
107 More information about the Intel MIC family as well as the Linux
108 OS and tools for MIC to use with this driver are available from
109 <http://software.intel.com/en-us/mic-developer>.
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
index a74042c58649..e288a1106738 100644
--- a/drivers/misc/mic/Makefile
+++ b/drivers/misc/mic/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_INTEL_MIC_HOST) += host/
6obj-$(CONFIG_INTEL_MIC_CARD) += card/ 6obj-$(CONFIG_INTEL_MIC_CARD) += card/
7obj-y += bus/ 7obj-y += bus/
8obj-$(CONFIG_SCIF) += scif/ 8obj-$(CONFIG_SCIF) += scif/
9obj-$(CONFIG_MIC_COSM) += cosm/
10obj-$(CONFIG_MIC_COSM) += cosm_client/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
index 1ed37e234c96..761842b0d0bb 100644
--- a/drivers/misc/mic/bus/Makefile
+++ b/drivers/misc/mic/bus/Makefile
@@ -4,3 +4,4 @@
4# 4#
5obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o 5obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
6obj-$(CONFIG_SCIF_BUS) += scif_bus.o 6obj-$(CONFIG_SCIF_BUS) += scif_bus.o
7obj-$(CONFIG_MIC_COSM) += cosm_bus.o
diff --git a/drivers/misc/mic/bus/cosm_bus.c b/drivers/misc/mic/bus/cosm_bus.c
new file mode 100644
index 000000000000..d31d6c6e6cb1
--- /dev/null
+++ b/drivers/misc/mic/bus/cosm_bus.c
@@ -0,0 +1,141 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC COSM Bus Driver
19 */
20#include <linux/slab.h>
21#include <linux/module.h>
22#include <linux/idr.h>
23#include "cosm_bus.h"
24
25/* Unique numbering for cosm devices. */
26static DEFINE_IDA(cosm_index_ida);
27
28static int cosm_dev_probe(struct device *d)
29{
30 struct cosm_device *dev = dev_to_cosm(d);
31 struct cosm_driver *drv = drv_to_cosm(dev->dev.driver);
32
33 return drv->probe(dev);
34}
35
36static int cosm_dev_remove(struct device *d)
37{
38 struct cosm_device *dev = dev_to_cosm(d);
39 struct cosm_driver *drv = drv_to_cosm(dev->dev.driver);
40
41 drv->remove(dev);
42 return 0;
43}
44
45static struct bus_type cosm_bus = {
46 .name = "cosm_bus",
47 .probe = cosm_dev_probe,
48 .remove = cosm_dev_remove,
49};
50
51int cosm_register_driver(struct cosm_driver *driver)
52{
53 driver->driver.bus = &cosm_bus;
54 return driver_register(&driver->driver);
55}
56EXPORT_SYMBOL_GPL(cosm_register_driver);
57
58void cosm_unregister_driver(struct cosm_driver *driver)
59{
60 driver_unregister(&driver->driver);
61}
62EXPORT_SYMBOL_GPL(cosm_unregister_driver);
63
64static inline void cosm_release_dev(struct device *d)
65{
66 struct cosm_device *cdev = dev_to_cosm(d);
67
68 kfree(cdev);
69}
70
71struct cosm_device *
72cosm_register_device(struct device *pdev, struct cosm_hw_ops *hw_ops)
73{
74 struct cosm_device *cdev;
75 int ret;
76
77 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
78 if (!cdev)
79 return ERR_PTR(-ENOMEM);
80
81 cdev->dev.parent = pdev;
82 cdev->dev.release = cosm_release_dev;
83 cdev->hw_ops = hw_ops;
84 dev_set_drvdata(&cdev->dev, cdev);
85 cdev->dev.bus = &cosm_bus;
86
87 /* Assign a unique device index and hence name */
88 ret = ida_simple_get(&cosm_index_ida, 0, 0, GFP_KERNEL);
89 if (ret < 0)
90 goto free_cdev;
91
92 cdev->index = ret;
93 cdev->dev.id = ret;
94 dev_set_name(&cdev->dev, "cosm-dev%u", cdev->index);
95
96 ret = device_register(&cdev->dev);
97 if (ret)
98 goto ida_remove;
99 return cdev;
100ida_remove:
101 ida_simple_remove(&cosm_index_ida, cdev->index);
102free_cdev:
103 put_device(&cdev->dev);
104 return ERR_PTR(ret);
105}
106EXPORT_SYMBOL_GPL(cosm_register_device);
107
108void cosm_unregister_device(struct cosm_device *dev)
109{
110 int index = dev->index; /* save for after device release */
111
112 device_unregister(&dev->dev);
113 ida_simple_remove(&cosm_index_ida, index);
114}
115EXPORT_SYMBOL_GPL(cosm_unregister_device);
116
117struct cosm_device *cosm_find_cdev_by_id(int id)
118{
119 struct device *dev = subsys_find_device_by_id(&cosm_bus, id, NULL);
120
121 return dev ? container_of(dev, struct cosm_device, dev) : NULL;
122}
123EXPORT_SYMBOL_GPL(cosm_find_cdev_by_id);
124
125static int __init cosm_init(void)
126{
127 return bus_register(&cosm_bus);
128}
129
130static void __exit cosm_exit(void)
131{
132 bus_unregister(&cosm_bus);
133 ida_destroy(&cosm_index_ida);
134}
135
136core_initcall(cosm_init);
137module_exit(cosm_exit);
138
139MODULE_AUTHOR("Intel Corporation");
140MODULE_DESCRIPTION("Intel(R) MIC card OS state management bus driver");
141MODULE_LICENSE("GPL v2");
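cosm_bus.c above follows the standard Linux bus-driver shape: a bus_type with probe/remove trampolines, IDA-based device naming, and a register/unregister pair for client drivers. A minimal sketch of a client driver built on this API; the my_cosm_* names and the probe/remove bodies are hypothetical:

    #include <linux/device.h>
    #include <linux/module.h>
    #include "cosm_bus.h"

    static int my_cosm_probe(struct cosm_device *cdev)
    {
        dev_info(&cdev->dev, "bound to cosm device %d\n", cdev->index);
        return 0;
    }

    static void my_cosm_remove(struct cosm_device *cdev)
    {
        dev_info(&cdev->dev, "unbound\n");
    }

    static struct cosm_driver my_cosm_driver = {
        .driver = {
            .name  = "my_cosm_client",
            .owner = THIS_MODULE,
        },
        .probe  = my_cosm_probe,
        .remove = my_cosm_remove,
    };

    static int __init my_cosm_init(void)
    {
        return cosm_register_driver(&my_cosm_driver);
    }

    static void __exit my_cosm_exit(void)
    {
        cosm_unregister_driver(&my_cosm_driver);
    }

    module_init(my_cosm_init);
    module_exit(my_cosm_exit);
    MODULE_LICENSE("GPL v2");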
diff --git a/drivers/misc/mic/bus/cosm_bus.h b/drivers/misc/mic/bus/cosm_bus.h
new file mode 100644
index 000000000000..f7c57f266916
--- /dev/null
+++ b/drivers/misc/mic/bus/cosm_bus.h
@@ -0,0 +1,134 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC COSM Bus Driver
19 */
20#ifndef _COSM_BUS_H_
21#define _COSM_BUS_H_
22
23#include <linux/scif.h>
24#include <linux/mic_common.h>
25#include "../common/mic_dev.h"
26
27/**
28 * cosm_device - representation of a cosm device
29 *
30 * @attr_group: Pointer to list of sysfs attribute groups.
31 * @sdev: Device for sysfs entries.
32 * @state: MIC state.
33 * @shutdown_status: MIC status reported by card for shutdown/crashes.
34 * @shutdown_status_int: Internal shutdown status maintained by the driver
35 * @cosm_mutex: Mutex for synchronizing access to data structures.
36 * @reset_trigger_work: Work for triggering reset requests.
37 * @scif_work: Work for handling per device SCIF connections
38 * @cmdline: Kernel command line.
39 * @firmware: Firmware file name.
40 * @ramdisk: Ramdisk file name.
41 * @bootmode: Boot mode i.e. "linux" or "elf" for flash updates.
42 * @log_buf_addr: Log buffer address for MIC.
43 * @log_buf_len: Log buffer length address for MIC.
44 * @state_sysfs: Sysfs dirent for notifying ring 3 about MIC state changes.
45 * @hw_ops: the hardware bus ops for this device.
46 * @dev: underlying device.
47 * @index: unique position on the cosm bus
48 * @dbg_dir: debug fs directory
49 * @newepd: new endpoint from scif accept to be assigned to this cdev
50 * @epd: SCIF endpoint for this cdev
51 * @heartbeat_watchdog_enable: if heartbeat watchdog is enabled for this cdev
52 * @sysfs_heartbeat_enable: sysfs setting for disabling heartbeat notification
53 */
54struct cosm_device {
55 const struct attribute_group **attr_group;
56 struct device *sdev;
57 u8 state;
58 u8 shutdown_status;
59 u8 shutdown_status_int;
60 struct mutex cosm_mutex;
61 struct work_struct reset_trigger_work;
62 struct work_struct scif_work;
63 char *cmdline;
64 char *firmware;
65 char *ramdisk;
66 char *bootmode;
67 void *log_buf_addr;
68 int *log_buf_len;
69 struct kernfs_node *state_sysfs;
70 struct cosm_hw_ops *hw_ops;
71 struct device dev;
72 int index;
73 struct dentry *dbg_dir;
74 scif_epd_t newepd;
75 scif_epd_t epd;
76 bool heartbeat_watchdog_enable;
77 bool sysfs_heartbeat_enable;
78};
79
80/**
81 * cosm_driver - operations for a cosm driver
82 *
83 * @driver: underlying device driver (populate name and owner).
84 * @probe: the function to call when a device is found. Returns 0 or -errno.
85 * @remove: the function to call when a device is removed.
86 */
87struct cosm_driver {
88 struct device_driver driver;
89 int (*probe)(struct cosm_device *dev);
90 void (*remove)(struct cosm_device *dev);
91};
92
93/**
94 * cosm_hw_ops - cosm bus ops
95 *
96 * @reset: trigger MIC reset
97 * @force_reset: force MIC reset
98 * @post_reset: inform MIC reset is complete
99 * @ready: is MIC ready for OS download
100 * @start: boot MIC
101 * @stop: prepare MIC for reset
102 * @family: return MIC HW family string
103 * @stepping: return MIC HW stepping string
104 * @aper: return MIC PCIe aperture
105 */
106struct cosm_hw_ops {
107 void (*reset)(struct cosm_device *cdev);
108 void (*force_reset)(struct cosm_device *cdev);
109 void (*post_reset)(struct cosm_device *cdev, enum mic_states state);
110 bool (*ready)(struct cosm_device *cdev);
111 int (*start)(struct cosm_device *cdev, int id);
112 void (*stop)(struct cosm_device *cdev, bool force);
113 ssize_t (*family)(struct cosm_device *cdev, char *buf);
114 ssize_t (*stepping)(struct cosm_device *cdev, char *buf);
115 struct mic_mw *(*aper)(struct cosm_device *cdev);
116};
117
118struct cosm_device *
119cosm_register_device(struct device *pdev, struct cosm_hw_ops *hw_ops);
120void cosm_unregister_device(struct cosm_device *dev);
121int cosm_register_driver(struct cosm_driver *drv);
122void cosm_unregister_driver(struct cosm_driver *drv);
123struct cosm_device *cosm_find_cdev_by_id(int id);
124
125static inline struct cosm_device *dev_to_cosm(struct device *dev)
126{
127 return container_of(dev, struct cosm_device, dev);
128}
129
130static inline struct cosm_driver *drv_to_cosm(struct device_driver *drv)
131{
132 return container_of(drv, struct cosm_driver, driver);
133}
134#endif /* _COSM_BUS_H */
diff --git a/drivers/misc/mic/bus/mic_bus.c b/drivers/misc/mic/bus/mic_bus.c
index 961ae90aae47..be37890abb93 100644
--- a/drivers/misc/mic/bus/mic_bus.c
+++ b/drivers/misc/mic/bus/mic_bus.c
@@ -25,9 +25,6 @@
25#include <linux/idr.h> 25#include <linux/idr.h>
26#include <linux/mic_bus.h> 26#include <linux/mic_bus.h>
27 27
28/* Unique numbering for mbus devices. */
29static DEFINE_IDA(mbus_index_ida);
30
31static ssize_t device_show(struct device *d, 28static ssize_t device_show(struct device *d,
32 struct device_attribute *attr, char *buf) 29 struct device_attribute *attr, char *buf)
33{ 30{
@@ -147,7 +144,8 @@ static void mbus_release_dev(struct device *d)
147 144
148struct mbus_device * 145struct mbus_device *
149mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, 146mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
150 struct mbus_hw_ops *hw_ops, void __iomem *mmio_va) 147 struct mbus_hw_ops *hw_ops, int index,
148 void __iomem *mmio_va)
151{ 149{
152 int ret; 150 int ret;
153 struct mbus_device *mbdev; 151 struct mbus_device *mbdev;
@@ -166,13 +164,7 @@ mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
166 mbdev->dev.release = mbus_release_dev; 164 mbdev->dev.release = mbus_release_dev;
167 mbdev->hw_ops = hw_ops; 165 mbdev->hw_ops = hw_ops;
168 mbdev->dev.bus = &mic_bus; 166 mbdev->dev.bus = &mic_bus;
169 167 mbdev->index = index;
170 /* Assign a unique device index and hence name. */
171 ret = ida_simple_get(&mbus_index_ida, 0, 0, GFP_KERNEL);
172 if (ret < 0)
173 goto free_mbdev;
174
175 mbdev->index = ret;
176 dev_set_name(&mbdev->dev, "mbus-dev%u", mbdev->index); 168 dev_set_name(&mbdev->dev, "mbus-dev%u", mbdev->index);
177 /* 169 /*
178 * device_register() causes the bus infrastructure to look for a 170 * device_register() causes the bus infrastructure to look for a
@@ -180,22 +172,17 @@ mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
180 */ 172 */
181 ret = device_register(&mbdev->dev); 173 ret = device_register(&mbdev->dev);
182 if (ret) 174 if (ret)
183 goto ida_remove; 175 goto free_mbdev;
184 return mbdev; 176 return mbdev;
185ida_remove:
186 ida_simple_remove(&mbus_index_ida, mbdev->index);
187free_mbdev: 177free_mbdev:
188 kfree(mbdev); 178 put_device(&mbdev->dev);
189 return ERR_PTR(ret); 179 return ERR_PTR(ret);
190} 180}
191EXPORT_SYMBOL_GPL(mbus_register_device); 181EXPORT_SYMBOL_GPL(mbus_register_device);
192 182
193void mbus_unregister_device(struct mbus_device *mbdev) 183void mbus_unregister_device(struct mbus_device *mbdev)
194{ 184{
195 int index = mbdev->index; /* save for after device release */
196
197 device_unregister(&mbdev->dev); 185 device_unregister(&mbdev->dev);
198 ida_simple_remove(&mbus_index_ida, index);
199} 186}
200EXPORT_SYMBOL_GPL(mbus_unregister_device); 187EXPORT_SYMBOL_GPL(mbus_unregister_device);
201 188
@@ -207,7 +194,6 @@ static int __init mbus_init(void)
207static void __exit mbus_exit(void) 194static void __exit mbus_exit(void)
208{ 195{
209 bus_unregister(&mic_bus); 196 bus_unregister(&mic_bus);
210 ida_destroy(&mbus_index_ida);
211} 197}
212 198
213core_initcall(mbus_init); 199core_initcall(mbus_init);
diff --git a/drivers/misc/mic/bus/scif_bus.c b/drivers/misc/mic/bus/scif_bus.c
index 2da7ceed015d..ff6e01c25810 100644
--- a/drivers/misc/mic/bus/scif_bus.c
+++ b/drivers/misc/mic/bus/scif_bus.c
@@ -28,7 +28,6 @@ static ssize_t device_show(struct device *d,
28 28
29 return sprintf(buf, "0x%04x\n", dev->id.device); 29 return sprintf(buf, "0x%04x\n", dev->id.device);
30} 30}
31
32static DEVICE_ATTR_RO(device); 31static DEVICE_ATTR_RO(device);
33 32
34static ssize_t vendor_show(struct device *d, 33static ssize_t vendor_show(struct device *d,
@@ -38,7 +37,6 @@ static ssize_t vendor_show(struct device *d,
38 37
39 return sprintf(buf, "0x%04x\n", dev->id.vendor); 38 return sprintf(buf, "0x%04x\n", dev->id.vendor);
40} 39}
41
42static DEVICE_ATTR_RO(vendor); 40static DEVICE_ATTR_RO(vendor);
43 41
44static ssize_t modalias_show(struct device *d, 42static ssize_t modalias_show(struct device *d,
@@ -49,7 +47,6 @@ static ssize_t modalias_show(struct device *d,
49 return sprintf(buf, "scif:d%08Xv%08X\n", 47 return sprintf(buf, "scif:d%08Xv%08X\n",
50 dev->id.device, dev->id.vendor); 48 dev->id.device, dev->id.vendor);
51} 49}
52
53static DEVICE_ATTR_RO(modalias); 50static DEVICE_ATTR_RO(modalias);
54 51
55static struct attribute *scif_dev_attrs[] = { 52static struct attribute *scif_dev_attrs[] = {
@@ -144,7 +141,8 @@ struct scif_hw_dev *
144scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, 141scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
145 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, 142 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
146 struct mic_mw *mmio, struct mic_mw *aper, void *dp, 143 struct mic_mw *mmio, struct mic_mw *aper, void *dp,
147 void __iomem *rdp, struct dma_chan **chan, int num_chan) 144 void __iomem *rdp, struct dma_chan **chan, int num_chan,
145 bool card_rel_da)
148{ 146{
149 int ret; 147 int ret;
150 struct scif_hw_dev *sdev; 148 struct scif_hw_dev *sdev;
@@ -171,6 +169,7 @@ scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
171 dma_set_mask(&sdev->dev, DMA_BIT_MASK(64)); 169 dma_set_mask(&sdev->dev, DMA_BIT_MASK(64));
172 sdev->dma_ch = chan; 170 sdev->dma_ch = chan;
173 sdev->num_dma_ch = num_chan; 171 sdev->num_dma_ch = num_chan;
172 sdev->card_rel_da = card_rel_da;
174 dev_set_name(&sdev->dev, "scif-dev%u", sdev->dnode); 173 dev_set_name(&sdev->dev, "scif-dev%u", sdev->dnode);
175 /* 174 /*
176 * device_register() causes the bus infrastructure to look for a 175 * device_register() causes the bus infrastructure to look for a
@@ -181,7 +180,7 @@ scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
181 goto free_sdev; 180 goto free_sdev;
182 return sdev; 181 return sdev;
183free_sdev: 182free_sdev:
184 kfree(sdev); 183 put_device(&sdev->dev);
185 return ERR_PTR(ret); 184 return ERR_PTR(ret);
186} 185}
187EXPORT_SYMBOL_GPL(scif_register_device); 186EXPORT_SYMBOL_GPL(scif_register_device);
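This hunk, like the mic_bus.c change above it, swaps kfree() for put_device() on the device_register() error path. Once device_register() has initialized the embedded kobject, the object's lifetime belongs to the kobject reference count and only the release callback may free it; device_register() holds a reference even when it fails. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct my_dev {
        struct device dev;
        /* driver-private state ... */
    };

    static void my_dev_release(struct device *d)
    {
        kfree(container_of(d, struct my_dev, dev)); /* the single free point */
    }

    static struct my_dev *my_dev_register(struct device *parent)
    {
        struct my_dev *mydev = kzalloc(sizeof(*mydev), GFP_KERNEL);
        int ret;

        if (!mydev)
            return ERR_PTR(-ENOMEM);
        mydev->dev.parent = parent;
        mydev->dev.release = my_dev_release;
        dev_set_name(&mydev->dev, "mydev0");

        ret = device_register(&mydev->dev);
        if (ret) {
            /*
             * device_register() took a kobject reference even though it
             * failed; drop it so my_dev_release() runs. Calling kfree()
             * here instead would bypass the refcount and risk a
             * use-after-free if anyone else grabbed a reference.
             */
            put_device(&mydev->dev);
            return ERR_PTR(ret);
        }
        return mydev;
    }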
diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h
index 335a228a8236..94f29ac608b6 100644
--- a/drivers/misc/mic/bus/scif_bus.h
+++ b/drivers/misc/mic/bus/scif_bus.h
@@ -46,6 +46,8 @@ struct scif_hw_dev_id {
46 * @rdp - Remote device page 46 * @rdp - Remote device page
47 * @dma_ch - Array of DMA channels 47 * @dma_ch - Array of DMA channels
48 * @num_dma_ch - Number of DMA channels available 48 * @num_dma_ch - Number of DMA channels available
49 * @card_rel_da - Set to true if DMA addresses programmed in the DMA engine
50 * are relative to the card point of view
49 */ 51 */
50struct scif_hw_dev { 52struct scif_hw_dev {
51 struct scif_hw_ops *hw_ops; 53 struct scif_hw_ops *hw_ops;
@@ -59,6 +61,7 @@ struct scif_hw_dev {
59 void __iomem *rdp; 61 void __iomem *rdp;
60 struct dma_chan **dma_ch; 62 struct dma_chan **dma_ch;
61 int num_dma_ch; 63 int num_dma_ch;
64 bool card_rel_da;
62}; 65};
63 66
64/** 67/**
@@ -114,7 +117,8 @@ scif_register_device(struct device *pdev, int id,
114 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, 117 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
115 struct mic_mw *mmio, struct mic_mw *aper, 118 struct mic_mw *mmio, struct mic_mw *aper,
116 void *dp, void __iomem *rdp, 119 void *dp, void __iomem *rdp,
117 struct dma_chan **chan, int num_chan); 120 struct dma_chan **chan, int num_chan,
121 bool card_rel_da);
118void scif_unregister_device(struct scif_hw_dev *sdev); 122void scif_unregister_device(struct scif_hw_dev *sdev);
119 123
120static inline struct scif_hw_dev *dev_to_scif(struct device *dev) 124static inline struct scif_hw_dev *dev_to_scif(struct device *dev)
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index 6338908b2252..d0edaf7e0cd5 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -37,71 +37,6 @@
37#include "mic_virtio.h" 37#include "mic_virtio.h"
38 38
39static struct mic_driver *g_drv; 39static struct mic_driver *g_drv;
40static struct mic_irq *shutdown_cookie;
41
42static void mic_notify_host(u8 state)
43{
44 struct mic_driver *mdrv = g_drv;
45 struct mic_bootparam __iomem *bootparam = mdrv->dp;
46
47 iowrite8(state, &bootparam->shutdown_status);
48 dev_dbg(mdrv->dev, "%s %d system_state %d\n",
49 __func__, __LINE__, state);
50 mic_send_intr(&mdrv->mdev, ioread8(&bootparam->c2h_shutdown_db));
51}
52
53static int mic_panic_event(struct notifier_block *this, unsigned long event,
54 void *ptr)
55{
56 struct mic_driver *mdrv = g_drv;
57 struct mic_bootparam __iomem *bootparam = mdrv->dp;
58
59 iowrite8(-1, &bootparam->h2c_config_db);
60 iowrite8(-1, &bootparam->h2c_shutdown_db);
61 mic_notify_host(MIC_CRASHED);
62 return NOTIFY_DONE;
63}
64
65static struct notifier_block mic_panic = {
66 .notifier_call = mic_panic_event,
67};
68
69static irqreturn_t mic_shutdown_isr(int irq, void *data)
70{
71 struct mic_driver *mdrv = g_drv;
72 struct mic_bootparam __iomem *bootparam = mdrv->dp;
73
74 mic_ack_interrupt(&g_drv->mdev);
75 if (ioread8(&bootparam->shutdown_card))
76 orderly_poweroff(true);
77 return IRQ_HANDLED;
78}
79
80static int mic_shutdown_init(void)
81{
82 int rc = 0;
83 struct mic_driver *mdrv = g_drv;
84 struct mic_bootparam __iomem *bootparam = mdrv->dp;
85 int shutdown_db;
86
87 shutdown_db = mic_next_card_db();
88 shutdown_cookie = mic_request_card_irq(mic_shutdown_isr, NULL,
89 "Shutdown", mdrv, shutdown_db);
90 if (IS_ERR(shutdown_cookie))
91 rc = PTR_ERR(shutdown_cookie);
92 else
93 iowrite8(shutdown_db, &bootparam->h2c_shutdown_db);
94 return rc;
95}
96
97static void mic_shutdown_uninit(void)
98{
99 struct mic_driver *mdrv = g_drv;
100 struct mic_bootparam __iomem *bootparam = mdrv->dp;
101
102 iowrite8(-1, &bootparam->h2c_shutdown_db);
103 mic_free_card_irq(shutdown_cookie, mdrv);
104}
105 40
106static int __init mic_dp_init(void) 41static int __init mic_dp_init(void)
107{ 42{
@@ -359,11 +294,7 @@ int __init mic_driver_init(struct mic_driver *mdrv)
359 u8 node_id; 294 u8 node_id;
360 295
361 g_drv = mdrv; 296 g_drv = mdrv;
362 /* 297 /* Unloading the card module is not supported. */
363 * Unloading the card module is not supported. The MIC card module
364 * handles fundamental operations like host/card initiated shutdowns
365 * and informing the host about card crashes and cannot be unloaded.
366 */
367 if (!try_module_get(mdrv->dev->driver->owner)) { 298 if (!try_module_get(mdrv->dev->driver->owner)) {
368 rc = -ENODEV; 299 rc = -ENODEV;
369 goto done; 300 goto done;
@@ -374,12 +305,9 @@ int __init mic_driver_init(struct mic_driver *mdrv)
374 rc = mic_init_irq(); 305 rc = mic_init_irq();
375 if (rc) 306 if (rc)
376 goto dp_uninit; 307 goto dp_uninit;
377 rc = mic_shutdown_init();
378 if (rc)
379 goto irq_uninit;
380 if (!mic_request_dma_chans(mdrv)) { 308 if (!mic_request_dma_chans(mdrv)) {
381 rc = -ENODEV; 309 rc = -ENODEV;
382 goto shutdown_uninit; 310 goto irq_uninit;
383 } 311 }
384 rc = mic_devices_init(mdrv); 312 rc = mic_devices_init(mdrv);
385 if (rc) 313 if (rc)
@@ -390,21 +318,18 @@ int __init mic_driver_init(struct mic_driver *mdrv)
390 NULL, &scif_hw_ops, 318 NULL, &scif_hw_ops,
391 0, node_id, &mdrv->mdev.mmio, NULL, 319 0, node_id, &mdrv->mdev.mmio, NULL,
392 NULL, mdrv->dp, mdrv->dma_ch, 320 NULL, mdrv->dp, mdrv->dma_ch,
393 mdrv->num_dma_ch); 321 mdrv->num_dma_ch, true);
394 if (IS_ERR(mdrv->scdev)) { 322 if (IS_ERR(mdrv->scdev)) {
395 rc = PTR_ERR(mdrv->scdev); 323 rc = PTR_ERR(mdrv->scdev);
396 goto device_uninit; 324 goto device_uninit;
397 } 325 }
398 mic_create_card_debug_dir(mdrv); 326 mic_create_card_debug_dir(mdrv);
399 atomic_notifier_chain_register(&panic_notifier_list, &mic_panic);
400done: 327done:
401 return rc; 328 return rc;
402device_uninit: 329device_uninit:
403 mic_devices_uninit(mdrv); 330 mic_devices_uninit(mdrv);
404dma_free: 331dma_free:
405 mic_free_dma_chans(mdrv); 332 mic_free_dma_chans(mdrv);
406shutdown_uninit:
407 mic_shutdown_uninit();
408irq_uninit: 333irq_uninit:
409 mic_uninit_irq(); 334 mic_uninit_irq();
410dp_uninit: 335dp_uninit:
@@ -425,13 +350,6 @@ void mic_driver_uninit(struct mic_driver *mdrv)
425 scif_unregister_device(mdrv->scdev); 350 scif_unregister_device(mdrv->scdev);
426 mic_devices_uninit(mdrv); 351 mic_devices_uninit(mdrv);
427 mic_free_dma_chans(mdrv); 352 mic_free_dma_chans(mdrv);
428 /*
429 * Inform the host about the shutdown status i.e. poweroff/restart etc.
430 * The module cannot be unloaded so the only code path to call
431 * mic_devices_uninit(..) is the shutdown callback.
432 */
433 mic_notify_host(system_state);
434 mic_shutdown_uninit();
435 mic_uninit_irq(); 353 mic_uninit_irq();
436 mic_dp_uninit(); 354 mic_dp_uninit();
437 module_put(mdrv->dev->driver->owner); 355 module_put(mdrv->dev->driver->owner);
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index 77fd41781c2e..b2958ce2368c 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -261,7 +261,7 @@ static int __init mic_probe(struct platform_device *pdev)
261 mic_hw_intr_init(mdrv); 261 mic_hw_intr_init(mdrv);
262 platform_set_drvdata(pdev, mdrv); 262 platform_set_drvdata(pdev, mdrv);
263 mdrv->dma_mbdev = mbus_register_device(mdrv->dev, MBUS_DEV_DMA_MIC, 263 mdrv->dma_mbdev = mbus_register_device(mdrv->dev, MBUS_DEV_DMA_MIC,
264 NULL, &mbus_hw_ops, 264 NULL, &mbus_hw_ops, 0,
265 mdrv->mdev.mmio.va); 265 mdrv->mdev.mmio.va);
266 if (IS_ERR(mdrv->dma_mbdev)) { 266 if (IS_ERR(mdrv->dma_mbdev)) {
267 rc = PTR_ERR(mdrv->dma_mbdev); 267 rc = PTR_ERR(mdrv->dma_mbdev);
diff --git a/drivers/misc/mic/common/mic_dev.h b/drivers/misc/mic/common/mic_dev.h
index 0b58c46045dc..50776772ebdf 100644
--- a/drivers/misc/mic/common/mic_dev.h
+++ b/drivers/misc/mic/common/mic_dev.h
@@ -21,6 +21,19 @@
21#ifndef __MIC_DEV_H__ 21#ifndef __MIC_DEV_H__
22#define __MIC_DEV_H__ 22#define __MIC_DEV_H__
23 23
24/* The maximum number of MIC devices supported in a single host system. */
25#define MIC_MAX_NUM_DEVS 128
26
27/**
28 * enum mic_hw_family - The hardware family to which a device belongs.
29 */
30enum mic_hw_family {
31 MIC_FAMILY_X100 = 0,
32 MIC_FAMILY_X200,
33 MIC_FAMILY_UNKNOWN,
34 MIC_FAMILY_LAST
35};
36
24/** 37/**
25 * struct mic_mw - MIC memory window 38 * struct mic_mw - MIC memory window
26 * 39 *
diff --git a/drivers/misc/mic/cosm/Makefile b/drivers/misc/mic/cosm/Makefile
new file mode 100644
index 000000000000..b85d4d49df46
--- /dev/null
+++ b/drivers/misc/mic/cosm/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile - Intel MIC Coprocessor State Management (COSM) Driver
3# Copyright(c) 2015, Intel Corporation.
4#
5obj-$(CONFIG_MIC_COSM) += mic_cosm.o
6
7mic_cosm-objs := cosm_main.o
8mic_cosm-objs += cosm_debugfs.o
9mic_cosm-objs += cosm_sysfs.o
10mic_cosm-objs += cosm_scif_server.o
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
new file mode 100644
index 000000000000..216cb3cd2fe3
--- /dev/null
+++ b/drivers/misc/mic/cosm/cosm_debugfs.c
@@ -0,0 +1,156 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC Coprocessor State Management (COSM) Driver
19 *
20 */
21
22#include <linux/debugfs.h>
23#include <linux/slab.h>
24#include <linux/io.h>
25#include "cosm_main.h"
26
27/* Debugfs parent dir */
28static struct dentry *cosm_dbg;
29
30/**
31 * cosm_log_buf_show - Display MIC kernel log buffer
32 *
33 * log_buf addr/len is read from System.map by user space
34 * and populated in sysfs entries.
35 */
36static int cosm_log_buf_show(struct seq_file *s, void *unused)
37{
38 void __iomem *log_buf_va;
39 int __iomem *log_buf_len_va;
40 struct cosm_device *cdev = s->private;
41 void *kva;
42 int size;
43 u64 aper_offset;
44
45 if (!cdev || !cdev->log_buf_addr || !cdev->log_buf_len)
46 goto done;
47
48 mutex_lock(&cdev->cosm_mutex);
49 switch (cdev->state) {
50 case MIC_BOOTING:
51 case MIC_ONLINE:
52 case MIC_SHUTTING_DOWN:
53 break;
54 default:
55 goto unlock;
56 }
57
58 /*
59 * Card kernel will never be relocated and any kernel text/data mapping
60 * can be translated to phys address by subtracting __START_KERNEL_map.
61 */
62 aper_offset = (u64)cdev->log_buf_len - __START_KERNEL_map;
63 log_buf_len_va = cdev->hw_ops->aper(cdev)->va + aper_offset;
64 aper_offset = (u64)cdev->log_buf_addr - __START_KERNEL_map;
65 log_buf_va = cdev->hw_ops->aper(cdev)->va + aper_offset;
66
67 size = ioread32(log_buf_len_va);
68 kva = kmalloc(size, GFP_KERNEL);
69 if (!kva)
70 goto unlock;
71
72 memcpy_fromio(kva, log_buf_va, size);
73 seq_write(s, kva, size);
74 kfree(kva);
75unlock:
76 mutex_unlock(&cdev->cosm_mutex);
77done:
78 return 0;
79}
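cosm_log_buf_show() depends on the card kernel never being relocated: any kernel-virtual address taken from the card's System.map becomes an aperture offset after subtracting __START_KERNEL_map. A minimal helper sketching that translation; it assumes the x86_64 __START_KERNEL_map constant and the struct mic_mw 'va' field used above, and like the original it does no bounds checking against the aperture length:

    #include <linux/types.h>
    #include <asm/page.h> /* __START_KERNEL_map on x86_64 */

    /*
     * Translate a card kernel-virtual address (e.g. log_buf from the
     * card's System.map) into a host-side __iomem pointer within the
     * mapped PCIe aperture. Valid only while the card kernel is
     * identity-mapped at __START_KERNEL_map (no relocation/KASLR).
     */
    static void __iomem *card_va_to_aper(struct mic_mw *aper, u64 card_va)
    {
        u64 offset = card_va - __START_KERNEL_map;

        return aper->va + offset;
    }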
80
81static int cosm_log_buf_open(struct inode *inode, struct file *file)
82{
83 return single_open(file, cosm_log_buf_show, inode->i_private);
84}
85
86static const struct file_operations log_buf_ops = {
87 .owner = THIS_MODULE,
88 .open = cosm_log_buf_open,
89 .read = seq_read,
90 .llseek = seq_lseek,
91 .release = single_release
92};
93
94/**
95 * cosm_force_reset_show - Force MIC reset
96 *
97 * Invokes the force_reset COSM bus op instead of the standard reset
98 * op in case a force reset of the MIC device is required
99 */
100static int cosm_force_reset_show(struct seq_file *s, void *pos)
101{
102 struct cosm_device *cdev = s->private;
103
104 cosm_stop(cdev, true);
105 return 0;
106}
107
108static int cosm_force_reset_debug_open(struct inode *inode, struct file *file)
109{
110 return single_open(file, cosm_force_reset_show, inode->i_private);
111}
112
113static const struct file_operations force_reset_ops = {
114 .owner = THIS_MODULE,
115 .open = cosm_force_reset_debug_open,
116 .read = seq_read,
117 .llseek = seq_lseek,
118 .release = single_release
119};
120
121void cosm_create_debug_dir(struct cosm_device *cdev)
122{
123 char name[16];
124
125 if (!cosm_dbg)
126 return;
127
128 scnprintf(name, sizeof(name), "mic%d", cdev->index);
129 cdev->dbg_dir = debugfs_create_dir(name, cosm_dbg);
130 if (!cdev->dbg_dir)
131 return;
132
133 debugfs_create_file("log_buf", 0444, cdev->dbg_dir, cdev, &log_buf_ops);
134 debugfs_create_file("force_reset", 0444, cdev->dbg_dir, cdev,
135 &force_reset_ops);
136}
137
138void cosm_delete_debug_dir(struct cosm_device *cdev)
139{
140 if (!cdev->dbg_dir)
141 return;
142
143 debugfs_remove_recursive(cdev->dbg_dir);
144}
145
146void cosm_init_debugfs(void)
147{
148 cosm_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
149 if (!cosm_dbg)
150 pr_err("can't create debugfs dir\n");
151}
152
153void cosm_exit_debugfs(void)
154{
155 debugfs_remove(cosm_dbg);
156}
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c
new file mode 100644
index 000000000000..4b4b356c797d
--- /dev/null
+++ b/drivers/misc/mic/cosm/cosm_main.c
@@ -0,0 +1,388 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC Coprocessor State Management (COSM) Driver
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/idr.h>
25#include <linux/slab.h>
26#include <linux/cred.h>
27#include "cosm_main.h"
28
29static const char cosm_driver_name[] = "mic";
30
31/* COSM ID allocator */
32static struct ida g_cosm_ida;
33/* Class of MIC devices for sysfs accessibility. */
34static struct class *g_cosm_class;
35/* Number of MIC devices */
36static atomic_t g_num_dev;
37
38/**
39 * cosm_hw_reset - Issue a HW reset for the MIC device
40 * @cdev: pointer to cosm_device instance
41 */
42static void cosm_hw_reset(struct cosm_device *cdev, bool force)
43{
44 int i;
45
46#define MIC_RESET_TO (45)
47 if (force && cdev->hw_ops->force_reset)
48 cdev->hw_ops->force_reset(cdev);
49 else
50 cdev->hw_ops->reset(cdev);
51
52 for (i = 0; i < MIC_RESET_TO; i++) {
53 if (cdev->hw_ops->ready(cdev)) {
54 cosm_set_state(cdev, MIC_READY);
55 return;
56 }
57 /*
58 * Resets typically take 10s of seconds to complete.
59 * Since an MMIO read is required to check if the
60 * firmware is ready or not, a 1 second delay works nicely.
61 */
62 msleep(1000);
63 }
64 cosm_set_state(cdev, MIC_RESET_FAILED);
65}
66
67/**
68 * cosm_start - Start the MIC
69 * @cdev: pointer to cosm_device instance
70 *
 71 * This function prepares a MIC for boot and initiates boot.
72 * RETURNS: An appropriate -ERRNO error value on error, or 0 for success.
73 */
74int cosm_start(struct cosm_device *cdev)
75{
76 const struct cred *orig_cred;
77 struct cred *override_cred;
78 int rc;
79
80 mutex_lock(&cdev->cosm_mutex);
81 if (!cdev->bootmode) {
82 dev_err(&cdev->dev, "%s %d bootmode not set\n",
83 __func__, __LINE__);
84 rc = -EINVAL;
85 goto unlock_ret;
86 }
87retry:
88 if (cdev->state != MIC_READY) {
89 dev_err(&cdev->dev, "%s %d MIC state not READY\n",
90 __func__, __LINE__);
91 rc = -EINVAL;
92 goto unlock_ret;
93 }
94 if (!cdev->hw_ops->ready(cdev)) {
95 cosm_hw_reset(cdev, false);
96 /*
97 * The state will either be MIC_READY if the reset succeeded
98 * or MIC_RESET_FAILED if the firmware reset failed.
99 */
100 goto retry;
101 }
102
103 /*
104 * Set credentials to root to allow a non-root user to download an initramfs
105 * with 600 permissions
106 */
107 override_cred = prepare_creds();
108 if (!override_cred) {
109 dev_err(&cdev->dev, "%s %d prepare_creds failed\n",
110 __func__, __LINE__);
111 rc = -ENOMEM;
112 goto unlock_ret;
113 }
114 override_cred->fsuid = GLOBAL_ROOT_UID;
115 orig_cred = override_creds(override_cred);
116
117 rc = cdev->hw_ops->start(cdev, cdev->index);
118
119 revert_creds(orig_cred);
120 put_cred(override_cred);
121 if (rc)
122 goto unlock_ret;
123
124 /*
125 * If linux is being booted, the card is treated as 'online' only
126 * when the scif interface in the card is up. If anything else
127 * is booted, we set the card to 'online' immediately.
128 */
129 if (!strcmp(cdev->bootmode, "linux"))
130 cosm_set_state(cdev, MIC_BOOTING);
131 else
132 cosm_set_state(cdev, MIC_ONLINE);
133unlock_ret:
134 mutex_unlock(&cdev->cosm_mutex);
135 if (rc)
136 dev_err(&cdev->dev, "cosm_start failed rc %d\n", rc);
137 return rc;
138}
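cosm_start() temporarily raises fsuid to root so that firmware images owned by root with 600 permissions can still be read when boot is triggered by a non-root user. The prepare_creds()/override_creds()/revert_creds() sequence is the standard way to scope such an elevation; a minimal sketch, where do_privileged_io() stands in for the actual file access:

    #include <linux/cred.h>

    static int run_with_root_fsuid(int (*do_privileged_io)(void *), void *arg)
    {
        const struct cred *orig;
        struct cred *override;
        int rc;

        override = prepare_creds(); /* mutable copy of current creds */
        if (!override)
            return -ENOMEM;
        override->fsuid = GLOBAL_ROOT_UID;

        orig = override_creds(override); /* install the copy */
        rc = do_privileged_io(arg);
        revert_creds(orig); /* restore the original creds */
        put_cred(override); /* drop our reference to the copy */
        return rc;
    }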
139
140/**
141 * cosm_stop - Prepare the MIC for reset and trigger reset
142 * @cdev: pointer to cosm_device instance
143 * @force: force a MIC to reset even if it is already reset and ready.
144 *
145 * RETURNS: None
146 */
147void cosm_stop(struct cosm_device *cdev, bool force)
148{
149 mutex_lock(&cdev->cosm_mutex);
150 if (cdev->state != MIC_READY || force) {
151 /*
152 * Don't call hw_ops if they have been called previously.
153 * stop(..) calls device_unregister and will crash the system if
154 * called multiple times.
155 */
156 bool call_hw_ops = cdev->state != MIC_RESET_FAILED &&
157 cdev->state != MIC_READY;
158
159 if (cdev->state != MIC_RESETTING)
160 cosm_set_state(cdev, MIC_RESETTING);
161 cdev->heartbeat_watchdog_enable = false;
162 if (call_hw_ops)
163 cdev->hw_ops->stop(cdev, force);
164 cosm_hw_reset(cdev, force);
165 cosm_set_shutdown_status(cdev, MIC_NOP);
166 if (call_hw_ops && cdev->hw_ops->post_reset)
167 cdev->hw_ops->post_reset(cdev, cdev->state);
168 }
169 mutex_unlock(&cdev->cosm_mutex);
170 flush_work(&cdev->scif_work);
171}
172
173/**
174 * cosm_reset_trigger_work - Trigger MIC reset
175 * @work: The work structure
176 *
177 * This work is scheduled whenever the host wants to reset the MIC.
178 */
179static void cosm_reset_trigger_work(struct work_struct *work)
180{
181 struct cosm_device *cdev = container_of(work, struct cosm_device,
182 reset_trigger_work);
183 cosm_stop(cdev, false);
184}
185
186/**
187 * cosm_reset - Schedule MIC reset
188 * @cdev: pointer to cosm_device instance
189 *
190 * RETURNS: -EINVAL if the card is already READY, or 0 for success.
191 */
192int cosm_reset(struct cosm_device *cdev)
193{
194 int rc = 0;
195
196 mutex_lock(&cdev->cosm_mutex);
197 if (cdev->state != MIC_READY) {
198 cosm_set_state(cdev, MIC_RESETTING);
199 schedule_work(&cdev->reset_trigger_work);
200 } else {
201 dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__);
202 rc = -EINVAL;
203 }
204 mutex_unlock(&cdev->cosm_mutex);
205 return rc;
206}
207
208/**
209 * cosm_shutdown - Initiate MIC shutdown.
210 * @cdev: pointer to cosm_device instance
211 *
212 * RETURNS: 0 for success, or an appropriate -ERRNO error value on error.
213 */
214int cosm_shutdown(struct cosm_device *cdev)
215{
216 struct cosm_msg msg = { .id = COSM_MSG_SHUTDOWN };
217 int rc = 0;
218
219 mutex_lock(&cdev->cosm_mutex);
220 if (cdev->state != MIC_ONLINE) {
221 rc = -EINVAL;
222 dev_err(&cdev->dev, "%s %d skipping shutdown in state: %s\n",
223 __func__, __LINE__, cosm_state_string[cdev->state]);
224 goto err;
225 }
226
227 if (!cdev->epd) {
228 rc = -ENOTCONN;
229 dev_err(&cdev->dev, "%s %d scif endpoint not connected rc %d\n",
230 __func__, __LINE__, rc);
231 goto err;
232 }
233
234 rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
235 if (rc < 0) {
236 dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n",
237 __func__, __LINE__, rc);
238 goto err;
239 }
240 cdev->heartbeat_watchdog_enable = false;
241 cosm_set_state(cdev, MIC_SHUTTING_DOWN);
242 rc = 0;
243err:
244 mutex_unlock(&cdev->cosm_mutex);
245 return rc;
246}
247
248static int cosm_driver_probe(struct cosm_device *cdev)
249{
250 int rc;
251
252 /* Initialize SCIF server at first probe */
253 if (atomic_add_return(1, &g_num_dev) == 1) {
254 rc = cosm_scif_init();
255 if (rc)
256 goto scif_exit;
257 }
258 mutex_init(&cdev->cosm_mutex);
259 INIT_WORK(&cdev->reset_trigger_work, cosm_reset_trigger_work);
260 INIT_WORK(&cdev->scif_work, cosm_scif_work);
261 cdev->sysfs_heartbeat_enable = true;
262 cosm_sysfs_init(cdev);
263 cdev->sdev = device_create_with_groups(g_cosm_class, cdev->dev.parent,
264 MKDEV(0, cdev->index), cdev, cdev->attr_group,
265 "mic%d", cdev->index);
266 if (IS_ERR(cdev->sdev)) {
267 rc = PTR_ERR(cdev->sdev);
268 dev_err(&cdev->dev, "device_create_with_groups failed rc %d\n",
269 rc);
270 goto scif_exit;
271 }
272
273 cdev->state_sysfs = sysfs_get_dirent(cdev->sdev->kobj.sd,
274 "state");
275 if (!cdev->state_sysfs) {
276 rc = -ENODEV;
277 dev_err(&cdev->dev, "sysfs_get_dirent failed rc %d\n", rc);
278 goto destroy_device;
279 }
280 cosm_create_debug_dir(cdev);
281 return 0;
282destroy_device:
283 device_destroy(g_cosm_class, MKDEV(0, cdev->index));
284scif_exit:
285 if (atomic_dec_and_test(&g_num_dev))
286 cosm_scif_exit();
287 return rc;
288}
289
290static void cosm_driver_remove(struct cosm_device *cdev)
291{
292 cosm_delete_debug_dir(cdev);
293 sysfs_put(cdev->state_sysfs);
294 device_destroy(g_cosm_class, MKDEV(0, cdev->index));
295 flush_work(&cdev->reset_trigger_work);
296 cosm_stop(cdev, false);
297 if (atomic_dec_and_test(&g_num_dev))
298 cosm_scif_exit();
299
300 /* These strings may have been allocated via sysfs writes */
301 kfree(cdev->cmdline);
302 kfree(cdev->firmware);
303 kfree(cdev->ramdisk);
304 kfree(cdev->bootmode);
305}
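cosm_driver_probe() and cosm_driver_remove() gate the shared SCIF server on an atomic device count: the first probe brings the server up and the last remove tears it down. A minimal sketch of that first-in/last-out pattern; note that the sketch decrements the counter again when init fails so a later teardown never runs against a service that never started:

    #include <linux/atomic.h>

    static atomic_t dev_count = ATOMIC_INIT(0);

    static int shared_service_get(int (*init)(void))
    {
        /* First device brings the shared service up. */
        if (atomic_add_return(1, &dev_count) == 1) {
            int rc = init();

            if (rc) {
                atomic_dec(&dev_count); /* undo: init never ran */
                return rc;
            }
        }
        return 0;
    }

    static void shared_service_put(void (*exit)(void))
    {
        /* Last device tears the shared service down. */
        if (atomic_dec_and_test(&dev_count))
            exit();
    }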
306
307static int cosm_suspend(struct device *dev)
308{
309 struct cosm_device *cdev = dev_to_cosm(dev);
310
311 mutex_lock(&cdev->cosm_mutex);
312 switch (cdev->state) {
313 /**
314 * Suspend/freeze hooks in userspace have already shutdown the card.
315 * Card should be 'ready' in most cases. It is however possible that
316 * some userspace application initiated a boot. In those cases, we
317 * simply reset the card.
318 */
319 case MIC_ONLINE:
320 case MIC_BOOTING:
321 case MIC_SHUTTING_DOWN:
322 mutex_unlock(&cdev->cosm_mutex);
323 cosm_stop(cdev, false);
324 break;
325 default:
326 mutex_unlock(&cdev->cosm_mutex);
327 break;
328 }
329 return 0;
330}
331
332static const struct dev_pm_ops cosm_pm_ops = {
333 .suspend = cosm_suspend,
334 .freeze = cosm_suspend
335};
336
337static struct cosm_driver cosm_driver = {
338 .driver = {
339 .name = KBUILD_MODNAME,
340 .owner = THIS_MODULE,
341 .pm = &cosm_pm_ops,
342 },
343 .probe = cosm_driver_probe,
344 .remove = cosm_driver_remove
345};
346
347static int __init cosm_init(void)
348{
349 int ret;
350
351 cosm_init_debugfs();
352
353 g_cosm_class = class_create(THIS_MODULE, cosm_driver_name);
354 if (IS_ERR(g_cosm_class)) {
355 ret = PTR_ERR(g_cosm_class);
356 pr_err("class_create failed ret %d\n", ret);
357 goto cleanup_debugfs;
358 }
359
360 ida_init(&g_cosm_ida);
361 ret = cosm_register_driver(&cosm_driver);
362 if (ret) {
363 pr_err("cosm_register_driver failed ret %d\n", ret);
364 goto ida_destroy;
365 }
366 return 0;
367ida_destroy:
368 ida_destroy(&g_cosm_ida);
369 class_destroy(g_cosm_class);
370cleanup_debugfs:
371 cosm_exit_debugfs();
372 return ret;
373}
374
375static void __exit cosm_exit(void)
376{
377 cosm_unregister_driver(&cosm_driver);
378 ida_destroy(&g_cosm_ida);
379 class_destroy(g_cosm_class);
380 cosm_exit_debugfs();
381}
382
383module_init(cosm_init);
384module_exit(cosm_exit);
385
386MODULE_AUTHOR("Intel Corporation");
387MODULE_DESCRIPTION("Intel(R) MIC Coprocessor State Management (COSM) Driver");
388MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/cosm/cosm_main.h b/drivers/misc/mic/cosm/cosm_main.h
new file mode 100644
index 000000000000..f01156fca881
--- /dev/null
+++ b/drivers/misc/mic/cosm/cosm_main.h
@@ -0,0 +1,70 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC Coprocessor State Management (COSM) Driver
19 *
20 */
21#ifndef _COSM_COSM_H_
22#define _COSM_COSM_H_
23
24#include <linux/scif.h>
25#include "../bus/cosm_bus.h"
26
27#define COSM_HEARTBEAT_SEND_SEC 30
28#define SCIF_COSM_LISTEN_PORT 201
29
30/**
 31 * enum cosm_msg_id - COSM message ids
32 * @COSM_MSG_SHUTDOWN: host->card trigger shutdown
33 * @COSM_MSG_SYNC_TIME: host->card send host time to card to sync time
34 * @COSM_MSG_HEARTBEAT: card->host heartbeat
35 * @COSM_MSG_SHUTDOWN_STATUS: card->host with shutdown status as payload
36 */
37enum cosm_msg_id {
38 COSM_MSG_SHUTDOWN,
39 COSM_MSG_SYNC_TIME,
40 COSM_MSG_HEARTBEAT,
41 COSM_MSG_SHUTDOWN_STATUS,
42};
43
44struct cosm_msg {
45 u64 id;
46 union {
47 u64 shutdown_status;
48 struct timespec64 timespec;
49 };
50};
51
52extern const char * const cosm_state_string[];
53extern const char * const cosm_shutdown_status_string[];
54
55void cosm_sysfs_init(struct cosm_device *cdev);
56int cosm_start(struct cosm_device *cdev);
57void cosm_stop(struct cosm_device *cdev, bool force);
58int cosm_reset(struct cosm_device *cdev);
59int cosm_shutdown(struct cosm_device *cdev);
60void cosm_set_state(struct cosm_device *cdev, u8 state);
61void cosm_set_shutdown_status(struct cosm_device *cdev, u8 status);
62void cosm_init_debugfs(void);
63void cosm_exit_debugfs(void);
64void cosm_create_debug_dir(struct cosm_device *cdev);
65void cosm_delete_debug_dir(struct cosm_device *cdev);
66int cosm_scif_init(void);
67void cosm_scif_exit(void);
68void cosm_scif_work(struct work_struct *work);
69
70#endif
diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c
new file mode 100644
index 000000000000..5696df4326b5
--- /dev/null
+++ b/drivers/misc/mic/cosm/cosm_scif_server.c
@@ -0,0 +1,405 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC Coprocessor State Management (COSM) Driver
19 *
20 */
21#include <linux/kthread.h>
22#include "cosm_main.h"
23
24/*
25 * The COSM driver uses SCIF to communicate between the management node and the
26 * MIC cards. SCIF is used to (a) Send a shutdown command to the card (b)
27 * receive a shutdown status back from the card upon completion of shutdown and
28 * (c) receive periodic heartbeat messages from the card used to deduce if the
29 * card has crashed.
30 *
31 * A COSM server consisting of a SCIF listening endpoint waits for incoming
32 * connections from the card. Upon acceptance of the connection, a separate
33 * work-item is scheduled to handle SCIF message processing for that card. The
34 * life-time of this work-item is therefore the time from which the connection
35 * from a card is accepted to the time at which the connection is closed. A new
36 * work-item starts each time the card boots and is alive till the card (a)
37 * shuts down (b) is reset (c) crashes (d) cosm_client driver on the card is
38 * unloaded.
39 *
40 * From the point of view of COSM interactions with SCIF during card
41 * shutdown, reset and crash are as follows:
42 *
43 * Card shutdown
44 * -------------
45 * 1. COSM client on the card invokes orderly_poweroff() in response to SHUTDOWN
46 * message from the host.
47 * 2. Card driver shutdown callback invokes scif_unregister_device(..) resulting
48 * in scif_remove(..) getting called on the card
49 * 3. scif_remove -> scif_stop -> scif_handle_remove_node ->
50 * scif_peer_unregister_device -> device_unregister for the host peer device
51 * 4. During device_unregister remove(..) method of cosm_client is invoked which
52 * closes the COSM SCIF endpoint on the card. This results in a SCIF_DISCNCT
53 * message being sent to host SCIF. SCIF_DISCNCT message processing on the
54 * host SCIF sets the host COSM SCIF endpoint state to DISCONNECTED and wakes
55 * up the host COSM thread blocked in scif_poll(..) resulting in
56 * scif_poll(..) returning POLLHUP.
57 * 5. On the card, scif_peer_release_dev is called next, which results in a
58 *    SCIF_EXIT message being sent to the host; after receiving the
59 *    SCIF_EXIT_ACK from the host the peer device teardown on the card is
60 * complete.
61 * 6. As part of the SCIF_EXIT message processing on the host, it sends a
62 * SCIF_REMOVE_NODE to itself corresponding to the card being removed. This
63 * starts a similar SCIF peer device teardown sequence on the host
64 * corresponding to the card being shut down.
65 *
66 * Card reset
67 * ----------
68 * The case of interest here is when the card has not previously been shut
69 * down, since most of the steps below are skipped in that case:
70 *
71 * 1. cosm_stop(..) invokes hw_ops->stop(..) method of the base PCIe driver
72 * which unregisters the SCIF HW device resulting in scif_remove(..) being
73 * called on the host.
74 * 2. scif_remove(..) calls scif_disconnect_node(..) which results in a
75 * SCIF_EXIT message being sent to the card.
76 * 3. The card executes scif_stop() as part of SCIF_EXIT message
77 * processing. This results in the COSM endpoint on the card being closed and
78 * the SCIF host peer device on the card getting unregistered similar to
79 * steps 3, 4 and 5 for the card shutdown case above. scif_poll(..) on the
80 * host returns POLLHUP as a result.
81 * 4. On the host, card peer device unregister and SCIF HW remove(..) also
82 * subsequently complete.
83 *
84 * Card crash
85 * ----------
86 * If a reset is issued after the card has crashed, there is no SCIF_DISCNCT
87 * message from the card that would make scif_poll(..) return POLLHUP. In
88 * this case the host SCIF driver sends a SCIF_REMOVE_NODE message to itself,
89 * unregistering the card SCIF peer device. This triggers a
90 * scif_peer_release_dev -> scif_cleanup_scifdev -> scif_invalidate_ep call
91 * sequence, which sets the endpoint state to DISCONNECTED and makes
92 * scif_poll(..) return POLLHUP.
93 */
94
95#define COSM_SCIF_BACKLOG 16
96#define COSM_HEARTBEAT_CHECK_DELTA_SEC 10
97#define COSM_HEARTBEAT_TIMEOUT_SEC \
98 (COSM_HEARTBEAT_SEND_SEC + COSM_HEARTBEAT_CHECK_DELTA_SEC)
99#define COSM_HEARTBEAT_TIMEOUT_MSEC (COSM_HEARTBEAT_TIMEOUT_SEC * MSEC_PER_SEC)
100
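As a minimal sketch of the crash-detection pattern described above (epd and
timeout_msec are placeholder names, not identifiers from this patch), the
poll-with-timeout logic amounts to:

	struct scif_pollepd ep = { .epd = epd, .events = POLLIN };
	int rc = scif_poll(&ep, 1, timeout_msec);

	if (rc < 0) {
		/* poll error: log it and retry */
	} else if (rc == 0) {
		/* timeout: no heartbeat arrived, treat the card as crashed */
	} else {
		if (ep.revents & POLLIN) {
			/* message(s) pending: drain with non-blocking scif_recv() */
		}
		if (ep.revents & POLLHUP) {
			/* peer endpoint went away: close this endpoint */
		}
	}
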
101static struct task_struct *server_thread;
102static scif_epd_t listen_epd;
103
104/* Publish MIC card's shutdown status to user space MIC daemon */
105static void cosm_update_mic_status(struct cosm_device *cdev)
106{
107 if (cdev->shutdown_status_int != MIC_NOP) {
108 cosm_set_shutdown_status(cdev, cdev->shutdown_status_int);
109 cdev->shutdown_status_int = MIC_NOP;
110 }
111}
112
113/* Store MIC card's shutdown status internally when it is received */
114static void cosm_shutdown_status_int(struct cosm_device *cdev,
115 enum mic_status shutdown_status)
116{
117 switch (shutdown_status) {
118 case MIC_HALTED:
119 case MIC_POWER_OFF:
120 case MIC_RESTART:
121 case MIC_CRASHED:
122 break;
123 default:
124 dev_err(&cdev->dev, "%s %d Unexpected shutdown_status %d\n",
125 __func__, __LINE__, shutdown_status);
126 return;
127 }
128 cdev->shutdown_status_int = shutdown_status;
129 cdev->heartbeat_watchdog_enable = false;
130
131 if (cdev->state != MIC_SHUTTING_DOWN)
132 cosm_set_state(cdev, MIC_SHUTTING_DOWN);
133}
134
135/* Non-blocking recv. Read and process all available messages */
136static void cosm_scif_recv(struct cosm_device *cdev)
137{
138 struct cosm_msg msg;
139 int rc;
140
141 while (1) {
142 rc = scif_recv(cdev->epd, &msg, sizeof(msg), 0);
143 if (!rc) {
144 break;
145 } else if (rc < 0) {
146 dev_dbg(&cdev->dev, "%s: %d rc %d\n",
147 __func__, __LINE__, rc);
148 break;
149 }
150 dev_dbg(&cdev->dev, "%s: %d rc %d id 0x%llx\n",
151 __func__, __LINE__, rc, msg.id);
152
153 switch (msg.id) {
154 case COSM_MSG_SHUTDOWN_STATUS:
155 cosm_shutdown_status_int(cdev, msg.shutdown_status);
156 break;
157 case COSM_MSG_HEARTBEAT:
158 /* Nothing to do, heartbeat only unblocks scif_poll */
159 break;
160 default:
161 dev_err(&cdev->dev, "%s: %d unknown msg.id %lld\n",
162 __func__, __LINE__, msg.id);
163 break;
164 }
165 }
166}
167
168/* Publish crashed status for this MIC card */
169static void cosm_set_crashed(struct cosm_device *cdev)
170{
171 dev_err(&cdev->dev, "node alive timeout\n");
172 cosm_shutdown_status_int(cdev, MIC_CRASHED);
173 cosm_update_mic_status(cdev);
174}
175
176/* Send host time to the MIC card to sync system time between host and MIC */
177static void cosm_send_time(struct cosm_device *cdev)
178{
179 struct cosm_msg msg = { .id = COSM_MSG_SYNC_TIME };
180 int rc;
181
182 getnstimeofday64(&msg.timespec);
183 rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
184 if (rc < 0)
185 dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n",
186 __func__, __LINE__, rc);
187}
188
189/*
190 * Close this cosm_device's endpoint after its peer endpoint on the card has
191 * been closed. In all cases except a MIC card crash, POLLHUP on the host is
192 * triggered by the client's endpoint being closed.
193 */
194static void cosm_scif_close(struct cosm_device *cdev)
195{
196 /*
197 * Because the SHUTDOWN_STATUS message is sent by the MIC card from its
198 * reboot notifier, i.e. while shutdown is still in progress, we notify
199 * mpssd to reset the card only once the SCIF endpoint is closed.
200 */
201 cosm_update_mic_status(cdev);
202 scif_close(cdev->epd);
203 cdev->epd = NULL;
204 dev_dbg(&cdev->dev, "%s %d\n", __func__, __LINE__);
205}
206
207/*
208 * Set card state to ONLINE when a new SCIF connection from a MIC card is
209 * received. Normally the state is BOOTING when the connection comes in, but can
210 * be ONLINE if the cosm_client driver on the card was unloaded and reloaded.
211 */
212static int cosm_set_online(struct cosm_device *cdev)
213{
214 int rc = 0;
215
216 if (MIC_BOOTING == cdev->state || MIC_ONLINE == cdev->state) {
217 cdev->heartbeat_watchdog_enable = cdev->sysfs_heartbeat_enable;
218 cdev->epd = cdev->newepd;
219 if (cdev->state == MIC_BOOTING)
220 cosm_set_state(cdev, MIC_ONLINE);
221 cosm_send_time(cdev);
222 dev_dbg(&cdev->dev, "%s %d\n", __func__, __LINE__);
223 } else {
224 dev_warn(&cdev->dev, "%s %d not going online in state: %s\n",
225 __func__, __LINE__, cosm_state_string[cdev->state]);
226 rc = -EINVAL;
227 }
228 /* Drop reference acquired by bus_find_device in the server thread */
229 put_device(&cdev->dev);
230 return rc;
231}
232
233/*
234 * Work function for handling work for a SCIF connection from a particular MIC
235 * card. It first sets the card state to ONLINE and then calls scif_poll to
236 * block on activity such as incoming messages on the SCIF endpoint. When the
237 * endpoint is closed, the work function exits, completing its life cycle, from
238 * MIC card boot to card shutdown/reset/crash.
239 */
240void cosm_scif_work(struct work_struct *work)
241{
242 struct cosm_device *cdev = container_of(work, struct cosm_device,
243 scif_work);
244 struct scif_pollepd pollepd;
245 int rc;
246
247 mutex_lock(&cdev->cosm_mutex);
248 if (cosm_set_online(cdev))
249 goto exit;
250
251 while (1) {
252 pollepd.epd = cdev->epd;
253 pollepd.events = POLLIN;
254
255 /* Drop the mutex before blocking in scif_poll(..) */
256 mutex_unlock(&cdev->cosm_mutex);
257 /* poll(..) with timeout on our endpoint */
258 rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_TIMEOUT_MSEC);
259 mutex_lock(&cdev->cosm_mutex);
260 if (rc < 0) {
261 dev_err(&cdev->dev, "%s %d scif_poll rc %d\n",
262 __func__, __LINE__, rc);
263 continue;
264 }
265
266 /* There is a message from the card */
267 if (pollepd.revents & POLLIN)
268 cosm_scif_recv(cdev);
269
270 /* The peer endpoint is closed or this endpoint disconnected */
271 if (pollepd.revents & POLLHUP) {
272 cosm_scif_close(cdev);
273 break;
274 }
275
276 /* Did we timeout from poll? */
277 if (!rc && cdev->heartbeat_watchdog_enable)
278 cosm_set_crashed(cdev);
279 }
280exit:
281 dev_dbg(&cdev->dev, "%s %d exiting\n", __func__, __LINE__);
282 mutex_unlock(&cdev->cosm_mutex);
283}
284
285/*
286 * COSM SCIF server thread function. Accepts incoming SCIF connections from MIC
287 * cards, finds the correct cosm_device to associate that connection with and
288 * schedules individual work items for each MIC card.
289 */
290static int cosm_scif_server(void *unused)
291{
292 struct cosm_device *cdev;
293 scif_epd_t newepd;
294 struct scif_port_id port_id;
295 int rc;
296
297 allow_signal(SIGKILL);
298
299 while (!kthread_should_stop()) {
300 rc = scif_accept(listen_epd, &port_id, &newepd,
301 SCIF_ACCEPT_SYNC);
302 if (rc < 0) {
303 if (-ERESTARTSYS != rc)
304 pr_err("%s %d rc %d\n", __func__, __LINE__, rc);
305 continue;
306 }
307
308 /*
309 * Associate the incoming connection with a particular
310 * cosm_device, COSM device ID == SCIF node ID - 1
311 */
312 cdev = cosm_find_cdev_by_id(port_id.node - 1);
313 if (!cdev)
314 continue;
315 cdev->newepd = newepd;
316 schedule_work(&cdev->scif_work);
317 }
318
319 pr_debug("%s %d Server thread stopped\n", __func__, __LINE__);
320 return 0;
321}
322
323static int cosm_scif_listen(void)
324{
325 int rc;
326
327 listen_epd = scif_open();
328 if (!listen_epd) {
329 pr_err("%s %d scif_open failed\n", __func__, __LINE__);
330 return -ENOMEM;
331 }
332
333 rc = scif_bind(listen_epd, SCIF_COSM_LISTEN_PORT);
334 if (rc < 0) {
335 pr_err("%s %d scif_bind failed rc %d\n",
336 __func__, __LINE__, rc);
337 goto err;
338 }
339
340 rc = scif_listen(listen_epd, COSM_SCIF_BACKLOG);
341 if (rc < 0) {
342 pr_err("%s %d scif_listen rc %d\n", __func__, __LINE__, rc);
343 goto err;
344 }
345 pr_debug("%s %d listen_epd set up\n", __func__, __LINE__);
346 return 0;
347err:
348 scif_close(listen_epd);
349 listen_epd = NULL;
350 return rc;
351}
352
353static void cosm_scif_listen_exit(void)
354{
355 pr_debug("%s %d closing listen_epd\n", __func__, __LINE__);
356 if (listen_epd) {
357 scif_close(listen_epd);
358 listen_epd = NULL;
359 }
360}
361
362/*
363 * Create a listening SCIF endpoint and a server kthread which accepts incoming
364 * SCIF connections from MIC cards
365 */
366int cosm_scif_init(void)
367{
368 int rc = cosm_scif_listen();
369
370 if (rc) {
371 pr_err("%s %d cosm_scif_listen rc %d\n",
372 __func__, __LINE__, rc);
373 goto err;
374 }
375
376 server_thread = kthread_run(cosm_scif_server, NULL, "cosm_server");
377 if (IS_ERR(server_thread)) {
378 rc = PTR_ERR(server_thread);
379 pr_err("%s %d kthread_run rc %d\n", __func__, __LINE__, rc);
380 goto listen_exit;
381 }
382 return 0;
383listen_exit:
384 cosm_scif_listen_exit();
385err:
386 return rc;
387}
388
389/* Stop the running server thread and close the listening SCIF endpoint */
390void cosm_scif_exit(void)
391{
392 int rc;
393
394 if (!IS_ERR_OR_NULL(server_thread)) {
395 rc = send_sig(SIGKILL, server_thread, 0);
396 if (rc) {
397 pr_err("%s %d send_sig rc %d\n",
398 __func__, __LINE__, rc);
399 return;
400 }
401 kthread_stop(server_thread);
402 }
403
404 cosm_scif_listen_exit();
405}
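
The SIGKILL-then-kthread_stop() pairing in cosm_scif_exit() works because the
server thread called allow_signal(SIGKILL): kernel threads ignore signals by
default, and the signal is what makes the blocking scif_accept() return
-ERESTARTSYS so the loop can observe kthread_should_stop(). A generic sketch
of that pattern (worker and blocking_call are illustrative names, not from
this patch):

	static int worker(void *unused)
	{
		allow_signal(SIGKILL);	/* let SIGKILL interrupt blocking calls */
		while (!kthread_should_stop()) {
			/* blocking_call() stands in for scif_accept() etc. */
			if (blocking_call() == -ERESTARTSYS)
				continue;	/* loop re-checks kthread_should_stop() */
			/* ... handle the completed call ... */
		}
		return 0;
	}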
diff --git a/drivers/misc/mic/cosm/cosm_sysfs.c b/drivers/misc/mic/cosm/cosm_sysfs.c
new file mode 100644
index 000000000000..29d6863b6e59
--- /dev/null
+++ b/drivers/misc/mic/cosm/cosm_sysfs.c
@@ -0,0 +1,461 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC Coprocessor State Management (COSM) Driver
19 *
20 */
21#include <linux/slab.h>
22#include "cosm_main.h"
23
24/*
25 * A state-to-string lookup table, for exposing a human-readable state
26 * via sysfs. Always keep in sync with enum cosm_states.
27 */
28const char * const cosm_state_string[] = {
29 [MIC_READY] = "ready",
30 [MIC_BOOTING] = "booting",
31 [MIC_ONLINE] = "online",
32 [MIC_SHUTTING_DOWN] = "shutting_down",
33 [MIC_RESETTING] = "resetting",
34 [MIC_RESET_FAILED] = "reset_failed",
35};
36
37/*
38 * A shutdown-status-to-string lookup table, for exposing a human-readable
39 * status via sysfs. Always keep in sync with enum cosm_shutdown_status.
40 */
41const char * const cosm_shutdown_status_string[] = {
42 [MIC_NOP] = "nop",
43 [MIC_CRASHED] = "crashed",
44 [MIC_HALTED] = "halted",
45 [MIC_POWER_OFF] = "poweroff",
46 [MIC_RESTART] = "restart",
47};
48
49void cosm_set_shutdown_status(struct cosm_device *cdev, u8 shutdown_status)
50{
51 dev_dbg(&cdev->dev, "Shutdown Status %s -> %s\n",
52 cosm_shutdown_status_string[cdev->shutdown_status],
53 cosm_shutdown_status_string[shutdown_status]);
54 cdev->shutdown_status = shutdown_status;
55}
56
57void cosm_set_state(struct cosm_device *cdev, u8 state)
58{
59 dev_dbg(&cdev->dev, "State %s -> %s\n",
60 cosm_state_string[cdev->state],
61 cosm_state_string[state]);
62 cdev->state = state;
63 sysfs_notify_dirent(cdev->state_sysfs);
64}
65
66static ssize_t
67family_show(struct device *dev, struct device_attribute *attr, char *buf)
68{
69 struct cosm_device *cdev = dev_get_drvdata(dev);
70
71 if (!cdev)
72 return -EINVAL;
73
74 return cdev->hw_ops->family(cdev, buf);
75}
76static DEVICE_ATTR_RO(family);
77
78static ssize_t
79stepping_show(struct device *dev, struct device_attribute *attr, char *buf)
80{
81 struct cosm_device *cdev = dev_get_drvdata(dev);
82
83 if (!cdev)
84 return -EINVAL;
85
86 return cdev->hw_ops->stepping(cdev, buf);
87}
88static DEVICE_ATTR_RO(stepping);
89
90static ssize_t
91state_show(struct device *dev, struct device_attribute *attr, char *buf)
92{
93 struct cosm_device *cdev = dev_get_drvdata(dev);
94
95 if (!cdev || cdev->state >= MIC_LAST)
96 return -EINVAL;
97
98 return scnprintf(buf, PAGE_SIZE, "%s\n",
99 cosm_state_string[cdev->state]);
100}
101
102static ssize_t
103state_store(struct device *dev, struct device_attribute *attr,
104 const char *buf, size_t count)
105{
106 struct cosm_device *cdev = dev_get_drvdata(dev);
107 int rc;
108
109 if (!cdev)
110 return -EINVAL;
111
112 if (sysfs_streq(buf, "boot")) {
113 rc = cosm_start(cdev);
114 goto done;
115 }
116 if (sysfs_streq(buf, "reset")) {
117 rc = cosm_reset(cdev);
118 goto done;
119 }
120
121 if (sysfs_streq(buf, "shutdown")) {
122 rc = cosm_shutdown(cdev);
123 goto done;
124 }
125 rc = -EINVAL;
126done:
127 if (rc)
128 count = rc;
129 return count;
130}
131static DEVICE_ATTR_RW(state);
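
As a usage illustration, assuming the device is exposed under a sysfs path
such as /sys/class/mic/mic0 (an assumption; the exact path depends on how the
COSM bus registers the device, see sysfs-class-mic.txt in this series), a
user-space write of one of the three accepted commands could look like:

	/* Hypothetical user-space helper; the sysfs path is an assumption */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* cmd is one of "boot", "reset" or "shutdown" */
	static int mic_state_write(const char *cmd)
	{
		int fd = open("/sys/class/mic/mic0/state", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, cmd, strlen(cmd));
		close(fd);
		return n < 0 ? -1 : 0;
	}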
132
133static ssize_t shutdown_status_show(struct device *dev,
134 struct device_attribute *attr, char *buf)
135{
136 struct cosm_device *cdev = dev_get_drvdata(dev);
137
138 if (!cdev || cdev->shutdown_status >= MIC_STATUS_LAST)
139 return -EINVAL;
140
141 return scnprintf(buf, PAGE_SIZE, "%s\n",
142 cosm_shutdown_status_string[cdev->shutdown_status]);
143}
144static DEVICE_ATTR_RO(shutdown_status);
145
146static ssize_t
147heartbeat_enable_show(struct device *dev,
148 struct device_attribute *attr, char *buf)
149{
150 struct cosm_device *cdev = dev_get_drvdata(dev);
151
152 if (!cdev)
153 return -EINVAL;
154
155 return scnprintf(buf, PAGE_SIZE, "%d\n", cdev->sysfs_heartbeat_enable);
156}
157
158static ssize_t
159heartbeat_enable_store(struct device *dev,
160 struct device_attribute *attr,
161 const char *buf, size_t count)
162{
163 struct cosm_device *cdev = dev_get_drvdata(dev);
164 int enable;
165 int ret;
166
167 if (!cdev)
168 return -EINVAL;
169
170 mutex_lock(&cdev->cosm_mutex);
171 ret = kstrtoint(buf, 10, &enable);
172 if (ret)
173 goto unlock;
174
175 cdev->sysfs_heartbeat_enable = enable;
176	/* If the state is not MIC_ONLINE, heartbeat_watchdog_enable stays 0 */
177 if (cdev->state == MIC_ONLINE)
178 cdev->heartbeat_watchdog_enable = enable;
179 ret = count;
180unlock:
181 mutex_unlock(&cdev->cosm_mutex);
182 return ret;
183}
184static DEVICE_ATTR_RW(heartbeat_enable);
185
186static ssize_t
187cmdline_show(struct device *dev, struct device_attribute *attr, char *buf)
188{
189 struct cosm_device *cdev = dev_get_drvdata(dev);
190 char *cmdline;
191
192 if (!cdev)
193 return -EINVAL;
194
195 cmdline = cdev->cmdline;
196
197 if (cmdline)
198 return scnprintf(buf, PAGE_SIZE, "%s\n", cmdline);
199 return 0;
200}
201
202static ssize_t
203cmdline_store(struct device *dev, struct device_attribute *attr,
204 const char *buf, size_t count)
205{
206 struct cosm_device *cdev = dev_get_drvdata(dev);
207
208 if (!cdev)
209 return -EINVAL;
210
211 mutex_lock(&cdev->cosm_mutex);
212 kfree(cdev->cmdline);
213
214 cdev->cmdline = kmalloc(count + 1, GFP_KERNEL);
215 if (!cdev->cmdline) {
216 count = -ENOMEM;
217 goto unlock;
218 }
219
220 strncpy(cdev->cmdline, buf, count);
221
222 if (cdev->cmdline[count - 1] == '\n')
223 cdev->cmdline[count - 1] = '\0';
224 else
225 cdev->cmdline[count] = '\0';
226unlock:
227 mutex_unlock(&cdev->cosm_mutex);
228 return count;
229}
230static DEVICE_ATTR_RW(cmdline);
231
232static ssize_t
233firmware_show(struct device *dev, struct device_attribute *attr, char *buf)
234{
235 struct cosm_device *cdev = dev_get_drvdata(dev);
236 char *firmware;
237
238 if (!cdev)
239 return -EINVAL;
240
241 firmware = cdev->firmware;
242
243 if (firmware)
244 return scnprintf(buf, PAGE_SIZE, "%s\n", firmware);
245 return 0;
246}
247
248static ssize_t
249firmware_store(struct device *dev, struct device_attribute *attr,
250 const char *buf, size_t count)
251{
252 struct cosm_device *cdev = dev_get_drvdata(dev);
253
254 if (!cdev)
255 return -EINVAL;
256
257 mutex_lock(&cdev->cosm_mutex);
258 kfree(cdev->firmware);
259
260 cdev->firmware = kmalloc(count + 1, GFP_KERNEL);
261 if (!cdev->firmware) {
262 count = -ENOMEM;
263 goto unlock;
264 }
265 strncpy(cdev->firmware, buf, count);
266
267 if (cdev->firmware[count - 1] == '\n')
268 cdev->firmware[count - 1] = '\0';
269 else
270 cdev->firmware[count] = '\0';
271unlock:
272 mutex_unlock(&cdev->cosm_mutex);
273 return count;
274}
275static DEVICE_ATTR_RW(firmware);
276
277static ssize_t
278ramdisk_show(struct device *dev, struct device_attribute *attr, char *buf)
279{
280 struct cosm_device *cdev = dev_get_drvdata(dev);
281 char *ramdisk;
282
283 if (!cdev)
284 return -EINVAL;
285
286 ramdisk = cdev->ramdisk;
287
288 if (ramdisk)
289 return scnprintf(buf, PAGE_SIZE, "%s\n", ramdisk);
290 return 0;
291}
292
293static ssize_t
294ramdisk_store(struct device *dev, struct device_attribute *attr,
295 const char *buf, size_t count)
296{
297 struct cosm_device *cdev = dev_get_drvdata(dev);
298
299 if (!cdev)
300 return -EINVAL;
301
302 mutex_lock(&cdev->cosm_mutex);
303 kfree(cdev->ramdisk);
304
305 cdev->ramdisk = kmalloc(count + 1, GFP_KERNEL);
306 if (!cdev->ramdisk) {
307 count = -ENOMEM;
308 goto unlock;
309 }
310
311 strncpy(cdev->ramdisk, buf, count);
312
313 if (cdev->ramdisk[count - 1] == '\n')
314 cdev->ramdisk[count - 1] = '\0';
315 else
316 cdev->ramdisk[count] = '\0';
317unlock:
318 mutex_unlock(&cdev->cosm_mutex);
319 return count;
320}
321static DEVICE_ATTR_RW(ramdisk);
322
323static ssize_t
324bootmode_show(struct device *dev, struct device_attribute *attr, char *buf)
325{
326 struct cosm_device *cdev = dev_get_drvdata(dev);
327 char *bootmode;
328
329 if (!cdev)
330 return -EINVAL;
331
332 bootmode = cdev->bootmode;
333
334 if (bootmode)
335 return scnprintf(buf, PAGE_SIZE, "%s\n", bootmode);
336 return 0;
337}
338
339static ssize_t
340bootmode_store(struct device *dev, struct device_attribute *attr,
341 const char *buf, size_t count)
342{
343 struct cosm_device *cdev = dev_get_drvdata(dev);
344
345 if (!cdev)
346 return -EINVAL;
347
348 if (!sysfs_streq(buf, "linux") && !sysfs_streq(buf, "flash"))
349 return -EINVAL;
350
351 mutex_lock(&cdev->cosm_mutex);
352 kfree(cdev->bootmode);
353
354 cdev->bootmode = kmalloc(count + 1, GFP_KERNEL);
355 if (!cdev->bootmode) {
356 count = -ENOMEM;
357 goto unlock;
358 }
359
360 strncpy(cdev->bootmode, buf, count);
361
362 if (cdev->bootmode[count - 1] == '\n')
363 cdev->bootmode[count - 1] = '\0';
364 else
365 cdev->bootmode[count] = '\0';
366unlock:
367 mutex_unlock(&cdev->cosm_mutex);
368 return count;
369}
370static DEVICE_ATTR_RW(bootmode);
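
cmdline_store(), firmware_store(), ramdisk_store() and bootmode_store() all
repeat the same allocate/copy/trim-newline sequence; a hypothetical helper
(not part of this patch) that would factor it out, to be called with
cosm_mutex held, could look like:

	/* Hypothetical helper: replace *field with a NUL-terminated copy of buf */
	static ssize_t cosm_store_string(char **field, const char *buf, size_t count)
	{
		char *str = kmalloc(count + 1, GFP_KERNEL);

		if (!str)
			return -ENOMEM;
		memcpy(str, buf, count);
		str[count] = '\0';
		/* trim a trailing newline, as the open-coded versions do */
		if (count && str[count - 1] == '\n')
			str[count - 1] = '\0';
		kfree(*field);
		*field = str;
		return count;
	}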
371
372static ssize_t
373log_buf_addr_show(struct device *dev, struct device_attribute *attr,
374 char *buf)
375{
376 struct cosm_device *cdev = dev_get_drvdata(dev);
377
378 if (!cdev)
379 return -EINVAL;
380
381 return scnprintf(buf, PAGE_SIZE, "%p\n", cdev->log_buf_addr);
382}
383
384static ssize_t
385log_buf_addr_store(struct device *dev, struct device_attribute *attr,
386 const char *buf, size_t count)
387{
388 struct cosm_device *cdev = dev_get_drvdata(dev);
389 int ret;
390 unsigned long addr;
391
392 if (!cdev)
393 return -EINVAL;
394
395 ret = kstrtoul(buf, 16, &addr);
396 if (ret)
397 goto exit;
398
399 cdev->log_buf_addr = (void *)addr;
400 ret = count;
401exit:
402 return ret;
403}
404static DEVICE_ATTR_RW(log_buf_addr);
405
406static ssize_t
407log_buf_len_show(struct device *dev, struct device_attribute *attr,
408 char *buf)
409{
410 struct cosm_device *cdev = dev_get_drvdata(dev);
411
412 if (!cdev)
413 return -EINVAL;
414
415 return scnprintf(buf, PAGE_SIZE, "%p\n", cdev->log_buf_len);
416}
417
418static ssize_t
419log_buf_len_store(struct device *dev, struct device_attribute *attr,
420 const char *buf, size_t count)
421{
422 struct cosm_device *cdev = dev_get_drvdata(dev);
423 int ret;
424 unsigned long addr;
425
426 if (!cdev)
427 return -EINVAL;
428
429 ret = kstrtoul(buf, 16, &addr);
430 if (ret)
431 goto exit;
432
433 cdev->log_buf_len = (int *)addr;
434 ret = count;
435exit:
436 return ret;
437}
438static DEVICE_ATTR_RW(log_buf_len);
439
440static struct attribute *cosm_default_attrs[] = {
441 &dev_attr_family.attr,
442 &dev_attr_stepping.attr,
443 &dev_attr_state.attr,
444 &dev_attr_shutdown_status.attr,
445 &dev_attr_heartbeat_enable.attr,
446 &dev_attr_cmdline.attr,
447 &dev_attr_firmware.attr,
448 &dev_attr_ramdisk.attr,
449 &dev_attr_bootmode.attr,
450 &dev_attr_log_buf_addr.attr,
451 &dev_attr_log_buf_len.attr,
452
453 NULL
454};
455
456ATTRIBUTE_GROUPS(cosm_default);
457
458void cosm_sysfs_init(struct cosm_device *cdev)
459{
460 cdev->attr_group = cosm_default_groups;
461}
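
For reference, ATTRIBUTE_GROUPS(cosm_default) expands to roughly the following
(per <linux/sysfs.h> of this era), which is what lets cosm_sysfs_init() hand
cosm_default_groups to the driver core:

	/* Approximate expansion of ATTRIBUTE_GROUPS(cosm_default) */
	static const struct attribute_group cosm_default_group = {
		.attrs = cosm_default_attrs,
	};
	static const struct attribute_group *cosm_default_groups[] = {
		&cosm_default_group,
		NULL,
	};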
diff --git a/drivers/misc/mic/cosm_client/Makefile b/drivers/misc/mic/cosm_client/Makefile
new file mode 100644
index 000000000000..6f751a519a09
--- /dev/null
+++ b/drivers/misc/mic/cosm_client/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile - Intel MIC COSM Client Driver
3# Copyright(c) 2015, Intel Corporation.
4#
5obj-$(CONFIG_MIC_COSM) += cosm_client.o
6
7cosm_client-objs += cosm_scif_client.o
diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c
new file mode 100644
index 000000000000..03e98bf1ac15
--- /dev/null
+++ b/drivers/misc/mic/cosm_client/cosm_scif_client.c
@@ -0,0 +1,275 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC COSM Client Driver
19 *
20 */
21#include <linux/module.h>
22#include <linux/delay.h>
23#include <linux/reboot.h>
24#include <linux/kthread.h>
25#include "../cosm/cosm_main.h"
26
27#define COSM_SCIF_MAX_RETRIES 10
28#define COSM_HEARTBEAT_SEND_MSEC (COSM_HEARTBEAT_SEND_SEC * MSEC_PER_SEC)
29
30static struct task_struct *client_thread;
31static scif_epd_t client_epd;
32static struct scif_peer_dev *client_spdev;
33
34/*
35 * Reboot notifier: receives shutdown status from the OS and communicates it
36 * back to the COSM process on the host
37 */
38static int cosm_reboot_event(struct notifier_block *this, unsigned long event,
39 void *ptr)
40{
41 struct cosm_msg msg = { .id = COSM_MSG_SHUTDOWN_STATUS };
42 int rc;
43
44 event = (event == SYS_RESTART) ? SYSTEM_RESTART : event;
45 dev_info(&client_spdev->dev, "%s %d received event %ld\n",
46 __func__, __LINE__, event);
47
48 msg.shutdown_status = event;
49 rc = scif_send(client_epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
50 if (rc < 0)
51 dev_err(&client_spdev->dev, "%s %d scif_send rc %d\n",
52 __func__, __LINE__, rc);
53
54 return NOTIFY_DONE;
55}
56
57static struct notifier_block cosm_reboot = {
58 .notifier_call = cosm_reboot_event,
59};
60
61/* Set system time from timespec value received from the host */
62static void cosm_set_time(struct cosm_msg *msg)
63{
64 int rc = do_settimeofday64(&msg->timespec);
65
66 if (rc)
67 dev_err(&client_spdev->dev, "%s: %d settimeofday rc %d\n",
68 __func__, __LINE__, rc);
69}
70
71/* COSM client receive message processing */
72static void cosm_client_recv(void)
73{
74 struct cosm_msg msg;
75 int rc;
76
77 while (1) {
78 rc = scif_recv(client_epd, &msg, sizeof(msg), 0);
79 if (!rc) {
80 return;
81 } else if (rc < 0) {
82 dev_err(&client_spdev->dev, "%s: %d rc %d\n",
83 __func__, __LINE__, rc);
84 return;
85 }
86
87 dev_dbg(&client_spdev->dev, "%s: %d rc %d id 0x%llx\n",
88 __func__, __LINE__, rc, msg.id);
89
90 switch (msg.id) {
91 case COSM_MSG_SYNC_TIME:
92 cosm_set_time(&msg);
93 break;
94 case COSM_MSG_SHUTDOWN:
95 orderly_poweroff(true);
96 break;
97 default:
98 dev_err(&client_spdev->dev, "%s: %d unknown id %lld\n",
99 __func__, __LINE__, msg.id);
100 break;
101 }
102 }
103}
104
105/* Initiate connection to the COSM server on the host */
106static int cosm_scif_connect(void)
107{
108 struct scif_port_id port_id;
109 int i, rc;
110
111 client_epd = scif_open();
112 if (!client_epd) {
113 dev_err(&client_spdev->dev, "%s %d scif_open failed\n",
114 __func__, __LINE__);
115 return -ENOMEM;
116 }
117
118 port_id.node = 0;
119 port_id.port = SCIF_COSM_LISTEN_PORT;
120
121 for (i = 0; i < COSM_SCIF_MAX_RETRIES; i++) {
122 rc = scif_connect(client_epd, &port_id);
123 if (rc < 0)
124 msleep(1000);
125 else
126 break;
127 }
128
129 if (rc < 0) {
130 dev_err(&client_spdev->dev, "%s %d scif_connect rc %d\n",
131 __func__, __LINE__, rc);
132 scif_close(client_epd);
133 client_epd = NULL;
134 }
135 return rc < 0 ? rc : 0;
136}
137
138/* Close host SCIF connection */
139static void cosm_scif_connect_exit(void)
140{
141 if (client_epd) {
142 scif_close(client_epd);
143 client_epd = NULL;
144 }
145}
146
147/*
148 * COSM SCIF client thread function: waits for messages from the host and
149 * sends a periodic heartbeat message to the host
150 */
151static int cosm_scif_client(void *unused)
152{
153 struct cosm_msg msg = { .id = COSM_MSG_HEARTBEAT };
154 struct scif_pollepd pollepd;
155 int rc;
156
157 allow_signal(SIGKILL);
158
159 while (!kthread_should_stop()) {
160 pollepd.epd = client_epd;
161 pollepd.events = POLLIN;
162
163 rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_SEND_MSEC);
164 if (rc < 0) {
165 if (-EINTR != rc)
166 dev_err(&client_spdev->dev,
167 "%s %d scif_poll rc %d\n",
168 __func__, __LINE__, rc);
169 continue;
170 }
171
172 if (pollepd.revents & POLLIN)
173 cosm_client_recv();
174
175 msg.id = COSM_MSG_HEARTBEAT;
176 rc = scif_send(client_epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
177 if (rc < 0)
178 dev_err(&client_spdev->dev, "%s %d scif_send rc %d\n",
179 __func__, __LINE__, rc);
180 }
181
182 dev_dbg(&client_spdev->dev, "%s %d Client thread stopped\n",
183 __func__, __LINE__);
184 return 0;
185}
186
187static void cosm_scif_probe(struct scif_peer_dev *spdev)
188{
189 int rc;
190
191 dev_dbg(&spdev->dev, "%s %d: dnode %d\n",
192 __func__, __LINE__, spdev->dnode);
193
194 /* We are only interested in the host with spdev->dnode == 0 */
195 if (spdev->dnode)
196 return;
197
198 client_spdev = spdev;
199 rc = cosm_scif_connect();
200 if (rc)
201 goto exit;
202
203 rc = register_reboot_notifier(&cosm_reboot);
204 if (rc) {
205 dev_err(&spdev->dev,
206 "reboot notifier registration failed rc %d\n", rc);
207 goto connect_exit;
208 }
209
210 client_thread = kthread_run(cosm_scif_client, NULL, "cosm_client");
211 if (IS_ERR(client_thread)) {
212 rc = PTR_ERR(client_thread);
213 dev_err(&spdev->dev, "%s %d kthread_run rc %d\n",
214 __func__, __LINE__, rc);
215 goto unreg_reboot;
216 }
217 return;
218unreg_reboot:
219 unregister_reboot_notifier(&cosm_reboot);
220connect_exit:
221 cosm_scif_connect_exit();
222exit:
223 client_spdev = NULL;
224}
225
226static void cosm_scif_remove(struct scif_peer_dev *spdev)
227{
228 int rc;
229
230 dev_dbg(&spdev->dev, "%s %d: dnode %d\n",
231 __func__, __LINE__, spdev->dnode);
232
233 if (spdev->dnode)
234 return;
235
236 if (!IS_ERR_OR_NULL(client_thread)) {
237 rc = send_sig(SIGKILL, client_thread, 0);
238 if (rc) {
239 pr_err("%s %d send_sig rc %d\n",
240 __func__, __LINE__, rc);
241 return;
242 }
243 kthread_stop(client_thread);
244 }
245 unregister_reboot_notifier(&cosm_reboot);
246 cosm_scif_connect_exit();
247 client_spdev = NULL;
248}
249
250static struct scif_client scif_client_cosm = {
251 .name = KBUILD_MODNAME,
252 .probe = cosm_scif_probe,
253 .remove = cosm_scif_remove,
254};
255
256static int __init cosm_client_init(void)
257{
258 int rc = scif_client_register(&scif_client_cosm);
259
260 if (rc)
261 pr_err("scif_client_register failed rc %d\n", rc);
262 return rc;
263}
264
265static void __exit cosm_client_exit(void)
266{
267 scif_client_unregister(&scif_client_cosm);
268}
269
270module_init(cosm_client_init);
271module_exit(cosm_client_exit);
272
273MODULE_AUTHOR("Intel Corporation");
274MODULE_DESCRIPTION("Intel(R) MIC card OS state management client driver");
275MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile
index c2197f999394..004d3db0f990 100644
--- a/drivers/misc/mic/host/Makefile
+++ b/drivers/misc/mic/host/Makefile
@@ -5,7 +5,6 @@
5obj-$(CONFIG_INTEL_MIC_HOST) += mic_host.o 5obj-$(CONFIG_INTEL_MIC_HOST) += mic_host.o
6mic_host-objs := mic_main.o 6mic_host-objs := mic_main.o
7mic_host-objs += mic_x100.o 7mic_host-objs += mic_x100.o
8mic_host-objs += mic_sysfs.o
9mic_host-objs += mic_smpt.o 8mic_host-objs += mic_smpt.o
10mic_host-objs += mic_intr.o 9mic_host-objs += mic_intr.o
11mic_host-objs += mic_boot.o 10mic_host-objs += mic_boot.o
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index e5f6a5e7bca1..7845564dff64 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -22,9 +22,9 @@
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/kmod.h> 24#include <linux/kmod.h>
25
26#include <linux/mic_common.h> 25#include <linux/mic_common.h>
27#include <linux/mic_bus.h> 26#include <linux/mic_bus.h>
27#include "../bus/scif_bus.h"
28#include "../common/mic_dev.h" 28#include "../common/mic_dev.h"
29#include "mic_device.h" 29#include "mic_device.h"
30#include "mic_smpt.h" 30#include "mic_smpt.h"
@@ -99,7 +99,7 @@ static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
99 int i, j, ret; 99 int i, j, ret;
100 dma_addr_t da; 100 dma_addr_t da;
101 101
102 ret = dma_map_sg(mdev->sdev->parent, sg, nents, dir); 102 ret = dma_map_sg(&mdev->pdev->dev, sg, nents, dir);
103 if (ret <= 0) 103 if (ret <= 0)
104 return 0; 104 return 0;
105 105
@@ -115,7 +115,7 @@ err:
115 mic_unmap(mdev, sg_dma_address(s), s->length); 115 mic_unmap(mdev, sg_dma_address(s), s->length);
116 sg_dma_address(s) = mic_to_dma_addr(mdev, sg_dma_address(s)); 116 sg_dma_address(s) = mic_to_dma_addr(mdev, sg_dma_address(s));
117 } 117 }
118 dma_unmap_sg(mdev->sdev->parent, sg, nents, dir); 118 dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
119 return 0; 119 return 0;
120} 120}
121 121
@@ -135,7 +135,7 @@ static void __mic_dma_unmap_sg(struct device *dev,
135 mic_unmap(mdev, sg_dma_address(s), s->length); 135 mic_unmap(mdev, sg_dma_address(s), s->length);
136 sg_dma_address(s) = da; 136 sg_dma_address(s) = da;
137 } 137 }
138 dma_unmap_sg(mdev->sdev->parent, sg, nents, dir); 138 dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
139} 139}
140 140
141static struct dma_map_ops __mic_dma_ops = { 141static struct dma_map_ops __mic_dma_ops = {
@@ -270,48 +270,13 @@ static struct mbus_hw_ops mbus_hw_ops = {
270 .ack_interrupt = _mic_ack_interrupt, 270 .ack_interrupt = _mic_ack_interrupt,
271}; 271};
272 272
273/**
274 * mic_reset - Reset the MIC device.
275 * @mdev: pointer to mic_device instance
276 */
277static void mic_reset(struct mic_device *mdev)
278{
279 int i;
280
281#define MIC_RESET_TO (45)
282
283 reinit_completion(&mdev->reset_wait);
284 mdev->ops->reset_fw_ready(mdev);
285 mdev->ops->reset(mdev);
286
287 for (i = 0; i < MIC_RESET_TO; i++) {
288 if (mdev->ops->is_fw_ready(mdev))
289 goto done;
290 /*
291 * Resets typically take 10s of seconds to complete.
292 * Since an MMIO read is required to check if the
293 * firmware is ready or not, a 1 second delay works nicely.
294 */
295 msleep(1000);
296 }
297 mic_set_state(mdev, MIC_RESET_FAILED);
298done:
299 complete_all(&mdev->reset_wait);
300}
301
302/* Initialize the MIC bootparams */ 273/* Initialize the MIC bootparams */
303void mic_bootparam_init(struct mic_device *mdev) 274void mic_bootparam_init(struct mic_device *mdev)
304{ 275{
305 struct mic_bootparam *bootparam = mdev->dp; 276 struct mic_bootparam *bootparam = mdev->dp;
306 277
307 bootparam->magic = cpu_to_le32(MIC_MAGIC); 278 bootparam->magic = cpu_to_le32(MIC_MAGIC);
308 bootparam->c2h_shutdown_db = mdev->shutdown_db;
309 bootparam->h2c_shutdown_db = -1;
310 bootparam->h2c_config_db = -1; 279 bootparam->h2c_config_db = -1;
311 bootparam->shutdown_status = 0;
312 bootparam->shutdown_card = 0;
313 /* Total nodes = number of MICs + 1 for self node */
314 bootparam->tot_nodes = atomic_read(&g_num_mics) + 1;
315 bootparam->node_id = mdev->id + 1; 280 bootparam->node_id = mdev->id + 1;
316 bootparam->scif_host_dma_addr = 0x0; 281 bootparam->scif_host_dma_addr = 0x0;
317 bootparam->scif_card_dma_addr = 0x0; 282 bootparam->scif_card_dma_addr = 0x0;
@@ -319,6 +284,26 @@ void mic_bootparam_init(struct mic_device *mdev)
319 bootparam->h2c_scif_db = -1; 284 bootparam->h2c_scif_db = -1;
320} 285}
321 286
287static inline struct mic_device *cosmdev_to_mdev(struct cosm_device *cdev)
288{
289 return dev_get_drvdata(cdev->dev.parent);
290}
291
292static void _mic_reset(struct cosm_device *cdev)
293{
294 struct mic_device *mdev = cosmdev_to_mdev(cdev);
295
296 mdev->ops->reset_fw_ready(mdev);
297 mdev->ops->reset(mdev);
298}
299
300static bool _mic_ready(struct cosm_device *cdev)
301{
302 struct mic_device *mdev = cosmdev_to_mdev(cdev);
303
304 return mdev->ops->is_fw_ready(mdev);
305}
306
322/** 307/**
323 * mic_request_dma_chans - Request DMA channels 308 * mic_request_dma_chans - Request DMA channels
324 * @mdev: pointer to mic_device instance 309 * @mdev: pointer to mic_device instance
@@ -336,14 +321,14 @@ static int mic_request_dma_chans(struct mic_device *mdev)
336 321
337 do { 322 do {
338 chan = dma_request_channel(mask, mdev->ops->dma_filter, 323 chan = dma_request_channel(mask, mdev->ops->dma_filter,
339 mdev->sdev->parent); 324 &mdev->pdev->dev);
340 if (chan) { 325 if (chan) {
341 mdev->dma_ch[mdev->num_dma_ch++] = chan; 326 mdev->dma_ch[mdev->num_dma_ch++] = chan;
342 if (mdev->num_dma_ch >= MIC_MAX_DMA_CHAN) 327 if (mdev->num_dma_ch >= MIC_MAX_DMA_CHAN)
343 break; 328 break;
344 } 329 }
345 } while (chan); 330 } while (chan);
346 dev_info(mdev->sdev->parent, "DMA channels # %d\n", mdev->num_dma_ch); 331 dev_info(&mdev->pdev->dev, "DMA channels # %d\n", mdev->num_dma_ch);
347 return mdev->num_dma_ch; 332 return mdev->num_dma_ch;
348} 333}
349 334
@@ -365,34 +350,24 @@ static void mic_free_dma_chans(struct mic_device *mdev)
365} 350}
366 351
367/** 352/**
368 * mic_start - Start the MIC. 353 * _mic_start - Start the MIC.
369 * @mdev: pointer to mic_device instance 354 * @cdev: pointer to cosm_device instance
370 * @buf: buffer containing boot string including firmware/ramdisk path. 355 * @id: MIC device id/index provided by COSM used in other drivers like SCIF
371 * 356 *
372 * This function prepares an MIC for boot and initiates boot. 357 * This function prepares an MIC for boot and initiates boot.
373 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 358 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
359 *
360 * For all cosm_hw_ops the caller holds a mutex to ensure serialization.
374 */ 361 */
375int mic_start(struct mic_device *mdev, const char *buf) 362static int _mic_start(struct cosm_device *cdev, int id)
376{ 363{
364 struct mic_device *mdev = cosmdev_to_mdev(cdev);
377 int rc; 365 int rc;
378 mutex_lock(&mdev->mic_mutex); 366
379 mic_bootparam_init(mdev); 367 mic_bootparam_init(mdev);
380retry: 368 mdev->dma_mbdev = mbus_register_device(&mdev->pdev->dev,
381 if (MIC_OFFLINE != mdev->state) {
382 rc = -EINVAL;
383 goto unlock_ret;
384 }
385 if (!mdev->ops->is_fw_ready(mdev)) {
386 mic_reset(mdev);
387 /*
388 * The state will either be MIC_OFFLINE if the reset succeeded
389 * or MIC_RESET_FAILED if the firmware reset failed.
390 */
391 goto retry;
392 }
393 mdev->dma_mbdev = mbus_register_device(mdev->sdev->parent,
394 MBUS_DEV_DMA_HOST, &mic_dma_ops, 369 MBUS_DEV_DMA_HOST, &mic_dma_ops,
395 &mbus_hw_ops, mdev->mmio.va); 370 &mbus_hw_ops, id, mdev->mmio.va);
396 if (IS_ERR(mdev->dma_mbdev)) { 371 if (IS_ERR(mdev->dma_mbdev)) {
397 rc = PTR_ERR(mdev->dma_mbdev); 372 rc = PTR_ERR(mdev->dma_mbdev);
398 goto unlock_ret; 373 goto unlock_ret;
@@ -401,16 +376,18 @@ retry:
401 rc = -ENODEV; 376 rc = -ENODEV;
402 goto dma_remove; 377 goto dma_remove;
403 } 378 }
404 mdev->scdev = scif_register_device(mdev->sdev->parent, MIC_SCIF_DEV, 379 mdev->scdev = scif_register_device(&mdev->pdev->dev, MIC_SCIF_DEV,
405 &__mic_dma_ops, &scif_hw_ops, 380 &__mic_dma_ops, &scif_hw_ops,
406 mdev->id + 1, 0, &mdev->mmio, 381 id + 1, 0, &mdev->mmio,
407 &mdev->aper, mdev->dp, NULL, 382 &mdev->aper, mdev->dp, NULL,
408 mdev->dma_ch, mdev->num_dma_ch); 383 mdev->dma_ch, mdev->num_dma_ch,
384 true);
409 if (IS_ERR(mdev->scdev)) { 385 if (IS_ERR(mdev->scdev)) {
410 rc = PTR_ERR(mdev->scdev); 386 rc = PTR_ERR(mdev->scdev);
411 goto dma_free; 387 goto dma_free;
412 } 388 }
413 rc = mdev->ops->load_mic_fw(mdev, buf); 389
390 rc = mdev->ops->load_mic_fw(mdev, NULL);
414 if (rc) 391 if (rc)
415 goto scif_remove; 392 goto scif_remove;
416 mic_smpt_restore(mdev); 393 mic_smpt_restore(mdev);
@@ -419,7 +396,6 @@ retry:
419 mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr); 396 mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
420 mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32); 397 mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
421 mdev->ops->send_firmware_intr(mdev); 398 mdev->ops->send_firmware_intr(mdev);
422 mic_set_state(mdev, MIC_ONLINE);
423 goto unlock_ret; 399 goto unlock_ret;
424scif_remove: 400scif_remove:
425 scif_unregister_device(mdev->scdev); 401 scif_unregister_device(mdev->scdev);
@@ -428,198 +404,79 @@ dma_free:
428dma_remove: 404dma_remove:
429 mbus_unregister_device(mdev->dma_mbdev); 405 mbus_unregister_device(mdev->dma_mbdev);
430unlock_ret: 406unlock_ret:
431 mutex_unlock(&mdev->mic_mutex);
432 return rc; 407 return rc;
433} 408}
434 409
435/** 410/**
436 * mic_stop - Prepare the MIC for reset and trigger reset. 411 * _mic_stop - Prepare the MIC for reset and trigger reset.
437 * @mdev: pointer to mic_device instance 412 * @cdev: pointer to cosm_device instance
438 * @force: force a MIC to reset even if it is already offline. 413 * @force: force a MIC to reset even if it is already offline.
439 * 414 *
440 * RETURNS: None. 415 * RETURNS: None.
441 */ 416 */
442void mic_stop(struct mic_device *mdev, bool force) 417static void _mic_stop(struct cosm_device *cdev, bool force)
443{
444 mutex_lock(&mdev->mic_mutex);
445 if (MIC_OFFLINE != mdev->state || force) {
446 scif_unregister_device(mdev->scdev);
447 mic_virtio_reset_devices(mdev);
448 mic_free_dma_chans(mdev);
449 mbus_unregister_device(mdev->dma_mbdev);
450 mic_bootparam_init(mdev);
451 mic_reset(mdev);
452 if (MIC_RESET_FAILED == mdev->state)
453 goto unlock;
454 mic_set_shutdown_status(mdev, MIC_NOP);
455 if (MIC_SUSPENDED != mdev->state)
456 mic_set_state(mdev, MIC_OFFLINE);
457 }
458unlock:
459 mutex_unlock(&mdev->mic_mutex);
460}
461
462/**
463 * mic_shutdown - Initiate MIC shutdown.
464 * @mdev: pointer to mic_device instance
465 *
466 * RETURNS: None.
467 */
468void mic_shutdown(struct mic_device *mdev)
469{ 418{
470 struct mic_bootparam *bootparam = mdev->dp; 419 struct mic_device *mdev = cosmdev_to_mdev(cdev);
471 s8 db = bootparam->h2c_shutdown_db;
472
473 mutex_lock(&mdev->mic_mutex);
474 if (MIC_ONLINE == mdev->state && db != -1) {
475 bootparam->shutdown_card = 1;
476 mdev->ops->send_intr(mdev, db);
477 mic_set_state(mdev, MIC_SHUTTING_DOWN);
478 }
479 mutex_unlock(&mdev->mic_mutex);
480}
481
482/**
483 * mic_shutdown_work - Handle shutdown interrupt from MIC.
484 * @work: The work structure.
485 *
486 * This work is scheduled whenever the host has received a shutdown
487 * interrupt from the MIC.
488 */
489void mic_shutdown_work(struct work_struct *work)
490{
491 struct mic_device *mdev = container_of(work, struct mic_device,
492 shutdown_work);
493 struct mic_bootparam *bootparam = mdev->dp;
494
495 mutex_lock(&mdev->mic_mutex);
496 mic_set_shutdown_status(mdev, bootparam->shutdown_status);
497 bootparam->shutdown_status = 0;
498 420
499 /* 421 /*
500 * if state is MIC_SUSPENDED, OSPM suspend is in progress. We do not 422 * Since SCIF handles card shutdown and reset (using COSM), it will
501 * change the state here so as to prevent users from booting the card 423 * will be the first to be registered and the last to be
502 * during and after the suspend operation. 424 * unregistered.
503 */ 425 */
504 if (MIC_SHUTTING_DOWN != mdev->state && 426 mic_virtio_reset_devices(mdev);
505 MIC_SUSPENDED != mdev->state) 427 scif_unregister_device(mdev->scdev);
506 mic_set_state(mdev, MIC_SHUTTING_DOWN); 428 mic_free_dma_chans(mdev);
507 mutex_unlock(&mdev->mic_mutex); 429 mbus_unregister_device(mdev->dma_mbdev);
430 mic_bootparam_init(mdev);
508} 431}
509 432
510/** 433static ssize_t _mic_family(struct cosm_device *cdev, char *buf)
511 * mic_reset_trigger_work - Trigger MIC reset.
512 * @work: The work structure.
513 *
514 * This work is scheduled whenever the host wants to reset the MIC.
515 */
516void mic_reset_trigger_work(struct work_struct *work)
517{ 434{
518 struct mic_device *mdev = container_of(work, struct mic_device, 435 struct mic_device *mdev = cosmdev_to_mdev(cdev);
519 reset_trigger_work); 436 static const char *family[MIC_FAMILY_LAST] = { "x100", "Unknown" };
520 437
521 mic_stop(mdev, false); 438 return scnprintf(buf, PAGE_SIZE, "%s\n", family[mdev->family]);
522} 439}
523 440
524/** 441static ssize_t _mic_stepping(struct cosm_device *cdev, char *buf)
525 * mic_complete_resume - Complete MIC Resume after an OSPM suspend/hibernate
526 * event.
527 * @mdev: pointer to mic_device instance
528 *
529 * RETURNS: None.
530 */
531void mic_complete_resume(struct mic_device *mdev)
532{ 442{
533 if (mdev->state != MIC_SUSPENDED) { 443 struct mic_device *mdev = cosmdev_to_mdev(cdev);
534 dev_warn(mdev->sdev->parent, "state %d should be %d\n", 444 const char *string = "??";
535 mdev->state, MIC_SUSPENDED);
536 return;
537 }
538
539 /* Make sure firmware is ready */
540 if (!mdev->ops->is_fw_ready(mdev))
541 mic_stop(mdev, true);
542 445
543 mutex_lock(&mdev->mic_mutex); 446 switch (mdev->stepping) {
544 mic_set_state(mdev, MIC_OFFLINE); 447 case MIC_A0_STEP:
545 mutex_unlock(&mdev->mic_mutex); 448 string = "A0";
546}
547
548/**
549 * mic_prepare_suspend - Handle suspend notification for the MIC device.
550 * @mdev: pointer to mic_device instance
551 *
552 * RETURNS: None.
553 */
554void mic_prepare_suspend(struct mic_device *mdev)
555{
556 unsigned long timeout;
557
558#define MIC_SUSPEND_TIMEOUT (60 * HZ)
559
560 mutex_lock(&mdev->mic_mutex);
561 switch (mdev->state) {
562 case MIC_OFFLINE:
563 /*
564 * Card is already offline. Set state to MIC_SUSPENDED
565 * to prevent users from booting the card.
566 */
567 mic_set_state(mdev, MIC_SUSPENDED);
568 mutex_unlock(&mdev->mic_mutex);
569 break; 449 break;
570 case MIC_ONLINE: 450 case MIC_B0_STEP:
571 /* 451 string = "B0";
572 * Card is online. Set state to MIC_SUSPENDING and notify 452 break;
573 * MIC user space daemon which will issue card 453 case MIC_B1_STEP:
574 * shutdown and reset. 454 string = "B1";
575 */
576 mic_set_state(mdev, MIC_SUSPENDING);
577 mutex_unlock(&mdev->mic_mutex);
578 timeout = wait_for_completion_timeout(&mdev->reset_wait,
579 MIC_SUSPEND_TIMEOUT);
580 /* Force reset the card if the shutdown completion timed out */
581 if (!timeout) {
582 mutex_lock(&mdev->mic_mutex);
583 mic_set_state(mdev, MIC_SUSPENDED);
584 mutex_unlock(&mdev->mic_mutex);
585 mic_stop(mdev, true);
586 }
587 break; 455 break;
588 case MIC_SHUTTING_DOWN: 456 case MIC_C0_STEP:
589 /* 457 string = "C0";
590 * Card is shutting down. Set state to MIC_SUSPENDED
591 * to prevent further boot of the card.
592 */
593 mic_set_state(mdev, MIC_SUSPENDED);
594 mutex_unlock(&mdev->mic_mutex);
595 timeout = wait_for_completion_timeout(&mdev->reset_wait,
596 MIC_SUSPEND_TIMEOUT);
597 /* Force reset the card if the shutdown completion timed out */
598 if (!timeout)
599 mic_stop(mdev, true);
600 break; 458 break;
601 default: 459 default:
602 mutex_unlock(&mdev->mic_mutex);
603 break; 460 break;
604 } 461 }
462 return scnprintf(buf, PAGE_SIZE, "%s\n", string);
605} 463}
606 464
607/** 465static struct mic_mw *_mic_aper(struct cosm_device *cdev)
608 * mic_suspend - Initiate MIC suspend. Suspend merely issues card shutdown.
609 * @mdev: pointer to mic_device instance
610 *
611 * RETURNS: None.
612 */
613void mic_suspend(struct mic_device *mdev)
614{ 466{
615 struct mic_bootparam *bootparam = mdev->dp; 467 struct mic_device *mdev = cosmdev_to_mdev(cdev);
616 s8 db = bootparam->h2c_shutdown_db;
617 468
618 mutex_lock(&mdev->mic_mutex); 469 return &mdev->aper;
619 if (MIC_SUSPENDING == mdev->state && db != -1) {
620 bootparam->shutdown_card = 1;
621 mdev->ops->send_intr(mdev, db);
622 mic_set_state(mdev, MIC_SUSPENDED);
623 }
624 mutex_unlock(&mdev->mic_mutex);
625} 470}
471
472struct cosm_hw_ops cosm_hw_ops = {
473 .reset = _mic_reset,
474 .force_reset = _mic_reset,
475 .post_reset = NULL,
476 .ready = _mic_ready,
477 .start = _mic_start,
478 .stop = _mic_stop,
479 .family = _mic_family,
480 .stepping = _mic_stepping,
481 .aper = _mic_aper,
482};
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 3c9ea4896f3c..10581600777a 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -31,71 +31,6 @@
31/* Debugfs parent dir */ 31/* Debugfs parent dir */
32static struct dentry *mic_dbg; 32static struct dentry *mic_dbg;
33 33
34/**
35 * mic_log_buf_show - Display MIC kernel log buffer.
36 *
37 * log_buf addr/len is read from System.map by user space
38 * and populated in sysfs entries.
39 */
40static int mic_log_buf_show(struct seq_file *s, void *unused)
41{
42 void __iomem *log_buf_va;
43 int __iomem *log_buf_len_va;
44 struct mic_device *mdev = s->private;
45 void *kva;
46 int size;
47 unsigned long aper_offset;
48
49 if (!mdev || !mdev->log_buf_addr || !mdev->log_buf_len)
50 goto done;
51 /*
52 * Card kernel will never be relocated and any kernel text/data mapping
53 * can be translated to phys address by subtracting __START_KERNEL_map.
54 */
55 aper_offset = (unsigned long)mdev->log_buf_len - __START_KERNEL_map;
56 log_buf_len_va = mdev->aper.va + aper_offset;
57 aper_offset = (unsigned long)mdev->log_buf_addr - __START_KERNEL_map;
58 log_buf_va = mdev->aper.va + aper_offset;
59 size = ioread32(log_buf_len_va);
60
61 kva = kmalloc(size, GFP_KERNEL);
62 if (!kva)
63 goto done;
64 mutex_lock(&mdev->mic_mutex);
65 memcpy_fromio(kva, log_buf_va, size);
66 switch (mdev->state) {
67 case MIC_ONLINE:
68 /* Fall through */
69 case MIC_SHUTTING_DOWN:
70 seq_write(s, kva, size);
71 break;
72 default:
73 break;
74 }
75 mutex_unlock(&mdev->mic_mutex);
76 kfree(kva);
77done:
78 return 0;
79}
80
81static int mic_log_buf_open(struct inode *inode, struct file *file)
82{
83 return single_open(file, mic_log_buf_show, inode->i_private);
84}
85
86static int mic_log_buf_release(struct inode *inode, struct file *file)
87{
88 return single_release(inode, file);
89}
90
91static const struct file_operations log_buf_ops = {
92 .owner = THIS_MODULE,
93 .open = mic_log_buf_open,
94 .read = seq_read,
95 .llseek = seq_lseek,
96 .release = mic_log_buf_release
97};
98
99static int mic_smpt_show(struct seq_file *s, void *pos) 34static int mic_smpt_show(struct seq_file *s, void *pos)
100{ 35{
101 int i; 36 int i;
@@ -138,32 +73,6 @@ static const struct file_operations smpt_file_ops = {
138 .release = mic_smpt_debug_release 73 .release = mic_smpt_debug_release
139}; 74};
140 75
141static int mic_soft_reset_show(struct seq_file *s, void *pos)
142{
143 struct mic_device *mdev = s->private;
144
145 mic_stop(mdev, true);
146 return 0;
147}
148
149static int mic_soft_reset_debug_open(struct inode *inode, struct file *file)
150{
151 return single_open(file, mic_soft_reset_show, inode->i_private);
152}
153
154static int mic_soft_reset_debug_release(struct inode *inode, struct file *file)
155{
156 return single_release(inode, file);
157}
158
159static const struct file_operations soft_reset_ops = {
160 .owner = THIS_MODULE,
161 .open = mic_soft_reset_debug_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = mic_soft_reset_debug_release
165};
166
167static int mic_post_code_show(struct seq_file *s, void *pos) 76static int mic_post_code_show(struct seq_file *s, void *pos)
168{ 77{
169 struct mic_device *mdev = s->private; 78 struct mic_device *mdev = s->private;
@@ -204,18 +113,8 @@ static int mic_dp_show(struct seq_file *s, void *pos)
204 113
205 seq_printf(s, "Bootparam: magic 0x%x\n", 114 seq_printf(s, "Bootparam: magic 0x%x\n",
206 bootparam->magic); 115 bootparam->magic);
207 seq_printf(s, "Bootparam: h2c_shutdown_db %d\n",
208 bootparam->h2c_shutdown_db);
209 seq_printf(s, "Bootparam: h2c_config_db %d\n", 116 seq_printf(s, "Bootparam: h2c_config_db %d\n",
210 bootparam->h2c_config_db); 117 bootparam->h2c_config_db);
211 seq_printf(s, "Bootparam: c2h_shutdown_db %d\n",
212 bootparam->c2h_shutdown_db);
213 seq_printf(s, "Bootparam: shutdown_status %d\n",
214 bootparam->shutdown_status);
215 seq_printf(s, "Bootparam: shutdown_card %d\n",
216 bootparam->shutdown_card);
217 seq_printf(s, "Bootparam: tot_nodes %d\n",
218 bootparam->tot_nodes);
219 seq_printf(s, "Bootparam: node_id %d\n", 118 seq_printf(s, "Bootparam: node_id %d\n",
220 bootparam->node_id); 119 bootparam->node_id);
221 seq_printf(s, "Bootparam: c2h_scif_db %d\n", 120 seq_printf(s, "Bootparam: c2h_scif_db %d\n",
@@ -392,8 +291,7 @@ static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
392 int i, j; 291 int i, j;
393 u16 entry; 292 u16 entry;
394 u16 vector; 293 u16 vector;
395 struct pci_dev *pdev = container_of(mdev->sdev->parent, 294 struct pci_dev *pdev = mdev->pdev;
396 struct pci_dev, dev);
397 295
398 if (pci_dev_msi_enabled(pdev)) { 296 if (pci_dev_msi_enabled(pdev)) {
399 for (i = 0; i < mdev->irq_info.num_vectors; i++) { 297 for (i = 0; i < mdev->irq_info.num_vectors; i++) {
@@ -454,20 +352,18 @@ static const struct file_operations msi_irq_info_ops = {
454 */ 352 */
455void mic_create_debug_dir(struct mic_device *mdev) 353void mic_create_debug_dir(struct mic_device *mdev)
456{ 354{
355 char name[16];
356
457 if (!mic_dbg) 357 if (!mic_dbg)
458 return; 358 return;
459 359
460 mdev->dbg_dir = debugfs_create_dir(dev_name(mdev->sdev), mic_dbg); 360 scnprintf(name, sizeof(name), "mic%d", mdev->id);
361 mdev->dbg_dir = debugfs_create_dir(name, mic_dbg);
461 if (!mdev->dbg_dir) 362 if (!mdev->dbg_dir)
462 return; 363 return;
463 364
464 debugfs_create_file("log_buf", 0444, mdev->dbg_dir, mdev, &log_buf_ops);
465
466 debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev, &smpt_file_ops); 365 debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev, &smpt_file_ops);
467 366
468 debugfs_create_file("soft_reset", 0444, mdev->dbg_dir, mdev,
469 &soft_reset_ops);
470
471 debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev, 367 debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
472 &post_code_ops); 368 &post_code_ops);
473 369
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 01a7555aa648..461184a12fbb 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -26,21 +26,12 @@
26#include <linux/notifier.h> 26#include <linux/notifier.h>
27#include <linux/irqreturn.h> 27#include <linux/irqreturn.h>
28#include <linux/dmaengine.h> 28#include <linux/dmaengine.h>
29#include <linux/miscdevice.h>
 #include <linux/mic_bus.h>
 #include "../bus/scif_bus.h"
+#include "../bus/cosm_bus.h"
 #include "mic_intr.h"
 
-/* The maximum number of MIC devices supported in a single host system. */
-#define MIC_MAX_NUM_DEVS 256
-
-/**
- * enum mic_hw_family - The hardware family to which a device belongs.
- */
-enum mic_hw_family {
-	MIC_FAMILY_X100 = 0,
-	MIC_FAMILY_UNKNOWN
-};
-
 /**
  * enum mic_stepping - MIC stepping ids.
  */
@@ -51,6 +42,8 @@ enum mic_stepping {
 	MIC_C0_STEP = 0x20,
 };
 
+extern struct cosm_hw_ops cosm_hw_ops;
+
 /**
  * struct mic_device - MIC device information for each card.
  *
@@ -60,8 +53,7 @@ enum mic_stepping {
  * @ops: MIC HW specific operations.
  * @id: The unique device id for this MIC device.
  * @stepping: Stepping ID.
- * @attr_group: Pointer to list of sysfs attribute groups.
- * @sdev: Device for sysfs entries.
+ * @pdev: Underlying PCI device.
  * @mic_mutex: Mutex for synchronizing access to mic_device.
  * @intr_ops: HW specific interrupt operations.
  * @smpt_ops: Hardware specific SMPT operations.
@@ -69,30 +61,17 @@ enum mic_stepping {
  * @intr_info: H/W specific interrupt information.
  * @irq_info: The OS specific irq information
  * @dbg_dir: debugfs directory of this MIC device.
- * @cmdline: Kernel command line.
- * @firmware: Firmware file name.
- * @ramdisk: Ramdisk file name.
- * @bootmode: Boot mode i.e. "linux" or "elf" for flash updates.
  * @bootaddr: MIC boot address.
- * @reset_trigger_work: Work for triggering reset requests.
- * @shutdown_work: Work for handling shutdown interrupts.
- * @state: MIC state.
- * @shutdown_status: MIC status reported by card for shutdown/crashes.
- * @state_sysfs: Sysfs dirent for notifying ring 3 about MIC state changes.
- * @reset_wait: Waitqueue for sleeping while reset completes.
- * @log_buf_addr: Log buffer address for MIC.
- * @log_buf_len: Log buffer length address for MIC.
  * @dp: virtio device page
  * @dp_dma_addr: virtio device page DMA address.
- * @shutdown_db: shutdown doorbell.
- * @shutdown_cookie: shutdown cookie.
- * @cdev: Character device for MIC.
+ * @name: name for the misc char device
+ * @miscdev: registered misc char device
  * @vdev_list: list of virtio devices.
- * @pm_notifier: Handles PM notifications from the OS.
  * @dma_mbdev: MIC BUS DMA device.
  * @dma_ch - Array of DMA channels
  * @num_dma_ch - Number of DMA channels available
  * @scdev: SCIF device on the SCIF virtual bus.
+ * @cosm_dev: COSM device
  */
 struct mic_device {
 	struct mic_mw mmio;
@@ -101,8 +80,7 @@ struct mic_device {
 	struct mic_hw_ops *ops;
 	int id;
 	enum mic_stepping stepping;
-	const struct attribute_group **attr_group;
-	struct device *sdev;
+	struct pci_dev *pdev;
 	struct mutex mic_mutex;
 	struct mic_hw_intr_ops *intr_ops;
 	struct mic_smpt_ops *smpt_ops;
@@ -110,30 +88,17 @@ struct mic_device {
 	struct mic_intr_info *intr_info;
 	struct mic_irq_info irq_info;
 	struct dentry *dbg_dir;
-	char *cmdline;
-	char *firmware;
-	char *ramdisk;
-	char *bootmode;
 	u32 bootaddr;
-	struct work_struct reset_trigger_work;
-	struct work_struct shutdown_work;
-	u8 state;
-	u8 shutdown_status;
-	struct kernfs_node *state_sysfs;
-	struct completion reset_wait;
-	void *log_buf_addr;
-	int *log_buf_len;
 	void *dp;
 	dma_addr_t dp_dma_addr;
-	int shutdown_db;
-	struct mic_irq *shutdown_cookie;
-	struct cdev cdev;
+	char name[16];
+	struct miscdevice miscdev;
 	struct list_head vdev_list;
-	struct notifier_block pm_notifier;
 	struct mbus_device *dma_mbdev;
 	struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
 	int num_dma_ch;
 	struct scif_hw_dev *scdev;
+	struct cosm_device *cosm_dev;
 };
 
 /**
@@ -199,38 +164,9 @@ mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
 	iowrite32(val, mw->va + offset);
 }
 
-static inline struct dma_chan *mic_request_dma_chan(struct mic_device *mdev)
-{
-	dma_cap_mask_t mask;
-	struct dma_chan *chan;
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_MEMCPY, mask);
-	chan = dma_request_channel(mask, mdev->ops->dma_filter,
-				   mdev->sdev->parent);
-	if (chan)
-		return chan;
-	dev_err(mdev->sdev->parent, "%s %d unable to acquire channel\n",
-		__func__, __LINE__);
-	return NULL;
-}
-
-void mic_sysfs_init(struct mic_device *mdev);
-int mic_start(struct mic_device *mdev, const char *buf);
-void mic_stop(struct mic_device *mdev, bool force);
-void mic_shutdown(struct mic_device *mdev);
-void mic_reset_delayed_work(struct work_struct *work);
-void mic_reset_trigger_work(struct work_struct *work);
-void mic_shutdown_work(struct work_struct *work);
 void mic_bootparam_init(struct mic_device *mdev);
-void mic_set_state(struct mic_device *mdev, u8 state);
-void mic_set_shutdown_status(struct mic_device *mdev, u8 status);
 void mic_create_debug_dir(struct mic_device *dev);
 void mic_delete_debug_dir(struct mic_device *dev);
 void __init mic_init_debugfs(void);
 void mic_exit_debugfs(void);
-void mic_prepare_suspend(struct mic_device *mdev);
-void mic_complete_resume(struct mic_device *mdev);
-void mic_suspend(struct mic_device *mdev);
-extern atomic_t g_num_mics;
 #endif
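The header change above moves boot parameters and the card state machine behind the COSM device; struct mic_device only keeps a pointer to it. A minimal sketch of the resulting access pattern — the helper is hypothetical, and cmdline/firmware/ramdisk/bootmode are the only cosm_dev fields this series actually dereferences:

/* Hypothetical accessor: mic_device no longer owns the firmware name;
 * it reads it through the cosm_device registered in mic_probe(). */
static const char *mic_fw_name(struct mic_device *mdev)
{
	return mdev->cosm_dev->firmware;
}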
diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c
index 85776d7327f3..8cc1d90cd949 100644
--- a/drivers/misc/mic/host/mic_fops.c
+++ b/drivers/misc/mic/host/mic_fops.c
@@ -30,8 +30,8 @@
 int mic_open(struct inode *inode, struct file *f)
 {
 	struct mic_vdev *mvdev;
-	struct mic_device *mdev = container_of(inode->i_cdev,
-		struct mic_device, cdev);
+	struct mic_device *mdev = container_of(f->private_data,
+		struct mic_device, miscdev);
 
 	mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
 	if (!mvdev)
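The new container_of() works because the misc core points file->private_data at the registered struct miscdevice before invoking the driver's open(). A self-contained sketch of the pattern, with hypothetical names:

#include <linux/fs.h>
#include <linux/miscdevice.h>

/* Hypothetical driver state with an embedded (not pointed-to) miscdevice. */
struct foo_device {
	struct miscdevice miscdev;
};

static int foo_open(struct inode *inode, struct file *f)
{
	/* misc_open() has already set f->private_data to &foo->miscdev */
	struct foo_device *foo =
		container_of(f->private_data, struct foo_device, miscdev);

	f->private_data = foo;	/* repoint at driver state, as mic_open() does */
	return 0;
}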
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index b4ca6c884d19..08ca3e372fa4 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -30,8 +30,7 @@ static irqreturn_t mic_thread_fn(int irq, void *dev)
 	struct mic_intr_info *intr_info = mdev->intr_info;
 	struct mic_irq_info *irq_info = &mdev->irq_info;
 	struct mic_intr_cb *intr_cb;
-	struct pci_dev *pdev = container_of(mdev->sdev->parent,
-					    struct pci_dev, dev);
+	struct pci_dev *pdev = mdev->pdev;
 	int i;
 
 	spin_lock(&irq_info->mic_thread_lock);
@@ -57,8 +56,7 @@ static irqreturn_t mic_interrupt(int irq, void *dev)
 	struct mic_intr_info *intr_info = mdev->intr_info;
 	struct mic_irq_info *irq_info = &mdev->irq_info;
 	struct mic_intr_cb *intr_cb;
-	struct pci_dev *pdev = container_of(mdev->sdev->parent,
-					    struct pci_dev, dev);
+	struct pci_dev *pdev = mdev->pdev;
 	u32 mask;
 	int i;
 
@@ -83,7 +81,7 @@ static irqreturn_t mic_interrupt(int irq, void *dev)
 
 /* Return the interrupt offset from the index. Index is 0 based. */
 static u16 mic_map_src_to_offset(struct mic_device *mdev,
-	int intr_src, enum mic_intr_type type)
+				 int intr_src, enum mic_intr_type type)
 {
 	if (type >= MIC_NUM_INTR_TYPES)
 		return MIC_NUM_OFFSETS;
@@ -214,7 +212,7 @@ static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
 		mdev->irq_info.msix_entries[i].entry = i;
 
 	rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries,
-		MIC_MIN_MSIX);
+				   MIC_MIN_MSIX);
 	if (rc) {
 		dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc);
 		goto err_enable_msix;
@@ -229,7 +227,7 @@ static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
 		goto err_nomem2;
 	}
 
-	dev_dbg(mdev->sdev->parent,
+	dev_dbg(&mdev->pdev->dev,
 		"%d MSIx irqs setup\n", mdev->irq_info.num_vectors);
 	return 0;
 err_nomem2:
@@ -281,7 +279,6 @@ static void mic_release_callbacks(struct mic_device *mdev)
 	spin_lock(&mdev->irq_info.mic_thread_lock);
 	spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
 	for (i = 0; i < MIC_NUM_OFFSETS; i++) {
-
 		if (list_empty(&mdev->irq_info.cb_list[i]))
 			break;
 
@@ -443,12 +440,11 @@ mic_request_threaded_irq(struct mic_device *mdev,
 	unsigned long cookie = 0;
 	u16 entry;
 	struct mic_intr_cb *intr_cb;
-	struct pci_dev *pdev = container_of(mdev->sdev->parent,
-					    struct pci_dev, dev);
+	struct pci_dev *pdev = mdev->pdev;
 
 	offset = mic_map_src_to_offset(mdev, intr_src, type);
 	if (offset >= MIC_NUM_OFFSETS) {
-		dev_err(mdev->sdev->parent,
+		dev_err(&mdev->pdev->dev,
 			"Error mapping index %d to a valid source id.\n",
 			intr_src);
 		rc = -EINVAL;
@@ -458,7 +454,7 @@ mic_request_threaded_irq(struct mic_device *mdev,
 	if (mdev->irq_info.num_vectors > 1) {
 		msix = mic_get_available_vector(mdev);
 		if (!msix) {
-			dev_err(mdev->sdev->parent,
+			dev_err(&mdev->pdev->dev,
 				"No MSIx vectors available for use.\n");
 			rc = -ENOSPC;
 			goto err;
@@ -467,7 +463,7 @@ mic_request_threaded_irq(struct mic_device *mdev,
 		rc = request_threaded_irq(msix->vector, handler, thread_fn,
 					  0, name, data);
 		if (rc) {
-			dev_dbg(mdev->sdev->parent,
+			dev_dbg(&mdev->pdev->dev,
 				"request irq failed rc = %d\n", rc);
 			goto err;
 		}
@@ -476,13 +472,13 @@ mic_request_threaded_irq(struct mic_device *mdev,
 		mdev->intr_ops->program_msi_to_src_map(mdev,
 				entry, offset, true);
 		cookie = MK_COOKIE(entry, offset);
-		dev_dbg(mdev->sdev->parent, "irq: %d assigned for src: %d\n",
+		dev_dbg(&mdev->pdev->dev, "irq: %d assigned for src: %d\n",
 			msix->vector, intr_src);
 	} else {
 		intr_cb = mic_register_intr_callback(mdev, offset, handler,
 						     thread_fn, data);
 		if (IS_ERR(intr_cb)) {
-			dev_err(mdev->sdev->parent,
+			dev_err(&mdev->pdev->dev,
 				"No available callback entries for use\n");
 			rc = PTR_ERR(intr_cb);
 			goto err;
@@ -495,7 +491,7 @@ mic_request_threaded_irq(struct mic_device *mdev,
 				entry, offset, true);
 		}
 		cookie = MK_COOKIE(entry, intr_cb->cb_id);
-		dev_dbg(mdev->sdev->parent, "callback %d registered for src: %d\n",
+		dev_dbg(&mdev->pdev->dev, "callback %d registered for src: %d\n",
 			intr_cb->cb_id, intr_src);
 	}
 	return (struct mic_irq *)cookie;
@@ -515,20 +511,19 @@ err:
  * returns: none.
  */
 void mic_free_irq(struct mic_device *mdev,
-	struct mic_irq *cookie, void *data)
+		  struct mic_irq *cookie, void *data)
 {
 	u32 offset;
 	u32 entry;
 	u8 src_id;
 	unsigned int irq;
-	struct pci_dev *pdev = container_of(mdev->sdev->parent,
-					    struct pci_dev, dev);
+	struct pci_dev *pdev = mdev->pdev;
 
 	entry = GET_ENTRY((unsigned long)cookie);
 	offset = GET_OFFSET((unsigned long)cookie);
 	if (mdev->irq_info.num_vectors > 1) {
 		if (entry >= mdev->irq_info.num_vectors) {
-			dev_warn(mdev->sdev->parent,
+			dev_warn(&mdev->pdev->dev,
 				 "entry %d should be < num_irq %d\n",
 				 entry, mdev->irq_info.num_vectors);
 			return;
@@ -539,12 +534,12 @@ void mic_free_irq(struct mic_device *mdev,
 		mdev->intr_ops->program_msi_to_src_map(mdev,
 				entry, offset, false);
 
-		dev_dbg(mdev->sdev->parent, "irq: %d freed\n", irq);
+		dev_dbg(&mdev->pdev->dev, "irq: %d freed\n", irq);
 	} else {
 		irq = pdev->irq;
 		src_id = mic_unregister_intr_callback(mdev, offset);
 		if (src_id >= MIC_NUM_OFFSETS) {
-			dev_warn(mdev->sdev->parent, "Error unregistering callback\n");
+			dev_warn(&mdev->pdev->dev, "Error unregistering callback\n");
 			return;
 		}
 		if (pci_dev_msi_enabled(pdev)) {
@@ -552,7 +547,7 @@ void mic_free_irq(struct mic_device *mdev,
 			mdev->intr_ops->program_msi_to_src_map(mdev,
 				entry, src_id, false);
 		}
-		dev_dbg(mdev->sdev->parent, "callback %d unregistered for src: %d\n",
+		dev_dbg(&mdev->pdev->dev, "callback %d unregistered for src: %d\n",
 			offset, src_id);
 	}
 }
@@ -579,7 +574,7 @@ int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
 
 	rc = mic_setup_intx(mdev, pdev);
 	if (rc) {
-		dev_err(mdev->sdev->parent, "no usable interrupts\n");
+		dev_err(&mdev->pdev->dev, "no usable interrupts\n");
 		return rc;
 	}
 done:
@@ -635,8 +630,7 @@ void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
 void mic_intr_restore(struct mic_device *mdev)
 {
 	int entry, offset;
-	struct pci_dev *pdev = container_of(mdev->sdev->parent,
-					    struct pci_dev, dev);
+	struct pci_dev *pdev = mdev->pdev;
 
 	if (!pci_dev_msi_enabled(pdev))
 		return;
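Every hunk in this file makes the same substitution: log against &mdev->pdev->dev and use the mdev->pdev pointer cached at probe time, instead of re-deriving the PCI device from the now-deleted sysfs device. For reference, the open-coded container_of() being removed is exactly what the stock to_pci_dev() helper from <linux/pci.h> expands to — a sketch:

#include <linux/pci.h>

/* Sketch: equivalent derivation; the patch goes one step further and
 * simply stores the pci_dev in mic_device during mic_device_init(). */
static inline struct pci_dev *demo_parent_to_pci(struct device *dev)
{
	return to_pci_dev(dev);	/* == container_of(dev, struct pci_dev, dev) */
}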
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index 456462932151..153894e7ed5b 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -16,17 +16,11 @@
  * the file called "COPYING".
  *
  * Intel MIC Host driver.
- *
- * Global TODO's across the driver to be added after initial base
- * patches are accepted upstream:
- * 1) Enable DMA support.
- * 2) Enable per vring interrupt support.
  */
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/poll.h>
-#include <linux/suspend.h>
 
 #include <linux/mic_common.h>
 #include "../common/mic_dev.h"
@@ -63,12 +57,8 @@ MODULE_DEVICE_TABLE(pci, mic_pci_tbl);
 
 /* ID allocator for MIC devices */
 static struct ida g_mic_ida;
-/* Class of MIC devices for sysfs accessibility. */
-static struct class *g_mic_class;
 /* Base device node number for MIC devices */
 static dev_t g_mic_devno;
-/* Track the total number of MIC devices */
-atomic_t g_num_mics;
 
 static const struct file_operations mic_fops = {
 	.open = mic_open,
@@ -83,17 +73,14 @@ static const struct file_operations mic_fops = {
 static int mic_dp_init(struct mic_device *mdev)
 {
 	mdev->dp = kzalloc(MIC_DP_SIZE, GFP_KERNEL);
-	if (!mdev->dp) {
-		dev_err(mdev->sdev->parent, "%s %d err %d\n",
-			__func__, __LINE__, -ENOMEM);
+	if (!mdev->dp)
 		return -ENOMEM;
-	}
 
 	mdev->dp_dma_addr = mic_map_single(mdev,
 			mdev->dp, MIC_DP_SIZE);
 	if (mic_map_error(mdev->dp_dma_addr)) {
 		kfree(mdev->dp);
-		dev_err(mdev->sdev->parent, "%s %d err %d\n",
+		dev_err(&mdev->pdev->dev, "%s %d err %d\n",
 			__func__, __LINE__, -ENOMEM);
 		return -ENOMEM;
 	}
@@ -110,30 +97,6 @@ static void mic_dp_uninit(struct mic_device *mdev)
 }
 
 /**
- * mic_shutdown_db - Shutdown doorbell interrupt handler.
- */
-static irqreturn_t mic_shutdown_db(int irq, void *data)
-{
-	struct mic_device *mdev = data;
-	struct mic_bootparam *bootparam = mdev->dp;
-
-	mdev->ops->intr_workarounds(mdev);
-
-	switch (bootparam->shutdown_status) {
-	case MIC_HALTED:
-	case MIC_POWER_OFF:
-	case MIC_RESTART:
-		/* Fall through */
-	case MIC_CRASHED:
-		schedule_work(&mdev->shutdown_work);
-		break;
-	default:
-		break;
-	};
-	return IRQ_HANDLED;
-}
-
-/**
  * mic_ops_init: Initialize HW specific operation tables.
  *
  * @mdev: pointer to mic_device instance
@@ -190,43 +153,6 @@ static enum mic_hw_family mic_get_family(struct pci_dev *pdev)
 }
 
 /**
-* mic_pm_notifier: Notifier callback function that handles
-* PM notifications.
-*
-* @notifier_block: The notifier structure.
-* @pm_event: The event for which the driver was notified.
-* @unused: Meaningless. Always NULL.
-*
-* returns NOTIFY_DONE
-*/
-static int mic_pm_notifier(struct notifier_block *notifier,
-			   unsigned long pm_event, void *unused)
-{
-	struct mic_device *mdev = container_of(notifier,
-					       struct mic_device, pm_notifier);
-
-	switch (pm_event) {
-	case PM_HIBERNATION_PREPARE:
-		/* Fall through */
-	case PM_SUSPEND_PREPARE:
-		mic_prepare_suspend(mdev);
-		break;
-	case PM_POST_HIBERNATION:
-		/* Fall through */
-	case PM_POST_SUSPEND:
-		/* Fall through */
-	case PM_POST_RESTORE:
-		mic_complete_resume(mdev);
-		break;
-	case PM_RESTORE_PREPARE:
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
-/**
  * mic_device_init - Allocates and initializes the MIC device structure
  *
  * @mdev: pointer to mic_device instance
@@ -234,52 +160,16 @@ static int mic_pm_notifier(struct notifier_block *notifier,
  *
  * returns none.
  */
-static int
+static void
 mic_device_init(struct mic_device *mdev, struct pci_dev *pdev)
 {
-	int rc;
-
+	mdev->pdev = pdev;
 	mdev->family = mic_get_family(pdev);
 	mdev->stepping = pdev->revision;
 	mic_ops_init(mdev);
-	mic_sysfs_init(mdev);
 	mutex_init(&mdev->mic_mutex);
 	mdev->irq_info.next_avail_src = 0;
-	INIT_WORK(&mdev->reset_trigger_work, mic_reset_trigger_work);
-	INIT_WORK(&mdev->shutdown_work, mic_shutdown_work);
-	init_completion(&mdev->reset_wait);
 	INIT_LIST_HEAD(&mdev->vdev_list);
-	mdev->pm_notifier.notifier_call = mic_pm_notifier;
-	rc = register_pm_notifier(&mdev->pm_notifier);
-	if (rc) {
-		dev_err(&pdev->dev, "register_pm_notifier failed rc %d\n",
-			rc);
-		goto register_pm_notifier_fail;
-	}
-	return 0;
-register_pm_notifier_fail:
-	flush_work(&mdev->shutdown_work);
-	flush_work(&mdev->reset_trigger_work);
-	return rc;
-}
-
-/**
- * mic_device_uninit - Frees resources allocated during mic_device_init(..)
- *
- * @mdev: pointer to mic_device instance
- *
- * returns none
- */
-static void mic_device_uninit(struct mic_device *mdev)
-{
-	/* The cmdline sysfs entry might have allocated cmdline */
-	kfree(mdev->cmdline);
-	kfree(mdev->firmware);
-	kfree(mdev->ramdisk);
-	kfree(mdev->bootmode);
-	flush_work(&mdev->reset_trigger_work);
-	flush_work(&mdev->shutdown_work);
-	unregister_pm_notifier(&mdev->pm_notifier);
 }
 
 /**
@@ -291,7 +181,7 @@ static void mic_device_uninit(struct mic_device *mdev)
  * returns 0 on success, < 0 on failure.
  */
 static int mic_probe(struct pci_dev *pdev,
-	const struct pci_device_id *ent)
+		     const struct pci_device_id *ent)
 {
 	int rc;
 	struct mic_device *mdev;
@@ -309,16 +199,12 @@ static int mic_probe(struct pci_dev *pdev,
 		goto ida_fail;
 	}
 
-	rc = mic_device_init(mdev, pdev);
-	if (rc) {
-		dev_err(&pdev->dev, "mic_device_init failed rc %d\n", rc);
-		goto device_init_fail;
-	}
+	mic_device_init(mdev, pdev);
 
 	rc = pci_enable_device(pdev);
 	if (rc) {
 		dev_err(&pdev->dev, "failed to enable pci device.\n");
-		goto uninit_device;
+		goto ida_remove;
 	}
 
 	pci_set_master(pdev);
@@ -367,62 +253,39 @@ static int mic_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, mdev);
 
-	mdev->sdev = device_create_with_groups(g_mic_class, &pdev->dev,
-		MKDEV(MAJOR(g_mic_devno), mdev->id), NULL,
-		mdev->attr_group, "mic%d", mdev->id);
-	if (IS_ERR(mdev->sdev)) {
-		rc = PTR_ERR(mdev->sdev);
-		dev_err(&pdev->dev,
-			"device_create_with_groups failed rc %d\n", rc);
-		goto smpt_uninit;
-	}
-	mdev->state_sysfs = sysfs_get_dirent(mdev->sdev->kobj.sd, "state");
-	if (!mdev->state_sysfs) {
-		rc = -ENODEV;
-		dev_err(&pdev->dev, "sysfs_get_dirent failed rc %d\n", rc);
-		goto destroy_device;
-	}
-
 	rc = mic_dp_init(mdev);
 	if (rc) {
 		dev_err(&pdev->dev, "mic_dp_init failed rc %d\n", rc);
-		goto sysfs_put;
-	}
-	mutex_lock(&mdev->mic_mutex);
-
-	mdev->shutdown_db = mic_next_db(mdev);
-	mdev->shutdown_cookie = mic_request_threaded_irq(mdev, mic_shutdown_db,
-			NULL, "shutdown-interrupt", mdev,
-			mdev->shutdown_db, MIC_INTR_DB);
-	if (IS_ERR(mdev->shutdown_cookie)) {
-		rc = PTR_ERR(mdev->shutdown_cookie);
-		mutex_unlock(&mdev->mic_mutex);
-		goto dp_uninit;
+		goto smpt_uninit;
 	}
-	mutex_unlock(&mdev->mic_mutex);
 	mic_bootparam_init(mdev);
 
 	mic_create_debug_dir(mdev);
-	cdev_init(&mdev->cdev, &mic_fops);
-	mdev->cdev.owner = THIS_MODULE;
-	rc = cdev_add(&mdev->cdev, MKDEV(MAJOR(g_mic_devno), mdev->id), 1);
+
+	mdev->miscdev.minor = MISC_DYNAMIC_MINOR;
+	snprintf(mdev->name, sizeof(mdev->name), "mic%d", mdev->id);
+	mdev->miscdev.name = mdev->name;
+	mdev->miscdev.fops = &mic_fops;
+	mdev->miscdev.parent = &mdev->pdev->dev;
+	rc = misc_register(&mdev->miscdev);
 	if (rc) {
-		dev_err(&pdev->dev, "cdev_add err id %d rc %d\n", mdev->id, rc);
+		dev_err(&pdev->dev, "misc_register err id %d rc %d\n",
+			mdev->id, rc);
 		goto cleanup_debug_dir;
 	}
-	atomic_inc(&g_num_mics);
+
+	mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops);
+	if (IS_ERR(mdev->cosm_dev)) {
+		rc = PTR_ERR(mdev->cosm_dev);
+		dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc);
+		goto misc_dereg;
+	}
 	return 0;
+misc_dereg:
+	misc_deregister(&mdev->miscdev);
 cleanup_debug_dir:
 	mic_delete_debug_dir(mdev);
-	mutex_lock(&mdev->mic_mutex);
-	mic_free_irq(mdev, mdev->shutdown_cookie, mdev);
-	mutex_unlock(&mdev->mic_mutex);
-dp_uninit:
 	mic_dp_uninit(mdev);
-sysfs_put:
-	sysfs_put(mdev->state_sysfs);
-destroy_device:
-	device_destroy(g_mic_class, MKDEV(MAJOR(g_mic_devno), mdev->id));
 smpt_uninit:
 	mic_smpt_uninit(mdev);
 free_interrupts:
@@ -435,9 +298,7 @@ release_regions:
 	pci_release_regions(pdev);
 disable_device:
 	pci_disable_device(pdev);
-uninit_device:
-	mic_device_uninit(mdev);
-device_init_fail:
+ida_remove:
 	ida_simple_remove(&g_mic_ida, mdev->id);
 ida_fail:
 	kfree(mdev);
@@ -461,22 +322,14 @@ static void mic_remove(struct pci_dev *pdev)
 	if (!mdev)
 		return;
 
-	mic_stop(mdev, false);
-	atomic_dec(&g_num_mics);
-	cdev_del(&mdev->cdev);
+	cosm_unregister_device(mdev->cosm_dev);
+	misc_deregister(&mdev->miscdev);
 	mic_delete_debug_dir(mdev);
-	mutex_lock(&mdev->mic_mutex);
-	mic_free_irq(mdev, mdev->shutdown_cookie, mdev);
-	mutex_unlock(&mdev->mic_mutex);
-	flush_work(&mdev->shutdown_work);
 	mic_dp_uninit(mdev);
-	sysfs_put(mdev->state_sysfs);
-	device_destroy(g_mic_class, MKDEV(MAJOR(g_mic_devno), mdev->id));
 	mic_smpt_uninit(mdev);
 	mic_free_interrupts(mdev, pdev);
-	iounmap(mdev->mmio.va);
 	iounmap(mdev->aper.va);
-	mic_device_uninit(mdev);
+	iounmap(mdev->mmio.va);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	ida_simple_remove(&g_mic_ida, mdev->id);
@@ -495,32 +348,23 @@ static int __init mic_init(void)
 	int ret;
 
 	ret = alloc_chrdev_region(&g_mic_devno, 0,
 				  MIC_MAX_NUM_DEVS, mic_driver_name);
 	if (ret) {
 		pr_err("alloc_chrdev_region failed ret %d\n", ret);
 		goto error;
 	}
 
-	g_mic_class = class_create(THIS_MODULE, mic_driver_name);
-	if (IS_ERR(g_mic_class)) {
-		ret = PTR_ERR(g_mic_class);
-		pr_err("class_create failed ret %d\n", ret);
-		goto cleanup_chrdev;
-	}
-
 	mic_init_debugfs();
 	ida_init(&g_mic_ida);
 	ret = pci_register_driver(&mic_driver);
 	if (ret) {
 		pr_err("pci_register_driver failed ret %d\n", ret);
-		goto cleanup_debugfs;
+		goto cleanup_chrdev;
 	}
 	return ret;
-cleanup_debugfs:
+cleanup_chrdev:
 	ida_destroy(&g_mic_ida);
 	mic_exit_debugfs();
-	class_destroy(g_mic_class);
-cleanup_chrdev:
 	unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
 error:
 	return ret;
@@ -531,7 +375,6 @@ static void __exit mic_exit(void)
 	pci_unregister_driver(&mic_driver);
 	ida_destroy(&g_mic_ida);
 	mic_exit_debugfs();
-	class_destroy(g_mic_class);
 	unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
 }
 
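mic_probe() now registers a single dynamically-numbered misc character device in place of a cdev plus a driver-private class, which is why g_mic_class, device_create_with_groups() and the extra error labels all disappear. A self-contained sketch of that registration pattern (module and device names are hypothetical):

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice demo_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,	/* dynamic minor on the misc major (10) */
	.name  = "demo0",		/* surfaces as /dev/demo0 via devtmpfs */
	.fops  = &demo_fops,
};

static int __init demo_init(void)
{
	/* one call replaces alloc_chrdev_region()/class_create()/cdev_add() */
	return misc_register(&demo_miscdev);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_miscdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");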
diff --git a/drivers/misc/mic/host/mic_smpt.c b/drivers/misc/mic/host/mic_smpt.c
index cec82034875f..c3f958580fb0 100644
--- a/drivers/misc/mic/host/mic_smpt.c
+++ b/drivers/misc/mic/host/mic_smpt.c
@@ -76,7 +76,7 @@ mic_is_system_addr(struct mic_device *mdev, dma_addr_t pa)
 
 /* Populate an SMPT entry and update the reference counts. */
 static void mic_add_smpt_entry(int spt, s64 *ref, u64 addr,
-	int entries, struct mic_device *mdev)
+			       int entries, struct mic_device *mdev)
 {
 	struct mic_smpt_info *smpt_info = mdev->smpt;
 	int i;
@@ -97,7 +97,7 @@ static void mic_add_smpt_entry(int spt, s64 *ref, u64 addr,
  * for a given DMA address and size.
  */
 static dma_addr_t mic_smpt_op(struct mic_device *mdev, u64 dma_addr,
-	int entries, s64 *ref, size_t size)
+			      int entries, s64 *ref, size_t size)
 {
 	int spt;
 	int ae = 0;
@@ -148,7 +148,7 @@ found:
 * and the starting smpt address
 */
static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr,
-	size_t size, s64 *ref, u64 *smpt_start)
+				  size_t size, s64 *ref, u64 *smpt_start)
{
	u64 start = dma_addr;
	u64 end = dma_addr + size;
@@ -181,7 +181,7 @@ dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
 	dma_addr_t dma_addr;
 
 	if (!mic_is_system_addr(mdev, mic_addr)) {
-		dev_err(mdev->sdev->parent,
+		dev_err(&mdev->pdev->dev,
 			"mic_addr is invalid. mic_addr = 0x%llx\n", mic_addr);
 		return -EINVAL;
 	}
@@ -218,7 +218,7 @@ dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
 		return mic_addr;
 
 	num_entries = mic_get_smpt_ref_count(mdev, dma_addr, size,
-		ref, &smpt_start);
+					     ref, &smpt_start);
 
 	/* Set the smpt table appropriately and get 16G aligned mic address */
 	mic_addr = mic_smpt_op(mdev, smpt_start, num_entries, ref, size);
@@ -231,7 +231,7 @@ dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
 	 * else generate mic_addr by adding the 16G offset in dma_addr
 	 */
 	if (!mic_addr && MIC_FAMILY_X100 == mdev->family) {
-		dev_err(mdev->sdev->parent,
+		dev_err(&mdev->pdev->dev,
 			"mic_map failed dma_addr 0x%llx size 0x%lx\n",
 			dma_addr, size);
 		return mic_addr;
@@ -264,7 +264,7 @@ void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
 		return;
 
 	if (!mic_is_system_addr(mdev, mic_addr)) {
-		dev_err(mdev->sdev->parent,
+		dev_err(&mdev->pdev->dev,
 			"invalid address: 0x%llx\n", mic_addr);
 		return;
 	}
@@ -284,7 +284,7 @@ void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
 	for (i = spt; i < spt + num_smpt; i++) {
 		smpt_info->entry[i].ref_count -= ref[i - spt];
 		if (smpt_info->entry[i].ref_count < 0)
-			dev_warn(mdev->sdev->parent,
+			dev_warn(&mdev->pdev->dev,
 				 "ref count for entry %d is negative\n", i);
 	}
 	spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
@@ -307,15 +307,14 @@ void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
 dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size)
 {
 	dma_addr_t mic_addr = 0;
-	struct pci_dev *pdev = container_of(mdev->sdev->parent,
-					    struct pci_dev, dev);
+	struct pci_dev *pdev = mdev->pdev;
 	dma_addr_t dma_addr =
 		pci_map_single(pdev, va, size, PCI_DMA_BIDIRECTIONAL);
 
 	if (!pci_dma_mapping_error(pdev, dma_addr)) {
 		mic_addr = mic_map(mdev, dma_addr, size);
 		if (!mic_addr) {
-			dev_err(mdev->sdev->parent,
+			dev_err(&mdev->pdev->dev,
 				"mic_map failed dma_addr 0x%llx size 0x%lx\n",
 				dma_addr, size);
 			pci_unmap_single(pdev, dma_addr,
@@ -339,8 +338,7 @@ dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size)
 void
 mic_unmap_single(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
 {
-	struct pci_dev *pdev = container_of(mdev->sdev->parent,
-					    struct pci_dev, dev);
+	struct pci_dev *pdev = mdev->pdev;
 	dma_addr_t dma_addr = mic_to_dma_addr(mdev, mic_addr);
 	mic_unmap(mdev, mic_addr, size);
 	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
@@ -399,18 +397,18 @@ void mic_smpt_uninit(struct mic_device *mdev)
 	struct mic_smpt_info *smpt_info = mdev->smpt;
 	int i;
 
-	dev_dbg(mdev->sdev->parent,
+	dev_dbg(&mdev->pdev->dev,
 		"nodeid %d SMPT ref count %lld map %lld unmap %lld\n",
 		mdev->id, smpt_info->ref_count,
 		smpt_info->map_count, smpt_info->unmap_count);
 
 	for (i = 0; i < smpt_info->info.num_reg; i++) {
-		dev_dbg(mdev->sdev->parent,
+		dev_dbg(&mdev->pdev->dev,
 			"SMPT entry[%d] dma_addr = 0x%llx ref_count = %lld\n",
 			i, smpt_info->entry[i].dma_addr,
 			smpt_info->entry[i].ref_count);
 		if (smpt_info->entry[i].ref_count)
-			dev_warn(mdev->sdev->parent,
+			dev_warn(&mdev->pdev->dev,
 				 "ref count for entry %d is not zero\n", i);
 	}
 	kfree(smpt_info->entry);
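mic_map_single() retains the usual two-step streaming-DMA pattern: create the bus mapping, verify it, and only then install the SMPT entry. Reduced to its core — a sketch using the same pci_* wrappers this 4.4-era code uses (thin shims over the dma_* API):

#include <linux/pci.h>

/* Hypothetical reduction of mic_map_single(): map, then verify. */
static dma_addr_t demo_map(struct pci_dev *pdev, void *va, size_t len)
{
	dma_addr_t da = pci_map_single(pdev, va, len, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(pdev, da))
		return 0;	/* 0 doubles as the failure value, as in mic_map() */
	return da;
}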
diff --git a/drivers/misc/mic/host/mic_sysfs.c b/drivers/misc/mic/host/mic_sysfs.c
deleted file mode 100644
index 6dd864e4a617..000000000000
--- a/drivers/misc/mic/host/mic_sysfs.c
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#include <linux/pci.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-
-/*
- * A state-to-string lookup table, for exposing a human readable state
- * via sysfs. Always keep in sync with enum mic_states
- */
-static const char * const mic_state_string[] = {
-	[MIC_OFFLINE] = "offline",
-	[MIC_ONLINE] = "online",
-	[MIC_SHUTTING_DOWN] = "shutting_down",
-	[MIC_RESET_FAILED] = "reset_failed",
-	[MIC_SUSPENDING] = "suspending",
-	[MIC_SUSPENDED] = "suspended",
-};
-
-/*
- * A shutdown-status-to-string lookup table, for exposing a human
- * readable state via sysfs. Always keep in sync with enum mic_shutdown_status
- */
-static const char * const mic_shutdown_status_string[] = {
-	[MIC_NOP] = "nop",
-	[MIC_CRASHED] = "crashed",
-	[MIC_HALTED] = "halted",
-	[MIC_POWER_OFF] = "poweroff",
-	[MIC_RESTART] = "restart",
-};
-
-void mic_set_shutdown_status(struct mic_device *mdev, u8 shutdown_status)
-{
-	dev_dbg(mdev->sdev->parent, "Shutdown Status %s -> %s\n",
-		mic_shutdown_status_string[mdev->shutdown_status],
-		mic_shutdown_status_string[shutdown_status]);
-	mdev->shutdown_status = shutdown_status;
-}
-
-void mic_set_state(struct mic_device *mdev, u8 state)
-{
-	dev_dbg(mdev->sdev->parent, "State %s -> %s\n",
-		mic_state_string[mdev->state],
-		mic_state_string[state]);
-	mdev->state = state;
-	sysfs_notify_dirent(mdev->state_sysfs);
-}
-
-static ssize_t
-family_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	static const char x100[] = "x100";
-	static const char unknown[] = "Unknown";
-	const char *card = NULL;
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
-	if (!mdev)
-		return -EINVAL;
-
-	switch (mdev->family) {
-	case MIC_FAMILY_X100:
-		card = x100;
-		break;
-	default:
-		card = unknown;
-		break;
-	}
-	return scnprintf(buf, PAGE_SIZE, "%s\n", card);
-}
-static DEVICE_ATTR_RO(family);
-
-static ssize_t
-stepping_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-	char *string = "??";
-
-	if (!mdev)
-		return -EINVAL;
-
-	switch (mdev->stepping) {
-	case MIC_A0_STEP:
-		string = "A0";
-		break;
-	case MIC_B0_STEP:
-		string = "B0";
-		break;
-	case MIC_B1_STEP:
-		string = "B1";
-		break;
-	case MIC_C0_STEP:
-		string = "C0";
-		break;
-	default:
-		break;
-	}
-	return scnprintf(buf, PAGE_SIZE, "%s\n", string);
-}
-static DEVICE_ATTR_RO(stepping);
-
-static ssize_t
-state_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
-	if (!mdev || mdev->state >= MIC_LAST)
-		return -EINVAL;
-
-	return scnprintf(buf, PAGE_SIZE, "%s\n",
-			 mic_state_string[mdev->state]);
-}
-
-static ssize_t
-state_store(struct device *dev, struct device_attribute *attr,
-	    const char *buf, size_t count)
-{
-	int rc = 0;
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-	if (!mdev)
-		return -EINVAL;
-	if (sysfs_streq(buf, "boot")) {
-		rc = mic_start(mdev, buf);
-		if (rc) {
-			dev_err(mdev->sdev->parent,
-				"mic_boot failed rc %d\n", rc);
-			count = rc;
-		}
-		goto done;
-	}
-
-	if (sysfs_streq(buf, "reset")) {
-		schedule_work(&mdev->reset_trigger_work);
-		goto done;
-	}
-
-	if (sysfs_streq(buf, "shutdown")) {
-		mic_shutdown(mdev);
-		goto done;
-	}
-
-	if (sysfs_streq(buf, "suspend")) {
-		mic_suspend(mdev);
-		goto done;
-	}
-
-	count = -EINVAL;
-done:
-	return count;
-}
-static DEVICE_ATTR_RW(state);
-
-static ssize_t shutdown_status_show(struct device *dev,
-				    struct device_attribute *attr, char *buf)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
-	if (!mdev || mdev->shutdown_status >= MIC_STATUS_LAST)
-		return -EINVAL;
-
-	return scnprintf(buf, PAGE_SIZE, "%s\n",
-			 mic_shutdown_status_string[mdev->shutdown_status]);
-}
-static DEVICE_ATTR_RO(shutdown_status);
-
-static ssize_t
-cmdline_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-	char *cmdline;
-
-	if (!mdev)
-		return -EINVAL;
-
-	cmdline = mdev->cmdline;
-
-	if (cmdline)
-		return scnprintf(buf, PAGE_SIZE, "%s\n", cmdline);
-	return 0;
-}
-
-static ssize_t
-cmdline_store(struct device *dev, struct device_attribute *attr,
-	      const char *buf, size_t count)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
-	if (!mdev)
-		return -EINVAL;
-
-	mutex_lock(&mdev->mic_mutex);
-	kfree(mdev->cmdline);
-
-	mdev->cmdline = kmalloc(count + 1, GFP_KERNEL);
-	if (!mdev->cmdline) {
-		count = -ENOMEM;
-		goto unlock;
-	}
-
-	strncpy(mdev->cmdline, buf, count);
-
-	if (mdev->cmdline[count - 1] == '\n')
-		mdev->cmdline[count - 1] = '\0';
-	else
-		mdev->cmdline[count] = '\0';
-unlock:
-	mutex_unlock(&mdev->mic_mutex);
-	return count;
-}
-static DEVICE_ATTR_RW(cmdline);
-
-static ssize_t
-firmware_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-	char *firmware;
-
-	if (!mdev)
-		return -EINVAL;
-
-	firmware = mdev->firmware;
-
-	if (firmware)
-		return scnprintf(buf, PAGE_SIZE, "%s\n", firmware);
-	return 0;
-}
-
-static ssize_t
-firmware_store(struct device *dev, struct device_attribute *attr,
-	       const char *buf, size_t count)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
-	if (!mdev)
-		return -EINVAL;
-
-	mutex_lock(&mdev->mic_mutex);
-	kfree(mdev->firmware);
-
-	mdev->firmware = kmalloc(count + 1, GFP_KERNEL);
-	if (!mdev->firmware) {
-		count = -ENOMEM;
-		goto unlock;
-	}
-	strncpy(mdev->firmware, buf, count);
-
-	if (mdev->firmware[count - 1] == '\n')
-		mdev->firmware[count - 1] = '\0';
-	else
-		mdev->firmware[count] = '\0';
-unlock:
-	mutex_unlock(&mdev->mic_mutex);
-	return count;
-}
-static DEVICE_ATTR_RW(firmware);
-
-static ssize_t
-ramdisk_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-	char *ramdisk;
-
-	if (!mdev)
-		return -EINVAL;
-
-	ramdisk = mdev->ramdisk;
-
-	if (ramdisk)
-		return scnprintf(buf, PAGE_SIZE, "%s\n", ramdisk);
-	return 0;
-}
-
-static ssize_t
-ramdisk_store(struct device *dev, struct device_attribute *attr,
-	      const char *buf, size_t count)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
-	if (!mdev)
-		return -EINVAL;
-
-	mutex_lock(&mdev->mic_mutex);
-	kfree(mdev->ramdisk);
-
-	mdev->ramdisk = kmalloc(count + 1, GFP_KERNEL);
-	if (!mdev->ramdisk) {
-		count = -ENOMEM;
-		goto unlock;
-	}
-
-	strncpy(mdev->ramdisk, buf, count);
-
-	if (mdev->ramdisk[count - 1] == '\n')
-		mdev->ramdisk[count - 1] = '\0';
-	else
-		mdev->ramdisk[count] = '\0';
-unlock:
-	mutex_unlock(&mdev->mic_mutex);
-	return count;
-}
-static DEVICE_ATTR_RW(ramdisk);
-
-static ssize_t
-bootmode_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-	char *bootmode;
-
-	if (!mdev)
-		return -EINVAL;
-
-	bootmode = mdev->bootmode;
-
-	if (bootmode)
-		return scnprintf(buf, PAGE_SIZE, "%s\n", bootmode);
-	return 0;
-}
-
-static ssize_t
-bootmode_store(struct device *dev, struct device_attribute *attr,
-	       const char *buf, size_t count)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
-	if (!mdev)
-		return -EINVAL;
-
-	if (!sysfs_streq(buf, "linux") && !sysfs_streq(buf, "elf"))
-		return -EINVAL;
-
-	mutex_lock(&mdev->mic_mutex);
-	kfree(mdev->bootmode);
-
-	mdev->bootmode = kmalloc(count + 1, GFP_KERNEL);
-	if (!mdev->bootmode) {
-		count = -ENOMEM;
-		goto unlock;
-	}
-
-	strncpy(mdev->bootmode, buf, count);
-
-	if (mdev->bootmode[count - 1] == '\n')
-		mdev->bootmode[count - 1] = '\0';
-	else
-		mdev->bootmode[count] = '\0';
-unlock:
-	mutex_unlock(&mdev->mic_mutex);
-	return count;
-}
-static DEVICE_ATTR_RW(bootmode);
-
-static ssize_t
-log_buf_addr_show(struct device *dev, struct device_attribute *attr,
-		  char *buf)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
-	if (!mdev)
-		return -EINVAL;
-
-	return scnprintf(buf, PAGE_SIZE, "%p\n", mdev->log_buf_addr);
-}
-
-static ssize_t
-log_buf_addr_store(struct device *dev, struct device_attribute *attr,
-		   const char *buf, size_t count)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-	int ret;
-	unsigned long addr;
-
-	if (!mdev)
-		return -EINVAL;
-
-	ret = kstrtoul(buf, 16, &addr);
-	if (ret)
-		goto exit;
-
-	mdev->log_buf_addr = (void *)addr;
-	ret = count;
-exit:
-	return ret;
-}
-static DEVICE_ATTR_RW(log_buf_addr);
-
-static ssize_t
-log_buf_len_show(struct device *dev, struct device_attribute *attr,
-		 char *buf)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
-	if (!mdev)
-		return -EINVAL;
-
-	return scnprintf(buf, PAGE_SIZE, "%p\n", mdev->log_buf_len);
-}
-
-static ssize_t
-log_buf_len_store(struct device *dev, struct device_attribute *attr,
-		  const char *buf, size_t count)
-{
-	struct mic_device *mdev = dev_get_drvdata(dev->parent);
-	int ret;
-	unsigned long addr;
-
-	if (!mdev)
-		return -EINVAL;
-
-	ret = kstrtoul(buf, 16, &addr);
-	if (ret)
-		goto exit;
-
-	mdev->log_buf_len = (int *)addr;
-	ret = count;
-exit:
-	return ret;
-}
-static DEVICE_ATTR_RW(log_buf_len);
-
-static struct attribute *mic_default_attrs[] = {
-	&dev_attr_family.attr,
-	&dev_attr_stepping.attr,
-	&dev_attr_state.attr,
-	&dev_attr_shutdown_status.attr,
-	&dev_attr_cmdline.attr,
-	&dev_attr_firmware.attr,
-	&dev_attr_ramdisk.attr,
-	&dev_attr_bootmode.attr,
-	&dev_attr_log_buf_addr.attr,
-	&dev_attr_log_buf_len.attr,
-
-	NULL
-};
-
-ATTRIBUTE_GROUPS(mic_default);
-
-void mic_sysfs_init(struct mic_device *mdev)
-{
-	mdev->attr_group = mic_default_groups;
-}
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index cc08e9f733c9..58b107a24a8b 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -23,7 +23,6 @@
 #include <linux/uaccess.h>
 #include <linux/dmaengine.h>
 #include <linux/mic_common.h>
-
 #include "../common/mic_dev.h"
 #include "mic_device.h"
 #include "mic_smpt.h"
@@ -62,7 +61,7 @@ static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
 	}
 error:
 	if (err)
-		dev_err(mdev->sdev->parent, "%s %d err %d\n",
+		dev_err(&mdev->pdev->dev, "%s %d err %d\n",
 			__func__, __LINE__, err);
 	return err;
 }
@@ -440,7 +439,7 @@ void mic_virtio_reset_devices(struct mic_device *mdev)
 	struct list_head *pos, *tmp;
 	struct mic_vdev *mvdev;
 
-	dev_dbg(mdev->sdev->parent, "%s\n", __func__);
+	dev_dbg(&mdev->pdev->dev, "%s\n", __func__);
 
 	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
 		mvdev = list_entry(pos, struct mic_vdev, list);
@@ -686,7 +685,7 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
 		mvr->head = USHRT_MAX;
 		mvr->mvdev = mvdev;
 		mvr->vrh.notify = mic_notify;
-		dev_dbg(mdev->sdev->parent,
+		dev_dbg(&mdev->pdev->dev,
 			"%s %d index %d va %p info %p vr_size 0x%x\n",
 			__func__, __LINE__, i, vr->va, vr->info, vr_size);
 		mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
@@ -704,7 +703,7 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
 				     mvdev->virtio_db, MIC_INTR_DB);
 	if (IS_ERR(mvdev->virtio_cookie)) {
 		ret = PTR_ERR(mvdev->virtio_cookie);
-		dev_dbg(mdev->sdev->parent, "request irq failed\n");
+		dev_dbg(&mdev->pdev->dev, "request irq failed\n");
 		goto err;
 	}
 
@@ -720,7 +719,7 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
 	smp_wmb();
 	dd->type = type;
 
-	dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type);
+	dev_dbg(&mdev->pdev->dev, "Added virtio device id %d\n", dd->type);
 
 	db = bootparam->h2c_config_db;
 	if (db != -1)
@@ -755,7 +754,7 @@ void mic_virtio_del_device(struct mic_vdev *mvdev)
 	db = bootparam->h2c_config_db;
 	if (db == -1)
 		goto skip_hot_remove;
-	dev_dbg(mdev->sdev->parent,
+	dev_dbg(&mdev->pdev->dev,
 		"Requesting hot remove id %d\n", mvdev->virtio_id);
 	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
 	mdev->ops->send_intr(mdev, db);
@@ -765,7 +764,7 @@ void mic_virtio_del_device(struct mic_vdev *mvdev)
 		if (ret)
 			break;
 	}
-	dev_dbg(mdev->sdev->parent,
+	dev_dbg(&mdev->pdev->dev,
 		"Device id %d config_change %d guest_ack %d retry %d\n",
 		mvdev->virtio_id, mvdev->dc->config_change,
 		mvdev->dc->guest_ack, retry);
@@ -794,7 +793,7 @@ skip_hot_remove:
 		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
 		if (tmp_mvdev == mvdev) {
 			list_del(pos);
-			dev_dbg(mdev->sdev->parent,
+			dev_dbg(&mdev->pdev->dev,
 				"Removing virtio device id %d\n",
 				mvdev->virtio_id);
 			break;
diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/host/mic_virtio.h
index d574efb853d9..a80631f2790d 100644
--- a/drivers/misc/mic/host/mic_virtio.h
+++ b/drivers/misc/mic/host/mic_virtio.h
@@ -124,7 +124,7 @@ void mic_bh_handler(struct work_struct *work);
 /* Helper API to obtain the MIC PCIe device */
 static inline struct device *mic_dev(struct mic_vdev *mvdev)
 {
-	return mvdev->mdev->sdev->parent;
+	return &mvdev->mdev->pdev->dev;
 }
 
 /* Helper API to check if a virtio device is initialized */
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 3341e90dede4..8118ac48c764 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -43,7 +43,7 @@
 static void
 mic_x100_write_spad(struct mic_device *mdev, unsigned int idx, u32 val)
 {
-	dev_dbg(mdev->sdev->parent, "Writing 0x%x to scratch pad index %d\n",
+	dev_dbg(&mdev->pdev->dev, "Writing 0x%x to scratch pad index %d\n",
 		val, idx);
 	mic_mmio_write(&mdev->mmio, val,
 		       MIC_X100_SBOX_BASE_ADDRESS +
@@ -66,7 +66,7 @@ mic_x100_read_spad(struct mic_device *mdev, unsigned int idx)
 			  MIC_X100_SBOX_BASE_ADDRESS +
 			  MIC_X100_SBOX_SPAD0 + idx * 4);
 
-	dev_dbg(mdev->sdev->parent,
+	dev_dbg(&mdev->pdev->dev,
 		"Reading 0x%x from scratch pad index %d\n", val, idx);
 	return val;
 }
@@ -126,7 +126,7 @@ static void mic_x100_disable_interrupts(struct mic_device *mdev)
  * @mdev: pointer to mic_device instance
  */
 static void mic_x100_send_sbox_intr(struct mic_device *mdev,
-	int doorbell)
+				    int doorbell)
 {
 	struct mic_mw *mw = &mdev->mmio;
 	u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8;
@@ -147,7 +147,7 @@ static void mic_x100_send_sbox_intr(struct mic_device *mdev,
  * @mdev: pointer to mic_device instance
  */
 static void mic_x100_send_rdmasr_intr(struct mic_device *mdev,
-	int doorbell)
+				      int doorbell)
 {
 	int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2);
 	/* Ensure that the interrupt is ordered w.r.t. previous stores. */
@@ -359,15 +359,14 @@ mic_x100_load_command_line(struct mic_device *mdev, const struct firmware *fw)
 
 	boot_mem = mdev->aper.len >> 20;
 	buf = kzalloc(CMDLINE_SIZE, GFP_KERNEL);
-	if (!buf) {
-		dev_err(mdev->sdev->parent,
-			"%s %d allocation failed\n", __func__, __LINE__);
+	if (!buf)
 		return -ENOMEM;
-	}
+
 	len += snprintf(buf, CMDLINE_SIZE - len,
 			" mem=%dM", boot_mem);
-	if (mdev->cmdline)
-		snprintf(buf + len, CMDLINE_SIZE - len, " %s", mdev->cmdline);
+	if (mdev->cosm_dev->cmdline)
+		snprintf(buf + len, CMDLINE_SIZE - len, " %s",
+			 mdev->cosm_dev->cmdline);
 	memcpy_toio(cmd_line_va, buf, strlen(buf) + 1);
 	kfree(buf);
 	return 0;
@@ -386,12 +385,11 @@ mic_x100_load_ramdisk(struct mic_device *mdev)
 	int rc;
 	struct boot_params __iomem *bp = mdev->aper.va + mdev->bootaddr;
 
-	rc = request_firmware(&fw,
-			mdev->ramdisk, mdev->sdev->parent);
+	rc = request_firmware(&fw, mdev->cosm_dev->ramdisk, &mdev->pdev->dev);
 	if (rc < 0) {
-		dev_err(mdev->sdev->parent,
+		dev_err(&mdev->pdev->dev,
 			"ramdisk request_firmware failed: %d %s\n",
-			rc, mdev->ramdisk);
+			rc, mdev->cosm_dev->ramdisk);
 		goto error;
 	}
 	/*
@@ -423,10 +421,10 @@ mic_x100_get_boot_addr(struct mic_device *mdev)
 
 	scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
 	boot_addr = MIC_X100_SPAD2_DOWNLOAD_ADDR(scratch2);
-	dev_dbg(mdev->sdev->parent, "%s %d boot_addr 0x%x\n",
+	dev_dbg(&mdev->pdev->dev, "%s %d boot_addr 0x%x\n",
 		__func__, __LINE__, boot_addr);
 	if (boot_addr > (1 << 31)) {
-		dev_err(mdev->sdev->parent,
+		dev_err(&mdev->pdev->dev,
 			"incorrect bootaddr 0x%x\n",
 			boot_addr);
 		rc = -EINVAL;
@@ -454,37 +452,37 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
 	if (rc)
 		goto error;
 	/* load OS */
-	rc = request_firmware(&fw, mdev->firmware, mdev->sdev->parent);
+	rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev);
 	if (rc < 0) {
-		dev_err(mdev->sdev->parent,
+		dev_err(&mdev->pdev->dev,
 			"ramdisk request_firmware failed: %d %s\n",
-			rc, mdev->firmware);
+			rc, mdev->cosm_dev->firmware);
 		goto error;
 	}
 	if (mdev->bootaddr > mdev->aper.len - fw->size) {
 		rc = -EINVAL;
-		dev_err(mdev->sdev->parent, "%s %d rc %d bootaddr 0x%x\n",
+		dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n",
 			__func__, __LINE__, rc, mdev->bootaddr);
 		release_firmware(fw);
 		goto error;
 	}
 	memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size);
 	mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);
-	if (!strcmp(mdev->bootmode, "elf"))
+	if (!strcmp(mdev->cosm_dev->bootmode, "flash"))
 		goto done;
 	/* load command line */
 	rc = mic_x100_load_command_line(mdev, fw);
 	if (rc) {
-		dev_err(mdev->sdev->parent, "%s %d rc %d\n",
+		dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
 			__func__, __LINE__, rc);
 		goto error;
 	}
 	release_firmware(fw);
 	/* load ramdisk */
-	if (mdev->ramdisk)
+	if (mdev->cosm_dev->ramdisk)
 		rc = mic_x100_load_ramdisk(mdev);
 error:
-	dev_dbg(mdev->sdev->parent, "%s %d rc %d\n", __func__, __LINE__, rc);
+	dev_dbg(&mdev->pdev->dev, "%s %d rc %d\n", __func__, __LINE__, rc);
 done:
 	return rc;
 }
diff --git a/drivers/misc/mic/scif/Makefile b/drivers/misc/mic/scif/Makefile
index bf10bb7e2b91..29cfc3e51ac9 100644
--- a/drivers/misc/mic/scif/Makefile
+++ b/drivers/misc/mic/scif/Makefile
@@ -13,3 +13,8 @@ scif-objs += scif_epd.o
13scif-objs += scif_rb.o 13scif-objs += scif_rb.o
14scif-objs += scif_nodeqp.o 14scif-objs += scif_nodeqp.o
15scif-objs += scif_nm.o 15scif-objs += scif_nm.o
16scif-objs += scif_dma.o
17scif-objs += scif_fence.o
18scif-objs += scif_mmap.o
19scif-objs += scif_rma.o
20scif-objs += scif_rma_list.o
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index f39d3135a9ef..ddc9e4b08b5c 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -37,9 +37,21 @@ enum conn_async_state {
37 ASYNC_CONN_FLUSH_WORK /* async work flush in progress */ 37 ASYNC_CONN_FLUSH_WORK /* async work flush in progress */
38}; 38};
39 39
40/*
41 * File operations for the anonymous inode file associated with a SCIF
42 * endpoint, used by kernel mode SCIF poll. Kernel mode SCIF poll calls
43 * portions of the in-kernel poll API, which take a struct file *. Since a
44 * struct file is not otherwise available to kernel mode SCIF, an anonymous
45 * file is used for this purpose.
46 */
47const struct file_operations scif_anon_fops = {
48 .owner = THIS_MODULE,
49};
50
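/*
 * Illustrative sketch, not part of this commit: scif_anon_inode_getfile()
 * and scif_anon_inode_fput(), used below but implemented elsewhere in this
 * series, can plausibly be built on the kernel's anon-inode helper
 * (<linux/anon_inodes.h>) roughly as follows.
 */
int scif_anon_inode_getfile(scif_epd_t epd)
{
	epd->anon = anon_inode_getfile("scif", &scif_anon_fops, NULL, 0);
	if (IS_ERR(epd->anon))
		return PTR_ERR(epd->anon);
	return 0;
}

void scif_anon_inode_fput(scif_epd_t epd)
{
	if (epd->anon) {
		fput(epd->anon);
		epd->anon = NULL;
	}
}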
40scif_epd_t scif_open(void) 51scif_epd_t scif_open(void)
41{ 52{
42 struct scif_endpt *ep; 53 struct scif_endpt *ep;
54 int err;
43 55
44 might_sleep(); 56 might_sleep();
45 ep = kzalloc(sizeof(*ep), GFP_KERNEL); 57 ep = kzalloc(sizeof(*ep), GFP_KERNEL);
@@ -50,15 +62,22 @@ scif_epd_t scif_open(void)
50 if (!ep->qp_info.qp) 62 if (!ep->qp_info.qp)
51 goto err_qp_alloc; 63 goto err_qp_alloc;
52 64
65 err = scif_anon_inode_getfile(ep);
66 if (err)
67 goto err_anon_inode;
68
53 spin_lock_init(&ep->lock); 69 spin_lock_init(&ep->lock);
54 mutex_init(&ep->sendlock); 70 mutex_init(&ep->sendlock);
55 mutex_init(&ep->recvlock); 71 mutex_init(&ep->recvlock);
56 72
73 scif_rma_ep_init(ep);
57 ep->state = SCIFEP_UNBOUND; 74 ep->state = SCIFEP_UNBOUND;
58 dev_dbg(scif_info.mdev.this_device, 75 dev_dbg(scif_info.mdev.this_device,
59 "SCIFAPI open: ep %p success\n", ep); 76 "SCIFAPI open: ep %p success\n", ep);
60 return ep; 77 return ep;
61 78
79err_anon_inode:
80 kfree(ep->qp_info.qp);
62err_qp_alloc: 81err_qp_alloc:
63 kfree(ep); 82 kfree(ep);
64err_ep_alloc: 83err_ep_alloc:
@@ -166,8 +185,11 @@ int scif_close(scif_epd_t epd)
166 185
167 switch (oldstate) { 186 switch (oldstate) {
168 case SCIFEP_ZOMBIE: 187 case SCIFEP_ZOMBIE:
188 dev_err(scif_info.mdev.this_device,
189 "SCIFAPI close: zombie state unexpected\n");
169 case SCIFEP_DISCONNECTED: 190 case SCIFEP_DISCONNECTED:
170 spin_unlock(&ep->lock); 191 spin_unlock(&ep->lock);
192 scif_unregister_all_windows(epd);
171 /* Remove from the disconnected list */ 193 /* Remove from the disconnected list */
172 mutex_lock(&scif_info.connlock); 194 mutex_lock(&scif_info.connlock);
173 list_for_each_safe(pos, tmpq, &scif_info.disconnected) { 195 list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
@@ -189,6 +211,7 @@ int scif_close(scif_epd_t epd)
189 case SCIFEP_CLOSING: 211 case SCIFEP_CLOSING:
190 { 212 {
191 spin_unlock(&ep->lock); 213 spin_unlock(&ep->lock);
214 scif_unregister_all_windows(epd);
192 scif_disconnect_ep(ep); 215 scif_disconnect_ep(ep);
193 break; 216 break;
194 } 217 }
@@ -200,7 +223,7 @@ int scif_close(scif_epd_t epd)
200 struct scif_endpt *aep; 223 struct scif_endpt *aep;
201 224
202 spin_unlock(&ep->lock); 225 spin_unlock(&ep->lock);
203 spin_lock(&scif_info.eplock); 226 mutex_lock(&scif_info.eplock);
204 227
205 /* remove from listen list */ 228 /* remove from listen list */
206 list_for_each_safe(pos, tmpq, &scif_info.listen) { 229 list_for_each_safe(pos, tmpq, &scif_info.listen) {
@@ -222,7 +245,7 @@ int scif_close(scif_epd_t epd)
222 break; 245 break;
223 } 246 }
224 } 247 }
225 spin_unlock(&scif_info.eplock); 248 mutex_unlock(&scif_info.eplock);
226 mutex_lock(&scif_info.connlock); 249 mutex_lock(&scif_info.connlock);
227 list_for_each_safe(pos, tmpq, &scif_info.connected) { 250 list_for_each_safe(pos, tmpq, &scif_info.connected) {
228 tmpep = list_entry(pos, 251 tmpep = list_entry(pos,
@@ -242,13 +265,13 @@ int scif_close(scif_epd_t epd)
242 } 265 }
243 mutex_unlock(&scif_info.connlock); 266 mutex_unlock(&scif_info.connlock);
244 scif_teardown_ep(aep); 267 scif_teardown_ep(aep);
245 spin_lock(&scif_info.eplock); 268 mutex_lock(&scif_info.eplock);
246 scif_add_epd_to_zombie_list(aep, SCIF_EPLOCK_HELD); 269 scif_add_epd_to_zombie_list(aep, SCIF_EPLOCK_HELD);
247 ep->acceptcnt--; 270 ep->acceptcnt--;
248 } 271 }
249 272
250 spin_lock(&ep->lock); 273 spin_lock(&ep->lock);
251 spin_unlock(&scif_info.eplock); 274 mutex_unlock(&scif_info.eplock);
252 275
253 /* Remove and reject any pending connection requests. */ 276 /* Remove and reject any pending connection requests. */
254 while (ep->conreqcnt) { 277 while (ep->conreqcnt) {
@@ -279,6 +302,7 @@ int scif_close(scif_epd_t epd)
279 } 302 }
280 } 303 }
281 scif_put_port(ep->port.port); 304 scif_put_port(ep->port.port);
305 scif_anon_inode_fput(ep);
282 scif_teardown_ep(ep); 306 scif_teardown_ep(ep);
283 scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD); 307 scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD);
284 return 0; 308 return 0;
@@ -409,9 +433,9 @@ int scif_listen(scif_epd_t epd, int backlog)
409 scif_teardown_ep(ep); 433 scif_teardown_ep(ep);
410 ep->qp_info.qp = NULL; 434 ep->qp_info.qp = NULL;
411 435
412 spin_lock(&scif_info.eplock); 436 mutex_lock(&scif_info.eplock);
413 list_add_tail(&ep->list, &scif_info.listen); 437 list_add_tail(&ep->list, &scif_info.listen);
414 spin_unlock(&scif_info.eplock); 438 mutex_unlock(&scif_info.eplock);
415 return 0; 439 return 0;
416} 440}
417EXPORT_SYMBOL_GPL(scif_listen); 441EXPORT_SYMBOL_GPL(scif_listen);
@@ -450,6 +474,13 @@ static int scif_conn_func(struct scif_endpt *ep)
450 struct scifmsg msg; 474 struct scifmsg msg;
451 struct device *spdev; 475 struct device *spdev;
452 476
477 err = scif_reserve_dma_chan(ep);
478 if (err) {
479 dev_err(&ep->remote_dev->sdev->dev,
480 "%s %d err %d\n", __func__, __LINE__, err);
481 ep->state = SCIFEP_BOUND;
482 goto connect_error_simple;
483 }
453 /* Initiate the first part of the endpoint QP setup */ 484 /* Initiate the first part of the endpoint QP setup */
454 err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset, 485 err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset,
455 SCIF_ENDPT_QP_SIZE, ep->remote_dev); 486 SCIF_ENDPT_QP_SIZE, ep->remote_dev);
@@ -558,8 +589,10 @@ void scif_conn_handler(struct work_struct *work)
558 list_del(&ep->conn_list); 589 list_del(&ep->conn_list);
559 } 590 }
560 spin_unlock(&scif_info.nb_connect_lock); 591 spin_unlock(&scif_info.nb_connect_lock);
561 if (ep) 592 if (ep) {
562 ep->conn_err = scif_conn_func(ep); 593 ep->conn_err = scif_conn_func(ep);
594 wake_up_interruptible(&ep->conn_pend_wq);
595 }
563 } while (ep); 596 } while (ep);
564} 597}
565 598
@@ -660,6 +693,7 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
660 ep->remote_dev = &scif_dev[dst->node]; 693 ep->remote_dev = &scif_dev[dst->node];
661 ep->qp_info.qp->magic = SCIFEP_MAGIC; 694 ep->qp_info.qp->magic = SCIFEP_MAGIC;
662 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) { 695 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
696 init_waitqueue_head(&ep->conn_pend_wq);
663 spin_lock(&scif_info.nb_connect_lock); 697 spin_lock(&scif_info.nb_connect_lock);
664 list_add_tail(&ep->conn_list, &scif_info.nb_connect_list); 698 list_add_tail(&ep->conn_list, &scif_info.nb_connect_list);
665 spin_unlock(&scif_info.nb_connect_lock); 699 spin_unlock(&scif_info.nb_connect_lock);
@@ -782,12 +816,25 @@ retry_connection:
782 cep->remote_dev = &scif_dev[peer->node]; 816 cep->remote_dev = &scif_dev[peer->node];
783 cep->remote_ep = conreq->msg.payload[0]; 817 cep->remote_ep = conreq->msg.payload[0];
784 818
819 scif_rma_ep_init(cep);
820
821 err = scif_reserve_dma_chan(cep);
822 if (err) {
823 dev_err(scif_info.mdev.this_device,
824 "%s %d err %d\n", __func__, __LINE__, err);
825 goto scif_accept_error_qpalloc;
826 }
827
785 cep->qp_info.qp = kzalloc(sizeof(*cep->qp_info.qp), GFP_KERNEL); 828 cep->qp_info.qp = kzalloc(sizeof(*cep->qp_info.qp), GFP_KERNEL);
786 if (!cep->qp_info.qp) { 829 if (!cep->qp_info.qp) {
787 err = -ENOMEM; 830 err = -ENOMEM;
788 goto scif_accept_error_qpalloc; 831 goto scif_accept_error_qpalloc;
789 } 832 }
790 833
834 err = scif_anon_inode_getfile(cep);
835 if (err)
836 goto scif_accept_error_anon_inode;
837
791 cep->qp_info.qp->magic = SCIFEP_MAGIC; 838 cep->qp_info.qp->magic = SCIFEP_MAGIC;
792 spdev = scif_get_peer_dev(cep->remote_dev); 839 spdev = scif_get_peer_dev(cep->remote_dev);
793 if (IS_ERR(spdev)) { 840 if (IS_ERR(spdev)) {
@@ -858,6 +905,8 @@ retry:
858 spin_unlock(&cep->lock); 905 spin_unlock(&cep->lock);
859 return 0; 906 return 0;
860scif_accept_error_map: 907scif_accept_error_map:
908 scif_anon_inode_fput(cep);
909scif_accept_error_anon_inode:
861 scif_teardown_ep(cep); 910 scif_teardown_ep(cep);
862scif_accept_error_qpalloc: 911scif_accept_error_qpalloc:
863 kfree(cep); 912 kfree(cep);
@@ -1247,6 +1296,134 @@ int scif_recv(scif_epd_t epd, void *msg, int len, int flags)
1247} 1296}
1248EXPORT_SYMBOL_GPL(scif_recv); 1297EXPORT_SYMBOL_GPL(scif_recv);
1249 1298
1299static inline void _scif_poll_wait(struct file *f, wait_queue_head_t *wq,
1300 poll_table *p, struct scif_endpt *ep)
1301{
1302 /*
1303 * Because poll_wait makes a GFP_KERNEL allocation, give up the lock
1304 * and re-acquire it afterwards. Because the endpoint state might have
1305 * changed while the lock was dropped, the state must be checked
1306 * again after re-acquiring the lock. The code in __scif_pollfd(..)
1307 * does this.
1308 */
1309 spin_unlock(&ep->lock);
1310 poll_wait(f, wq, p);
1311 spin_lock(&ep->lock);
1312}
1313
1314unsigned int
1315__scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep)
1316{
1317 unsigned int mask = 0;
1318
1319 dev_dbg(scif_info.mdev.this_device,
1320 "SCIFAPI pollfd: ep %p %s\n", ep, scif_ep_states[ep->state]);
1321
1322 spin_lock(&ep->lock);
1323
1324 /* Endpoint is waiting for a non-blocking connect to complete */
1325 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
1326 _scif_poll_wait(f, &ep->conn_pend_wq, wait, ep);
1327 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
1328 if (ep->state == SCIFEP_CONNECTED ||
1329 ep->state == SCIFEP_DISCONNECTED ||
1330 ep->conn_err)
1331 mask |= POLLOUT;
1332 goto exit;
1333 }
1334 }
1335
1336 /* Endpoint is listening for incoming connection requests */
1337 if (ep->state == SCIFEP_LISTENING) {
1338 _scif_poll_wait(f, &ep->conwq, wait, ep);
1339 if (ep->state == SCIFEP_LISTENING) {
1340 if (ep->conreqcnt)
1341 mask |= POLLIN;
1342 goto exit;
1343 }
1344 }
1345
1346 /* Endpoint is connected or disconnected */
1347 if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED) {
1348 if (poll_requested_events(wait) & POLLIN)
1349 _scif_poll_wait(f, &ep->recvwq, wait, ep);
1350 if (poll_requested_events(wait) & POLLOUT)
1351 _scif_poll_wait(f, &ep->sendwq, wait, ep);
1352 if (ep->state == SCIFEP_CONNECTED ||
1353 ep->state == SCIFEP_DISCONNECTED) {
1354 /* Data can be read without blocking */
1355 if (scif_rb_count(&ep->qp_info.qp->inbound_q, 1))
1356 mask |= POLLIN;
1357 /* Data can be written without blocking */
1358 if (scif_rb_space(&ep->qp_info.qp->outbound_q))
1359 mask |= POLLOUT;
1360 /* Return POLLHUP if endpoint is disconnected */
1361 if (ep->state == SCIFEP_DISCONNECTED)
1362 mask |= POLLHUP;
1363 goto exit;
1364 }
1365 }
1366
1367 /* Return POLLERR if the endpoint is in none of the above states */
1368 mask |= POLLERR;
1369exit:
1370 spin_unlock(&ep->lock);
1371 return mask;
1372}
1373
1374/**
1375 * scif_poll() - Kernel mode SCIF poll
1376 * @ufds: Array of scif_pollepd structures containing the end points
1377 * and events to poll on
1378 * @nfds: Size of the ufds array
1379 * @timeout_msecs: Timeout in msecs; a negative value implies an infinite timeout
1380 *
1381 * The code flow in this function is based on do_poll(..) in select.c
1382 *
1383 * Returns the number of endpoints which have pending events or 0 in
1384 * the event of a timeout. If a signal is used for wake up, -EINTR is
1385 * returned.
1386 */
1387int
1388scif_poll(struct scif_pollepd *ufds, unsigned int nfds, long timeout_msecs)
1389{
1390 struct poll_wqueues table;
1391 poll_table *pt;
1392 int i, mask, count = 0, timed_out = timeout_msecs == 0;
1393 u64 timeout = timeout_msecs < 0 ? MAX_SCHEDULE_TIMEOUT
1394 : msecs_to_jiffies(timeout_msecs);
1395
1396 poll_initwait(&table);
1397 pt = &table.pt;
1398 while (1) {
1399 for (i = 0; i < nfds; i++) {
1400 pt->_key = ufds[i].events | POLLERR | POLLHUP;
1401 mask = __scif_pollfd(ufds[i].epd->anon,
1402 pt, ufds[i].epd);
1403 mask &= ufds[i].events | POLLERR | POLLHUP;
1404 if (mask) {
1405 count++;
1406 pt->_qproc = NULL;
1407 }
1408 ufds[i].revents = mask;
1409 }
1410 pt->_qproc = NULL;
1411 if (!count) {
1412 count = table.error;
1413 if (signal_pending(current))
1414 count = -EINTR;
1415 }
1416 if (count || timed_out)
1417 break;
1418
1419 if (!schedule_timeout_interruptible(timeout))
1420 timed_out = 1;
1421 }
1422 poll_freewait(&table);
1423 return count;
1424}
1425EXPORT_SYMBOL_GPL(scif_poll);
1426
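/*
 * Illustrative sketch, not part of this commit: a kernel-mode caller of
 * the new scif_poll() API waiting up to two seconds for data on a single
 * connected endpoint 'epd'. struct scif_pollepd comes from the scif.h
 * additions in this series; the helper name is hypothetical.
 */
static int wait_for_data(scif_epd_t epd)
{
	struct scif_pollepd pollepd = {
		.epd = epd,
		.events = POLLIN,
	};
	int rc = scif_poll(&pollepd, 1, 2000);

	if (rc < 0)
		return rc;	/* e.g. -EINTR if interrupted by a signal */
	if (!rc)
		return -ETIME;	/* timed out */
	return (pollepd.revents & POLLIN) ? 0 : -EIO;
}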
1250int scif_get_node_ids(u16 *nodes, int len, u16 *self) 1427int scif_get_node_ids(u16 *nodes, int len, u16 *self)
1251{ 1428{
1252 int online = 0; 1429 int online = 0;
@@ -1274,3 +1451,46 @@ int scif_get_node_ids(u16 *nodes, int len, u16 *self)
1274 return online; 1451 return online;
1275} 1452}
1276EXPORT_SYMBOL_GPL(scif_get_node_ids); 1453EXPORT_SYMBOL_GPL(scif_get_node_ids);
1454
1455static int scif_add_client_dev(struct device *dev, struct subsys_interface *si)
1456{
1457 struct scif_client *client =
1458 container_of(si, struct scif_client, si);
1459 struct scif_peer_dev *spdev =
1460 container_of(dev, struct scif_peer_dev, dev);
1461
1462 if (client->probe)
1463 client->probe(spdev);
1464 return 0;
1465}
1466
1467static void scif_remove_client_dev(struct device *dev,
1468 struct subsys_interface *si)
1469{
1470 struct scif_client *client =
1471 container_of(si, struct scif_client, si);
1472 struct scif_peer_dev *spdev =
1473 container_of(dev, struct scif_peer_dev, dev);
1474
1475 if (client->remove)
1476 client->remove(spdev);
1477}
1478
1479void scif_client_unregister(struct scif_client *client)
1480{
1481 subsys_interface_unregister(&client->si);
1482}
1483EXPORT_SYMBOL_GPL(scif_client_unregister);
1484
1485int scif_client_register(struct scif_client *client)
1486{
1487 struct subsys_interface *si = &client->si;
1488
1489 si->name = client->name;
1490 si->subsys = &scif_peer_bus;
1491 si->add_dev = scif_add_client_dev;
1492 si->remove_dev = scif_remove_client_dev;
1493
1494 return subsys_interface_register(&client->si);
1495}
1496EXPORT_SYMBOL_GPL(scif_client_register);
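For context, a minimal SCIF client of the registration API above might look like the sketch below. The client name and callbacks are hypothetical; struct scif_client and struct scif_peer_dev (including its dnode field) come from the scif.h additions in this series.

static void my_probe(struct scif_peer_dev *spdev)
{
	pr_info("SCIF peer node %d came online\n", spdev->dnode);
}

static void my_remove(struct scif_peer_dev *spdev)
{
	pr_info("SCIF peer node %d went offline\n", spdev->dnode);
}

static struct scif_client my_client = {
	.name = "my_scif_client",
	.probe = my_probe,
	.remove = my_remove,
};

/* Module init/exit would call scif_client_register(&my_client) and
 * scif_client_unregister(&my_client) respectively.
 */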
diff --git a/drivers/misc/mic/scif/scif_debugfs.c b/drivers/misc/mic/scif/scif_debugfs.c
index 51f14e2a1196..6884dad97e17 100644
--- a/drivers/misc/mic/scif/scif_debugfs.c
+++ b/drivers/misc/mic/scif/scif_debugfs.c
@@ -62,10 +62,87 @@ static const struct file_operations scif_dev_ops = {
62 .release = scif_dev_test_release 62 .release = scif_dev_test_release
63}; 63};
64 64
65void __init scif_init_debugfs(void) 65static void scif_display_window(struct scif_window *window, struct seq_file *s)
66{
67 int j;
68 struct scatterlist *sg;
69 scif_pinned_pages_t pin = window->pinned_pages;
70
71 seq_printf(s, "window %p type %d temp %d offset 0x%llx ",
72 window, window->type, window->temp, window->offset);
73 seq_printf(s, "nr_pages 0x%llx nr_contig_chunks 0x%x prot %d ",
74 window->nr_pages, window->nr_contig_chunks, window->prot);
75 seq_printf(s, "ref_count %d magic 0x%llx peer_window 0x%llx ",
76 window->ref_count, window->magic, window->peer_window);
77 seq_printf(s, "unreg_state 0x%x va_for_temp 0x%lx\n",
78 window->unreg_state, window->va_for_temp);
79
80 for (j = 0; j < window->nr_contig_chunks; j++)
81 seq_printf(s, "page[%d] dma_addr 0x%llx num_pages 0x%llx\n", j,
82 window->dma_addr[j], window->num_pages[j]);
83
84 if (window->type == SCIF_WINDOW_SELF && pin)
85 for (j = 0; j < window->nr_pages; j++)
86 seq_printf(s, "page[%d] = pinned_pages %p address %p\n",
87 j, pin->pages[j],
88 page_address(pin->pages[j]));
89
90 if (window->st)
91 for_each_sg(window->st->sgl, sg, window->st->nents, j)
92 seq_printf(s, "sg[%d] dma addr 0x%llx length 0x%x\n",
93 j, sg_dma_address(sg), sg_dma_len(sg));
94}
95
96static void scif_display_all_windows(struct list_head *head, struct seq_file *s)
66{ 97{
67 struct dentry *d; 98 struct list_head *item;
99 struct scif_window *window;
68 100
101 list_for_each(item, head) {
102 window = list_entry(item, struct scif_window, list);
103 scif_display_window(window, s);
104 }
105}
106
107static int scif_rma_test(struct seq_file *s, void *unused)
108{
109 struct scif_endpt *ep;
110 struct list_head *pos;
111
112 mutex_lock(&scif_info.connlock);
113 list_for_each(pos, &scif_info.connected) {
114 ep = list_entry(pos, struct scif_endpt, list);
115 seq_printf(s, "ep %p self windows\n", ep);
116 mutex_lock(&ep->rma_info.rma_lock);
117 scif_display_all_windows(&ep->rma_info.reg_list, s);
118 seq_printf(s, "ep %p remote windows\n", ep);
119 scif_display_all_windows(&ep->rma_info.remote_reg_list, s);
120 mutex_unlock(&ep->rma_info.rma_lock);
121 }
122 mutex_unlock(&scif_info.connlock);
123 return 0;
124}
125
126static int scif_rma_test_open(struct inode *inode, struct file *file)
127{
128 return single_open(file, scif_rma_test, inode->i_private);
129}
130
131static int scif_rma_test_release(struct inode *inode, struct file *file)
132{
133 return single_release(inode, file);
134}
135
136static const struct file_operations scif_rma_ops = {
137 .owner = THIS_MODULE,
138 .open = scif_rma_test_open,
139 .read = seq_read,
140 .llseek = seq_lseek,
141 .release = scif_rma_test_release
142};
143
144void __init scif_init_debugfs(void)
145{
69 scif_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL); 146 scif_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
70 if (!scif_dbg) { 147 if (!scif_dbg) {
71 dev_err(scif_info.mdev.this_device, 148 dev_err(scif_info.mdev.this_device,
@@ -73,8 +150,8 @@ void __init scif_init_debugfs(void)
73 return; 150 return;
74 } 151 }
75 152
76 d = debugfs_create_file("scif_dev", 0444, scif_dbg, 153 debugfs_create_file("scif_dev", 0444, scif_dbg, NULL, &scif_dev_ops);
77 NULL, &scif_dev_ops); 154 debugfs_create_file("scif_rma", 0444, scif_dbg, NULL, &scif_rma_ops);
78 debugfs_create_u8("en_msg_log", 0666, scif_dbg, &scif_info.en_msg_log); 155 debugfs_create_u8("en_msg_log", 0666, scif_dbg, &scif_info.en_msg_log);
79 debugfs_create_u8("p2p_enable", 0666, scif_dbg, &scif_info.p2p_enable); 156 debugfs_create_u8("p2p_enable", 0666, scif_dbg, &scif_info.p2p_enable);
80} 157}
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
new file mode 100644
index 000000000000..95a13c629a8e
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -0,0 +1,1979 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include "scif_main.h"
19#include "scif_map.h"
20
21/*
22 * struct scif_dma_comp_cb - SCIF DMA completion callback
23 *
24 * @dma_completion_func: DMA completion callback
25 * @cb_cookie: DMA completion callback cookie
26 * @temp_buf: Temporary buffer
27 * @temp_buf_to_free: Temporary buffer to be freed
28 * @is_cache: Is a kmem_cache allocated buffer
29 * @dst_offset: Destination registration offset
30 * @dst_window: Destination registration window
31 * @len: Length of the temp buffer
32 * @temp_phys: DMA address of the temp buffer
33 * @sdev: The SCIF device
34 * @header_padding: padding for cache line alignment
35 */
36struct scif_dma_comp_cb {
37 void (*dma_completion_func)(void *cookie);
38 void *cb_cookie;
39 u8 *temp_buf;
40 u8 *temp_buf_to_free;
41 bool is_cache;
42 s64 dst_offset;
43 struct scif_window *dst_window;
44 size_t len;
45 dma_addr_t temp_phys;
46 struct scif_dev *sdev;
47 int header_padding;
48};
49
50/**
51 * struct scif_copy_work - Work for DMA copy
52 *
53 * @src_offset: Starting source offset
54 * @dst_offset: Starting destination offset
55 * @src_window: Starting src registered window
56 * @dst_window: Starting dst registered window
57 * @loopback: true if this is a loopback DMA transfer
58 * @len: Length of the transfer
59 * @comp_cb: DMA copy completion callback
60 * @remote_dev: The remote SCIF peer device
61 * @fence_type: polling or interrupt based
62 * @ordered: is this a tail byte ordered DMA transfer
63 */
64struct scif_copy_work {
65 s64 src_offset;
66 s64 dst_offset;
67 struct scif_window *src_window;
68 struct scif_window *dst_window;
69 int loopback;
70 size_t len;
71 struct scif_dma_comp_cb *comp_cb;
72 struct scif_dev *remote_dev;
73 int fence_type;
74 bool ordered;
75};
76
77#ifndef list_entry_next
78#define list_entry_next(pos, member) \
79 list_entry(pos->member.next, typeof(*pos), member)
80#endif
81
82/**
83 * scif_reserve_dma_chan:
84 * @ep: Endpoint Descriptor.
85 *
86 * This routine reserves a DMA channel for a particular
87 * endpoint. All DMA transfers for an endpoint are always
88 * programmed on the same DMA channel.
89 */
90int scif_reserve_dma_chan(struct scif_endpt *ep)
91{
92 int err = 0;
93 struct scif_dev *scifdev;
94 struct scif_hw_dev *sdev;
95 struct dma_chan *chan;
96
97 /* Loopback DMAs are not supported on the management node */
98 if (!scif_info.nodeid && scifdev_self(ep->remote_dev))
99 return 0;
100 if (scif_info.nodeid)
101 scifdev = &scif_dev[0];
102 else
103 scifdev = ep->remote_dev;
104 sdev = scifdev->sdev;
105 if (!sdev->num_dma_ch)
106 return -ENODEV;
107 chan = sdev->dma_ch[scifdev->dma_ch_idx];
108 scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch;
109 mutex_lock(&ep->rma_info.rma_lock);
110 ep->rma_info.dma_chan = chan;
111 mutex_unlock(&ep->rma_info.rma_lock);
112 return err;
113}
114
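/*
 * Note: with the modulo update above, consecutive endpoints on a device
 * with num_dma_ch == 4 are assigned channels 0, 1, 2, 3, 0, ...; pinning
 * each endpoint to a single channel keeps all of its DMA transfers
 * ordered on that channel's hardware queue.
 */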
115#ifdef CONFIG_MMU_NOTIFIER
116/**
117 * __scif_rma_destroy_tcw:
118 *
119 * This routine destroys temporary cached windows
120 */
121static
122void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
123 struct scif_endpt *ep,
124 u64 start, u64 len)
125{
126 struct list_head *item, *tmp;
127 struct scif_window *window;
128 u64 start_va, end_va;
129 u64 end = start + len;
130
131 if (end <= start)
132 return;
133
134 list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
135 window = list_entry(item, struct scif_window, list);
136 ep = (struct scif_endpt *)window->ep;
137 if (!len)
138 break;
139 start_va = window->va_for_temp;
140 end_va = start_va + (window->nr_pages << PAGE_SHIFT);
141 if (start < start_va && end <= start_va)
142 break;
143 if (start >= end_va)
144 continue;
145 __scif_rma_destroy_tcw_helper(window);
146 }
147}
148
149static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
150{
151 struct scif_endpt *ep = mmn->ep;
152
153 spin_lock(&ep->rma_info.tc_lock);
154 __scif_rma_destroy_tcw(mmn, ep, start, len);
155 spin_unlock(&ep->rma_info.tc_lock);
156}
157
158static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
159{
160 struct list_head *item, *tmp;
161 struct scif_mmu_notif *mmn;
162
163 list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
164 mmn = list_entry(item, struct scif_mmu_notif, list);
165 scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
166 }
167}
168
169static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
170{
171 struct list_head *item, *tmp;
172 struct scif_mmu_notif *mmn;
173
174 spin_lock(&ep->rma_info.tc_lock);
175 list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
176 mmn = list_entry(item, struct scif_mmu_notif, list);
177 __scif_rma_destroy_tcw(mmn, ep, 0, ULONG_MAX);
178 }
179 spin_unlock(&ep->rma_info.tc_lock);
180}
181
182static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
183{
184 if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit)
185 return false;
186 if ((atomic_read(&ep->rma_info.tcw_total_pages)
187 + (cur_bytes >> PAGE_SHIFT)) >
188 scif_info.rma_tc_limit) {
189 dev_info(scif_info.mdev.this_device,
190 "%s %d total=%d, current=%zu reached max\n",
191 __func__, __LINE__,
192 atomic_read(&ep->rma_info.tcw_total_pages),
193 (1 + (cur_bytes >> PAGE_SHIFT)));
194 scif_rma_destroy_tcw_invalid();
195 __scif_rma_destroy_tcw_ep(ep);
196 }
197 return true;
198}
199
200static void scif_mmu_notifier_release(struct mmu_notifier *mn,
201 struct mm_struct *mm)
202{
203 struct scif_mmu_notif *mmn;
204
205 mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
206 scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
207 schedule_work(&scif_info.misc_work);
208}
209
210static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
211 struct mm_struct *mm,
212 unsigned long address)
213{
214 struct scif_mmu_notif *mmn;
215
216 mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
217 scif_rma_destroy_tcw(mmn, address, PAGE_SIZE);
218}
219
220static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
221 struct mm_struct *mm,
222 unsigned long start,
223 unsigned long end)
224{
225 struct scif_mmu_notif *mmn;
226
227 mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
228 scif_rma_destroy_tcw(mmn, start, end - start);
229}
230
231static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
232 struct mm_struct *mm,
233 unsigned long start,
234 unsigned long end)
235{
236 /*
237 * Nothing to do here, everything needed was done in
238 * invalidate_range_start.
239 */
240}
241
242static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
243 .release = scif_mmu_notifier_release,
244 .clear_flush_young = NULL,
245 .invalidate_page = scif_mmu_notifier_invalidate_page,
246 .invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
247 .invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
248
249static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep)
250{
251 struct scif_endpt_rma_info *rma = &ep->rma_info;
252 struct scif_mmu_notif *mmn = NULL;
253 struct list_head *item, *tmp;
254
255 mutex_lock(&ep->rma_info.mmn_lock);
256 list_for_each_safe(item, tmp, &rma->mmn_list) {
257 mmn = list_entry(item, struct scif_mmu_notif, list);
258 mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
259 list_del(item);
260 kfree(mmn);
261 }
262 mutex_unlock(&ep->rma_info.mmn_lock);
263}
264
265static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn,
266 struct mm_struct *mm, struct scif_endpt *ep)
267{
268 mmn->ep = ep;
269 mmn->mm = mm;
270 mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops;
271 INIT_LIST_HEAD(&mmn->list);
272 INIT_LIST_HEAD(&mmn->tc_reg_list);
273}
274
275static struct scif_mmu_notif *
276scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
277{
278 struct scif_mmu_notif *mmn;
279 struct list_head *item;
280
281 list_for_each(item, &rma->mmn_list) {
282 mmn = list_entry(item, struct scif_mmu_notif, list);
283 if (mmn->mm == mm)
284 return mmn;
285 }
286 return NULL;
287}
288
289static struct scif_mmu_notif *
290scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
291{
292 struct scif_mmu_notif *mmn
293 = kzalloc(sizeof(*mmn), GFP_KERNEL);
294
295 if (!mmn)
296 return ERR_PTR(-ENOMEM);
297
298 scif_init_mmu_notifier(mmn, current->mm, ep);
299 if (mmu_notifier_register(&mmn->ep_mmu_notifier,
300 current->mm)) {
301 kfree(mmn);
302 return ERR_PTR(-EBUSY);
303 }
304 list_add(&mmn->list, &ep->rma_info.mmn_list);
305 return mmn;
306}
307
308/*
309 * Called from the misc thread to destroy temporary cached windows and
310 * unregister the MMU notifier for the SCIF endpoint.
311 */
312void scif_mmu_notif_handler(struct work_struct *work)
313{
314 struct list_head *pos, *tmpq;
315 struct scif_endpt *ep;
316restart:
317 scif_rma_destroy_tcw_invalid();
318 spin_lock(&scif_info.rmalock);
319 list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) {
320 ep = list_entry(pos, struct scif_endpt, mmu_list);
321 list_del(&ep->mmu_list);
322 spin_unlock(&scif_info.rmalock);
323 scif_rma_destroy_tcw_ep(ep);
324 scif_ep_unregister_mmu_notifier(ep);
325 goto restart;
326 }
327 spin_unlock(&scif_info.rmalock);
328}
329
330static bool scif_is_set_reg_cache(int flags)
331{
332 return !!(flags & SCIF_RMA_USECACHE);
333}
334#else
335static struct scif_mmu_notif *
336scif_find_mmu_notifier(struct mm_struct *mm,
337 struct scif_endpt_rma_info *rma)
338{
339 return NULL;
340}
341
342static struct scif_mmu_notif *
343scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
344{
345 return NULL;
346}
347
348void scif_mmu_notif_handler(struct work_struct *work)
349{
350}
351
352static bool scif_is_set_reg_cache(int flags)
353{
354 return false;
355}
356
357static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
358{
359 return false;
360}
361#endif
362
363/**
364 * scif_register_temp:
365 * @epd: End Point Descriptor.
366 * @addr: virtual address to/from which to copy
367 * @len: length of range to copy
368 * @out_offset: computed offset returned by reference.
369 * @out_window: allocated registered window returned by reference.
370 *
371 * Create a temporary registered window. The peer will not know about this
372 * window. This API is used for scif_vreadfrom()/scif_vwriteto() API's.
373 */
374static int
375scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot,
376 off_t *out_offset, struct scif_window **out_window)
377{
378 struct scif_endpt *ep = (struct scif_endpt *)epd;
379 int err;
380 scif_pinned_pages_t pinned_pages;
381 size_t aligned_len;
382
383 aligned_len = ALIGN(len, PAGE_SIZE);
384
385 err = __scif_pin_pages((void *)(addr & PAGE_MASK),
386 aligned_len, &prot, 0, &pinned_pages);
387 if (err)
388 return err;
389
390 pinned_pages->prot = prot;
391
392 /* Compute the offset for this registration */
393 err = scif_get_window_offset(ep, 0, 0,
394 aligned_len >> PAGE_SHIFT,
395 (s64 *)out_offset);
396 if (err)
397 goto error_unpin;
398
399 /* Allocate and prepare self registration window */
400 *out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT,
401 *out_offset, true);
402 if (!*out_window) {
403 scif_free_window_offset(ep, NULL, *out_offset);
404 err = -ENOMEM;
405 goto error_unpin;
406 }
407
408 (*out_window)->pinned_pages = pinned_pages;
409 (*out_window)->nr_pages = pinned_pages->nr_pages;
410 (*out_window)->prot = pinned_pages->prot;
411
412 (*out_window)->va_for_temp = addr & PAGE_MASK;
413 err = scif_map_window(ep->remote_dev, *out_window);
414 if (err) {
415 /* Something went wrong! Rollback */
416 scif_destroy_window(ep, *out_window);
417 *out_window = NULL;
418 } else {
419 *out_offset |= (addr - (*out_window)->va_for_temp);
420 }
421 return err;
422error_unpin:
423 if (err)
424 dev_err(&ep->remote_dev->sdev->dev,
425 "%s %d err %d\n", __func__, __LINE__, err);
426 scif_unpin_pages(pinned_pages);
427 return err;
428}
429
430#define SCIF_DMA_TO (3 * HZ)
431
432/*
433 * scif_sync_dma - Program a DMA without an interrupt descriptor
434 *
435 * @sdev - The SCIF hardware device instance used
436 * for the DMA transfer.
437 * @chan - DMA channel to be used.
438 * @sync_wait: Wait for DMA to complete?
439 *
440 * Return 0 on success and -errno on error.
441 */
442static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan,
443 bool sync_wait)
444{
445 int err = 0;
446 struct dma_async_tx_descriptor *tx = NULL;
447 enum dma_ctrl_flags flags = DMA_PREP_FENCE;
448 dma_cookie_t cookie;
449 struct dma_device *ddev;
450
451 if (!chan) {
452 err = -EIO;
453 dev_err(&sdev->dev, "%s %d err %d\n",
454 __func__, __LINE__, err);
455 return err;
456 }
457 ddev = chan->device;
458
459 tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
460 if (!tx) {
461 err = -ENOMEM;
462 dev_err(&sdev->dev, "%s %d err %d\n",
463 __func__, __LINE__, err);
464 goto release;
465 }
466 cookie = tx->tx_submit(tx);
467
468 if (dma_submit_error(cookie)) {
469 err = -ENOMEM;
470 dev_err(&sdev->dev, "%s %d err %d\n",
471 __func__, __LINE__, err);
472 goto release;
473 }
474 if (!sync_wait) {
475 dma_async_issue_pending(chan);
476 } else {
477 if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) {
478 err = 0;
479 } else {
480 err = -EIO;
481 dev_err(&sdev->dev, "%s %d err %d\n",
482 __func__, __LINE__, err);
483 }
484 }
485release:
486 return err;
487}
488
489static void scif_dma_callback(void *arg)
490{
491 struct completion *done = (struct completion *)arg;
492
493 complete(done);
494}
495
496#define SCIF_DMA_SYNC_WAIT true
497#define SCIF_DMA_POLL BIT(0)
498#define SCIF_DMA_INTR BIT(1)
499
500/*
501 * scif_async_dma - Program a DMA with an interrupt descriptor
502 *
503 * @sdev - The SCIF hardware device instance used
504 * for the DMA transfer.
505 * @chan - DMA channel to be used.
506 * Return 0 on success and -errno on error.
507 */
508static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan)
509{
510 int err = 0;
511 struct dma_device *ddev;
512 struct dma_async_tx_descriptor *tx = NULL;
513 enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
514 DECLARE_COMPLETION_ONSTACK(done_wait);
515 dma_cookie_t cookie;
516 enum dma_status status;
517
518 if (!chan) {
519 err = -EIO;
520 dev_err(&sdev->dev, "%s %d err %d\n",
521 __func__, __LINE__, err);
522 return err;
523 }
524 ddev = chan->device;
525
526 tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
527 if (!tx) {
528 err = -ENOMEM;
529 dev_err(&sdev->dev, "%s %d err %d\n",
530 __func__, __LINE__, err);
531 goto release;
532 }
533 reinit_completion(&done_wait);
534 tx->callback = scif_dma_callback;
535 tx->callback_param = &done_wait;
536 cookie = tx->tx_submit(tx);
537
538 if (dma_submit_error(cookie)) {
539 err = -ENOMEM;
540 dev_err(&sdev->dev, "%s %d err %d\n",
541 __func__, __LINE__, err);
542 goto release;
543 }
544 dma_async_issue_pending(chan);
545
546 err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO);
547 if (!err) {
548 err = -EIO;
549 dev_err(&sdev->dev, "%s %d err %d\n",
550 __func__, __LINE__, err);
551 goto release;
552 }
553 err = 0;
554 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
555 if (status != DMA_COMPLETE) {
556 err = -EIO;
557 dev_err(&sdev->dev, "%s %d err %d\n",
558 __func__, __LINE__, err);
559 goto release;
560 }
561release:
562 return err;
563}
564
565/*
566 * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular
567 * DMA channel via polling.
568 *
569 * @sdev - The SCIF device
570 * @chan - DMA channel
571 * Return 0 on success and -errno on error.
572 */
573static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan)
574{
575 if (!chan)
576 return -EINVAL;
577 return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT);
578}
579
580/*
581 * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular
582 * DMA channel via interrupt based blocking wait.
583 *
584 * @sdev - The SCIF device
585 * @chan - DMA channel
586 * Return 0 on success and -errno on error.
587 */
588int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan)
589{
590 if (!chan)
591 return -EINVAL;
592 return scif_async_dma(sdev, chan);
593}
594
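/*
 * Illustrative sketch, not part of this commit: the window-destroy paths
 * below quiesce an endpoint's channel with scif_drain_dma_intr() before
 * recycling windows; wrapped as a helper (name hypothetical, 'ep' assumed
 * to be a live endpoint with a reserved channel):
 */
static int quiesce_ep_dma(struct scif_endpt *ep)
{
	if (!ep->rma_info.dma_chan)
		return -EINVAL;
	return scif_drain_dma_intr(ep->remote_dev->sdev,
				   ep->rma_info.dma_chan);
}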
595/**
596 * scif_rma_destroy_windows:
597 *
598 * This routine destroys all windows queued for cleanup
599 */
600void scif_rma_destroy_windows(void)
601{
602 struct list_head *item, *tmp;
603 struct scif_window *window;
604 struct scif_endpt *ep;
605 struct dma_chan *chan;
606
607 might_sleep();
608restart:
609 spin_lock(&scif_info.rmalock);
610 list_for_each_safe(item, tmp, &scif_info.rma) {
611 window = list_entry(item, struct scif_window,
612 list);
613 ep = (struct scif_endpt *)window->ep;
614 chan = ep->rma_info.dma_chan;
615
616 list_del_init(&window->list);
617 spin_unlock(&scif_info.rmalock);
618 if (!chan || !scifdev_alive(ep) ||
619 !scif_drain_dma_intr(ep->remote_dev->sdev,
620 ep->rma_info.dma_chan))
621 /* Remove window from global list */
622 window->unreg_state = OP_COMPLETED;
623 else
624 dev_warn(&ep->remote_dev->sdev->dev,
625 "DMA engine hung?\n");
626 if (window->unreg_state == OP_COMPLETED) {
627 if (window->type == SCIF_WINDOW_SELF)
628 scif_destroy_window(ep, window);
629 else
630 scif_destroy_remote_window(window);
631 atomic_dec(&ep->rma_info.tw_refcount);
632 }
633 goto restart;
634 }
635 spin_unlock(&scif_info.rmalock);
636}
637
638/**
639 * scif_rma_destroy_tcw_invalid:
640 *
641 * This routine destroys temporary cached registered windows
642 * which have been queued for cleanup.
643 */
644void scif_rma_destroy_tcw_invalid(void)
645{
646 struct list_head *item, *tmp;
647 struct scif_window *window;
648 struct scif_endpt *ep;
649 struct dma_chan *chan;
650
651 might_sleep();
652restart:
653 spin_lock(&scif_info.rmalock);
654 list_for_each_safe(item, tmp, &scif_info.rma_tc) {
655 window = list_entry(item, struct scif_window, list);
656 ep = (struct scif_endpt *)window->ep;
657 chan = ep->rma_info.dma_chan;
658 list_del_init(&window->list);
659 spin_unlock(&scif_info.rmalock);
660 mutex_lock(&ep->rma_info.rma_lock);
661 if (!chan || !scifdev_alive(ep) ||
662 !scif_drain_dma_intr(ep->remote_dev->sdev,
663 ep->rma_info.dma_chan)) {
664 atomic_sub(window->nr_pages,
665 &ep->rma_info.tcw_total_pages);
666 scif_destroy_window(ep, window);
667 atomic_dec(&ep->rma_info.tcw_refcount);
668 } else {
669 dev_warn(&ep->remote_dev->sdev->dev,
670 "DMA engine hung?\n");
671 }
672 mutex_unlock(&ep->rma_info.rma_lock);
673 goto restart;
674 }
675 spin_unlock(&scif_info.rmalock);
676}
677
678static inline
679void *_get_local_va(off_t off, struct scif_window *window, size_t len)
680{
681 int page_nr = (off - window->offset) >> PAGE_SHIFT;
682 off_t page_off = off & ~PAGE_MASK;
683 void *va = NULL;
684
685 if (window->type == SCIF_WINDOW_SELF) {
686 struct page **pages = window->pinned_pages->pages;
687
688 va = page_address(pages[page_nr]) + page_off;
689 }
690 return va;
691}
692
693static inline
694void *ioremap_remote(off_t off, struct scif_window *window,
695 size_t len, struct scif_dev *dev,
696 struct scif_window_iter *iter)
697{
698 dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);
699
700 /*
701 * If the DMA address is not card relative then we need the DMA
702 * addresses to be an offset into the bar. The aperture base was already
703 * added so subtract it here since scif_ioremap is going to add it again
704 */
705 if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
706 dev->sdev->aper && !dev->sdev->card_rel_da)
707 phys = phys - dev->sdev->aper->pa;
708 return scif_ioremap(phys, len, dev);
709}
710
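/*
 * Worked example with hypothetical numbers: if the lookup above returns
 * phys == aper->pa + 0x1000 for a peer window and the device does not use
 * card-relative DMA addresses, the subtraction leaves 0x1000, and
 * scif_ioremap() re-adds aper->pa, so the aperture base is applied
 * exactly once.
 */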
711static inline void
712iounmap_remote(void *virt, size_t size, struct scif_copy_work *work)
713{
714 scif_iounmap(virt, size, work->remote_dev);
715}
716
717/*
718 * Takes care of ordering issues caused by:
719 * 1. Hardware: only in the case of a CPU copy from the mgmt node to the
720 * card, because of WC memory.
721 * 2. Software: if memcpy reorders copy instructions for optimization;
722 * this could happen on both the mgmt node and the card.
723 */
724static inline void
725scif_ordered_memcpy_toio(char *dst, const char *src, size_t count)
726{
727 if (!count)
728 return;
729
730 memcpy_toio((void __iomem __force *)dst, src, --count);
731 /* Order the last byte with the previous stores */
732 wmb();
733 *(dst + count) = *(src + count);
734}
735
736static inline void scif_unaligned_cpy_toio(char *dst, const char *src,
737 size_t count, bool ordered)
738{
739 if (ordered)
740 scif_ordered_memcpy_toio(dst, src, count);
741 else
742 memcpy_toio((void __iomem __force *)dst, src, count);
743}
744
745static inline
746void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count)
747{
748 if (!count)
749 return;
750
751 memcpy_fromio(dst, (void __iomem __force *)src, --count);
752 /* Order the last byte with the previous loads */
753 rmb();
754 *(dst + count) = *(src + count);
755}
756
757static inline void scif_unaligned_cpy_fromio(char *dst, const char *src,
758 size_t count, bool ordered)
759{
760 if (ordered)
761 scif_ordered_memcpy_fromio(dst, src, count);
762 else
763 memcpy_fromio(dst, (void __iomem __force *)src, count);
764}
765
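/*
 * Illustrative sketch, not part of this commit, of why the last byte is
 * ordered above: a receiver can poll the final byte of a message and,
 * once it changes, safely read the body copied before the wmb(). The
 * helper name and flag protocol are hypothetical.
 */
static void wait_for_message(const volatile u8 *buf, size_t len, u8 done_val)
{
	while (buf[len - 1] != done_val)
		cpu_relax();
	rmb();	/* read the body only after observing the flag byte */
}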
766#define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0)
767
768/*
769 * scif_off_to_dma_addr:
770 * Obtain the dma_addr given the window and the offset.
771 * @window: Registered window.
772 * @off: Window offset.
773 * @nr_bytes: Return the number of contiguous bytes till next DMA addr index.
774 * @iter: Window iterator caching the dma_addr array index and the
775 * start offset of the chunk found by the lookup.
776 * The nr_bytes value gives the caller an estimate of the maximum
777 * possible DMA transfer, while the iterator enables faster lookups
778 * on the next iteration.
779 */
780dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
781 size_t *nr_bytes, struct scif_window_iter *iter)
782{
783 int i, page_nr;
784 s64 start, end;
785 off_t page_off;
786
787 if (window->nr_pages == window->nr_contig_chunks) {
788 page_nr = (off - window->offset) >> PAGE_SHIFT;
789 page_off = off & ~PAGE_MASK;
790
791 if (nr_bytes)
792 *nr_bytes = PAGE_SIZE - page_off;
793 return window->dma_addr[page_nr] | page_off;
794 }
795 if (iter) {
796 i = iter->index;
797 start = iter->offset;
798 } else {
799 i = 0;
800 start = window->offset;
801 }
802 for (; i < window->nr_contig_chunks; i++) {
803 end = start + (window->num_pages[i] << PAGE_SHIFT);
804 if (off >= start && off < end) {
805 if (iter) {
806 iter->index = i;
807 iter->offset = start;
808 }
809 if (nr_bytes)
810 *nr_bytes = end - off;
811 return (window->dma_addr[i] + (off - start));
812 }
813 start += (window->num_pages[i] << PAGE_SHIFT);
814 }
815 dev_err(scif_info.mdev.this_device,
816 "%s %d BUG. Addr not found? window %p off 0x%llx\n",
817 __func__, __LINE__, window, off);
818 return SCIF_RMA_ERROR_CODE;
819}
820
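/*
 * Illustrative sketch, not part of this commit: walking a registered
 * window in contiguous chunks with the iterator, as the aligned copy
 * loops below do. The helper name is hypothetical and the actual DMA
 * programming is elided.
 */
static void walk_window(struct scif_window *window, size_t len)
{
	struct scif_window_iter iter;
	s64 off = window->offset;
	size_t nr_bytes;

	scif_init_window_iter(window, &iter);
	while (len) {
		dma_addr_t da = scif_off_to_dma_addr(window, off,
						     &nr_bytes, &iter);
		size_t chunk = min(nr_bytes, len);

		pr_debug("DMA chunk at %pad, %zu bytes\n", &da, chunk);
		off += chunk;
		len -= chunk;
	}
}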
821/*
822 * Copy between rma window and temporary buffer
823 */
824static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
825 u8 *temp, size_t rem_len, bool to_temp)
826{
827 void *window_virt;
828 size_t loop_len;
829 int offset_in_page;
830 s64 end_offset;
831
832 offset_in_page = offset & ~PAGE_MASK;
833 loop_len = PAGE_SIZE - offset_in_page;
834
835 if (rem_len < loop_len)
836 loop_len = rem_len;
837
838 window_virt = _get_local_va(offset, window, loop_len);
839 if (!window_virt)
840 return;
841 if (to_temp)
842 memcpy(temp, window_virt, loop_len);
843 else
844 memcpy(window_virt, temp, loop_len);
845
846 offset += loop_len;
847 temp += loop_len;
848 rem_len -= loop_len;
849
850 end_offset = window->offset +
851 (window->nr_pages << PAGE_SHIFT);
852 while (rem_len) {
853 if (offset == end_offset) {
854 window = list_entry_next(window, list);
855 end_offset = window->offset +
856 (window->nr_pages << PAGE_SHIFT);
857 }
858 loop_len = min(PAGE_SIZE, rem_len);
859 window_virt = _get_local_va(offset, window, loop_len);
860 if (!window_virt)
861 return;
862 if (to_temp)
863 memcpy(temp, window_virt, loop_len);
864 else
865 memcpy(window_virt, temp, loop_len);
866 offset += loop_len;
867 temp += loop_len;
868 rem_len -= loop_len;
869 }
870}
871
872/**
873 * scif_rma_completion_cb:
874 * @data: RMA cookie
875 *
876 * RMA interrupt completion callback.
877 */
878static void scif_rma_completion_cb(void *data)
879{
880 struct scif_dma_comp_cb *comp_cb = data;
881
882 /* Free DMA Completion CB. */
883 if (comp_cb->dst_window)
884 scif_rma_local_cpu_copy(comp_cb->dst_offset,
885 comp_cb->dst_window,
886 comp_cb->temp_buf +
887 comp_cb->header_padding,
888 comp_cb->len, false);
889 scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev,
890 SCIF_KMEM_UNALIGNED_BUF_SIZE);
891 if (comp_cb->is_cache)
892 kmem_cache_free(unaligned_cache,
893 comp_cb->temp_buf_to_free);
894 else
895 kfree(comp_cb->temp_buf_to_free);
896}
897
898/* Copies between temporary buffer and offsets provided in work */
899static int
900scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
901 u8 *temp, struct dma_chan *chan,
902 bool src_local)
903{
904 struct scif_dma_comp_cb *comp_cb = work->comp_cb;
905 dma_addr_t window_dma_addr, temp_dma_addr;
906 dma_addr_t temp_phys = comp_cb->temp_phys;
907 size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len;
908 int offset_in_ca, ret = 0;
909 s64 end_offset, offset;
910 struct scif_window *window;
911 void *window_virt_addr;
912 size_t tail_len;
913 struct dma_async_tx_descriptor *tx;
914 struct dma_device *dev = chan->device;
915 dma_cookie_t cookie;
916
917 if (src_local) {
918 offset = work->dst_offset;
919 window = work->dst_window;
920 } else {
921 offset = work->src_offset;
922 window = work->src_window;
923 }
924
925 offset_in_ca = offset & (L1_CACHE_BYTES - 1);
926 if (offset_in_ca) {
927 loop_len = L1_CACHE_BYTES - offset_in_ca;
928 loop_len = min(loop_len, remaining_len);
929 window_virt_addr = ioremap_remote(offset, window,
930 loop_len,
931 work->remote_dev,
932 NULL);
933 if (!window_virt_addr)
934 return -ENOMEM;
935 if (src_local)
936 scif_unaligned_cpy_toio(window_virt_addr, temp,
937 loop_len,
938 work->ordered &&
939 !(remaining_len - loop_len));
940 else
941 scif_unaligned_cpy_fromio(temp, window_virt_addr,
942 loop_len, work->ordered &&
943 !(remaining_len - loop_len));
944 iounmap_remote(window_virt_addr, loop_len, work);
945
946 offset += loop_len;
947 temp += loop_len;
948 temp_phys += loop_len;
949 remaining_len -= loop_len;
950 }
951
952 offset_in_ca = offset & ~PAGE_MASK;
953 end_offset = window->offset +
954 (window->nr_pages << PAGE_SHIFT);
955
956 tail_len = remaining_len & (L1_CACHE_BYTES - 1);
957 remaining_len -= tail_len;
958 while (remaining_len) {
959 if (offset == end_offset) {
960 window = list_entry_next(window, list);
961 end_offset = window->offset +
962 (window->nr_pages << PAGE_SHIFT);
963 }
964 if (scif_is_mgmt_node())
965 temp_dma_addr = temp_phys;
966 else
967 /* Fix if we ever enable IOMMU on the card */
968 temp_dma_addr = (dma_addr_t)virt_to_phys(temp);
969 window_dma_addr = scif_off_to_dma_addr(window, offset,
970 &nr_contig_bytes,
971 NULL);
972 loop_len = min(nr_contig_bytes, remaining_len);
973 if (src_local) {
974 if (work->ordered && !tail_len &&
975 !(remaining_len - loop_len) &&
976 loop_len != L1_CACHE_BYTES) {
977 /*
978 * Break up the last chunk of the transfer into
979 * two steps, if there is no tail, to guarantee
980 * DMA ordering. SCIF_DMA_POLLING inserts
981 * a status update descriptor in step 1 which
982 * acts as a double sided synchronization fence
983 * for the DMA engine to ensure that the last
984 * cache line in step 2 is updated last.
985 */
986 /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
987 tx =
988 dev->device_prep_dma_memcpy(chan,
989 window_dma_addr,
990 temp_dma_addr,
991 loop_len -
992 L1_CACHE_BYTES,
993 DMA_PREP_FENCE);
994 if (!tx) {
995 ret = -ENOMEM;
996 goto err;
997 }
998 cookie = tx->tx_submit(tx);
999 if (dma_submit_error(cookie)) {
1000 ret = -ENOMEM;
1001 goto err;
1002 }
1003 dma_async_issue_pending(chan);
1004 offset += (loop_len - L1_CACHE_BYTES);
1005 temp_dma_addr += (loop_len - L1_CACHE_BYTES);
1006 window_dma_addr += (loop_len - L1_CACHE_BYTES);
1007 remaining_len -= (loop_len - L1_CACHE_BYTES);
1008 loop_len = remaining_len;
1009
1010 /* Step 2) DMA: L1_CACHE_BYTES */
1011 tx =
1012 dev->device_prep_dma_memcpy(chan,
1013 window_dma_addr,
1014 temp_dma_addr,
1015 loop_len, 0);
1016 if (!tx) {
1017 ret = -ENOMEM;
1018 goto err;
1019 }
1020 cookie = tx->tx_submit(tx);
1021 if (dma_submit_error(cookie)) {
1022 ret = -ENOMEM;
1023 goto err;
1024 }
1025 dma_async_issue_pending(chan);
1026 } else {
1027 tx =
1028 dev->device_prep_dma_memcpy(chan,
1029 window_dma_addr,
1030 temp_dma_addr,
1031 loop_len, 0);
1032 if (!tx) {
1033 ret = -ENOMEM;
1034 goto err;
1035 }
1036 cookie = tx->tx_submit(tx);
1037 if (dma_submit_error(cookie)) {
1038 ret = -ENOMEM;
1039 goto err;
1040 }
1041 dma_async_issue_pending(chan);
1042 }
1043 } else {
1044 tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr,
1045 window_dma_addr, loop_len, 0);
1046 if (!tx) {
1047 ret = -ENOMEM;
1048 goto err;
1049 }
1050 cookie = tx->tx_submit(tx);
1051 if (dma_submit_error(cookie)) {
1052 ret = -ENOMEM;
1053 goto err;
1054 }
1055 dma_async_issue_pending(chan);
1056 }
1057 if (ret < 0)
1058 goto err;
1059 offset += loop_len;
1060 temp += loop_len;
1061 temp_phys += loop_len;
1062 remaining_len -= loop_len;
1063 offset_in_ca = 0;
1064 }
1065 if (tail_len) {
1066 if (offset == end_offset) {
1067 window = list_entry_next(window, list);
1068 end_offset = window->offset +
1069 (window->nr_pages << PAGE_SHIFT);
1070 }
1071 window_virt_addr = ioremap_remote(offset, window, tail_len,
1072 work->remote_dev,
1073 NULL);
1074 if (!window_virt_addr)
1075 return -ENOMEM;
1076 /*
1077 * The CPU copy for the tail bytes must be initiated only once
1078 * previous DMA transfers for this endpoint have completed
1079 * to guarantee ordering.
1080 */
1081 if (work->ordered) {
1082 struct scif_dev *rdev = work->remote_dev;
1083
1084 ret = scif_drain_dma_intr(rdev->sdev, chan);
1085 if (ret)
1086 return ret;
1087 }
1088 if (src_local)
1089 scif_unaligned_cpy_toio(window_virt_addr, temp,
1090 tail_len, work->ordered);
1091 else
1092 scif_unaligned_cpy_fromio(temp, window_virt_addr,
1093 tail_len, work->ordered);
1094 iounmap_remote(window_virt_addr, tail_len, work);
1095 }
1096 tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT);
1097 if (!tx) {
1098 ret = -ENOMEM;
1099 return ret;
1100 }
1101 tx->callback = &scif_rma_completion_cb;
1102 tx->callback_param = comp_cb;
1103 cookie = tx->tx_submit(tx);
1104
1105 if (dma_submit_error(cookie)) {
1106 ret = -ENOMEM;
1107 return ret;
1108 }
1109 dma_async_issue_pending(chan);
1110 return 0;
1111err:
1112 dev_err(scif_info.mdev.this_device,
1113 "%s %d Desc Prog Failed ret %d\n",
1114 __func__, __LINE__, ret);
1115 return ret;
1116}
1117
1118/*
1119 * _scif_rma_list_dma_copy_aligned:
1120 *
1121 * Traverse all the windows and perform DMA copy.
1122 */
1123static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
1124 struct dma_chan *chan)
1125{
1126 dma_addr_t src_dma_addr, dst_dma_addr;
1127 size_t loop_len, remaining_len, src_contig_bytes = 0;
1128 size_t dst_contig_bytes = 0;
1129 struct scif_window_iter src_win_iter;
1130 struct scif_window_iter dst_win_iter;
1131 s64 end_src_offset, end_dst_offset;
1132 struct scif_window *src_window = work->src_window;
1133 struct scif_window *dst_window = work->dst_window;
1134 s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1135 int ret = 0;
1136 struct dma_async_tx_descriptor *tx;
1137 struct dma_device *dev = chan->device;
1138 dma_cookie_t cookie;
1139
1140 remaining_len = work->len;
1141
1142 scif_init_window_iter(src_window, &src_win_iter);
1143 scif_init_window_iter(dst_window, &dst_win_iter);
1144 end_src_offset = src_window->offset +
1145 (src_window->nr_pages << PAGE_SHIFT);
1146 end_dst_offset = dst_window->offset +
1147 (dst_window->nr_pages << PAGE_SHIFT);
1148 while (remaining_len) {
1149 if (src_offset == end_src_offset) {
1150 src_window = list_entry_next(src_window, list);
1151 end_src_offset = src_window->offset +
1152 (src_window->nr_pages << PAGE_SHIFT);
1153 scif_init_window_iter(src_window, &src_win_iter);
1154 }
1155 if (dst_offset == end_dst_offset) {
1156 dst_window = list_entry_next(dst_window, list);
1157 end_dst_offset = dst_window->offset +
1158 (dst_window->nr_pages << PAGE_SHIFT);
1159 scif_init_window_iter(dst_window, &dst_win_iter);
1160 }
1161
1162 /* compute dma addresses for transfer */
1163 src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
1164 &src_contig_bytes,
1165 &src_win_iter);
1166 dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
1167 &dst_contig_bytes,
1168 &dst_win_iter);
1169 loop_len = min(src_contig_bytes, dst_contig_bytes);
1170 loop_len = min(loop_len, remaining_len);
1171 if (work->ordered && !(remaining_len - loop_len)) {
1172 /*
1173 * Break up the last chunk of the transfer into two
1174 * steps to ensure that the last byte in step 2 is
1175 * updated last.
1176 */
1177 /* Step 1) DMA: Body Length - 1 */
1178 tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1179 src_dma_addr,
1180 loop_len - 1,
1181 DMA_PREP_FENCE);
1182 if (!tx) {
1183 ret = -ENOMEM;
1184 goto err;
1185 }
1186 cookie = tx->tx_submit(tx);
1187 if (dma_submit_error(cookie)) {
1188 ret = -ENOMEM;
1189 goto err;
1190 }
1191 src_offset += (loop_len - 1);
1192 dst_offset += (loop_len - 1);
1193 src_dma_addr += (loop_len - 1);
1194 dst_dma_addr += (loop_len - 1);
1195 remaining_len -= (loop_len - 1);
1196 loop_len = remaining_len;
1197
1198 /* Step 2) DMA: 1 byte */
1199 tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1200 src_dma_addr, loop_len, 0);
1201 if (!tx) {
1202 ret = -ENOMEM;
1203 goto err;
1204 }
1205 cookie = tx->tx_submit(tx);
1206 if (dma_submit_error(cookie)) {
1207 ret = -ENOMEM;
1208 goto err;
1209 }
1210 dma_async_issue_pending(chan);
1211 } else {
1212 tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1213 src_dma_addr, loop_len, 0);
1214 if (!tx) {
1215 ret = -ENOMEM;
1216 goto err;
1217 }
1218 cookie = tx->tx_submit(tx);
1219 if (dma_submit_error(cookie)) {
1220 ret = -ENOMEM;
1221 goto err;
1222 }
1223 }
1224 src_offset += loop_len;
1225 dst_offset += loop_len;
1226 remaining_len -= loop_len;
1227 }
1228 return ret;
1229err:
1230 dev_err(scif_info.mdev.this_device,
1231 "%s %d Desc Prog Failed ret %d\n",
1232 __func__, __LINE__, ret);
1233 return ret;
1234}
1235
1236/*
1237 * scif_rma_list_dma_copy_aligned:
1238 *
1239 * Traverse all the windows and perform DMA copy.
1240 */
1241static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
1242 struct dma_chan *chan)
1243{
1244 dma_addr_t src_dma_addr, dst_dma_addr;
1245 size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0;
1246 size_t dst_contig_bytes = 0;
1247 int src_cache_off;
1248 s64 end_src_offset, end_dst_offset;
1249 struct scif_window_iter src_win_iter;
1250 struct scif_window_iter dst_win_iter;
1251 void *src_virt, *dst_virt;
1252 struct scif_window *src_window = work->src_window;
1253 struct scif_window *dst_window = work->dst_window;
1254 s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1255 int ret = 0;
1256 struct dma_async_tx_descriptor *tx;
1257 struct dma_device *dev = chan->device;
1258 dma_cookie_t cookie;
1259
1260 remaining_len = work->len;
1261 scif_init_window_iter(src_window, &src_win_iter);
1262 scif_init_window_iter(dst_window, &dst_win_iter);
1263
1264 src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
1265 if (src_cache_off != 0) {
1266 /* Head */
1267 loop_len = L1_CACHE_BYTES - src_cache_off;
1268 loop_len = min(loop_len, remaining_len);
1269 src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
1270 dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
1271 if (src_window->type == SCIF_WINDOW_SELF)
1272 src_virt = _get_local_va(src_offset, src_window,
1273 loop_len);
1274 else
1275 src_virt = ioremap_remote(src_offset, src_window,
1276 loop_len,
1277 work->remote_dev, NULL);
1278 if (!src_virt)
1279 return -ENOMEM;
1280 if (dst_window->type == SCIF_WINDOW_SELF)
1281 dst_virt = _get_local_va(dst_offset, dst_window,
1282 loop_len);
1283 else
1284 dst_virt = ioremap_remote(dst_offset, dst_window,
1285 loop_len,
1286 work->remote_dev, NULL);
1287 if (!dst_virt) {
1288 if (src_window->type != SCIF_WINDOW_SELF)
1289 iounmap_remote(src_virt, loop_len, work);
1290 return -ENOMEM;
1291 }
1292 if (src_window->type == SCIF_WINDOW_SELF)
1293 scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
1294 remaining_len == loop_len ?
1295 work->ordered : false);
1296 else
1297 scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len,
1298 remaining_len == loop_len ?
1299 work->ordered : false);
1300 if (src_window->type != SCIF_WINDOW_SELF)
1301 iounmap_remote(src_virt, loop_len, work);
1302 if (dst_window->type != SCIF_WINDOW_SELF)
1303 iounmap_remote(dst_virt, loop_len, work);
1304 src_offset += loop_len;
1305 dst_offset += loop_len;
1306 remaining_len -= loop_len;
1307 }
1308
1309 end_src_offset = src_window->offset +
1310 (src_window->nr_pages << PAGE_SHIFT);
1311 end_dst_offset = dst_window->offset +
1312 (dst_window->nr_pages << PAGE_SHIFT);
1313 tail_len = remaining_len & (L1_CACHE_BYTES - 1);
1314 remaining_len -= tail_len;
1315 while (remaining_len) {
1316 if (src_offset == end_src_offset) {
1317 src_window = list_entry_next(src_window, list);
1318 end_src_offset = src_window->offset +
1319 (src_window->nr_pages << PAGE_SHIFT);
1320 scif_init_window_iter(src_window, &src_win_iter);
1321 }
1322 if (dst_offset == end_dst_offset) {
1323 dst_window = list_entry_next(dst_window, list);
1324 end_dst_offset = dst_window->offset +
1325 (dst_window->nr_pages << PAGE_SHIFT);
1326 scif_init_window_iter(dst_window, &dst_win_iter);
1327 }
1328
1329 /* compute dma addresses for transfer */
1330 src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
1331 &src_contig_bytes,
1332 &src_win_iter);
1333 dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
1334 &dst_contig_bytes,
1335 &dst_win_iter);
1336 loop_len = min(src_contig_bytes, dst_contig_bytes);
1337 loop_len = min(loop_len, remaining_len);
1338 if (work->ordered && !tail_len &&
1339 !(remaining_len - loop_len)) {
1340 /*
1341 * Break up the last chunk of the transfer into two
 1342			 * steps, if there is no tail, to guarantee DMA ordering.
 1343			 * Passing SCIF_DMA_POLLING inserts a status update
 1344			 * descriptor in step 1 which acts as a double-sided
1345 * synchronization fence for the DMA engine to ensure
1346 * that the last cache line in step 2 is updated last.
1347 */
1348 /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
1349 tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1350 src_dma_addr,
1351 loop_len -
1352 L1_CACHE_BYTES,
1353 DMA_PREP_FENCE);
1354 if (!tx) {
1355 ret = -ENOMEM;
1356 goto err;
1357 }
1358 cookie = tx->tx_submit(tx);
1359 if (dma_submit_error(cookie)) {
1360 ret = -ENOMEM;
1361 goto err;
1362 }
1363 dma_async_issue_pending(chan);
1364 src_offset += (loop_len - L1_CACHE_BYTES);
1365 dst_offset += (loop_len - L1_CACHE_BYTES);
1366 src_dma_addr += (loop_len - L1_CACHE_BYTES);
1367 dst_dma_addr += (loop_len - L1_CACHE_BYTES);
1368 remaining_len -= (loop_len - L1_CACHE_BYTES);
1369 loop_len = remaining_len;
1370
1371 /* Step 2) DMA: L1_CACHE_BYTES */
1372 tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1373 src_dma_addr,
1374 loop_len, 0);
1375 if (!tx) {
1376 ret = -ENOMEM;
1377 goto err;
1378 }
1379 cookie = tx->tx_submit(tx);
1380 if (dma_submit_error(cookie)) {
1381 ret = -ENOMEM;
1382 goto err;
1383 }
1384 dma_async_issue_pending(chan);
1385 } else {
1386 tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1387 src_dma_addr,
1388 loop_len, 0);
1389 if (!tx) {
1390 ret = -ENOMEM;
1391 goto err;
1392 }
1393 cookie = tx->tx_submit(tx);
1394 if (dma_submit_error(cookie)) {
1395 ret = -ENOMEM;
1396 goto err;
1397 }
1398 dma_async_issue_pending(chan);
1399 }
1400 src_offset += loop_len;
1401 dst_offset += loop_len;
1402 remaining_len -= loop_len;
1403 }
1404 remaining_len = tail_len;
1405 if (remaining_len) {
1406 loop_len = remaining_len;
1407 if (src_offset == end_src_offset)
1408 src_window = list_entry_next(src_window, list);
1409 if (dst_offset == end_dst_offset)
1410 dst_window = list_entry_next(dst_window, list);
1411
1412 src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
1413 dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
1414 /*
 1415		 * The CPU copy for the tail bytes must be initiated only after
 1416		 * previous DMA transfers for this endpoint have completed, to
1417 * guarantee ordering.
1418 */
1419 if (work->ordered) {
1420 struct scif_dev *rdev = work->remote_dev;
1421
1422 ret = scif_drain_dma_poll(rdev->sdev, chan);
1423 if (ret)
1424 return ret;
1425 }
1426 if (src_window->type == SCIF_WINDOW_SELF)
1427 src_virt = _get_local_va(src_offset, src_window,
1428 loop_len);
1429 else
1430 src_virt = ioremap_remote(src_offset, src_window,
1431 loop_len,
1432 work->remote_dev, NULL);
1433 if (!src_virt)
1434 return -ENOMEM;
1435
1436 if (dst_window->type == SCIF_WINDOW_SELF)
1437 dst_virt = _get_local_va(dst_offset, dst_window,
1438 loop_len);
1439 else
1440 dst_virt = ioremap_remote(dst_offset, dst_window,
1441 loop_len,
1442 work->remote_dev, NULL);
1443 if (!dst_virt) {
1444 if (src_window->type != SCIF_WINDOW_SELF)
1445 iounmap_remote(src_virt, loop_len, work);
1446 return -ENOMEM;
1447 }
1448
1449 if (src_window->type == SCIF_WINDOW_SELF)
1450 scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
1451 work->ordered);
1452 else
1453 scif_unaligned_cpy_fromio(dst_virt, src_virt,
1454 loop_len, work->ordered);
1455 if (src_window->type != SCIF_WINDOW_SELF)
1456 iounmap_remote(src_virt, loop_len, work);
1457
1458 if (dst_window->type != SCIF_WINDOW_SELF)
1459 iounmap_remote(dst_virt, loop_len, work);
1460 remaining_len -= loop_len;
1461 }
1462 return ret;
1463err:
1464 dev_err(scif_info.mdev.this_device,
1465 "%s %d Desc Prog Failed ret %d\n",
1466 __func__, __LINE__, ret);
1467 return ret;
1468}
1469
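
scif_rma_list_dma_copy_aligned() above is organized around a three-way split: a CPU-copied head up to the next L1 cache-line boundary, a DMA body that is a whole number of cache lines, and a CPU-copied tail. A small sketch of just that split, using the same arithmetic as the function; the helper is hypothetical.

#include <linux/cache.h>
#include <linux/kernel.h>

/*
 * Sketch (hypothetical helper): split a transfer starting at 'offset'
 * into the three segments the function above handles separately.
 */
static void split_by_cacheline(s64 offset, size_t len,
			       size_t *head, size_t *body, size_t *tail)
{
	size_t cache_off = offset & (L1_CACHE_BYTES - 1);

	/* Head: CPU-copied bytes up to the next cache-line boundary */
	*head = cache_off ?
		min_t(size_t, L1_CACHE_BYTES - cache_off, len) : 0;
	len -= *head;
	/* Tail: CPU-copied sub-cache-line remainder */
	*tail = len & (L1_CACHE_BYTES - 1);
	/* Body: a whole number of cache lines, handed to the DMA engine */
	*body = len - *tail;
}
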
1470/*
1471 * scif_rma_list_cpu_copy:
1472 *
1473 * Traverse all the windows and perform CPU copy.
1474 */
1475static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
1476{
1477 void *src_virt, *dst_virt;
1478 size_t loop_len, remaining_len;
1479 int src_page_off, dst_page_off;
1480 s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1481 struct scif_window *src_window = work->src_window;
1482 struct scif_window *dst_window = work->dst_window;
1483 s64 end_src_offset, end_dst_offset;
1484 int ret = 0;
1485 struct scif_window_iter src_win_iter;
1486 struct scif_window_iter dst_win_iter;
1487
1488 remaining_len = work->len;
1489
1490 scif_init_window_iter(src_window, &src_win_iter);
1491 scif_init_window_iter(dst_window, &dst_win_iter);
1492 while (remaining_len) {
1493 src_page_off = src_offset & ~PAGE_MASK;
1494 dst_page_off = dst_offset & ~PAGE_MASK;
1495 loop_len = min(PAGE_SIZE -
1496 max(src_page_off, dst_page_off),
1497 remaining_len);
1498
1499 if (src_window->type == SCIF_WINDOW_SELF)
1500 src_virt = _get_local_va(src_offset, src_window,
1501 loop_len);
1502 else
1503 src_virt = ioremap_remote(src_offset, src_window,
1504 loop_len,
1505 work->remote_dev,
1506 &src_win_iter);
1507 if (!src_virt) {
1508 ret = -ENOMEM;
1509 goto error;
1510 }
1511
1512 if (dst_window->type == SCIF_WINDOW_SELF)
1513 dst_virt = _get_local_va(dst_offset, dst_window,
1514 loop_len);
1515 else
1516 dst_virt = ioremap_remote(dst_offset, dst_window,
1517 loop_len,
1518 work->remote_dev,
1519 &dst_win_iter);
1520 if (!dst_virt) {
1521 if (src_window->type == SCIF_WINDOW_PEER)
1522 iounmap_remote(src_virt, loop_len, work);
1523 ret = -ENOMEM;
1524 goto error;
1525 }
1526
1527 if (work->loopback) {
1528 memcpy(dst_virt, src_virt, loop_len);
1529 } else {
1530 if (src_window->type == SCIF_WINDOW_SELF)
1531 memcpy_toio((void __iomem __force *)dst_virt,
1532 src_virt, loop_len);
1533 else
1534 memcpy_fromio(dst_virt,
1535 (void __iomem __force *)src_virt,
1536 loop_len);
1537 }
1538 if (src_window->type == SCIF_WINDOW_PEER)
1539 iounmap_remote(src_virt, loop_len, work);
1540
1541 if (dst_window->type == SCIF_WINDOW_PEER)
1542 iounmap_remote(dst_virt, loop_len, work);
1543
1544 src_offset += loop_len;
1545 dst_offset += loop_len;
1546 remaining_len -= loop_len;
1547 if (remaining_len) {
1548 end_src_offset = src_window->offset +
1549 (src_window->nr_pages << PAGE_SHIFT);
1550 end_dst_offset = dst_window->offset +
1551 (dst_window->nr_pages << PAGE_SHIFT);
1552 if (src_offset == end_src_offset) {
1553 src_window = list_entry_next(src_window, list);
1554 scif_init_window_iter(src_window,
1555 &src_win_iter);
1556 }
1557 if (dst_offset == end_dst_offset) {
1558 dst_window = list_entry_next(dst_window, list);
1559 scif_init_window_iter(dst_window,
1560 &dst_win_iter);
1561 }
1562 }
1563 }
1564error:
1565 return ret;
1566}
1567
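
Each pass of the CPU-copy loop above can cover at most the distance to the nearer of the two page boundaries, because either the source or the destination mapping may end there. The per-iteration length computation, extracted into a hypothetical helper:

#include <linux/kernel.h>
#include <linux/mm.h>

/*
 * Sketch (hypothetical helper): the largest chunk that stays within
 * both the source page and the destination page.
 */
static size_t cpu_copy_chunk_len(s64 src_off, s64 dst_off, size_t remaining)
{
	int src_page_off = src_off & ~PAGE_MASK;
	int dst_page_off = dst_off & ~PAGE_MASK;

	return min_t(size_t, PAGE_SIZE - max(src_page_off, dst_page_off),
		     remaining);
}
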
1568static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
1569 struct scif_copy_work *work,
1570 struct dma_chan *chan, off_t loffset)
1571{
1572 int src_cache_off, dst_cache_off;
1573 s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1574 u8 *temp = NULL;
1575 bool src_local = true, dst_local = false;
1576 struct scif_dma_comp_cb *comp_cb;
1577 dma_addr_t src_dma_addr, dst_dma_addr;
1578 int err;
1579
1580 if (is_dma_copy_aligned(chan->device, 1, 1, 1))
1581 return _scif_rma_list_dma_copy_aligned(work, chan);
1582
1583 src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
1584 dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1);
1585
1586 if (dst_cache_off == src_cache_off)
1587 return scif_rma_list_dma_copy_aligned(work, chan);
1588
1589 if (work->loopback)
1590 return scif_rma_list_cpu_copy(work);
1591 src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset);
1592 dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset);
1593 src_local = work->src_window->type == SCIF_WINDOW_SELF;
1594 dst_local = work->dst_window->type == SCIF_WINDOW_SELF;
1595
1596 dst_local = dst_local;
1597 /* Allocate dma_completion cb */
1598 comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
1599 if (!comp_cb)
1600 goto error;
1601
1602 work->comp_cb = comp_cb;
1603 comp_cb->cb_cookie = comp_cb;
1604 comp_cb->dma_completion_func = &scif_rma_completion_cb;
1605
1606 if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) {
1607 comp_cb->is_cache = false;
1608 /* Allocate padding bytes to align to a cache line */
1609 temp = kmalloc(work->len + (L1_CACHE_BYTES << 1),
1610 GFP_KERNEL);
1611 if (!temp)
1612 goto free_comp_cb;
1613 comp_cb->temp_buf_to_free = temp;
1614 /* kmalloc(..) does not guarantee cache line alignment */
1615 if (!IS_ALIGNED((u64)temp, L1_CACHE_BYTES))
1616 temp = PTR_ALIGN(temp, L1_CACHE_BYTES);
1617 } else {
1618 comp_cb->is_cache = true;
1619 temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL);
1620 if (!temp)
1621 goto free_comp_cb;
1622 comp_cb->temp_buf_to_free = temp;
1623 }
1624
1625 if (src_local) {
1626 temp += dst_cache_off;
1627 scif_rma_local_cpu_copy(work->src_offset, work->src_window,
1628 temp, work->len, true);
1629 } else {
1630 comp_cb->dst_window = work->dst_window;
1631 comp_cb->dst_offset = work->dst_offset;
1632 work->src_offset = work->src_offset - src_cache_off;
1633 comp_cb->len = work->len;
1634 work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES);
1635 comp_cb->header_padding = src_cache_off;
1636 }
1637 comp_cb->temp_buf = temp;
1638
1639 err = scif_map_single(&comp_cb->temp_phys, temp,
1640 work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE);
1641 if (err)
1642 goto free_temp_buf;
1643 comp_cb->sdev = work->remote_dev;
1644 if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0)
1645 goto free_temp_buf;
1646 if (!src_local)
1647 work->fence_type = SCIF_DMA_INTR;
1648 return 0;
1649free_temp_buf:
1650 if (comp_cb->is_cache)
1651 kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free);
1652 else
1653 kfree(comp_cb->temp_buf_to_free);
1654free_comp_cb:
1655 kfree(comp_cb);
1656error:
1657 return -ENOMEM;
1658}
1659
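
The wrapper above bounces alignment-mismatched transfers through a temporary buffer that is over-allocated by two cache lines: one to round the kmalloc() pointer up to a cache-line boundary and one to absorb the cache offset applied to the payload. A sketch of only the allocation trick; the helper is hypothetical, and the driver additionally keeps a kmem cache for the larger buffers.

#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/*
 * Sketch (hypothetical helper): cache-line-aligned bounce buffer for
 * 'len' payload bytes.  '*to_free' keeps the raw pointer for kfree().
 */
static void *alloc_aligned_bounce(size_t len, void **to_free)
{
	u8 *buf = kmalloc(len + (L1_CACHE_BYTES << 1), GFP_KERNEL);

	if (!buf)
		return NULL;
	*to_free = buf;
	/* kmalloc() does not guarantee cache-line alignment */
	return PTR_ALIGN(buf, L1_CACHE_BYTES);
}
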
1660/**
1661 * scif_rma_copy:
1662 * @epd: end point descriptor.
1663 * @loffset: offset in local registered address space to/from which to copy
1664 * @addr: user virtual address to/from which to copy
1665 * @len: length of range to copy
1666 * @roffset: offset in remote registered address space to/from which to copy
1667 * @flags: flags
1668 * @dir: LOCAL->REMOTE or vice versa.
1669 * @last_chunk: true if this is the last chunk of a larger transfer
1670 *
1671 * Validate parameters, check if src/dst registered ranges requested for copy
1672 * are valid and initiate either CPU or DMA copy.
1673 */
1674static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
1675 size_t len, off_t roffset, int flags,
1676 enum scif_rma_dir dir, bool last_chunk)
1677{
1678 struct scif_endpt *ep = (struct scif_endpt *)epd;
1679 struct scif_rma_req remote_req;
1680 struct scif_rma_req req;
1681 struct scif_window *local_window = NULL;
1682 struct scif_window *remote_window = NULL;
1683 struct scif_copy_work copy_work;
1684 bool loopback;
1685 int err = 0;
1686 struct dma_chan *chan;
1687 struct scif_mmu_notif *mmn = NULL;
1688 bool cache = false;
1689 struct device *spdev;
1690
1691 err = scif_verify_epd(ep);
1692 if (err)
1693 return err;
1694
1695 if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE |
1696 SCIF_RMA_SYNC | SCIF_RMA_ORDERED)))
1697 return -EINVAL;
1698
1699 loopback = scifdev_self(ep->remote_dev) ? true : false;
1700 copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ?
1701 SCIF_DMA_POLL : 0;
1702 copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk);
1703
1704 /* Use CPU for Mgmt node <-> Mgmt node copies */
1705 if (loopback && scif_is_mgmt_node()) {
1706 flags |= SCIF_RMA_USECPU;
1707 copy_work.fence_type = 0x0;
1708 }
1709
1710 cache = scif_is_set_reg_cache(flags);
1711
1712 remote_req.out_window = &remote_window;
1713 remote_req.offset = roffset;
1714 remote_req.nr_bytes = len;
1715 /*
1716 * If transfer is from local to remote then the remote window
1717 * must be writeable and vice versa.
1718 */
1719 remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ;
1720 remote_req.type = SCIF_WINDOW_PARTIAL;
1721 remote_req.head = &ep->rma_info.remote_reg_list;
1722
1723 spdev = scif_get_peer_dev(ep->remote_dev);
1724 if (IS_ERR(spdev)) {
1725 err = PTR_ERR(spdev);
1726 return err;
1727 }
1728
1729 if (addr && cache) {
1730 mutex_lock(&ep->rma_info.mmn_lock);
1731 mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
1732 if (!mmn)
1733 scif_add_mmu_notifier(current->mm, ep);
1734 mutex_unlock(&ep->rma_info.mmn_lock);
1735 if (IS_ERR(mmn)) {
1736 scif_put_peer_dev(spdev);
1737 return PTR_ERR(mmn);
1738 }
1739 cache = cache && !scif_rma_tc_can_cache(ep, len);
1740 }
1741 mutex_lock(&ep->rma_info.rma_lock);
1742 if (addr) {
1743 req.out_window = &local_window;
1744 req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK),
1745 PAGE_SIZE);
1746 req.va_for_temp = addr & PAGE_MASK;
1747 req.prot = (dir == SCIF_LOCAL_TO_REMOTE ?
1748 VM_READ : VM_WRITE | VM_READ);
1749 /* Does a valid local window exist? */
1750 if (mmn) {
1751 spin_lock(&ep->rma_info.tc_lock);
1752 req.head = &mmn->tc_reg_list;
1753 err = scif_query_tcw(ep, &req);
1754 spin_unlock(&ep->rma_info.tc_lock);
1755 }
1756 if (!mmn || err) {
1757 err = scif_register_temp(epd, req.va_for_temp,
1758 req.nr_bytes, req.prot,
1759 &loffset, &local_window);
1760 if (err) {
1761 mutex_unlock(&ep->rma_info.rma_lock);
1762 goto error;
1763 }
1764 if (!cache)
1765 goto skip_cache;
1766 atomic_inc(&ep->rma_info.tcw_refcount);
1767 atomic_add_return(local_window->nr_pages,
1768 &ep->rma_info.tcw_total_pages);
1769 if (mmn) {
1770 spin_lock(&ep->rma_info.tc_lock);
1771 scif_insert_tcw(local_window,
1772 &mmn->tc_reg_list);
1773 spin_unlock(&ep->rma_info.tc_lock);
1774 }
1775 }
1776skip_cache:
1777 loffset = local_window->offset +
1778 (addr - local_window->va_for_temp);
1779 } else {
1780 req.out_window = &local_window;
1781 req.offset = loffset;
1782 /*
1783 * If transfer is from local to remote then the self window
1784 * must be readable and vice versa.
1785 */
1786 req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE;
1787 req.nr_bytes = len;
1788 req.type = SCIF_WINDOW_PARTIAL;
1789 req.head = &ep->rma_info.reg_list;
1790 /* Does a valid local window exist? */
1791 err = scif_query_window(&req);
1792 if (err) {
1793 mutex_unlock(&ep->rma_info.rma_lock);
1794 goto error;
1795 }
1796 }
1797
1798 /* Does a valid remote window exist? */
1799 err = scif_query_window(&remote_req);
1800 if (err) {
1801 mutex_unlock(&ep->rma_info.rma_lock);
1802 goto error;
1803 }
1804
1805 /*
1806 * Prepare copy_work for submitting work to the DMA kernel thread
1807 * or CPU copy routine.
1808 */
1809 copy_work.len = len;
1810 copy_work.loopback = loopback;
1811 copy_work.remote_dev = ep->remote_dev;
1812 if (dir == SCIF_LOCAL_TO_REMOTE) {
1813 copy_work.src_offset = loffset;
1814 copy_work.src_window = local_window;
1815 copy_work.dst_offset = roffset;
1816 copy_work.dst_window = remote_window;
1817 } else {
1818 copy_work.src_offset = roffset;
1819 copy_work.src_window = remote_window;
1820 copy_work.dst_offset = loffset;
1821 copy_work.dst_window = local_window;
1822 }
1823
1824 if (flags & SCIF_RMA_USECPU) {
1825 scif_rma_list_cpu_copy(&copy_work);
1826 } else {
1827 chan = ep->rma_info.dma_chan;
1828 err = scif_rma_list_dma_copy_wrapper(epd, &copy_work,
1829 chan, loffset);
1830 }
1831 if (addr && !cache)
1832 atomic_inc(&ep->rma_info.tw_refcount);
1833
1834 mutex_unlock(&ep->rma_info.rma_lock);
1835
1836 if (last_chunk) {
1837 struct scif_dev *rdev = ep->remote_dev;
1838
1839 if (copy_work.fence_type == SCIF_DMA_POLL)
1840 err = scif_drain_dma_poll(rdev->sdev,
1841 ep->rma_info.dma_chan);
1842 else if (copy_work.fence_type == SCIF_DMA_INTR)
1843 err = scif_drain_dma_intr(rdev->sdev,
1844 ep->rma_info.dma_chan);
1845 }
1846
1847 if (addr && !cache)
1848 scif_queue_for_cleanup(local_window, &scif_info.rma);
1849 scif_put_peer_dev(spdev);
1850 return err;
1851error:
1852 if (err) {
1853 if (addr && local_window && !cache)
1854 scif_destroy_window(ep, local_window);
1855 dev_err(scif_info.mdev.this_device,
1856 "%s %d err %d len 0x%lx\n",
1857 __func__, __LINE__, err, len);
1858 }
1859 scif_put_peer_dev(spdev);
1860 return err;
1861}
1862
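
After the last chunk, scif_rma_copy() above drains the channel with the primitive that matches how the work was fenced: polled completion for SCIF_RMA_SYNC transfers, an interrupt for transfers that armed a bounce-buffer callback. A condensed sketch of that dispatch, assuming the scif_drain_dma_poll()/scif_drain_dma_intr() helpers and SCIF_DMA_* constants used above:

/*
 * Sketch: pick the drain primitive matching the fence type chosen when
 * the work was queued.
 */
static int drain_for_fence_sketch(struct scif_hw_dev *sdev,
				  struct dma_chan *chan, int fence_type)
{
	if (fence_type == SCIF_DMA_POLL)
		return scif_drain_dma_poll(sdev, chan);
	if (fence_type == SCIF_DMA_INTR)
		return scif_drain_dma_intr(sdev, chan);
	return 0;
}
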
1863int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len,
1864 off_t roffset, int flags)
1865{
1866 int err;
1867
1868 dev_dbg(scif_info.mdev.this_device,
 1869		"SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n",
1870 epd, loffset, len, roffset, flags);
1871 if (scif_unaligned(loffset, roffset)) {
1872 while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
1873 err = scif_rma_copy(epd, loffset, 0x0,
1874 SCIF_MAX_UNALIGNED_BUF_SIZE,
1875 roffset, flags,
1876 SCIF_REMOTE_TO_LOCAL, false);
1877 if (err)
1878 goto readfrom_err;
1879 loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1880 roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1881 len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
1882 }
1883 }
1884 err = scif_rma_copy(epd, loffset, 0x0, len,
1885 roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
1886readfrom_err:
1887 return err;
1888}
1889EXPORT_SYMBOL_GPL(scif_readfrom);
1890
1891int scif_writeto(scif_epd_t epd, off_t loffset, size_t len,
1892 off_t roffset, int flags)
1893{
1894 int err;
1895
1896 dev_dbg(scif_info.mdev.this_device,
1897 "SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n",
1898 epd, loffset, len, roffset, flags);
1899 if (scif_unaligned(loffset, roffset)) {
1900 while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
1901 err = scif_rma_copy(epd, loffset, 0x0,
1902 SCIF_MAX_UNALIGNED_BUF_SIZE,
1903 roffset, flags,
1904 SCIF_LOCAL_TO_REMOTE, false);
1905 if (err)
1906 goto writeto_err;
1907 loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1908 roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1909 len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
1910 }
1911 }
1912 err = scif_rma_copy(epd, loffset, 0x0, len,
1913 roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
1914writeto_err:
1915 return err;
1916}
1917EXPORT_SYMBOL_GPL(scif_writeto);
1918
1919int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len,
1920 off_t roffset, int flags)
1921{
1922 int err;
1923
1924 dev_dbg(scif_info.mdev.this_device,
1925 "SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
1926 epd, addr, len, roffset, flags);
1927 if (scif_unaligned((off_t __force)addr, roffset)) {
1928 if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
1929 flags &= ~SCIF_RMA_USECACHE;
1930
1931 while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
1932 err = scif_rma_copy(epd, 0, (u64)addr,
1933 SCIF_MAX_UNALIGNED_BUF_SIZE,
1934 roffset, flags,
1935 SCIF_REMOTE_TO_LOCAL, false);
1936 if (err)
1937 goto vreadfrom_err;
1938 addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
1939 roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1940 len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
1941 }
1942 }
1943 err = scif_rma_copy(epd, 0, (u64)addr, len,
1944 roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
1945vreadfrom_err:
1946 return err;
1947}
1948EXPORT_SYMBOL_GPL(scif_vreadfrom);
1949
1950int scif_vwriteto(scif_epd_t epd, void *addr, size_t len,
1951 off_t roffset, int flags)
1952{
1953 int err;
1954
1955 dev_dbg(scif_info.mdev.this_device,
1956 "SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
1957 epd, addr, len, roffset, flags);
1958 if (scif_unaligned((off_t __force)addr, roffset)) {
1959 if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
1960 flags &= ~SCIF_RMA_USECACHE;
1961
1962 while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
1963 err = scif_rma_copy(epd, 0, (u64)addr,
1964 SCIF_MAX_UNALIGNED_BUF_SIZE,
1965 roffset, flags,
1966 SCIF_LOCAL_TO_REMOTE, false);
1967 if (err)
1968 goto vwriteto_err;
1969 addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
1970 roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
1971 len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
1972 }
1973 }
1974 err = scif_rma_copy(epd, 0, (u64)addr, len,
1975 roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
1976vwriteto_err:
1977 return err;
1978}
1979EXPORT_SYMBOL_GPL(scif_vwriteto);
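
All four entry points above share one shape for unaligned transfers: peel off SCIF_MAX_UNALIGNED_BUF_SIZE chunks with last_chunk == false, then issue the remainder with last_chunk == true so that fencing and ordering are applied exactly once. A condensed sketch, with a hypothetical chunk_copy callback standing in for scif_rma_copy():

#include <linux/types.h>

/*
 * Sketch of the loop shared by scif_readfrom/scif_writeto and their
 * vectored variants.
 */
static int chunked_copy_sketch(size_t len, size_t max_chunk,
			       int (*chunk_copy)(size_t len, bool last))
{
	int err;

	while (len > max_chunk) {
		err = chunk_copy(max_chunk, false);	/* no fence yet */
		if (err)
			return err;
		len -= max_chunk;
	}
	/* Only the final chunk honors SCIF_RMA_SYNC/SCIF_RMA_ORDERED */
	return chunk_copy(len, true);
}
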
diff --git a/drivers/misc/mic/scif/scif_epd.c b/drivers/misc/mic/scif/scif_epd.c
index b4bfbb08a8e3..00e5d6d66e7b 100644
--- a/drivers/misc/mic/scif/scif_epd.c
+++ b/drivers/misc/mic/scif/scif_epd.c
@@ -65,14 +65,14 @@ void scif_teardown_ep(void *endpt)
65void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held) 65void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
66{ 66{
67 if (!eplock_held) 67 if (!eplock_held)
68 spin_lock(&scif_info.eplock); 68 mutex_lock(&scif_info.eplock);
69 spin_lock(&ep->lock); 69 spin_lock(&ep->lock);
70 ep->state = SCIFEP_ZOMBIE; 70 ep->state = SCIFEP_ZOMBIE;
71 spin_unlock(&ep->lock); 71 spin_unlock(&ep->lock);
72 list_add_tail(&ep->list, &scif_info.zombie); 72 list_add_tail(&ep->list, &scif_info.zombie);
73 scif_info.nr_zombies++; 73 scif_info.nr_zombies++;
74 if (!eplock_held) 74 if (!eplock_held)
75 spin_unlock(&scif_info.eplock); 75 mutex_unlock(&scif_info.eplock);
76 schedule_work(&scif_info.misc_work); 76 schedule_work(&scif_info.misc_work);
77} 77}
78 78
@@ -81,16 +81,15 @@ static struct scif_endpt *scif_find_listen_ep(u16 port)
81 struct scif_endpt *ep = NULL; 81 struct scif_endpt *ep = NULL;
82 struct list_head *pos, *tmpq; 82 struct list_head *pos, *tmpq;
83 83
84 spin_lock(&scif_info.eplock); 84 mutex_lock(&scif_info.eplock);
85 list_for_each_safe(pos, tmpq, &scif_info.listen) { 85 list_for_each_safe(pos, tmpq, &scif_info.listen) {
86 ep = list_entry(pos, struct scif_endpt, list); 86 ep = list_entry(pos, struct scif_endpt, list);
87 if (ep->port.port == port) { 87 if (ep->port.port == port) {
88 spin_lock(&ep->lock); 88 mutex_unlock(&scif_info.eplock);
89 spin_unlock(&scif_info.eplock);
90 return ep; 89 return ep;
91 } 90 }
92 } 91 }
93 spin_unlock(&scif_info.eplock); 92 mutex_unlock(&scif_info.eplock);
94 return NULL; 93 return NULL;
95} 94}
96 95
@@ -99,14 +98,17 @@ void scif_cleanup_zombie_epd(void)
99 struct list_head *pos, *tmpq; 98 struct list_head *pos, *tmpq;
100 struct scif_endpt *ep; 99 struct scif_endpt *ep;
101 100
102 spin_lock(&scif_info.eplock); 101 mutex_lock(&scif_info.eplock);
103 list_for_each_safe(pos, tmpq, &scif_info.zombie) { 102 list_for_each_safe(pos, tmpq, &scif_info.zombie) {
104 ep = list_entry(pos, struct scif_endpt, list); 103 ep = list_entry(pos, struct scif_endpt, list);
105 list_del(pos); 104 if (scif_rma_ep_can_uninit(ep)) {
106 scif_info.nr_zombies--; 105 list_del(pos);
107 kfree(ep); 106 scif_info.nr_zombies--;
107 put_iova_domain(&ep->rma_info.iovad);
108 kfree(ep);
109 }
108 } 110 }
109 spin_unlock(&scif_info.eplock); 111 mutex_unlock(&scif_info.eplock);
110} 112}
111 113
112/** 114/**
@@ -137,6 +139,8 @@ void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
137 if (!ep) 139 if (!ep)
138 /* Send reject due to no listening ports */ 140 /* Send reject due to no listening ports */
139 goto conreq_sendrej_free; 141 goto conreq_sendrej_free;
142 else
143 spin_lock(&ep->lock);
140 144
141 if (ep->backlog <= ep->conreqcnt) { 145 if (ep->backlog <= ep->conreqcnt) {
142 /* Send reject due to too many pending requests */ 146 /* Send reject due to too many pending requests */
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
index 331322a25213..1771d7a9b8d0 100644
--- a/drivers/misc/mic/scif/scif_epd.h
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -96,7 +96,11 @@ struct scif_endpt_qp_info {
96 * @conn_port: Connection port 96 * @conn_port: Connection port
97 * @conn_err: Errors during connection 97 * @conn_err: Errors during connection
98 * @conn_async_state: Async connection 98 * @conn_async_state: Async connection
99 * @conn_pend_wq: Used by poll while waiting for incoming connections
99 * @conn_list: List of async connection requests 100 * @conn_list: List of async connection requests
101 * @rma_info: Information for triggering SCIF RMA and DMA operations
102 * @mmu_list: link to list of MMU notifier cleanup work
103 * @anon: anonymous file for use in kernel mode scif poll
100 */ 104 */
101struct scif_endpt { 105struct scif_endpt {
102 enum scif_epd_state state; 106 enum scif_epd_state state;
@@ -125,7 +129,11 @@ struct scif_endpt {
125 struct scif_port_id conn_port; 129 struct scif_port_id conn_port;
126 int conn_err; 130 int conn_err;
127 int conn_async_state; 131 int conn_async_state;
132 wait_queue_head_t conn_pend_wq;
128 struct list_head conn_list; 133 struct list_head conn_list;
134 struct scif_endpt_rma_info rma_info;
135 struct list_head mmu_list;
136 struct file *anon;
129}; 137};
130 138
131static inline int scifdev_alive(struct scif_endpt *ep) 139static inline int scifdev_alive(struct scif_endpt *ep)
@@ -133,6 +141,43 @@ static inline int scifdev_alive(struct scif_endpt *ep)
133 return _scifdev_alive(ep->remote_dev); 141 return _scifdev_alive(ep->remote_dev);
134} 142}
135 143
144/*
145 * scif_verify_epd:
146 * ep: SCIF endpoint
147 *
148 * Checks several generic error conditions and returns the
149 * appropriate error.
150 */
151static inline int scif_verify_epd(struct scif_endpt *ep)
152{
153 if (ep->state == SCIFEP_DISCONNECTED)
154 return -ECONNRESET;
155
156 if (ep->state != SCIFEP_CONNECTED)
157 return -ENOTCONN;
158
159 if (!scifdev_alive(ep))
160 return -ENODEV;
161
162 return 0;
163}
164
165static inline int scif_anon_inode_getfile(scif_epd_t epd)
166{
167 epd->anon = anon_inode_getfile("scif", &scif_anon_fops, NULL, 0);
168 if (IS_ERR(epd->anon))
169 return PTR_ERR(epd->anon);
170 return 0;
171}
172
173static inline void scif_anon_inode_fput(scif_epd_t epd)
174{
175 if (epd->anon) {
176 fput(epd->anon);
177 epd->anon = NULL;
178 }
179}
180
136void scif_cleanup_zombie_epd(void); 181void scif_cleanup_zombie_epd(void);
137void scif_teardown_ep(void *endpt); 182void scif_teardown_ep(void *endpt);
138void scif_cleanup_ep_qp(struct scif_endpt *ep); 183void scif_cleanup_ep_qp(struct scif_endpt *ep);
@@ -157,4 +202,9 @@ void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg);
157void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg); 202void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
158int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block); 203int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
159int __scif_flush(scif_epd_t epd); 204int __scif_flush(scif_epd_t epd);
205int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd);
206unsigned int __scif_pollfd(struct file *f, poll_table *wait,
207 struct scif_endpt *ep);
208int __scif_pin_pages(void *addr, size_t len, int *out_prot,
209 int map_flags, scif_pinned_pages_t *pages);
160#endif /* SCIF_EPD_H */ 210#endif /* SCIF_EPD_H */
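
scif_verify_epd() above folds the three endpoint-state checks every RMA entry point performs into a single helper. A hypothetical caller, showing the intended usage:

/* Hypothetical entry point showing the intended use of scif_verify_epd() */
int scif_do_rma_op(scif_epd_t epd)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = scif_verify_epd(ep);

	if (err)	/* -ECONNRESET, -ENOTCONN or -ENODEV */
		return err;
	/* ... endpoint is connected and the remote device is alive ... */
	return 0;
}
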
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
index eccf7e7135f9..f7e826142a72 100644
--- a/drivers/misc/mic/scif/scif_fd.c
+++ b/drivers/misc/mic/scif/scif_fd.c
@@ -34,6 +34,20 @@ static int scif_fdclose(struct inode *inode, struct file *f)
34 return scif_close(priv); 34 return scif_close(priv);
35} 35}
36 36
37static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
38{
39 struct scif_endpt *priv = f->private_data;
40
41 return scif_mmap(vma, priv);
42}
43
44static unsigned int scif_fdpoll(struct file *f, poll_table *wait)
45{
46 struct scif_endpt *priv = f->private_data;
47
48 return __scif_pollfd(f, wait, priv);
49}
50
37static int scif_fdflush(struct file *f, fl_owner_t id) 51static int scif_fdflush(struct file *f, fl_owner_t id)
38{ 52{
39 struct scif_endpt *ep = f->private_data; 53 struct scif_endpt *ep = f->private_data;
@@ -140,12 +154,12 @@ static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
140 * Add to the list of user mode eps where the second half 154 * Add to the list of user mode eps where the second half
141 * of the accept is not yet completed. 155 * of the accept is not yet completed.
142 */ 156 */
143 spin_lock(&scif_info.eplock); 157 mutex_lock(&scif_info.eplock);
144 list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept); 158 list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
145 list_add_tail(&((*ep)->liacceptlist), &priv->li_accept); 159 list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
146 (*ep)->listenep = priv; 160 (*ep)->listenep = priv;
147 priv->acceptcnt++; 161 priv->acceptcnt++;
148 spin_unlock(&scif_info.eplock); 162 mutex_unlock(&scif_info.eplock);
149 163
150 return 0; 164 return 0;
151 } 165 }
@@ -163,7 +177,7 @@ static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
163 return -EFAULT; 177 return -EFAULT;
164 178
 165		/* Remove from the user accept queue */ 179
166 spin_lock(&scif_info.eplock); 180 mutex_lock(&scif_info.eplock);
167 list_for_each_safe(pos, tmpq, &scif_info.uaccept) { 181 list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
168 tmpep = list_entry(pos, 182 tmpep = list_entry(pos,
169 struct scif_endpt, miacceptlist); 183 struct scif_endpt, miacceptlist);
@@ -175,7 +189,7 @@ static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
175 } 189 }
176 190
177 if (!fep) { 191 if (!fep) {
178 spin_unlock(&scif_info.eplock); 192 mutex_unlock(&scif_info.eplock);
179 return -ENOENT; 193 return -ENOENT;
180 } 194 }
181 195
@@ -190,9 +204,10 @@ static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
190 } 204 }
191 } 205 }
192 206
193 spin_unlock(&scif_info.eplock); 207 mutex_unlock(&scif_info.eplock);
194 208
195 /* Free the resources automatically created from the open. */ 209 /* Free the resources automatically created from the open. */
210 scif_anon_inode_fput(priv);
196 scif_teardown_ep(priv); 211 scif_teardown_ep(priv);
197 scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD); 212 scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
198 f->private_data = newep; 213 f->private_data = newep;
@@ -290,6 +305,157 @@ getnodes_err1:
290getnodes_err2: 305getnodes_err2:
291 return err; 306 return err;
292 } 307 }
308 case SCIF_REG:
309 {
310 struct scif_endpt *priv = f->private_data;
311 struct scifioctl_reg reg;
312 off_t ret;
313
314 if (copy_from_user(&reg, argp, sizeof(reg))) {
315 err = -EFAULT;
316 goto reg_err;
317 }
318 if (reg.flags & SCIF_MAP_KERNEL) {
319 err = -EINVAL;
320 goto reg_err;
321 }
322 ret = scif_register(priv, (void *)reg.addr, reg.len,
323 reg.offset, reg.prot, reg.flags);
324 if (ret < 0) {
325 err = (int)ret;
326 goto reg_err;
327 }
328
329 if (copy_to_user(&((struct scifioctl_reg __user *)argp)
330 ->out_offset, &ret, sizeof(reg.out_offset))) {
331 err = -EFAULT;
332 goto reg_err;
333 }
334 err = 0;
335reg_err:
336 scif_err_debug(err, "scif_register");
337 return err;
338 }
339 case SCIF_UNREG:
340 {
341 struct scif_endpt *priv = f->private_data;
342 struct scifioctl_unreg unreg;
343
344 if (copy_from_user(&unreg, argp, sizeof(unreg))) {
345 err = -EFAULT;
346 goto unreg_err;
347 }
348 err = scif_unregister(priv, unreg.offset, unreg.len);
349unreg_err:
350 scif_err_debug(err, "scif_unregister");
351 return err;
352 }
353 case SCIF_READFROM:
354 {
355 struct scif_endpt *priv = f->private_data;
356 struct scifioctl_copy copy;
357
358 if (copy_from_user(&copy, argp, sizeof(copy))) {
359 err = -EFAULT;
360 goto readfrom_err;
361 }
362 err = scif_readfrom(priv, copy.loffset, copy.len, copy.roffset,
363 copy.flags);
364readfrom_err:
365 scif_err_debug(err, "scif_readfrom");
366 return err;
367 }
368 case SCIF_WRITETO:
369 {
370 struct scif_endpt *priv = f->private_data;
371 struct scifioctl_copy copy;
372
373 if (copy_from_user(&copy, argp, sizeof(copy))) {
374 err = -EFAULT;
375 goto writeto_err;
376 }
377 err = scif_writeto(priv, copy.loffset, copy.len, copy.roffset,
378 copy.flags);
379writeto_err:
380 scif_err_debug(err, "scif_writeto");
381 return err;
382 }
383 case SCIF_VREADFROM:
384 {
385 struct scif_endpt *priv = f->private_data;
386 struct scifioctl_copy copy;
387
388 if (copy_from_user(&copy, argp, sizeof(copy))) {
389 err = -EFAULT;
390 goto vreadfrom_err;
391 }
392 err = scif_vreadfrom(priv, (void __force *)copy.addr, copy.len,
393 copy.roffset, copy.flags);
394vreadfrom_err:
395 scif_err_debug(err, "scif_vreadfrom");
396 return err;
397 }
398 case SCIF_VWRITETO:
399 {
400 struct scif_endpt *priv = f->private_data;
401 struct scifioctl_copy copy;
402
403 if (copy_from_user(&copy, argp, sizeof(copy))) {
404 err = -EFAULT;
405 goto vwriteto_err;
406 }
407 err = scif_vwriteto(priv, (void __force *)copy.addr, copy.len,
408 copy.roffset, copy.flags);
409vwriteto_err:
410 scif_err_debug(err, "scif_vwriteto");
411 return err;
412 }
413 case SCIF_FENCE_MARK:
414 {
415 struct scif_endpt *priv = f->private_data;
416 struct scifioctl_fence_mark mark;
417 int tmp_mark = 0;
418
419 if (copy_from_user(&mark, argp, sizeof(mark))) {
420 err = -EFAULT;
421 goto fence_mark_err;
422 }
423 err = scif_fence_mark(priv, mark.flags, &tmp_mark);
424 if (err)
425 goto fence_mark_err;
426 if (copy_to_user((void __user *)mark.mark, &tmp_mark,
427 sizeof(tmp_mark))) {
428 err = -EFAULT;
429 goto fence_mark_err;
430 }
431fence_mark_err:
432 scif_err_debug(err, "scif_fence_mark");
433 return err;
434 }
435 case SCIF_FENCE_WAIT:
436 {
437 struct scif_endpt *priv = f->private_data;
438
439 err = scif_fence_wait(priv, arg);
440 scif_err_debug(err, "scif_fence_wait");
441 return err;
442 }
443 case SCIF_FENCE_SIGNAL:
444 {
445 struct scif_endpt *priv = f->private_data;
446 struct scifioctl_fence_signal signal;
447
448 if (copy_from_user(&signal, argp, sizeof(signal))) {
449 err = -EFAULT;
450 goto fence_signal_err;
451 }
452
453 err = scif_fence_signal(priv, signal.loff, signal.lval,
454 signal.roff, signal.rval, signal.flags);
455fence_signal_err:
456 scif_err_debug(err, "scif_fence_signal");
457 return err;
458 }
293 } 459 }
294 return -EINVAL; 460 return -EINVAL;
295} 461}
@@ -298,6 +464,8 @@ const struct file_operations scif_fops = {
298 .open = scif_fdopen, 464 .open = scif_fdopen,
299 .release = scif_fdclose, 465 .release = scif_fdclose,
300 .unlocked_ioctl = scif_fdioctl, 466 .unlocked_ioctl = scif_fdioctl,
467 .mmap = scif_fdmmap,
468 .poll = scif_fdpoll,
301 .flush = scif_fdflush, 469 .flush = scif_fdflush,
302 .owner = THIS_MODULE, 470 .owner = THIS_MODULE,
303}; 471};
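
From user space, the new ioctls map one-to-one onto the kernel APIs; for example, SCIF_READFROM takes the same struct scifioctl_copy the handler above decodes. A hedged sketch, assuming the uapi header is exported as <linux/scif_ioctl.h> and using the field names visible in the handler:

#include <sys/ioctl.h>
#include <linux/scif_ioctl.h>	/* assumed uapi header */

/* Hedged user-space sketch: mirror of the SCIF_READFROM handler above */
static int scif_readfrom_user(int scif_fd, long loffset, unsigned long len,
			      long roffset)
{
	struct scifioctl_copy copy = {
		.loffset = loffset,
		.len	 = len,
		.roffset = roffset,
		.flags	 = 0,
	};

	return ioctl(scif_fd, SCIF_READFROM, &copy);
}
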
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
new file mode 100644
index 000000000000..7f2c96f57066
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -0,0 +1,771 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18
19#include "scif_main.h"
20
21/**
22 * scif_recv_mark: Handle SCIF_MARK request
23 * @msg: Interrupt message
24 *
25 * The peer has requested a mark.
26 */
27void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
28{
29 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
30 int mark, err;
31
32 err = _scif_fence_mark(ep, &mark);
33 if (err)
34 msg->uop = SCIF_MARK_NACK;
35 else
36 msg->uop = SCIF_MARK_ACK;
37 msg->payload[0] = ep->remote_ep;
38 msg->payload[2] = mark;
39 scif_nodeqp_send(ep->remote_dev, msg);
40}
41
42/**
43 * scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
44 * @msg: Interrupt message
45 *
46 * The peer has responded to a SCIF_MARK message.
47 */
48void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
49{
50 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
51 struct scif_fence_info *fence_req =
52 (struct scif_fence_info *)msg->payload[1];
53
54 mutex_lock(&ep->rma_info.rma_lock);
55 if (msg->uop == SCIF_MARK_ACK) {
56 fence_req->state = OP_COMPLETED;
57 fence_req->dma_mark = (int)msg->payload[2];
58 } else {
59 fence_req->state = OP_FAILED;
60 }
61 mutex_unlock(&ep->rma_info.rma_lock);
62 complete(&fence_req->comp);
63}
64
65/**
66 * scif_recv_wait: Handle SCIF_WAIT request
67 * @msg: Interrupt message
68 *
69 * The peer has requested waiting on a fence.
70 */
71void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
72{
73 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
74 struct scif_remote_fence_info *fence;
75
76 /*
77 * Allocate structure for remote fence information and
78 * send a NACK if the allocation failed. The peer will
79 * return ENOMEM upon receiving a NACK.
80 */
81 fence = kmalloc(sizeof(*fence), GFP_KERNEL);
82 if (!fence) {
83 msg->payload[0] = ep->remote_ep;
84 msg->uop = SCIF_WAIT_NACK;
85 scif_nodeqp_send(ep->remote_dev, msg);
86 return;
87 }
88
89 /* Prepare the fence request */
90 memcpy(&fence->msg, msg, sizeof(struct scifmsg));
91 INIT_LIST_HEAD(&fence->list);
92
93 /* Insert to the global remote fence request list */
94 mutex_lock(&scif_info.fencelock);
95 atomic_inc(&ep->rma_info.fence_refcount);
96 list_add_tail(&fence->list, &scif_info.fence);
97 mutex_unlock(&scif_info.fencelock);
98
99 schedule_work(&scif_info.misc_work);
100}
101
102/**
103 * scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
104 * @msg: Interrupt message
105 *
106 * The peer has responded to a SCIF_WAIT message.
107 */
108void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
109{
110 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
111 struct scif_fence_info *fence_req =
112 (struct scif_fence_info *)msg->payload[1];
113
114 mutex_lock(&ep->rma_info.rma_lock);
115 if (msg->uop == SCIF_WAIT_ACK)
116 fence_req->state = OP_COMPLETED;
117 else
118 fence_req->state = OP_FAILED;
119 mutex_unlock(&ep->rma_info.rma_lock);
120 complete(&fence_req->comp);
121}
122
123/**
124 * scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
125 * @msg: Interrupt message
126 *
127 * The peer has requested a signal on a local offset.
128 */
129void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
130{
131 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
132 int err;
133
134 err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
135 SCIF_WINDOW_SELF);
136 if (err)
137 msg->uop = SCIF_SIG_NACK;
138 else
139 msg->uop = SCIF_SIG_ACK;
140 msg->payload[0] = ep->remote_ep;
141 scif_nodeqp_send(ep->remote_dev, msg);
142}
143
144/**
145 * scif_recv_sig_remote: Handle SCIF_SIGNAL_REMOTE request
146 * @msg: Interrupt message
147 *
148 * The peer has requested a signal on a remote offset.
149 */
150void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
151{
152 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
153 int err;
154
155 err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
156 SCIF_WINDOW_PEER);
157 if (err)
158 msg->uop = SCIF_SIG_NACK;
159 else
160 msg->uop = SCIF_SIG_ACK;
161 msg->payload[0] = ep->remote_ep;
162 scif_nodeqp_send(ep->remote_dev, msg);
163}
164
165/**
166 * scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
167 * @msg: Interrupt message
168 *
169 * The peer has responded to a signal request.
170 */
171void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg)
172{
173 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
174 struct scif_fence_info *fence_req =
175 (struct scif_fence_info *)msg->payload[3];
176
177 mutex_lock(&ep->rma_info.rma_lock);
178 if (msg->uop == SCIF_SIG_ACK)
179 fence_req->state = OP_COMPLETED;
180 else
181 fence_req->state = OP_FAILED;
182 mutex_unlock(&ep->rma_info.rma_lock);
183 complete(&fence_req->comp);
184}
185
186static inline void *scif_get_local_va(off_t off, struct scif_window *window)
187{
188 struct page **pages = window->pinned_pages->pages;
189 int page_nr = (off - window->offset) >> PAGE_SHIFT;
190 off_t page_off = off & ~PAGE_MASK;
191
192 return page_address(pages[page_nr]) + page_off;
193}
194
195static void scif_prog_signal_cb(void *arg)
196{
197 struct scif_status *status = arg;
198
199 dma_pool_free(status->ep->remote_dev->signal_pool, status,
200 status->src_dma_addr);
201}
202
203static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
204{
205 struct scif_endpt *ep = (struct scif_endpt *)epd;
206 struct dma_chan *chan = ep->rma_info.dma_chan;
207 struct dma_device *ddev = chan->device;
208 bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
209 struct dma_async_tx_descriptor *tx;
210 struct scif_status *status = NULL;
211 dma_addr_t src;
212 dma_cookie_t cookie;
213 int err;
214
215 tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
216 if (!tx) {
217 err = -ENOMEM;
218 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
219 __func__, __LINE__, err);
220 goto alloc_fail;
221 }
222 cookie = tx->tx_submit(tx);
223 if (dma_submit_error(cookie)) {
224 err = (int)cookie;
225 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
226 __func__, __LINE__, err);
227 goto alloc_fail;
228 }
229 dma_async_issue_pending(chan);
230 if (x100) {
231 /*
232 * For X100 use the status descriptor to write the value to
233 * the destination.
234 */
235 tx = ddev->device_prep_dma_imm_data(chan, dst, val, 0);
236 } else {
237 status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL,
238 &src);
239 if (!status) {
240 err = -ENOMEM;
241 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
242 __func__, __LINE__, err);
243 goto alloc_fail;
244 }
245 status->val = val;
246 status->src_dma_addr = src;
247 status->ep = ep;
248 src += offsetof(struct scif_status, val);
249 tx = ddev->device_prep_dma_memcpy(chan, dst, src, sizeof(val),
250 DMA_PREP_INTERRUPT);
251 }
252 if (!tx) {
253 err = -ENOMEM;
254 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
255 __func__, __LINE__, err);
256 goto dma_fail;
257 }
258 if (!x100) {
259 tx->callback = scif_prog_signal_cb;
260 tx->callback_param = status;
261 }
262 cookie = tx->tx_submit(tx);
263 if (dma_submit_error(cookie)) {
264 err = -EIO;
265 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
266 __func__, __LINE__, err);
267 goto dma_fail;
268 }
269 dma_async_issue_pending(chan);
270 return 0;
271dma_fail:
272 if (!x100)
273 dma_pool_free(ep->remote_dev->signal_pool, status,
274 status->src_dma_addr);
275alloc_fail:
276 return err;
277}
278
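
On hardware without an immediate-data descriptor, _scif_prog_signal() above stages the 8-byte value in a coherent dma_pool entry and DMA-copies it into the window; the completion callback then returns the entry to the pool. A sketch of just the staging step with a hypothetical helper; pool creation and the callback are omitted.

#include <linux/dmaengine.h>
#include <linux/dmapool.h>

/*
 * Sketch (hypothetical helper): stage an 8-byte value in a dma_pool
 * entry and DMA it into place.  A real caller would also set
 * tx->callback to return the entry to the pool, as the driver does.
 */
static int stage_signal_sketch(struct dma_pool *pool, struct dma_chan *chan,
			       dma_addr_t dst, u64 val)
{
	struct dma_async_tx_descriptor *tx;
	dma_addr_t src;
	u64 *stage = dma_pool_alloc(pool, GFP_KERNEL, &src);

	if (!stage)
		return -ENOMEM;
	*stage = val;
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
						  sizeof(val),
						  DMA_PREP_INTERRUPT);
	if (!tx || dma_submit_error(tx->tx_submit(tx))) {
		dma_pool_free(pool, stage, src);
		return -EIO;
	}
	dma_async_issue_pending(chan);
	return 0;
}
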
279/*
280 * scif_prog_signal:
281 * @epd - Endpoint Descriptor
282 * @offset - registered address to write @val to
283 * @val - Value to be written at @offset
284 * @type - Type of the window.
285 *
286 * Arrange to write a value to the registered offset after ensuring that the
287 * offset provided is indeed valid.
288 */
289int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
290 enum scif_window_type type)
291{
292 struct scif_endpt *ep = (struct scif_endpt *)epd;
293 struct scif_window *window = NULL;
294 struct scif_rma_req req;
295 dma_addr_t dst_dma_addr;
296 int err;
297
298 mutex_lock(&ep->rma_info.rma_lock);
299 req.out_window = &window;
300 req.offset = offset;
301 req.nr_bytes = sizeof(u64);
302 req.prot = SCIF_PROT_WRITE;
303 req.type = SCIF_WINDOW_SINGLE;
304 if (type == SCIF_WINDOW_SELF)
305 req.head = &ep->rma_info.reg_list;
306 else
307 req.head = &ep->rma_info.remote_reg_list;
308 /* Does a valid window exist? */
309 err = scif_query_window(&req);
310 if (err) {
311 dev_err(scif_info.mdev.this_device,
312 "%s %d err %d\n", __func__, __LINE__, err);
313 goto unlock_ret;
314 }
315
316 if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) {
317 u64 *dst_virt;
318
319 if (type == SCIF_WINDOW_SELF)
320 dst_virt = scif_get_local_va(offset, window);
321 else
322 dst_virt =
323 scif_get_local_va(offset, (struct scif_window *)
324 window->peer_window);
325 *dst_virt = val;
326 } else {
327 dst_dma_addr = __scif_off_to_dma_addr(window, offset);
328 err = _scif_prog_signal(epd, dst_dma_addr, val);
329 }
330unlock_ret:
331 mutex_unlock(&ep->rma_info.rma_lock);
332 return err;
333}
334
335static int _scif_fence_wait(scif_epd_t epd, int mark)
336{
337 struct scif_endpt *ep = (struct scif_endpt *)epd;
338 dma_cookie_t cookie = mark & ~SCIF_REMOTE_FENCE;
339 int err;
340
341 /* Wait for DMA callback in scif_fence_mark_cb(..) */
342 err = wait_event_interruptible_timeout(ep->rma_info.markwq,
343 dma_async_is_tx_complete(
344 ep->rma_info.dma_chan,
345 cookie, NULL, NULL) ==
346 DMA_COMPLETE,
347 SCIF_NODE_ALIVE_TIMEOUT);
348 if (!err)
349 err = -ETIMEDOUT;
350 else if (err > 0)
351 err = 0;
352 return err;
353}
354
355/**
356 * scif_rma_handle_remote_fences:
357 *
358 * This routine services remote fence requests.
359 */
360void scif_rma_handle_remote_fences(void)
361{
362 struct list_head *item, *tmp;
363 struct scif_remote_fence_info *fence;
364 struct scif_endpt *ep;
365 int mark, err;
366
367 might_sleep();
368 mutex_lock(&scif_info.fencelock);
369 list_for_each_safe(item, tmp, &scif_info.fence) {
370 fence = list_entry(item, struct scif_remote_fence_info,
371 list);
372 /* Remove fence from global list */
373 list_del(&fence->list);
374
375 /* Initiate the fence operation */
376 ep = (struct scif_endpt *)fence->msg.payload[0];
377 mark = fence->msg.payload[2];
378 err = _scif_fence_wait(ep, mark);
379 if (err)
380 fence->msg.uop = SCIF_WAIT_NACK;
381 else
382 fence->msg.uop = SCIF_WAIT_ACK;
383 fence->msg.payload[0] = ep->remote_ep;
384 scif_nodeqp_send(ep->remote_dev, &fence->msg);
385 kfree(fence);
386 if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
387 schedule_work(&scif_info.misc_work);
388 }
389 mutex_unlock(&scif_info.fencelock);
390}
391
392static int _scif_send_fence(scif_epd_t epd, int uop, int mark, int *out_mark)
393{
394 int err;
395 struct scifmsg msg;
396 struct scif_fence_info *fence_req;
397 struct scif_endpt *ep = (struct scif_endpt *)epd;
398
399 fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
400 if (!fence_req) {
401 err = -ENOMEM;
402 goto error;
403 }
404
405 fence_req->state = OP_IN_PROGRESS;
406 init_completion(&fence_req->comp);
407
408 msg.src = ep->port;
409 msg.uop = uop;
410 msg.payload[0] = ep->remote_ep;
411 msg.payload[1] = (u64)fence_req;
412 if (uop == SCIF_WAIT)
413 msg.payload[2] = mark;
414 spin_lock(&ep->lock);
415 if (ep->state == SCIFEP_CONNECTED)
416 err = scif_nodeqp_send(ep->remote_dev, &msg);
417 else
418 err = -ENOTCONN;
419 spin_unlock(&ep->lock);
420 if (err)
421 goto error_free;
422retry:
423 /* Wait for a SCIF_WAIT_(N)ACK message */
424 err = wait_for_completion_timeout(&fence_req->comp,
425 SCIF_NODE_ALIVE_TIMEOUT);
426 if (!err && scifdev_alive(ep))
427 goto retry;
428 if (!err)
429 err = -ENODEV;
430 if (err > 0)
431 err = 0;
432 mutex_lock(&ep->rma_info.rma_lock);
433 if (err < 0) {
434 if (fence_req->state == OP_IN_PROGRESS)
435 fence_req->state = OP_FAILED;
436 }
437 if (fence_req->state == OP_FAILED && !err)
438 err = -ENOMEM;
439 if (uop == SCIF_MARK && fence_req->state == OP_COMPLETED)
440 *out_mark = SCIF_REMOTE_FENCE | fence_req->dma_mark;
441 mutex_unlock(&ep->rma_info.rma_lock);
442error_free:
443 kfree(fence_req);
444error:
445 return err;
446}
447
448/**
449 * scif_send_fence_mark:
450 * @epd: end point descriptor.
451 * @out_mark: Output DMA mark reported by peer.
452 *
453 * Send a remote fence mark request.
454 */
455static int scif_send_fence_mark(scif_epd_t epd, int *out_mark)
456{
457 return _scif_send_fence(epd, SCIF_MARK, 0, out_mark);
458}
459
460/**
461 * scif_send_fence_wait:
462 * @epd: end point descriptor.
463 * @mark: DMA mark to wait for.
464 *
465 * Send a remote fence wait request.
466 */
467static int scif_send_fence_wait(scif_epd_t epd, int mark)
468{
469 return _scif_send_fence(epd, SCIF_WAIT, mark, NULL);
470}
471
472static int _scif_send_fence_signal_wait(struct scif_endpt *ep,
473 struct scif_fence_info *fence_req)
474{
475 int err;
476
477retry:
478 /* Wait for a SCIF_SIG_(N)ACK message */
479 err = wait_for_completion_timeout(&fence_req->comp,
480 SCIF_NODE_ALIVE_TIMEOUT);
481 if (!err && scifdev_alive(ep))
482 goto retry;
483 if (!err)
484 err = -ENODEV;
485 if (err > 0)
486 err = 0;
487 if (err < 0) {
488 mutex_lock(&ep->rma_info.rma_lock);
489 if (fence_req->state == OP_IN_PROGRESS)
490 fence_req->state = OP_FAILED;
491 mutex_unlock(&ep->rma_info.rma_lock);
492 }
493 if (fence_req->state == OP_FAILED && !err)
494 err = -ENXIO;
495 return err;
496}
497
498/**
499 * scif_send_fence_signal:
500 * @epd - endpoint descriptor
 501 * @roff - remote offset
 502 * @rval - remote value to write to roff
 503 * @loff - local offset
 504 * @lval - local value to write to loff
505 * @flags - flags
506 *
507 * Sends a remote fence signal request
508 */
509static int scif_send_fence_signal(scif_epd_t epd, off_t roff, u64 rval,
510 off_t loff, u64 lval, int flags)
511{
512 int err = 0;
513 struct scifmsg msg;
514 struct scif_fence_info *fence_req;
515 struct scif_endpt *ep = (struct scif_endpt *)epd;
516
517 fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
518 if (!fence_req) {
519 err = -ENOMEM;
520 goto error;
521 }
522
523 fence_req->state = OP_IN_PROGRESS;
524 init_completion(&fence_req->comp);
525 msg.src = ep->port;
526 if (flags & SCIF_SIGNAL_LOCAL) {
527 msg.uop = SCIF_SIG_LOCAL;
528 msg.payload[0] = ep->remote_ep;
529 msg.payload[1] = roff;
530 msg.payload[2] = rval;
531 msg.payload[3] = (u64)fence_req;
532 spin_lock(&ep->lock);
533 if (ep->state == SCIFEP_CONNECTED)
534 err = scif_nodeqp_send(ep->remote_dev, &msg);
535 else
536 err = -ENOTCONN;
537 spin_unlock(&ep->lock);
538 if (err)
539 goto error_free;
540 err = _scif_send_fence_signal_wait(ep, fence_req);
541 if (err)
542 goto error_free;
543 }
544 fence_req->state = OP_IN_PROGRESS;
545
546 if (flags & SCIF_SIGNAL_REMOTE) {
547 msg.uop = SCIF_SIG_REMOTE;
548 msg.payload[0] = ep->remote_ep;
549 msg.payload[1] = loff;
550 msg.payload[2] = lval;
551 msg.payload[3] = (u64)fence_req;
552 spin_lock(&ep->lock);
553 if (ep->state == SCIFEP_CONNECTED)
554 err = scif_nodeqp_send(ep->remote_dev, &msg);
555 else
556 err = -ENOTCONN;
557 spin_unlock(&ep->lock);
558 if (err)
559 goto error_free;
560 err = _scif_send_fence_signal_wait(ep, fence_req);
561 }
562error_free:
563 kfree(fence_req);
564error:
565 return err;
566}
567
568static void scif_fence_mark_cb(void *arg)
569{
570 struct scif_endpt *ep = (struct scif_endpt *)arg;
571
572 wake_up_interruptible(&ep->rma_info.markwq);
573 atomic_dec(&ep->rma_info.fence_refcount);
574}
575
576/*
577 * _scif_fence_mark:
578 *
579 * @epd - endpoint descriptor
580 * Set up a mark for this endpoint and return the value of the mark.
581 */
582int _scif_fence_mark(scif_epd_t epd, int *mark)
583{
584 struct scif_endpt *ep = (struct scif_endpt *)epd;
585 struct dma_chan *chan = ep->rma_info.dma_chan;
586 struct dma_device *ddev = chan->device;
587 struct dma_async_tx_descriptor *tx;
588 dma_cookie_t cookie;
589 int err;
590
591 tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
592 if (!tx) {
593 err = -ENOMEM;
594 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
595 __func__, __LINE__, err);
596 return err;
597 }
598 cookie = tx->tx_submit(tx);
599 if (dma_submit_error(cookie)) {
600 err = (int)cookie;
601 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
602 __func__, __LINE__, err);
603 return err;
604 }
605 dma_async_issue_pending(chan);
606 tx = ddev->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
607 if (!tx) {
608 err = -ENOMEM;
609 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
610 __func__, __LINE__, err);
611 return err;
612 }
613 tx->callback = scif_fence_mark_cb;
614 tx->callback_param = ep;
615 *mark = cookie = tx->tx_submit(tx);
616 if (dma_submit_error(cookie)) {
617 err = (int)cookie;
618 dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
619 __func__, __LINE__, err);
620 return err;
621 }
622 atomic_inc(&ep->rma_info.fence_refcount);
623 dma_async_issue_pending(chan);
624 return 0;
625}
626
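
A mark is thus nothing more than the dmaengine cookie of an interrupt descriptor queued behind all earlier work, and waiting on it is a completion check on that cookie. A deliberately crude busy-poll sketch; the driver instead sleeps on rma_info.markwq and lets scif_fence_mark_cb() do the wakeup.

#include <linux/dmaengine.h>

/*
 * Sketch: busy-poll until the interrupt descriptor behind 'mark'
 * retires.  Illustration only; real code should sleep, as above.
 */
static void wait_for_mark_sketch(struct dma_chan *chan, dma_cookie_t mark)
{
	while (dma_async_is_tx_complete(chan, mark, NULL, NULL) !=
	       DMA_COMPLETE)
		cpu_relax();
}
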
627#define SCIF_LOOPB_MAGIC_MARK 0xdead
628
629int scif_fence_mark(scif_epd_t epd, int flags, int *mark)
630{
631 struct scif_endpt *ep = (struct scif_endpt *)epd;
632 int err = 0;
633
634 dev_dbg(scif_info.mdev.this_device,
635 "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n",
636 ep, flags, *mark);
637 err = scif_verify_epd(ep);
638 if (err)
639 return err;
640
641 /* Invalid flags? */
642 if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER))
643 return -EINVAL;
644
645 /* At least one of init self or peer RMA should be set */
646 if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
647 return -EINVAL;
648
649 /* Exactly one of init self or peer RMA should be set but not both */
650 if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
651 return -EINVAL;
652
653 /*
654 * Management node loopback does not need to use DMA.
655 * Return a valid mark to be symmetric.
656 */
657 if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
658 *mark = SCIF_LOOPB_MAGIC_MARK;
659 return 0;
660 }
661
662 if (flags & SCIF_FENCE_INIT_SELF)
663 err = _scif_fence_mark(epd, mark);
664 else
665 err = scif_send_fence_mark(ep, mark);
666
667 if (err)
668 dev_err(scif_info.mdev.this_device,
669 "%s %d err %d\n", __func__, __LINE__, err);
670 dev_dbg(scif_info.mdev.this_device,
671 "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n",
672 ep, flags, *mark, err);
673 return err;
674}
675EXPORT_SYMBOL_GPL(scif_fence_mark);
676
677int scif_fence_wait(scif_epd_t epd, int mark)
678{
679 struct scif_endpt *ep = (struct scif_endpt *)epd;
680 int err = 0;
681
682 dev_dbg(scif_info.mdev.this_device,
683 "SCIFAPI fence_wait: ep %p mark 0x%x\n",
684 ep, mark);
685 err = scif_verify_epd(ep);
686 if (err)
687 return err;
688 /*
689 * Management node loopback does not need to use DMA.
690 * The only valid mark provided is SCIF_LOOPB_MAGIC_MARK so simply
691 * return success if the mark matches.
692 */
693 if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
694 if (mark == SCIF_LOOPB_MAGIC_MARK)
695 return 0;
696 else
697 return -EINVAL;
698 }
699 if (mark & SCIF_REMOTE_FENCE)
700 err = scif_send_fence_wait(epd, mark);
701 else
702 err = _scif_fence_wait(epd, mark);
703 if (err < 0)
704 dev_err(scif_info.mdev.this_device,
705 "%s %d err %d\n", __func__, __LINE__, err);
706 return err;
707}
708EXPORT_SYMBOL_GPL(scif_fence_wait);
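
/*
 * Hypothetical usage sketch (example_quiesce_rmas is not part of the
 * SCIF API): a kernel client pairs the two exported calls above to
 * order RMAs, assuming "epd" is a connected endpoint.
 */
static int example_quiesce_rmas(scif_epd_t epd)
{
	int mark, err;

	/* Mark all RMAs initiated so far through this endpoint */
	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
	if (err)
		return err;
	/* Block until every RMA covered by the mark has completed */
	return scif_fence_wait(epd, mark);
}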
709
710int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval,
711 off_t roff, u64 rval, int flags)
712{
713 struct scif_endpt *ep = (struct scif_endpt *)epd;
714 int err = 0;
715
716 dev_dbg(scif_info.mdev.this_device,
717 "SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n",
718 ep, loff, lval, roff, rval, flags);
719 err = scif_verify_epd(ep);
720 if (err)
721 return err;
722
723 /* Invalid flags? */
724 if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER |
725 SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE))
726 return -EINVAL;
727
728 /* At least one of init self or peer RMA should be set */
729 if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
730 return -EINVAL;
731
732 /* Exactly one of init self or peer RMA should be set but not both */
733 if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
734 return -EINVAL;
735
736 /* At least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE required */
737 if (!(flags & (SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE)))
738 return -EINVAL;
739
740 /* Only dword-aligned local offsets allowed */
741 if ((flags & SCIF_SIGNAL_LOCAL) && (loff & (sizeof(u32) - 1)))
742 return -EINVAL;
743
744 /* Only dword-aligned remote offsets allowed */
745 if ((flags & SCIF_SIGNAL_REMOTE) && (roff & (sizeof(u32) - 1)))
746 return -EINVAL;
747
748 if (flags & SCIF_FENCE_INIT_PEER) {
749 err = scif_send_fence_signal(epd, roff, rval, loff,
750 lval, flags);
751 } else {
752 /* Local Signal in Local RAS */
753 if (flags & SCIF_SIGNAL_LOCAL) {
754 err = scif_prog_signal(epd, loff, lval,
755 SCIF_WINDOW_SELF);
756 if (err)
757 goto error_ret;
758 }
759
760 /* Signal in Remote RAS */
761 if (flags & SCIF_SIGNAL_REMOTE)
762 err = scif_prog_signal(epd, roff,
763 rval, SCIF_WINDOW_PEER);
764 }
765error_ret:
766 if (err)
767 dev_err(scif_info.mdev.this_device,
768 "%s %d err %d\n", __func__, __LINE__, err);
769 return err;
770}
771EXPORT_SYMBOL_GPL(scif_fence_signal);
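
A corresponding sketch for the signalling variant, assuming loff is a
dword-aligned offset within a window registered on epd (the helper name
is hypothetical):

/*
 * Ask for the u64 at registered local offset "loff" to be set to
 * "lval" once all self-initiated RMAs on "epd" have completed.
 */
static int example_signal_local(scif_epd_t epd, off_t loff, u64 lval)
{
	return scif_fence_signal(epd, loff, lval, 0, 0,
				 SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_LOCAL);
}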
diff --git a/drivers/misc/mic/scif/scif_main.c b/drivers/misc/mic/scif/scif_main.c
index 6ce851f5c7e6..36d847af1209 100644
--- a/drivers/misc/mic/scif/scif_main.c
+++ b/drivers/misc/mic/scif/scif_main.c
@@ -34,6 +34,7 @@ struct scif_info scif_info = {
 };
 
 struct scif_dev *scif_dev;
+struct kmem_cache *unaligned_cache;
 static atomic_t g_loopb_cnt;
 
 /* Runs in the context of intr_wq */
@@ -80,35 +81,6 @@ irqreturn_t scif_intr_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static int scif_peer_probe(struct scif_peer_dev *spdev)
-{
-	struct scif_dev *scifdev = &scif_dev[spdev->dnode];
-
-	mutex_lock(&scif_info.conflock);
-	scif_info.total++;
-	scif_info.maxid = max_t(u32, spdev->dnode, scif_info.maxid);
-	mutex_unlock(&scif_info.conflock);
-	rcu_assign_pointer(scifdev->spdev, spdev);
-
-	/* In the future SCIF kernel client devices will be added here */
-	return 0;
-}
-
-static void scif_peer_remove(struct scif_peer_dev *spdev)
-{
-	struct scif_dev *scifdev = &scif_dev[spdev->dnode];
-
-	/* In the future SCIF kernel client devices will be removed here */
-	spdev = rcu_dereference(scifdev->spdev);
-	if (spdev)
-		RCU_INIT_POINTER(scifdev->spdev, NULL);
-	synchronize_rcu();
-
-	mutex_lock(&scif_info.conflock);
-	scif_info.total--;
-	mutex_unlock(&scif_info.conflock);
-}
-
 static void scif_qp_setup_handler(struct work_struct *work)
 {
 	struct scif_dev *scifdev = container_of(work, struct scif_dev,
@@ -139,20 +111,13 @@ static void scif_qp_setup_handler(struct work_struct *work)
 	}
 }
 
-static int scif_setup_scifdev(struct scif_hw_dev *sdev)
+static int scif_setup_scifdev(void)
 {
+	/* We support a maximum of 129 SCIF nodes including the mgmt node */
+#define MAX_SCIF_NODES 129
 	int i;
-	u8 num_nodes;
-
-	if (sdev->snode) {
-		struct mic_bootparam __iomem *bp = sdev->rdp;
-
-		num_nodes = ioread8(&bp->tot_nodes);
-	} else {
-		struct mic_bootparam *bp = sdev->dp;
+	u8 num_nodes = MAX_SCIF_NODES;
 
-		num_nodes = bp->tot_nodes;
-	}
 	scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
 	if (!scif_dev)
 		return -ENOMEM;
@@ -163,7 +128,7 @@ static int scif_setup_scifdev(struct scif_hw_dev *sdev)
 		scifdev->exit = OP_IDLE;
 		init_waitqueue_head(&scifdev->disconn_wq);
 		mutex_init(&scifdev->lock);
-		INIT_WORK(&scifdev->init_msg_work, scif_qp_response_ack);
+		INIT_WORK(&scifdev->peer_add_work, scif_add_peer_device);
 		INIT_DELAYED_WORK(&scifdev->p2p_dwork,
 				  scif_poll_qp_state);
 		INIT_DELAYED_WORK(&scifdev->qp_dwork,
@@ -181,27 +146,21 @@ static void scif_destroy_scifdev(void)
 
 static int scif_probe(struct scif_hw_dev *sdev)
 {
-	struct scif_dev *scifdev;
+	struct scif_dev *scifdev = &scif_dev[sdev->dnode];
 	int rc;
 
 	dev_set_drvdata(&sdev->dev, sdev);
+	scifdev->sdev = sdev;
+
 	if (1 == atomic_add_return(1, &g_loopb_cnt)) {
-		struct scif_dev *loopb_dev;
+		struct scif_dev *loopb_dev = &scif_dev[sdev->snode];
 
-		rc = scif_setup_scifdev(sdev);
-		if (rc)
-			goto exit;
-		scifdev = &scif_dev[sdev->dnode];
-		scifdev->sdev = sdev;
-		loopb_dev = &scif_dev[sdev->snode];
 		loopb_dev->sdev = sdev;
 		rc = scif_setup_loopback_qp(loopb_dev);
 		if (rc)
-			goto free_sdev;
-	} else {
-		scifdev = &scif_dev[sdev->dnode];
-		scifdev->sdev = sdev;
+			goto exit;
 	}
+
 	rc = scif_setup_intr_wq(scifdev);
 	if (rc)
 		goto destroy_loopb;
@@ -237,8 +196,6 @@ destroy_intr:
 destroy_loopb:
 	if (atomic_dec_and_test(&g_loopb_cnt))
 		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
-free_sdev:
-	scif_destroy_scifdev();
 exit:
 	return rc;
 }
@@ -290,13 +247,6 @@ static void scif_remove(struct scif_hw_dev *sdev)
 	scifdev->sdev = NULL;
 }
 
-static struct scif_peer_driver scif_peer_driver = {
-	.driver.name = KBUILD_MODNAME,
-	.driver.owner = THIS_MODULE,
-	.probe = scif_peer_probe,
-	.remove = scif_peer_remove,
-};
-
 static struct scif_hw_dev_id id_table[] = {
 	{ MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
 	{ 0 },
@@ -312,29 +262,54 @@ static struct scif_driver scif_driver = {
 
 static int _scif_init(void)
 {
-	spin_lock_init(&scif_info.eplock);
+	int rc;
+
+	mutex_init(&scif_info.eplock);
+	spin_lock_init(&scif_info.rmalock);
 	spin_lock_init(&scif_info.nb_connect_lock);
 	spin_lock_init(&scif_info.port_lock);
 	mutex_init(&scif_info.conflock);
 	mutex_init(&scif_info.connlock);
+	mutex_init(&scif_info.fencelock);
 	INIT_LIST_HEAD(&scif_info.uaccept);
 	INIT_LIST_HEAD(&scif_info.listen);
 	INIT_LIST_HEAD(&scif_info.zombie);
 	INIT_LIST_HEAD(&scif_info.connected);
 	INIT_LIST_HEAD(&scif_info.disconnected);
+	INIT_LIST_HEAD(&scif_info.rma);
+	INIT_LIST_HEAD(&scif_info.rma_tc);
+	INIT_LIST_HEAD(&scif_info.mmu_notif_cleanup);
+	INIT_LIST_HEAD(&scif_info.fence);
 	INIT_LIST_HEAD(&scif_info.nb_connect_list);
 	init_waitqueue_head(&scif_info.exitwq);
+	scif_info.rma_tc_limit = SCIF_RMA_TEMP_CACHE_LIMIT;
 	scif_info.en_msg_log = 0;
 	scif_info.p2p_enable = 1;
+	rc = scif_setup_scifdev();
+	if (rc)
+		goto error;
+	unaligned_cache = kmem_cache_create("Unaligned_DMA",
+					    SCIF_KMEM_UNALIGNED_BUF_SIZE,
+					    0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!unaligned_cache) {
+		rc = -ENOMEM;
+		goto free_sdev;
+	}
 	INIT_WORK(&scif_info.misc_work, scif_misc_handler);
+	INIT_WORK(&scif_info.mmu_notif_work, scif_mmu_notif_handler);
 	INIT_WORK(&scif_info.conn_work, scif_conn_handler);
 	idr_init(&scif_ports);
 	return 0;
+free_sdev:
+	scif_destroy_scifdev();
+error:
+	return rc;
 }
 
 static void _scif_exit(void)
 {
 	idr_destroy(&scif_ports);
+	kmem_cache_destroy(unaligned_cache);
 	scif_destroy_scifdev();
 }
 
@@ -344,15 +319,13 @@ static int __init scif_init(void)
 	int rc;
 
 	_scif_init();
+	iova_cache_get();
 	rc = scif_peer_bus_init();
 	if (rc)
 		goto exit;
-	rc = scif_peer_register_driver(&scif_peer_driver);
-	if (rc)
-		goto peer_bus_exit;
 	rc = scif_register_driver(&scif_driver);
 	if (rc)
-		goto unreg_scif_peer;
+		goto peer_bus_exit;
 	rc = misc_register(mdev);
 	if (rc)
 		goto unreg_scif;
@@ -360,8 +333,6 @@ static int __init scif_init(void)
 	return 0;
 unreg_scif:
 	scif_unregister_driver(&scif_driver);
-unreg_scif_peer:
-	scif_peer_unregister_driver(&scif_peer_driver);
 peer_bus_exit:
 	scif_peer_bus_exit();
 exit:
@@ -374,8 +345,8 @@ static void __exit scif_exit(void)
 	scif_exit_debugfs();
 	misc_deregister(&scif_info.mdev);
 	scif_unregister_driver(&scif_driver);
-	scif_peer_unregister_driver(&scif_peer_driver);
 	scif_peer_bus_exit();
+	iova_cache_put();
 	_scif_exit();
 }
 
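
For reference, a sketch of how the new slab cache is presumably meant to
be consumed by the unaligned DMA path added elsewhere in this series
(the surrounding transfer logic is elided and illustrative only):

	void *tmp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;
	/* ... bounce unaligned user data through tmp for the DMA ... */
	kmem_cache_free(unaligned_cache, tmp);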
diff --git a/drivers/misc/mic/scif/scif_main.h b/drivers/misc/mic/scif/scif_main.h
index 580bc63e1b23..a08f0b600a9e 100644
--- a/drivers/misc/mic/scif/scif_main.h
+++ b/drivers/misc/mic/scif/scif_main.h
@@ -22,15 +22,18 @@
 #include <linux/pci.h>
 #include <linux/miscdevice.h>
 #include <linux/dmaengine.h>
+#include <linux/iova.h>
+#include <linux/anon_inodes.h>
 #include <linux/file.h>
+#include <linux/vmalloc.h>
 #include <linux/scif.h>
-
 #include "../common/mic_dev.h"
 
 #define SCIF_MGMT_NODE 0
 #define SCIF_DEFAULT_WATCHDOG_TO 30
 #define SCIF_NODE_ACCEPT_TIMEOUT (3 * HZ)
 #define SCIF_NODE_ALIVE_TIMEOUT (SCIF_DEFAULT_WATCHDOG_TO * HZ)
+#define SCIF_RMA_TEMP_CACHE_LIMIT 0x20000
 
 /*
  * Generic state used for certain node QP message exchanges
@@ -73,13 +76,21 @@ enum scif_msg_state {
  * @loopb_work: Used for submitting work to loopb_wq
  * @loopb_recv_q: List of messages received on the loopb_wq
  * @card_initiated_exit: set when the card has initiated the exit
+ * @rmalock: Synchronize access to RMA operations
+ * @fencelock: Synchronize access to list of remote fences requested.
+ * @rma: List of temporary registered windows to be destroyed.
+ * @rma_tc: List of temporary registered & cached Windows to be destroyed
+ * @fence: List of remote fence requests
+ * @mmu_notif_work: Work for registration caching MMU notifier workqueue
+ * @mmu_notif_cleanup: List of temporary cached windows for reg cache
+ * @rma_tc_limit: RMA temporary cache limit
  */
 struct scif_info {
 	u8 nodeid;
 	u8 maxid;
 	u8 total;
 	u32 nr_zombies;
-	spinlock_t eplock;
+	struct mutex eplock;
 	struct mutex connlock;
 	spinlock_t nb_connect_lock;
 	spinlock_t port_lock;
@@ -102,6 +113,14 @@ struct scif_info {
 	struct work_struct loopb_work;
 	struct list_head loopb_recv_q;
 	bool card_initiated_exit;
+	spinlock_t rmalock;
+	struct mutex fencelock;
+	struct list_head rma;
+	struct list_head rma_tc;
+	struct list_head fence;
+	struct work_struct mmu_notif_work;
+	struct list_head mmu_notif_cleanup;
+	unsigned long rma_tc_limit;
 };
 
 /*
@@ -139,7 +158,7 @@ struct scif_p2p_info {
 * @db: doorbell the peer will trigger to generate an interrupt on self
 * @rdb: Doorbell to trigger on the peer to generate an interrupt on the peer
 * @cookie: Cookie received while registering the interrupt handler
- * init_msg_work: work scheduled for SCIF_INIT message processing
+ * @peer_add_work: Work for handling device_add for peer devices
 * @p2p_dwork: Delayed work to enable polling for P2P state
 * @qp_dwork: Delayed work for enabling polling for remote QP information
 * @p2p_retry: Number of times to retry polling of P2P state
@@ -152,6 +171,8 @@ struct scif_p2p_info {
 * @disconn_rescnt: Keeps track of number of node remove requests sent
 * @exit: Status of exit message
 * @qp_dma_addr: Queue pair DMA address passed to the peer
+ * @dma_ch_idx: Round robin index for DMA channels
+ * @signal_pool: DMA pool used for scheduling scif_fence_signal DMA's
 */
 struct scif_dev {
 	u8 node;
@@ -165,7 +186,7 @@ struct scif_dev {
 	int db;
 	int rdb;
 	struct mic_irq *cookie;
-	struct work_struct init_msg_work;
+	struct work_struct peer_add_work;
 	struct delayed_work p2p_dwork;
 	struct delayed_work qp_dwork;
 	int p2p_retry;
@@ -178,17 +199,25 @@ struct scif_dev {
 	atomic_t disconn_rescnt;
 	enum scif_msg_state exit;
 	dma_addr_t qp_dma_addr;
+	int dma_ch_idx;
+	struct dma_pool *signal_pool;
 };
 
+extern bool scif_reg_cache_enable;
+extern bool scif_ulimit_check;
 extern struct scif_info scif_info;
 extern struct idr scif_ports;
+extern struct bus_type scif_peer_bus;
 extern struct scif_dev *scif_dev;
 extern const struct file_operations scif_fops;
+extern const struct file_operations scif_anon_fops;
 
 /* Size of the RB for the Node QP */
 #define SCIF_NODE_QP_SIZE 0x10000
 
 #include "scif_nodeqp.h"
+#include "scif_rma.h"
+#include "scif_rma_list.h"
 
 /*
 * scifdev_self:
diff --git a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h
index 20e50b4e19b2..3e86360ba5a6 100644
--- a/drivers/misc/mic/scif/scif_map.h
+++ b/drivers/misc/mic/scif/scif_map.h
@@ -80,7 +80,7 @@ scif_unmap_single(dma_addr_t local, struct scif_dev *scifdev,
 		  size_t size)
 {
 	if (!scifdev_self(scifdev)) {
-		if (scifdev_is_p2p(scifdev) && local > scifdev->base_addr)
+		if (scifdev_is_p2p(scifdev))
 			local = local - scifdev->base_addr;
 		dma_unmap_single(&scifdev->sdev->dev, local,
 				 size, DMA_BIDIRECTIONAL);
@@ -110,4 +110,27 @@ scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev)
 		sdev->hw_ops->iounmap(sdev, (void __force __iomem *)virt);
 	}
 }
+
+static __always_inline int
+scif_map_page(dma_addr_t *dma_handle, struct page *page,
+	      struct scif_dev *scifdev)
+{
+	int err = 0;
+
+	if (scifdev_self(scifdev)) {
+		*dma_handle = page_to_phys(page);
+	} else {
+		struct scif_hw_dev *sdev = scifdev->sdev;
+		*dma_handle = dma_map_page(&sdev->dev,
+					   page, 0x0, PAGE_SIZE,
+					   DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(&sdev->dev, *dma_handle))
+			err = -ENOMEM;
+		else if (scifdev_is_p2p(scifdev))
+			*dma_handle = *dma_handle + scifdev->base_addr;
+	}
+	if (err)
+		*dma_handle = 0;
+	return err;
+}
 #endif /* SCIF_MAP_H */
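
A hedged sketch of a caller for the new scif_map_page() helper (the
function below is hypothetical; error handling mirrors the -ENOMEM
convention above):

static int example_map_one_page(struct scif_dev *scifdev,
				dma_addr_t *dma_handle)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	if (scif_map_page(dma_handle, page, scifdev)) {
		__free_page(page);
		return -ENOMEM;
	}
	/* *dma_handle is now usable in descriptors targeting scifdev */
	return 0;
}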
diff --git a/drivers/misc/mic/scif/scif_mmap.c b/drivers/misc/mic/scif/scif_mmap.c
new file mode 100644
index 000000000000..49cb8f7b4672
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_mmap.c
@@ -0,0 +1,699 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include "scif_main.h"
19
20/*
21 * struct scif_vma_info - Information about a remote memory mapping
22 * created via scif_mmap(..)
23 * @vma: VM area struct
24 * @list: link to list of active vmas
25 */
26struct scif_vma_info {
27 struct vm_area_struct *vma;
28 struct list_head list;
29};
30
31void scif_recv_munmap(struct scif_dev *scifdev, struct scifmsg *msg)
32{
33 struct scif_rma_req req;
34 struct scif_window *window = NULL;
35 struct scif_window *recv_window =
36 (struct scif_window *)msg->payload[0];
37 struct scif_endpt *ep;
38
39 ep = (struct scif_endpt *)recv_window->ep;
40 req.out_window = &window;
41 req.offset = recv_window->offset;
42 req.prot = recv_window->prot;
43 req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
44 req.type = SCIF_WINDOW_FULL;
45 req.head = &ep->rma_info.reg_list;
46 msg->payload[0] = ep->remote_ep;
47
48 mutex_lock(&ep->rma_info.rma_lock);
49 /* Does a valid window exist? */
50 if (scif_query_window(&req)) {
51 dev_err(&scifdev->sdev->dev,
52 "%s %d -ENXIO\n", __func__, __LINE__);
53 msg->uop = SCIF_UNREGISTER_ACK;
54 goto error;
55 }
56
57 scif_put_window(window, window->nr_pages);
58
59 if (!window->ref_count) {
60 atomic_inc(&ep->rma_info.tw_refcount);
61 ep->rma_info.async_list_del = 1;
62 list_del_init(&window->list);
63 scif_free_window_offset(ep, window, window->offset);
64 }
65error:
66 mutex_unlock(&ep->rma_info.rma_lock);
67 if (window && !window->ref_count)
68 scif_queue_for_cleanup(window, &scif_info.rma);
69}
70
71/*
72 * Remove valid remote memory mappings created via scif_mmap(..) from the
73 * process address space since the remote node is lost
74 */
75static void __scif_zap_mmaps(struct scif_endpt *ep)
76{
77 struct list_head *item;
78 struct scif_vma_info *info;
79 struct vm_area_struct *vma;
80 unsigned long size;
81
82 spin_lock(&ep->lock);
83 list_for_each(item, &ep->rma_info.vma_list) {
84 info = list_entry(item, struct scif_vma_info, list);
85 vma = info->vma;
86 size = vma->vm_end - vma->vm_start;
87 zap_vma_ptes(vma, vma->vm_start, size);
88 dev_dbg(scif_info.mdev.this_device,
89 "%s ep %p zap vma %p size 0x%lx\n",
90 __func__, ep, info->vma, size);
91 }
92 spin_unlock(&ep->lock);
93}
94
95/*
96 * Traverse the list of endpoints for a particular remote node and
97 * zap valid remote memory mappings since the remote node is lost
98 */
99static void _scif_zap_mmaps(int node, struct list_head *head)
100{
101 struct scif_endpt *ep;
102 struct list_head *item;
103
104 mutex_lock(&scif_info.connlock);
105 list_for_each(item, head) {
106 ep = list_entry(item, struct scif_endpt, list);
107 if (ep->remote_dev->node == node)
108 __scif_zap_mmaps(ep);
109 }
110 mutex_unlock(&scif_info.connlock);
111}
112
113/*
114 * Wrapper for removing remote memory mappings for a particular node. This API
115 * is called by peer nodes as part of handling a lost node.
116 */
117void scif_zap_mmaps(int node)
118{
119 _scif_zap_mmaps(node, &scif_info.connected);
120 _scif_zap_mmaps(node, &scif_info.disconnected);
121}
122
123/*
124 * This API is only called while handling a lost node:
125 * a) Remote node is dead.
126 * b) Remote memory mappings have been zapped
127 * So we can traverse the remote_reg_list without any locks. Since
128 * the window has not yet been unregistered we can drop the ref count
129 * and queue it to the cleanup thread.
130 */
131static void __scif_cleanup_rma_for_zombies(struct scif_endpt *ep)
132{
133 struct list_head *pos, *tmp;
134 struct scif_window *window;
135
136 list_for_each_safe(pos, tmp, &ep->rma_info.remote_reg_list) {
137 window = list_entry(pos, struct scif_window, list);
138 if (window->ref_count)
139 scif_put_window(window, window->nr_pages);
140 else
141 dev_err(scif_info.mdev.this_device,
142 "%s %d unexpected\n",
143 __func__, __LINE__);
144 if (!window->ref_count) {
145 atomic_inc(&ep->rma_info.tw_refcount);
146 list_del_init(&window->list);
147 scif_queue_for_cleanup(window, &scif_info.rma);
148 }
149 }
150}
151
152/* Cleanup remote registration lists for zombie endpoints */
153void scif_cleanup_rma_for_zombies(int node)
154{
155 struct scif_endpt *ep;
156 struct list_head *item;
157
158 mutex_lock(&scif_info.eplock);
159 list_for_each(item, &scif_info.zombie) {
160 ep = list_entry(item, struct scif_endpt, list);
161 if (ep->remote_dev && ep->remote_dev->node == node)
162 __scif_cleanup_rma_for_zombies(ep);
163 }
164 mutex_unlock(&scif_info.eplock);
165 flush_work(&scif_info.misc_work);
166}
167
168/* Insert the VMA into the per endpoint VMA list */
169static int scif_insert_vma(struct scif_endpt *ep, struct vm_area_struct *vma)
170{
171 struct scif_vma_info *info;
172 int err = 0;
173
174 info = kzalloc(sizeof(*info), GFP_KERNEL);
175 if (!info) {
176 err = -ENOMEM;
177 goto done;
178 }
179 info->vma = vma;
180 spin_lock(&ep->lock);
181 list_add_tail(&info->list, &ep->rma_info.vma_list);
182 spin_unlock(&ep->lock);
183done:
184 return err;
185}
186
187/* Delete the VMA from the per endpoint VMA list */
188static void scif_delete_vma(struct scif_endpt *ep, struct vm_area_struct *vma)
189{
190 struct list_head *item;
191 struct scif_vma_info *info;
192
193 spin_lock(&ep->lock);
194 list_for_each(item, &ep->rma_info.vma_list) {
195 info = list_entry(item, struct scif_vma_info, list);
196 if (info->vma == vma) {
197 list_del(&info->list);
198 kfree(info);
199 break;
200 }
201 }
202 spin_unlock(&ep->lock);
203}
204
205static phys_addr_t scif_get_phys(phys_addr_t phys, struct scif_endpt *ep)
206{
207 struct scif_dev *scifdev = (struct scif_dev *)ep->remote_dev;
208 struct scif_hw_dev *sdev = scifdev->sdev;
209 phys_addr_t out_phys, apt_base = 0;
210
211 /*
212 * If the DMA address is card relative then we need to add the
213 * aperture base for mmap to work correctly
214 */
215 if (!scifdev_self(scifdev) && sdev->aper && sdev->card_rel_da)
216 apt_base = sdev->aper->pa;
217 out_phys = apt_base + phys;
218 return out_phys;
219}
220
221int scif_get_pages(scif_epd_t epd, off_t offset, size_t len,
222 struct scif_range **pages)
223{
224 struct scif_endpt *ep = (struct scif_endpt *)epd;
225 struct scif_rma_req req;
226 struct scif_window *window = NULL;
227 int nr_pages, err, i;
228
229 dev_dbg(scif_info.mdev.this_device,
230 "SCIFAPI get_pinned_pages: ep %p offset 0x%lx len 0x%lx\n",
231 ep, offset, len);
232 err = scif_verify_epd(ep);
233 if (err)
234 return err;
235
236 if (!len || (offset < 0) ||
237 (offset + len < offset) ||
238 (ALIGN(offset, PAGE_SIZE) != offset) ||
239 (ALIGN(len, PAGE_SIZE) != len))
240 return -EINVAL;
241
242 nr_pages = len >> PAGE_SHIFT;
243
244 req.out_window = &window;
245 req.offset = offset;
246 req.prot = 0;
247 req.nr_bytes = len;
248 req.type = SCIF_WINDOW_SINGLE;
249 req.head = &ep->rma_info.remote_reg_list;
250
251 mutex_lock(&ep->rma_info.rma_lock);
252 /* Does a valid window exist? */
253 err = scif_query_window(&req);
254 if (err) {
255 dev_err(&ep->remote_dev->sdev->dev,
256 "%s %d err %d\n", __func__, __LINE__, err);
257 goto error;
258 }
259
260 /* Allocate scif_range */
261 *pages = kzalloc(sizeof(**pages), GFP_KERNEL);
262 if (!*pages) {
263 err = -ENOMEM;
264 goto error;
265 }
266
267 /* Allocate phys addr array */
268 (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t));
269 if (!((*pages)->phys_addr)) {
270 err = -ENOMEM;
271 goto error;
272 }
273
274 if (scif_is_mgmt_node() && !scifdev_self(ep->remote_dev)) {
275 /* Allocate virtual address array */
276 (*pages)->va = scif_zalloc(nr_pages * sizeof(void *));
277 if (!(*pages)->va) {
278 err = -ENOMEM;
279 goto error;
280 }
281 }
282 /* Populate the values */
283 (*pages)->cookie = window;
284 (*pages)->nr_pages = nr_pages;
285 (*pages)->prot_flags = window->prot;
286
287 for (i = 0; i < nr_pages; i++) {
288 (*pages)->phys_addr[i] =
289 __scif_off_to_dma_addr(window, offset +
290 (i * PAGE_SIZE));
291 (*pages)->phys_addr[i] = scif_get_phys((*pages)->phys_addr[i],
292 ep);
293 if (scif_is_mgmt_node() && !scifdev_self(ep->remote_dev))
294 (*pages)->va[i] =
295 ep->remote_dev->sdev->aper->va +
296 (*pages)->phys_addr[i] -
297 ep->remote_dev->sdev->aper->pa;
298 }
299
300 scif_get_window(window, nr_pages);
301error:
302 mutex_unlock(&ep->rma_info.rma_lock);
303 if (err) {
304 if (*pages) {
305 scif_free((*pages)->phys_addr,
306 nr_pages * sizeof(dma_addr_t));
307 scif_free((*pages)->va,
308 nr_pages * sizeof(void *));
309 kfree(*pages);
310 *pages = NULL;
311 }
312 dev_err(&ep->remote_dev->sdev->dev,
313 "%s %d err %d\n", __func__, __LINE__, err);
314 }
315 return err;
316}
317EXPORT_SYMBOL_GPL(scif_get_pages);
318
319int scif_put_pages(struct scif_range *pages)
320{
321 struct scif_endpt *ep;
322 struct scif_window *window;
323 struct scifmsg msg;
324
325 if (!pages || !pages->cookie)
326 return -EINVAL;
327
328 window = pages->cookie;
329
330 if (!window || window->magic != SCIFEP_MAGIC)
331 return -EINVAL;
332
333 ep = (struct scif_endpt *)window->ep;
334 /*
335 * If the state is SCIFEP_CONNECTED or SCIFEP_DISCONNECTED then the
336 * caller should be allowed to release references to the pages,
337 * else the endpoint was not connected in the first place,
338 * hence the ENOTCONN.
339 */
340 if (ep->state != SCIFEP_CONNECTED && ep->state != SCIFEP_DISCONNECTED)
341 return -ENOTCONN;
342
343 mutex_lock(&ep->rma_info.rma_lock);
344
345 scif_put_window(window, pages->nr_pages);
346
347 /* Initiate window destruction if ref count is zero */
348 if (!window->ref_count) {
349 list_del(&window->list);
350 mutex_unlock(&ep->rma_info.rma_lock);
351 scif_drain_dma_intr(ep->remote_dev->sdev,
352 ep->rma_info.dma_chan);
353 /* Inform the peer about this window being destroyed. */
354 msg.uop = SCIF_MUNMAP;
355 msg.src = ep->port;
356 msg.payload[0] = window->peer_window;
357 /* No error handling for notification messages */
358 scif_nodeqp_send(ep->remote_dev, &msg);
359 /* Destroy this window from the peer's registered AS */
360 scif_destroy_remote_window(window);
361 } else {
362 mutex_unlock(&ep->rma_info.rma_lock);
363 }
364
365 scif_free(pages->phys_addr, pages->nr_pages * sizeof(dma_addr_t));
366 scif_free(pages->va, pages->nr_pages * sizeof(void *));
367 kfree(pages);
368 return 0;
369}
370EXPORT_SYMBOL_GPL(scif_put_pages);
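
/*
 * Hypothetical client sketch (not part of this file): scif_get_pages()
 * and scif_put_pages() are meant to be paired, with the returned
 * scif_range borrowed only between the two calls.
 */
static int example_walk_window(scif_epd_t epd, off_t offset, size_t len)
{
	struct scif_range *range;
	int i, err;

	err = scif_get_pages(epd, offset, len, &range);
	if (err)
		return err;
	for (i = 0; i < range->nr_pages; i++)
		pr_debug("page %d at %pad\n", i, &range->phys_addr[i]);
	return scif_put_pages(range);
}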
371
372/*
373 * scif_rma_list_mmap:
374 *
375 * Traverse the remote registration list starting from start_window:
376 * 1) Create VtoP mappings via remap_pfn_range(..)
377 * 2) Once step 1) completes successfully, traverse the range of
378 * windows again and bump the reference count.
379 * RMA lock must be held.
380 */
381static int scif_rma_list_mmap(struct scif_window *start_window, s64 offset,
382 int nr_pages, struct vm_area_struct *vma)
383{
384 s64 end_offset, loop_offset = offset;
385 struct scif_window *window = start_window;
386 int loop_nr_pages, nr_pages_left = nr_pages;
387 struct scif_endpt *ep = (struct scif_endpt *)start_window->ep;
388 struct list_head *head = &ep->rma_info.remote_reg_list;
389 int i, err = 0;
390 dma_addr_t phys_addr;
391 struct scif_window_iter src_win_iter;
392 size_t contig_bytes = 0;
393
394 might_sleep();
395 list_for_each_entry_from(window, head, list) {
396 end_offset = window->offset +
397 (window->nr_pages << PAGE_SHIFT);
398 loop_nr_pages = min_t(int,
399 (end_offset - loop_offset) >> PAGE_SHIFT,
400 nr_pages_left);
401 scif_init_window_iter(window, &src_win_iter);
402 for (i = 0; i < loop_nr_pages; i++) {
403 phys_addr = scif_off_to_dma_addr(window, loop_offset,
404 &contig_bytes,
405 &src_win_iter);
406 phys_addr = scif_get_phys(phys_addr, ep);
407 err = remap_pfn_range(vma,
408 vma->vm_start +
409 loop_offset - offset,
410 phys_addr >> PAGE_SHIFT,
411 PAGE_SIZE,
412 vma->vm_page_prot);
413 if (err)
414 goto error;
415 loop_offset += PAGE_SIZE;
416 }
417 nr_pages_left -= loop_nr_pages;
418 if (!nr_pages_left)
419 break;
420 }
421 /*
422 * No more failures expected. Bump up the ref count for all
423 * the windows. A second traversal from start_window is needed
424 * because ref counts must be taken only after every
425 * remap_pfn_range(..) call across the windows has succeeded.
426 */
427 loop_offset = offset;
428 nr_pages_left = nr_pages;
429 window = start_window;
430 head = &ep->rma_info.remote_reg_list;
431 list_for_each_entry_from(window, head, list) {
432 end_offset = window->offset +
433 (window->nr_pages << PAGE_SHIFT);
434 loop_nr_pages = min_t(int,
435 (end_offset - loop_offset) >> PAGE_SHIFT,
436 nr_pages_left);
437 scif_get_window(window, loop_nr_pages);
438 nr_pages_left -= loop_nr_pages;
439 loop_offset += (loop_nr_pages << PAGE_SHIFT);
440 if (!nr_pages_left)
441 break;
442 }
443error:
444 if (err)
445 dev_err(scif_info.mdev.this_device,
446 "%s %d err %d\n", __func__, __LINE__, err);
447 return err;
448}
449
450/*
451 * scif_rma_list_munmap:
452 *
453 * Traverse the remote registration list starting from window:
454 * 1) Decrement ref count.
455 * 2) If the ref count drops to zero then send a SCIF_MUNMAP message to peer.
456 * RMA lock must be held.
457 */
458static void scif_rma_list_munmap(struct scif_window *start_window,
459 s64 offset, int nr_pages)
460{
461 struct scifmsg msg;
462 s64 loop_offset = offset, end_offset;
463 int loop_nr_pages, nr_pages_left = nr_pages;
464 struct scif_endpt *ep = (struct scif_endpt *)start_window->ep;
465 struct list_head *head = &ep->rma_info.remote_reg_list;
466 struct scif_window *window = start_window, *_window;
467
468 msg.uop = SCIF_MUNMAP;
469 msg.src = ep->port;
470 loop_offset = offset;
471 nr_pages_left = nr_pages;
472 list_for_each_entry_safe_from(window, _window, head, list) {
473 end_offset = window->offset +
474 (window->nr_pages << PAGE_SHIFT);
475 loop_nr_pages = min_t(int,
476 (end_offset - loop_offset) >> PAGE_SHIFT,
477 nr_pages_left);
478 scif_put_window(window, loop_nr_pages);
479 if (!window->ref_count) {
480 struct scif_dev *rdev = ep->remote_dev;
481
482 scif_drain_dma_intr(rdev->sdev,
483 ep->rma_info.dma_chan);
484 /* Inform the peer about this munmap */
485 msg.payload[0] = window->peer_window;
486 /* No error handling for Notification messages. */
487 scif_nodeqp_send(ep->remote_dev, &msg);
488 list_del(&window->list);
489 /* Destroy this window from the peer's registered AS */
490 scif_destroy_remote_window(window);
491 }
492 nr_pages_left -= loop_nr_pages;
493 loop_offset += (loop_nr_pages << PAGE_SHIFT);
494 if (!nr_pages_left)
495 break;
496 }
497}
498
499/*
500 * The private data field of each VMA used to mmap a remote window
501 * points to an instance of struct vma_pvt
502 */
503struct vma_pvt {
504 struct scif_endpt *ep; /* End point for remote window */
505 s64 offset; /* offset within remote window */
506 bool valid_offset; /* offset is valid only if the original
507 * mmap request was for a single page
508 * else the offset within the vma is
509 * the correct offset
510 */
511 struct kref ref;
512};
513
514static void vma_pvt_release(struct kref *ref)
515{
516 struct vma_pvt *vmapvt = container_of(ref, struct vma_pvt, ref);
517
518 kfree(vmapvt);
519}
520
521/**
522 * scif_vma_open - VMA open driver callback
523 * @vma: VMM memory area.
524 * The open method is called by the kernel to allow the subsystem implementing
525 * the VMA to initialize the area. This method is invoked any time a new
526 * reference to the VMA is made (when a process forks, for example).
527 * The one exception happens when the VMA is first created by mmap;
528 * in this case, the driver's mmap method is called instead.
529 * This function is also invoked when an existing VMA is split by the kernel
530 * due to a call to munmap on a subset of the VMA resulting in two VMAs.
531 * The kernel invokes this function only on one of the two VMAs.
532 */
533static void scif_vma_open(struct vm_area_struct *vma)
534{
535 struct vma_pvt *vmapvt = vma->vm_private_data;
536
537 dev_dbg(scif_info.mdev.this_device,
538 "SCIFAPI vma open: vma_start 0x%lx vma_end 0x%lx\n",
539 vma->vm_start, vma->vm_end);
540 scif_insert_vma(vmapvt->ep, vma);
541 kref_get(&vmapvt->ref);
542}
543
544/**
545 * scif_munmap - VMA close driver callback.
546 * @vma: VMM memory area.
547 * When an area is destroyed, the kernel calls its close operation.
548 * Note that there's no usage count associated with VMAs; the area
549 * is opened and closed exactly once by each process that uses it.
550 */
551static void scif_munmap(struct vm_area_struct *vma)
552{
553 struct scif_endpt *ep;
554 struct vma_pvt *vmapvt = vma->vm_private_data;
555 int nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
556 s64 offset;
557 struct scif_rma_req req;
558 struct scif_window *window = NULL;
559 int err;
560
561 might_sleep();
562 dev_dbg(scif_info.mdev.this_device,
563 "SCIFAPI munmap: vma_start 0x%lx vma_end 0x%lx\n",
564 vma->vm_start, vma->vm_end);
565 ep = vmapvt->ep;
566 offset = vmapvt->valid_offset ? vmapvt->offset :
567 (vma->vm_pgoff) << PAGE_SHIFT;
568 dev_dbg(scif_info.mdev.this_device,
569 "SCIFAPI munmap: ep %p nr_pages 0x%x offset 0x%llx\n",
570 ep, nr_pages, offset);
571 req.out_window = &window;
572 req.offset = offset;
573 req.nr_bytes = vma->vm_end - vma->vm_start;
574 req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
575 req.type = SCIF_WINDOW_PARTIAL;
576 req.head = &ep->rma_info.remote_reg_list;
577
578 mutex_lock(&ep->rma_info.rma_lock);
579
580 err = scif_query_window(&req);
581 if (err)
582 dev_err(scif_info.mdev.this_device,
583 "%s %d err %d\n", __func__, __LINE__, err);
584 else
585 scif_rma_list_munmap(window, offset, nr_pages);
586
587 mutex_unlock(&ep->rma_info.rma_lock);
588 /*
589 * The kernel probably zeroes these out but we still want
590 * to clean up our own mess just in case.
591 */
592 vma->vm_ops = NULL;
593 vma->vm_private_data = NULL;
594 kref_put(&vmapvt->ref, vma_pvt_release);
595 scif_delete_vma(ep, vma);
596}
597
598static const struct vm_operations_struct scif_vm_ops = {
599 .open = scif_vma_open,
600 .close = scif_munmap,
601};
602
603/**
604 * scif_mmap - Map pages in virtual address space to a remote window.
605 * @vma: VMM memory area.
606 * @epd: endpoint descriptor
607 *
608 * Return: Upon successful completion, scif_mmap() returns zero
609 * else an appropriate error is returned as documented in scif.h
610 */
611int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd)
612{
613 struct scif_rma_req req;
614 struct scif_window *window = NULL;
615 struct scif_endpt *ep = (struct scif_endpt *)epd;
616 s64 start_offset = vma->vm_pgoff << PAGE_SHIFT;
617 int nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
618 int err;
619 struct vma_pvt *vmapvt;
620
621 dev_dbg(scif_info.mdev.this_device,
622 "SCIFAPI mmap: ep %p start_offset 0x%llx nr_pages 0x%x\n",
623 ep, start_offset, nr_pages);
624 err = scif_verify_epd(ep);
625 if (err)
626 return err;
627
628 might_sleep();
629
630 err = scif_insert_vma(ep, vma);
631 if (err)
632 return err;
633
634 vmapvt = kzalloc(sizeof(*vmapvt), GFP_KERNEL);
635 if (!vmapvt) {
636 scif_delete_vma(ep, vma);
637 return -ENOMEM;
638 }
639
640 vmapvt->ep = ep;
641 kref_init(&vmapvt->ref);
642
643 req.out_window = &window;
644 req.offset = start_offset;
645 req.nr_bytes = vma->vm_end - vma->vm_start;
646 req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
647 req.type = SCIF_WINDOW_PARTIAL;
648 req.head = &ep->rma_info.remote_reg_list;
649
650 mutex_lock(&ep->rma_info.rma_lock);
651 /* Does a valid window exist? */
652 err = scif_query_window(&req);
653 if (err) {
654 dev_err(&ep->remote_dev->sdev->dev,
655 "%s %d err %d\n", __func__, __LINE__, err);
656 goto error_unlock;
657 }
658
659 /* Default prot for loopback */
660 if (!scifdev_self(ep->remote_dev))
661 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
662
663 /*
664 * VM_DONTCOPY - Do not copy this vma on fork
665 * VM_DONTEXPAND - Cannot expand with mremap()
666 * VM_DONTDUMP - Do not include this VMA in core dumps
667 * VM_PFNMAP - Page-ranges managed without "struct page"
668 * VM_IO - Memory mapped I/O or similar
669 *
670 * We do not want to copy this VMA automatically on a fork(),
671 * expand this VMA due to mremap() or swap out these pages since
672 * the VMA is actually backed by physical pages in the remote
673 * node's physical memory and not via a struct page.
674 */
675 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
676
677 if (!scifdev_self(ep->remote_dev))
678 vma->vm_flags |= VM_IO | VM_PFNMAP;
679
680 /* Map this range of windows */
681 err = scif_rma_list_mmap(window, start_offset, nr_pages, vma);
682 if (err) {
683 dev_err(&ep->remote_dev->sdev->dev,
684 "%s %d err %d\n", __func__, __LINE__, err);
685 goto error_unlock;
686 }
687 /* Set up the driver call back */
688 vma->vm_ops = &scif_vm_ops;
689 vma->vm_private_data = vmapvt;
690error_unlock:
691 mutex_unlock(&ep->rma_info.rma_lock);
692 if (err) {
693 kfree(vmapvt);
694 dev_err(&ep->remote_dev->sdev->dev,
695 "%s %d err %d\n", __func__, __LINE__, err);
696 scif_delete_vma(ep, vma);
697 }
698 return err;
699}
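
Since scif_mmap() takes a ready-made VMA, a kernel driver would typically
call it from its own mmap file operation; a minimal hypothetical sketch,
assuming the file's private_data holds a connected endpoint:

static int example_dev_mmap(struct file *f, struct vm_area_struct *vma)
{
	scif_epd_t epd = f->private_data;	/* a connected endpoint */

	return scif_mmap(vma, epd);
}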
diff --git a/drivers/misc/mic/scif/scif_nm.c b/drivers/misc/mic/scif/scif_nm.c
index 9b4c5382d6a7..79f26a02a1cb 100644
--- a/drivers/misc/mic/scif/scif_nm.c
+++ b/drivers/misc/mic/scif/scif_nm.c
@@ -34,6 +34,7 @@ static void scif_invalidate_ep(int node)
 	list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
 		ep = list_entry(pos, struct scif_endpt, list);
 		if (ep->remote_dev->node == node) {
+			scif_unmap_all_windows(ep);
 			spin_lock(&ep->lock);
 			scif_cleanup_ep_qp(ep);
 			spin_unlock(&ep->lock);
@@ -50,6 +51,7 @@ static void scif_invalidate_ep(int node)
 			wake_up_interruptible(&ep->sendwq);
 			wake_up_interruptible(&ep->recvwq);
 			spin_unlock(&ep->lock);
+			scif_unmap_all_windows(ep);
 		}
 	}
 	mutex_unlock(&scif_info.connlock);
@@ -61,8 +63,8 @@ void scif_free_qp(struct scif_dev *scifdev)
 
 	if (!qp)
 		return;
-	scif_free_coherent((void *)qp->inbound_q.rb_base,
-			   qp->local_buf, scifdev, qp->inbound_q.size);
+	scif_unmap_single(qp->local_buf, scifdev, qp->inbound_q.size);
+	kfree(qp->inbound_q.rb_base);
 	scif_unmap_single(qp->local_qp, scifdev, sizeof(struct scif_qp));
 	kfree(scifdev->qpairs);
 	scifdev->qpairs = NULL;
@@ -125,8 +127,12 @@ void scif_cleanup_scifdev(struct scif_dev *dev)
 		}
 		scif_destroy_intr_wq(dev);
 	}
+	flush_work(&scif_info.misc_work);
 	scif_destroy_p2p(dev);
 	scif_invalidate_ep(dev->node);
+	scif_zap_mmaps(dev->node);
+	scif_cleanup_rma_for_zombies(dev->node);
+	flush_work(&scif_info.misc_work);
 	scif_send_acks(dev);
 	if (!dev->node && scif_info.card_initiated_exit) {
 		/*
@@ -147,14 +153,8 @@ void scif_cleanup_scifdev(struct scif_dev *dev)
 void scif_handle_remove_node(int node)
 {
 	struct scif_dev *scifdev = &scif_dev[node];
-	struct scif_peer_dev *spdev;
 
-	rcu_read_lock();
-	spdev = rcu_dereference(scifdev->spdev);
-	rcu_read_unlock();
-	if (spdev)
-		scif_peer_unregister_device(spdev);
-	else
+	if (scif_peer_unregister_device(scifdev))
 		scif_send_acks(scifdev);
 }
 
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index 6dfdae3452d6..c66ca1a5814e 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -105,18 +105,22 @@
 int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
 			  int local_size, struct scif_dev *scifdev)
 {
-	void *local_q = NULL;
+	void *local_q = qp->inbound_q.rb_base;
 	int err = 0;
 	u32 tmp_rd = 0;
 
 	spin_lock_init(&qp->send_lock);
 	spin_lock_init(&qp->recv_lock);
 
-	local_q = kzalloc(local_size, GFP_KERNEL);
+	/* Allocate rb only if not already allocated */
 	if (!local_q) {
-		err = -ENOMEM;
-		return err;
+		local_q = kzalloc(local_size, GFP_KERNEL);
+		if (!local_q) {
+			err = -ENOMEM;
+			return err;
+		}
 	}
+
 	err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
 	if (err)
 		goto kfree;
@@ -260,6 +264,11 @@ int scif_setup_qp_connect_response(struct scif_dev *scifdev,
 					  r_buf,
 					  get_count_order(remote_size));
 	/*
+	 * Because the node QP may already be processing an INIT message, set
+	 * the read pointer so the cached read offset isn't lost
+	 */
+	qp->remote_qp->local_read = qp->inbound_q.current_read_offset;
+	/*
 	 * resetup the inbound_q now that we know where the
 	 * inbound_read really is.
 	 */
@@ -426,6 +435,21 @@ free_p2p:
 	return NULL;
 }
 
+/* Uninitialize and release resources from a p2p mapping */
+static void scif_deinit_p2p_info(struct scif_dev *scifdev,
+				 struct scif_p2p_info *p2p)
+{
+	struct scif_hw_dev *sdev = scifdev->sdev;
+
+	dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
+		     p2p->sg_nentries[SCIF_PPI_MMIO], DMA_BIDIRECTIONAL);
+	dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
+		     p2p->sg_nentries[SCIF_PPI_APER], DMA_BIDIRECTIONAL);
+	scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
+	scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
+	kfree(p2p);
+}
+
 /**
  * scif_node_connect: Respond to SCIF_NODE_CONNECT interrupt message
  * @dst: Destination node
@@ -468,8 +492,10 @@ static void scif_node_connect(struct scif_dev *scifdev, int dst)
 	if (!p2p_ij)
 		return;
 	p2p_ji = scif_init_p2p_info(dev_j, dev_i);
-	if (!p2p_ji)
+	if (!p2p_ji) {
+		scif_deinit_p2p_info(dev_i, p2p_ij);
 		return;
+	}
 	list_add_tail(&p2p_ij->ppi_list, &dev_i->p2p);
 	list_add_tail(&p2p_ji->ppi_list, &dev_j->p2p);
 
@@ -529,27 +555,6 @@ static void scif_p2p_setup(void)
 	}
 }
 
-void scif_qp_response_ack(struct work_struct *work)
-{
-	struct scif_dev *scifdev = container_of(work, struct scif_dev,
-						init_msg_work);
-	struct scif_peer_dev *spdev;
-
-	/* Drop the INIT message if it has already been received */
-	if (_scifdev_alive(scifdev))
-		return;
-
-	spdev = scif_peer_register_device(scifdev);
-	if (IS_ERR(spdev))
-		return;
-
-	if (scif_is_mgmt_node()) {
-		mutex_lock(&scif_info.conflock);
-		scif_p2p_setup();
-		mutex_unlock(&scif_info.conflock);
-	}
-}
-
 static char *message_types[] = {"BAD",
 				"INIT",
 				"EXIT",
@@ -568,7 +573,29 @@ static char *message_types[] = {"BAD",
 				"DISCNT_ACK",
 				"CLIENT_SENT",
 				"CLIENT_RCVD",
-				"SCIF_GET_NODE_INFO"};
+				"SCIF_GET_NODE_INFO",
+				"REGISTER",
+				"REGISTER_ACK",
+				"REGISTER_NACK",
+				"UNREGISTER",
+				"UNREGISTER_ACK",
+				"UNREGISTER_NACK",
+				"ALLOC_REQ",
+				"ALLOC_GNT",
+				"ALLOC_REJ",
+				"FREE_PHYS",
+				"FREE_VIRT",
+				"MUNMAP",
+				"MARK",
+				"MARK_ACK",
+				"MARK_NACK",
+				"WAIT",
+				"WAIT_ACK",
+				"WAIT_NACK",
+				"SIGNAL_LOCAL",
+				"SIGNAL_REMOTE",
+				"SIG_ACK",
+				"SIG_NACK"};
 
 static void
 scif_display_message(struct scif_dev *scifdev, struct scifmsg *msg,
@@ -662,10 +689,16 @@ int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
 *
 * Work queue handler for servicing miscellaneous SCIF tasks.
 * Examples include:
- * 1) Cleanup of zombie endpoints.
+ * 1) Remote fence requests.
+ * 2) Destruction of temporary registered windows
+ *    created during scif_vreadfrom()/scif_vwriteto().
+ * 3) Cleanup of zombie endpoints.
 */
 void scif_misc_handler(struct work_struct *work)
 {
+	scif_rma_handle_remote_fences();
+	scif_rma_destroy_windows();
+	scif_rma_destroy_tcw_invalid();
 	scif_cleanup_zombie_epd();
 }
 
@@ -682,13 +715,14 @@ scif_init(struct scif_dev *scifdev, struct scifmsg *msg)
 	 * address to complete initializing the inbound_q.
 	 */
 	flush_delayed_work(&scifdev->qp_dwork);
-	/*
-	 * Delegate the peer device registration to a workqueue, otherwise if
-	 * SCIF client probe (called during peer device registration) calls
-	 * scif_connect(..), it will block the message processing thread causing
-	 * a deadlock.
-	 */
-	schedule_work(&scifdev->init_msg_work);
+
+	scif_peer_register_device(scifdev);
+
+	if (scif_is_mgmt_node()) {
+		mutex_lock(&scif_info.conflock);
+		scif_p2p_setup();
+		mutex_unlock(&scif_info.conflock);
+	}
 }
 
 /**
@@ -838,13 +872,13 @@ void scif_poll_qp_state(struct work_struct *work)
 					 msecs_to_jiffies(SCIF_NODE_QP_TIMEOUT));
 		return;
 	}
-	scif_peer_register_device(peerdev);
 	return;
timeout:
 	dev_err(&peerdev->sdev->dev,
 		"%s %d remote node %d offline, state = 0x%x\n",
 		__func__, __LINE__, peerdev->node, qp->qp_state);
 	qp->remote_qp->qp_state = SCIF_QP_OFFLINE;
+	scif_peer_unregister_device(peerdev);
 	scif_cleanup_scifdev(peerdev);
 }
 
@@ -894,6 +928,9 @@ scif_node_add_ack(struct scif_dev *scifdev, struct scifmsg *msg)
 		goto local_error;
 	peerdev->rdb = msg->payload[2];
 	qp->remote_qp->qp_state = SCIF_QP_ONLINE;
+
+	scif_peer_register_device(peerdev);
+
 	schedule_delayed_work(&peerdev->p2p_dwork, 0);
 	return;
local_error:
@@ -1007,6 +1044,27 @@ static void (*scif_intr_func[SCIF_MAX_MSG + 1])
 	scif_clientsend,	/* SCIF_CLIENT_SENT */
 	scif_clientrcvd,	/* SCIF_CLIENT_RCVD */
 	scif_get_node_info_resp,/* SCIF_GET_NODE_INFO */
+	scif_recv_reg,		/* SCIF_REGISTER */
+	scif_recv_reg_ack,	/* SCIF_REGISTER_ACK */
+	scif_recv_reg_nack,	/* SCIF_REGISTER_NACK */
+	scif_recv_unreg,	/* SCIF_UNREGISTER */
+	scif_recv_unreg_ack,	/* SCIF_UNREGISTER_ACK */
+	scif_recv_unreg_nack,	/* SCIF_UNREGISTER_NACK */
+	scif_alloc_req,		/* SCIF_ALLOC_REQ */
+	scif_alloc_gnt_rej,	/* SCIF_ALLOC_GNT */
+	scif_alloc_gnt_rej,	/* SCIF_ALLOC_REJ */
+	scif_free_virt,		/* SCIF_FREE_VIRT */
+	scif_recv_munmap,	/* SCIF_MUNMAP */
+	scif_recv_mark,		/* SCIF_MARK */
+	scif_recv_mark_resp,	/* SCIF_MARK_ACK */
+	scif_recv_mark_resp,	/* SCIF_MARK_NACK */
+	scif_recv_wait,		/* SCIF_WAIT */
+	scif_recv_wait_resp,	/* SCIF_WAIT_ACK */
+	scif_recv_wait_resp,	/* SCIF_WAIT_NACK */
+	scif_recv_sig_local,	/* SCIF_SIG_LOCAL */
+	scif_recv_sig_remote,	/* SCIF_SIG_REMOTE */
+	scif_recv_sig_resp,	/* SCIF_SIG_ACK */
+	scif_recv_sig_resp,	/* SCIF_SIG_NACK */
 };
 
 /**
@@ -1169,7 +1227,6 @@ int scif_setup_loopback_qp(struct scif_dev *scifdev)
 	int err = 0;
 	void *local_q;
 	struct scif_qp *qp;
-	struct scif_peer_dev *spdev;
 
 	err = scif_setup_intr_wq(scifdev);
 	if (err)
@@ -1216,15 +1273,11 @@ int scif_setup_loopback_qp(struct scif_dev *scifdev)
 				  &qp->local_write,
 				  local_q, get_count_order(SCIF_NODE_QP_SIZE));
 	scif_info.nodeid = scifdev->node;
-	spdev = scif_peer_register_device(scifdev);
-	if (IS_ERR(spdev)) {
-		err = PTR_ERR(spdev);
-		goto free_local_q;
-	}
+
+	scif_peer_register_device(scifdev);
+
 	scif_info.loopb_dev = scifdev;
 	return err;
-free_local_q:
-	kfree(local_q);
free_qpairs:
 	kfree(scifdev->qpairs);
destroy_loopb_wq:
@@ -1243,13 +1296,7 @@ exit:
 */
 int scif_destroy_loopback_qp(struct scif_dev *scifdev)
 {
-	struct scif_peer_dev *spdev;
-
-	rcu_read_lock();
-	spdev = rcu_dereference(scifdev->spdev);
-	rcu_read_unlock();
-	if (spdev)
-		scif_peer_unregister_device(spdev);
+	scif_peer_unregister_device(scifdev);
 	destroy_workqueue(scif_info.loopb_wq);
 	scif_destroy_intr_wq(scifdev);
 	kfree(scifdev->qpairs->outbound_q.rb_base);
diff --git a/drivers/misc/mic/scif/scif_nodeqp.h b/drivers/misc/mic/scif/scif_nodeqp.h
index 6c0ed6783479..95896273138e 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.h
+++ b/drivers/misc/mic/scif/scif_nodeqp.h
@@ -74,7 +74,28 @@
74#define SCIF_CLIENT_SENT 16 /* Notify the peer that data has been written */ 74#define SCIF_CLIENT_SENT 16 /* Notify the peer that data has been written */
75#define SCIF_CLIENT_RCVD 17 /* Notify the peer that data has been read */ 75#define SCIF_CLIENT_RCVD 17 /* Notify the peer that data has been read */
76#define SCIF_GET_NODE_INFO 18 /* Get current node mask from the mgmt node*/ 76#define SCIF_GET_NODE_INFO 18 /* Get current node mask from the mgmt node*/
77#define SCIF_MAX_MSG SCIF_GET_NODE_INFO 77#define SCIF_REGISTER 19 /* Tell peer about a new registered window */
78#define SCIF_REGISTER_ACK 20 /* Notify peer about unregistration success */
79#define SCIF_REGISTER_NACK 21 /* Notify peer about registration success */
80#define SCIF_UNREGISTER 22 /* Tell peer about unregistering a window */
81#define SCIF_UNREGISTER_ACK 23 /* Notify peer about registration failure */
82#define SCIF_UNREGISTER_NACK 24 /* Notify peer about unregistration failure */
83#define SCIF_ALLOC_REQ 25 /* Request a mapped buffer */
84#define SCIF_ALLOC_GNT 26 /* Notify peer about allocation success */
85#define SCIF_ALLOC_REJ 27 /* Notify peer about allocation failure */
86#define SCIF_FREE_VIRT 28 /* Free previously allocated virtual memory */
87#define SCIF_MUNMAP 29 /* Acknowledgment for a SCIF_MMAP request */
88#define SCIF_MARK 30 /* SCIF Remote Fence Mark Request */
89#define SCIF_MARK_ACK 31 /* SCIF Remote Fence Mark Success */
90#define SCIF_MARK_NACK 32 /* SCIF Remote Fence Mark Failure */
91#define SCIF_WAIT 33 /* SCIF Remote Fence Wait Request */
92#define SCIF_WAIT_ACK 34 /* SCIF Remote Fence Wait Success */
93#define SCIF_WAIT_NACK 35 /* SCIF Remote Fence Wait Failure */
94#define SCIF_SIG_LOCAL 36 /* SCIF Remote Fence Local Signal Request */
95#define SCIF_SIG_REMOTE 37 /* SCIF Remote Fence Remote Signal Request */
96#define SCIF_SIG_ACK 38 /* SCIF Remote Fence Remote Signal Success */
97#define SCIF_SIG_NACK 39 /* SCIF Remote Fence Remote Signal Failure */
98#define SCIF_MAX_MSG SCIF_SIG_NACK
78 99
79/* 100/*
80 * struct scifmsg - Node QP message format 101 * struct scifmsg - Node QP message format
@@ -92,6 +113,24 @@ struct scifmsg {
92} __packed; 113} __packed;
93 114
94/* 115/*
116 * struct scif_allocmsg - Used with SCIF_ALLOC_REQ to request
117 * the remote node to allocate memory
118 *
119 * phys_addr: Physical address of the buffer
120 * vaddr: Virtual address of the buffer
121 * size: Size of the buffer
122 * state: Current state
123 * allocwq: wait queue for status
124 */
125struct scif_allocmsg {
126 dma_addr_t phys_addr;
127 unsigned long vaddr;
128 size_t size;
129 enum scif_msg_state state;
130 wait_queue_head_t allocwq;
131};
132
133/*
95 * struct scif_qp - Node Queue Pair 134 * struct scif_qp - Node Queue Pair
96 * 135 *
97 * Interesting structure -- a little difficult because we can only 136 * Interesting structure -- a little difficult because we can only
@@ -158,7 +197,6 @@ int scif_setup_qp_connect_response(struct scif_dev *scifdev,
158int scif_setup_loopback_qp(struct scif_dev *scifdev); 197int scif_setup_loopback_qp(struct scif_dev *scifdev);
159int scif_destroy_loopback_qp(struct scif_dev *scifdev); 198int scif_destroy_loopback_qp(struct scif_dev *scifdev);
160void scif_poll_qp_state(struct work_struct *work); 199void scif_poll_qp_state(struct work_struct *work);
161void scif_qp_response_ack(struct work_struct *work);
162void scif_destroy_p2p(struct scif_dev *scifdev); 200void scif_destroy_p2p(struct scif_dev *scifdev);
163void scif_send_exit(struct scif_dev *scifdev); 201void scif_send_exit(struct scif_dev *scifdev);
164static inline struct device *scif_get_peer_dev(struct scif_dev *scifdev) 202static inline struct device *scif_get_peer_dev(struct scif_dev *scifdev)
diff --git a/drivers/misc/mic/scif/scif_peer_bus.c b/drivers/misc/mic/scif/scif_peer_bus.c
index 589ae9ad2501..6ffa3bdbd45b 100644
--- a/drivers/misc/mic/scif/scif_peer_bus.c
+++ b/drivers/misc/mic/scif/scif_peer_bus.c
@@ -24,93 +24,152 @@ dev_to_scif_peer(struct device *dev)
24 return container_of(dev, struct scif_peer_dev, dev); 24 return container_of(dev, struct scif_peer_dev, dev);
25} 25}
26 26
27static inline struct scif_peer_driver * 27struct bus_type scif_peer_bus = {
28drv_to_scif_peer(struct device_driver *drv) 28 .name = "scif_peer_bus",
29{ 29};
30 return container_of(drv, struct scif_peer_driver, driver);
31}
32 30
33static int scif_peer_dev_match(struct device *dv, struct device_driver *dr) 31static void scif_peer_release_dev(struct device *d)
34{ 32{
35 return !strncmp(dev_name(dv), dr->name, 4); 33 struct scif_peer_dev *sdev = dev_to_scif_peer(d);
34 struct scif_dev *scifdev = &scif_dev[sdev->dnode];
35
36 scif_cleanup_scifdev(scifdev);
37 kfree(sdev);
36} 38}
37 39
38static int scif_peer_dev_probe(struct device *d) 40static int scif_peer_initialize_device(struct scif_dev *scifdev)
39{ 41{
40 struct scif_peer_dev *dev = dev_to_scif_peer(d); 42 struct scif_peer_dev *spdev;
41 struct scif_peer_driver *drv = drv_to_scif_peer(dev->dev.driver); 43 int ret;
42 44
43 return drv->probe(dev); 45 spdev = kzalloc(sizeof(*spdev), GFP_KERNEL);
44} 46 if (!spdev) {
47 ret = -ENOMEM;
48 goto err;
49 }
45 50
46static int scif_peer_dev_remove(struct device *d) 51 spdev->dev.parent = scifdev->sdev->dev.parent;
47{ 52 spdev->dev.release = scif_peer_release_dev;
48 struct scif_peer_dev *dev = dev_to_scif_peer(d); 53 spdev->dnode = scifdev->node;
49 struct scif_peer_driver *drv = drv_to_scif_peer(dev->dev.driver); 54 spdev->dev.bus = &scif_peer_bus;
55 dev_set_name(&spdev->dev, "scif_peer-dev%u", spdev->dnode);
56
57 device_initialize(&spdev->dev);
58 get_device(&spdev->dev);
59 rcu_assign_pointer(scifdev->spdev, spdev);
50 60
51 drv->remove(dev); 61 mutex_lock(&scif_info.conflock);
62 scif_info.total++;
63 scif_info.maxid = max_t(u32, spdev->dnode, scif_info.maxid);
64 mutex_unlock(&scif_info.conflock);
52 return 0; 65 return 0;
66err:
67 dev_err(&scifdev->sdev->dev,
68 "dnode %d: initialize_device rc %d\n", scifdev->node, ret);
69 return ret;
53} 70}
54 71
55static struct bus_type scif_peer_bus = { 72static int scif_peer_add_device(struct scif_dev *scifdev)
56 .name = "scif_peer_bus",
57 .match = scif_peer_dev_match,
58 .probe = scif_peer_dev_probe,
59 .remove = scif_peer_dev_remove,
60};
61
62int scif_peer_register_driver(struct scif_peer_driver *driver)
63{ 73{
64 driver->driver.bus = &scif_peer_bus; 74 struct scif_peer_dev *spdev = rcu_dereference(scifdev->spdev);
65 return driver_register(&driver->driver); 75 char pool_name[16];
76 int ret;
77
78 ret = device_add(&spdev->dev);
79 put_device(&spdev->dev);
80 if (ret) {
81 dev_err(&scifdev->sdev->dev,
82 "dnode %d: peer device_add failed\n", scifdev->node);
83 goto put_spdev;
84 }
85
86 scnprintf(pool_name, sizeof(pool_name), "scif-%d", spdev->dnode);
87 scifdev->signal_pool = dmam_pool_create(pool_name, &scifdev->sdev->dev,
88 sizeof(struct scif_status), 1,
89 0);
90 if (!scifdev->signal_pool) {
91 dev_err(&scifdev->sdev->dev,
92 "dnode %d: dmam_pool_create failed\n", scifdev->node);
93 ret = -ENOMEM;
94 goto del_spdev;
95 }
96 dev_dbg(&spdev->dev, "Added peer dnode %d\n", spdev->dnode);
97 return 0;
98del_spdev:
99 device_del(&spdev->dev);
100put_spdev:
101 RCU_INIT_POINTER(scifdev->spdev, NULL);
102 synchronize_rcu();
103 put_device(&spdev->dev);
104
105 mutex_lock(&scif_info.conflock);
106 scif_info.total--;
107 mutex_unlock(&scif_info.conflock);
108 return ret;
66} 109}
67 110
68void scif_peer_unregister_driver(struct scif_peer_driver *driver) 111void scif_add_peer_device(struct work_struct *work)
69{ 112{
70 driver_unregister(&driver->driver); 113 struct scif_dev *scifdev = container_of(work, struct scif_dev,
114 peer_add_work);
115
116 scif_peer_add_device(scifdev);
71} 117}
72 118
73static void scif_peer_release_dev(struct device *d) 119/*
120 * Peer device registration is split into a device_initialize and a device_add.
121 * The reason for doing this is as follows: First, peer device registration
122 * itself cannot be done in the message processing thread and must be delegated
123 * to another workqueue, otherwise if SCIF client probe, called during peer
124 * device registration, calls scif_connect(..), it will block the message
125 * processing thread causing a deadlock. Next, device_initialize is done in the
126 * "top-half" message processing thread and device_add in the "bottom-half"
127 * workqueue. If this is not done, SCIF_CNCT_REQ message processing executing
128 * concurrently with SCIF_INIT message processing is unable to get a reference
129 * on the peer device, thereby failing the connect request.
130 */
131void scif_peer_register_device(struct scif_dev *scifdev)
74{ 132{
75 struct scif_peer_dev *sdev = dev_to_scif_peer(d); 133 int ret;
76 struct scif_dev *scifdev = &scif_dev[sdev->dnode];
77 134
78 scif_cleanup_scifdev(scifdev); 135 mutex_lock(&scifdev->lock);
79 kfree(sdev); 136 ret = scif_peer_initialize_device(scifdev);
137 if (ret)
138 goto exit;
139 schedule_work(&scifdev->peer_add_work);
140exit:
141 mutex_unlock(&scifdev->lock);
80} 142}
81 143
82struct scif_peer_dev * 144int scif_peer_unregister_device(struct scif_dev *scifdev)
83scif_peer_register_device(struct scif_dev *scifdev)
84{ 145{
85 int ret;
86 struct scif_peer_dev *spdev; 146 struct scif_peer_dev *spdev;
87 147
88 spdev = kzalloc(sizeof(*spdev), GFP_KERNEL); 148 mutex_lock(&scifdev->lock);
89 if (!spdev) 149 /* Flush work to ensure device register is complete */
90 return ERR_PTR(-ENOMEM); 150 flush_work(&scifdev->peer_add_work);
91
92 spdev->dev.parent = scifdev->sdev->dev.parent;
93 spdev->dev.release = scif_peer_release_dev;
94 spdev->dnode = scifdev->node;
95 spdev->dev.bus = &scif_peer_bus;
96 151
97 dev_set_name(&spdev->dev, "scif_peer-dev%u", spdev->dnode);
98 /* 152 /*
99 * device_register() causes the bus infrastructure to look for a 153 * Continue holding scifdev->lock since theoretically unregister_device
100 * matching driver. 154 * can be called simultaneously from multiple threads
101 */ 155 */
102 ret = device_register(&spdev->dev); 156 spdev = rcu_dereference(scifdev->spdev);
103 if (ret) 157 if (!spdev) {
104 goto free_spdev; 158 mutex_unlock(&scifdev->lock);
105 return spdev; 159 return -ENODEV;
106free_spdev: 160 }
107 kfree(spdev); 161
108 return ERR_PTR(ret); 162 RCU_INIT_POINTER(scifdev->spdev, NULL);
109} 163 synchronize_rcu();
110 164 mutex_unlock(&scifdev->lock);
111void scif_peer_unregister_device(struct scif_peer_dev *sdev) 165
112{ 166 dev_dbg(&spdev->dev, "Removing peer dnode %d\n", spdev->dnode);
113 device_unregister(&sdev->dev); 167 device_unregister(&spdev->dev);
168
169 mutex_lock(&scif_info.conflock);
170 scif_info.total--;
171 mutex_unlock(&scif_info.conflock);
172 return 0;
114} 173}
115 174
116int scif_peer_bus_init(void) 175int scif_peer_bus_init(void)
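
The rationale comment above scif_peer_register_device() describes a general device-core pattern: do a cheap device_initialize() in the caller's context and defer the potentially blocking device_add() to a workqueue. A minimal sketch of that two-phase pattern follows; my_dev/my_register are hypothetical names, not SCIF symbols:

#include <linux/device.h>
#include <linux/workqueue.h>

/* Editorial sketch, not part of the driver. */
struct my_dev {
	struct device dev;
	struct work_struct add_work;
};

static void my_add_work_fn(struct work_struct *work)
{
	struct my_dev *mdev = container_of(work, struct my_dev, add_work);

	/* Bottom half: device_add() may run driver probes and block */
	if (device_add(&mdev->dev))
		put_device(&mdev->dev);	/* drop the initialize reference */
}

static void my_register(struct my_dev *mdev, struct bus_type *bus)
{
	/* Top half: non-blocking setup in the message-processing thread */
	mdev->dev.bus = bus;
	device_initialize(&mdev->dev);	/* refcounting is live from here */
	INIT_WORK(&mdev->add_work, my_add_work_fn);
	schedule_work(&mdev->add_work);	/* device_add() runs later */
}
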
diff --git a/drivers/misc/mic/scif/scif_peer_bus.h b/drivers/misc/mic/scif/scif_peer_bus.h
index 33f0dbb30152..a3b8dd2edaa5 100644
--- a/drivers/misc/mic/scif/scif_peer_bus.h
+++ b/drivers/misc/mic/scif/scif_peer_bus.h
@@ -19,47 +19,13 @@
19 19
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/mic_common.h> 21#include <linux/mic_common.h>
22 22#include <linux/scif.h>
23/*
24 * Peer devices show up as PCIe devices for the mgmt node but not the cards.
25 * The mgmt node discovers all the cards on the PCIe bus and informs the other
26 * cards about their peers. Upon notification of a peer a node adds a peer
27 * device to the peer bus to maintain symmetry in the way devices are
28 * discovered across all nodes in the SCIF network.
29 */
30/**
31 * scif_peer_dev - representation of a peer SCIF device
32 * @dev: underlying device
33 * @dnode - The destination node which this device will communicate with.
34 */
35struct scif_peer_dev {
36 struct device dev;
37 u8 dnode;
38};
39
40/**
41 * scif_peer_driver - operations for a scif_peer I/O driver
42 * @driver: underlying device driver (populate name and owner).
43 * @id_table: the ids serviced by this driver.
44 * @probe: the function to call when a device is found. Returns 0 or -errno.
45 * @remove: the function to call when a device is removed.
46 */
47struct scif_peer_driver {
48 struct device_driver driver;
49 const struct scif_peer_dev_id *id_table;
50
51 int (*probe)(struct scif_peer_dev *dev);
52 void (*remove)(struct scif_peer_dev *dev);
53};
54 23
55struct scif_dev; 24struct scif_dev;
56 25
57int scif_peer_register_driver(struct scif_peer_driver *driver); 26void scif_add_peer_device(struct work_struct *work);
58void scif_peer_unregister_driver(struct scif_peer_driver *driver); 27void scif_peer_register_device(struct scif_dev *sdev);
59 28int scif_peer_unregister_device(struct scif_dev *scifdev);
60struct scif_peer_dev *scif_peer_register_device(struct scif_dev *sdev);
61void scif_peer_unregister_device(struct scif_peer_dev *sdev);
62
63int scif_peer_bus_init(void); 29int scif_peer_bus_init(void);
64void scif_peer_bus_exit(void); 30void scif_peer_bus_exit(void);
65#endif /* _SCIF_PEER_BUS_H */ 31#endif /* _SCIF_PEER_BUS_H */
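
scif_peer_unregister_device() above retires scifdev->spdev with RCU_INIT_POINTER() plus synchronize_rcu() before dropping the device. The generic publish/retire shape of that idiom, as a sketch with made-up struct names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Editorial sketch of the RCU pointer publish/retire pattern. */
struct foo { int val; };

struct holder {
	struct foo __rcu *obj;
};

static void publish(struct holder *h, struct foo *f)
{
	rcu_assign_pointer(h->obj, f);	/* readers may now observe f */
}

static void retire(struct holder *h)
{
	struct foo *f = rcu_dereference_protected(h->obj, 1);

	RCU_INIT_POINTER(h->obj, NULL);	/* unpublish */
	synchronize_rcu();		/* wait out pre-existing readers */
	kfree(f);			/* now safe to free */
}
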
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
new file mode 100644
index 000000000000..8310b4dbff06
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -0,0 +1,1775 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include <linux/dma_remapping.h>
19#include <linux/pagemap.h>
20#include "scif_main.h"
21#include "scif_map.h"
22
23/* Internal flag used to enforce ulimit checks; not set for SCIF_MAP_KERNEL registrations */
24#define SCIF_MAP_ULIMIT 0x40
25
26bool scif_ulimit_check = 1;
27
28/**
29 * scif_rma_ep_init:
30 * @ep: end point
31 *
32 * Initialize RMA per EP data structures.
33 */
34void scif_rma_ep_init(struct scif_endpt *ep)
35{
36 struct scif_endpt_rma_info *rma = &ep->rma_info;
37
38 mutex_init(&rma->rma_lock);
39 init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN,
40 SCIF_DMA_64BIT_PFN);
41 spin_lock_init(&rma->tc_lock);
42 mutex_init(&rma->mmn_lock);
43 INIT_LIST_HEAD(&rma->reg_list);
44 INIT_LIST_HEAD(&rma->remote_reg_list);
45 atomic_set(&rma->tw_refcount, 0);
46 atomic_set(&rma->tcw_refcount, 0);
47 atomic_set(&rma->tcw_total_pages, 0);
48 atomic_set(&rma->fence_refcount, 0);
49
50 rma->async_list_del = 0;
51 rma->dma_chan = NULL;
52 INIT_LIST_HEAD(&rma->mmn_list);
53 INIT_LIST_HEAD(&rma->vma_list);
54 init_waitqueue_head(&rma->markwq);
55}
56
57/**
58 * scif_rma_ep_can_uninit:
59 * @ep: end point
60 *
61 * Returns 1 if an endpoint can be uninitialized and 0 otherwise.
62 */
63int scif_rma_ep_can_uninit(struct scif_endpt *ep)
64{
65 int ret = 0;
66
67 mutex_lock(&ep->rma_info.rma_lock);
68 /* Destroy RMA Info only if both lists are empty */
69 if (list_empty(&ep->rma_info.reg_list) &&
70 list_empty(&ep->rma_info.remote_reg_list) &&
71 list_empty(&ep->rma_info.mmn_list) &&
72 !atomic_read(&ep->rma_info.tw_refcount) &&
73 !atomic_read(&ep->rma_info.tcw_refcount) &&
74 !atomic_read(&ep->rma_info.fence_refcount))
75 ret = 1;
76 mutex_unlock(&ep->rma_info.rma_lock);
77 return ret;
78}
79
80/**
81 * scif_create_pinned_pages:
82 * @nr_pages: number of pages in window
83 * @prot: read/write protection
84 *
85 * Allocate and prepare a set of pinned pages.
86 */
87static struct scif_pinned_pages *
88scif_create_pinned_pages(int nr_pages, int prot)
89{
90 struct scif_pinned_pages *pin;
91
92 might_sleep();
93 pin = scif_zalloc(sizeof(*pin));
94 if (!pin)
95 goto error;
96
97 pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages));
98 if (!pin->pages)
99 goto error_free_pinned_pages;
100
101 pin->prot = prot;
102 pin->magic = SCIFEP_MAGIC;
103 return pin;
104
105error_free_pinned_pages:
106 scif_free(pin, sizeof(*pin));
107error:
108 return NULL;
109}
110
111/**
112 * scif_destroy_pinned_pages:
113 * @pin: A set of pinned pages.
114 *
115 * Deallocate resources for pinned pages.
116 */
117static int scif_destroy_pinned_pages(struct scif_pinned_pages *pin)
118{
119 int j;
120 int writeable = pin->prot & SCIF_PROT_WRITE;
121 int kernel = SCIF_MAP_KERNEL & pin->map_flags;
122
123 for (j = 0; j < pin->nr_pages; j++) {
124 if (pin->pages[j] && !kernel) {
125 if (writeable)
126 SetPageDirty(pin->pages[j]);
127 put_page(pin->pages[j]);
128 }
129 }
130
131 scif_free(pin->pages,
132 pin->nr_pages * sizeof(*pin->pages));
133 scif_free(pin, sizeof(*pin));
134 return 0;
135}
136
137/*
138 * scif_create_window:
139 * @ep: end point
140 * @nr_pages: number of pages
141 * @offset: registration offset
142 * @temp: true if a temporary window is being created
143 *
144 * Allocate and prepare a self registration window.
145 */
146struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
147 s64 offset, bool temp)
148{
149 struct scif_window *window;
150
151 might_sleep();
152 window = scif_zalloc(sizeof(*window));
153 if (!window)
154 goto error;
155
156 window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
157 if (!window->dma_addr)
158 goto error_free_window;
159
160 window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages));
161 if (!window->num_pages)
162 goto error_free_window;
163
164 window->offset = offset;
165 window->ep = (u64)ep;
166 window->magic = SCIFEP_MAGIC;
167 window->reg_state = OP_IDLE;
168 init_waitqueue_head(&window->regwq);
169 window->unreg_state = OP_IDLE;
170 init_waitqueue_head(&window->unregwq);
171 INIT_LIST_HEAD(&window->list);
172 window->type = SCIF_WINDOW_SELF;
173 window->temp = temp;
174 return window;
175
176error_free_window:
177 scif_free(window->dma_addr,
178 nr_pages * sizeof(*window->dma_addr));
179 scif_free(window, sizeof(*window));
180error:
181 return NULL;
182}
183
184/**
185 * scif_destroy_incomplete_window:
186 * @ep: end point
187 * @window: registration window
188 *
189 * Deallocate resources for self window.
190 */
191static void scif_destroy_incomplete_window(struct scif_endpt *ep,
192 struct scif_window *window)
193{
194 int err;
195 int nr_pages = window->nr_pages;
196 struct scif_allocmsg *alloc = &window->alloc_handle;
197 struct scifmsg msg;
198
199retry:
200 /* Wait for a SCIF_ALLOC_GNT/REJ message */
201 err = wait_event_timeout(alloc->allocwq,
202 alloc->state != OP_IN_PROGRESS,
203 SCIF_NODE_ALIVE_TIMEOUT);
204 if (!err && scifdev_alive(ep))
205 goto retry;
206
207 mutex_lock(&ep->rma_info.rma_lock);
208 if (alloc->state == OP_COMPLETED) {
209 msg.uop = SCIF_FREE_VIRT;
210 msg.src = ep->port;
211 msg.payload[0] = ep->remote_ep;
212 msg.payload[1] = window->alloc_handle.vaddr;
213 msg.payload[2] = (u64)window;
214 msg.payload[3] = SCIF_REGISTER;
215 _scif_nodeqp_send(ep->remote_dev, &msg);
216 }
217 mutex_unlock(&ep->rma_info.rma_lock);
218
219 scif_free_window_offset(ep, window, window->offset);
220 scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
221 scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
222 scif_free(window, sizeof(*window));
223}
224
225/**
226 * scif_unmap_window:
227 * @remote_dev: SCIF remote device
228 * @window: registration window
229 *
230 * Delete any DMA mappings created for a registered self window
231 */
232void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window)
233{
234 int j;
235
236 if (scif_is_iommu_enabled() && !scifdev_self(remote_dev)) {
237 if (window->st) {
238 dma_unmap_sg(&remote_dev->sdev->dev,
239 window->st->sgl, window->st->nents,
240 DMA_BIDIRECTIONAL);
241 sg_free_table(window->st);
242 kfree(window->st);
243 window->st = NULL;
244 }
245 } else {
246 for (j = 0; j < window->nr_contig_chunks; j++) {
247 if (window->dma_addr[j]) {
248 scif_unmap_single(window->dma_addr[j],
249 remote_dev,
250 window->num_pages[j] <<
251 PAGE_SHIFT);
252 window->dma_addr[j] = 0x0;
253 }
254 }
255 }
256}
257
258static inline struct mm_struct *__scif_acquire_mm(void)
259{
260 if (scif_ulimit_check)
261 return get_task_mm(current);
262 return NULL;
263}
264
265static inline void __scif_release_mm(struct mm_struct *mm)
266{
267 if (mm)
268 mmput(mm);
269}
270
271static inline int
272__scif_dec_pinned_vm_lock(struct mm_struct *mm,
273 int nr_pages, bool try_lock)
274{
275 if (!mm || !nr_pages || !scif_ulimit_check)
276 return 0;
277 if (try_lock) {
278 if (!down_write_trylock(&mm->mmap_sem)) {
279 dev_err(scif_info.mdev.this_device,
280 "%s %d err\n", __func__, __LINE__);
281 return -1;
282 }
283 } else {
284 down_write(&mm->mmap_sem);
285 }
286 mm->pinned_vm -= nr_pages;
287 up_write(&mm->mmap_sem);
288 return 0;
289}
290
291static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
292 int nr_pages)
293{
294 unsigned long locked, lock_limit;
295
296 if (!mm || !nr_pages || !scif_ulimit_check)
297 return 0;
298
299 locked = nr_pages;
300 locked += mm->pinned_vm;
301 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
302 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
303 dev_err(scif_info.mdev.this_device,
304 "locked(%lu) > lock_limit(%lu)\n",
305 locked, lock_limit);
306 return -ENOMEM;
307 }
308 mm->pinned_vm = locked;
309 return 0;
310}
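
__scif_check_inc_pinned_vm() compares the running pinned_vm total against RLIMIT_MEMLOCK converted from bytes to pages. The conversion itself can be reproduced from user space; a runnable sketch:

#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

/* Editorial sketch: the bytes-to-pages conversion behind
 * rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT in the check above.
 */
int main(void)
{
	struct rlimit rl;
	long page_size = sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return 1;
	/* e.g. a 64 KiB limit with 4 KiB pages permits 16 pinned pages */
	printf("lock_limit = %llu pages\n",
	       (unsigned long long)(rl.rlim_cur / page_size));
	return 0;
}
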
311
312/**
313 * scif_destroy_window:
314 * @ep: end point
315 * @window: registration window
316 *
317 * Deallocate resources for self window.
318 */
319int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window)
320{
321 int j;
322 struct scif_pinned_pages *pinned_pages = window->pinned_pages;
323 int nr_pages = window->nr_pages;
324
325 might_sleep();
326 if (!window->temp && window->mm) {
327 __scif_dec_pinned_vm_lock(window->mm, window->nr_pages, 0);
328 __scif_release_mm(window->mm);
329 window->mm = NULL;
330 }
331
332 scif_free_window_offset(ep, window, window->offset);
333 scif_unmap_window(ep->remote_dev, window);
334 /*
335 * Decrement references for this set of pinned pages from
336 * this window.
337 */
338 j = atomic_sub_return(1, &pinned_pages->ref_count);
339 if (j < 0)
340 dev_err(scif_info.mdev.this_device,
341 "%s %d incorrect ref count %d\n",
342 __func__, __LINE__, j);
343 /*
344 * If the ref count for pinned_pages is zero then someone
345 * has already called scif_unpin_pages() for it and we should
346 * destroy the page cache.
347 */
348 if (!j)
349 scif_destroy_pinned_pages(window->pinned_pages);
350 scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
351 scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
352 window->magic = 0;
353 scif_free(window, sizeof(*window));
354 return 0;
355}
356
357/**
358 * scif_create_remote_lookup:
359 * @remote_dev: SCIF remote device
360 * @window: remote window
361 *
362 * Allocate and prepare lookup entries for the remote
363 * end to copy over the physical addresses.
364 * Returns 0 on success and appropriate errno on failure.
365 */
366static int scif_create_remote_lookup(struct scif_dev *remote_dev,
367 struct scif_window *window)
368{
369 int i, j, err = 0;
370 int nr_pages = window->nr_pages;
371 bool vmalloc_dma_phys, vmalloc_num_pages;
372
373 might_sleep();
374 /* Map window */
375 err = scif_map_single(&window->mapped_offset,
376 window, remote_dev, sizeof(*window));
377 if (err)
378 goto error_window;
379
380 /* Compute the number of lookup entries. 21 == 2MB Shift */
381 window->nr_lookup = ALIGN(nr_pages * PAGE_SIZE,
382 ((2) * 1024 * 1024)) >> 21;
383
384 window->dma_addr_lookup.lookup =
385 scif_alloc_coherent(&window->dma_addr_lookup.offset,
386 remote_dev, window->nr_lookup *
387 sizeof(*window->dma_addr_lookup.lookup),
388 GFP_KERNEL | __GFP_ZERO);
389 if (!window->dma_addr_lookup.lookup) {
390 err = -ENOMEM;
391 goto error_window;
392 }
393
394 window->num_pages_lookup.lookup =
395 scif_alloc_coherent(&window->num_pages_lookup.offset,
396 remote_dev, window->nr_lookup *
397 sizeof(*window->num_pages_lookup.lookup),
398 GFP_KERNEL | __GFP_ZERO);
399 if (!window->num_pages_lookup.lookup) {
400 err = -ENOMEM;
401 goto error_window;
402 }
403
404 vmalloc_dma_phys = is_vmalloc_addr(&window->dma_addr[0]);
405 vmalloc_num_pages = is_vmalloc_addr(&window->num_pages[0]);
406
407 /* Now map each of the pages containing physical addresses */
408 for (i = 0, j = 0; i < nr_pages; i += SCIF_NR_ADDR_IN_PAGE, j++) {
409 err = scif_map_page(&window->dma_addr_lookup.lookup[j],
410 vmalloc_dma_phys ?
411 vmalloc_to_page(&window->dma_addr[i]) :
412 virt_to_page(&window->dma_addr[i]),
413 remote_dev);
414 if (err)
415 goto error_window;
416 err = scif_map_page(&window->num_pages_lookup.lookup[j],
417 vmalloc_dma_phys ?
418 vmalloc_to_page(&window->num_pages[i]) :
419 virt_to_page(&window->num_pages[i]),
420 remote_dev);
421 if (err)
422 goto error_window;
423 }
424 return 0;
425error_window:
426 return err;
427}
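
The "21 == 2MB Shift" computation above sizes the lookup table so that one lookup page of 512 eight-byte addresses (4 KiB pages assumed) covers 2 MiB of registered memory. A runnable worked example of the arithmetic:

#include <stdio.h>

/* Editorial sketch of the nr_lookup arithmetic in
 * scif_create_remote_lookup(); 4 KiB pages assumed.
 */
#define MY_PAGE_SIZE 4096UL
#define MY_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long nr_pages = 1000;	/* 1000 * 4 KiB registered */
	unsigned long nr_lookup =
		MY_ALIGN(nr_pages * MY_PAGE_SIZE, 2UL * 1024 * 1024) >> 21;

	printf("nr_lookup = %lu\n", nr_lookup);	/* prints 2 */
	return 0;
}
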
428
429/**
430 * scif_destroy_remote_lookup:
431 * @remote_dev: SCIF remote device
432 * @window: remote window
433 *
434 * Destroy lookup entries used for the remote
435 * end to copy over the physical addresses.
436 */
437static void scif_destroy_remote_lookup(struct scif_dev *remote_dev,
438 struct scif_window *window)
439{
440 int i, j;
441
442 if (window->nr_lookup) {
443 struct scif_rma_lookup *lup = &window->dma_addr_lookup;
444 struct scif_rma_lookup *npup = &window->num_pages_lookup;
445
446 for (i = 0, j = 0; i < window->nr_pages;
447 i += SCIF_NR_ADDR_IN_PAGE, j++) {
448 if (lup->lookup && lup->lookup[j])
449 scif_unmap_single(lup->lookup[j],
450 remote_dev,
451 PAGE_SIZE);
452 if (npup->lookup && npup->lookup[j])
453 scif_unmap_single(npup->lookup[j],
454 remote_dev,
455 PAGE_SIZE);
456 }
457 if (lup->lookup)
458 scif_free_coherent(lup->lookup, lup->offset,
459 remote_dev, window->nr_lookup *
460 sizeof(*lup->lookup));
461 if (npup->lookup)
462 scif_free_coherent(npup->lookup, npup->offset,
463 remote_dev, window->nr_lookup *
464 sizeof(*npup->lookup));
465 if (window->mapped_offset)
466 scif_unmap_single(window->mapped_offset,
467 remote_dev, sizeof(*window));
468 window->nr_lookup = 0;
469 }
470}
471
472/**
473 * scif_create_remote_window:
474 * @ep: end point
475 * @nr_pages: number of pages in window
476 *
477 * Allocate and prepare a remote registration window.
478 */
479static struct scif_window *
480scif_create_remote_window(struct scif_dev *scifdev, int nr_pages)
481{
482 struct scif_window *window;
483
484 might_sleep();
485 window = scif_zalloc(sizeof(*window));
486 if (!window)
487 goto error_ret;
488
489 window->magic = SCIFEP_MAGIC;
490 window->nr_pages = nr_pages;
491
492 window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
493 if (!window->dma_addr)
494 goto error_window;
495
496 window->num_pages = scif_zalloc(nr_pages *
497 sizeof(*window->num_pages));
498 if (!window->num_pages)
499 goto error_window;
500
501 if (scif_create_remote_lookup(scifdev, window))
502 goto error_window;
503
504 window->type = SCIF_WINDOW_PEER;
505 window->unreg_state = OP_IDLE;
506 INIT_LIST_HEAD(&window->list);
507 return window;
508error_window:
509 scif_destroy_remote_window(window);
510error_ret:
511 return NULL;
512}
513
514/**
515 * scif_destroy_remote_window:
516 * @ep: end point
517 * @window: remote registration window
518 *
519 * Deallocate resources for remote window.
520 */
521void
522scif_destroy_remote_window(struct scif_window *window)
523{
524 scif_free(window->dma_addr, window->nr_pages *
525 sizeof(*window->dma_addr));
526 scif_free(window->num_pages, window->nr_pages *
527 sizeof(*window->num_pages));
528 window->magic = 0;
529 scif_free(window, sizeof(*window));
530}
531
532/**
533 * scif_iommu_map: create DMA mappings if the IOMMU is enabled
534 * @remote_dev: SCIF remote device
535 * @window: remote registration window
536 *
537 * Map the physical pages using dma_map_sg(..) and then detect the number
538 * of contiguous DMA mappings allocated
539 */
540static int scif_iommu_map(struct scif_dev *remote_dev,
541 struct scif_window *window)
542{
543 struct scatterlist *sg;
544 int i, err;
545 scif_pinned_pages_t pin = window->pinned_pages;
546
547 window->st = kzalloc(sizeof(*window->st), GFP_KERNEL);
548 if (!window->st)
549 return -ENOMEM;
550
551 err = sg_alloc_table(window->st, window->nr_pages, GFP_KERNEL);
552 if (err)
553 return err;
554
555 for_each_sg(window->st->sgl, sg, window->st->nents, i)
556 sg_set_page(sg, pin->pages[i], PAGE_SIZE, 0x0);
557
558 err = dma_map_sg(&remote_dev->sdev->dev, window->st->sgl,
559 window->st->nents, DMA_BIDIRECTIONAL);
560 if (!err)
561 return -ENOMEM;
562 /* Detect contiguous ranges of DMA mappings */
563 sg = window->st->sgl;
564 for (i = 0; sg; i++) {
565 dma_addr_t last_da;
566
567 window->dma_addr[i] = sg_dma_address(sg);
568 window->num_pages[i] = sg_dma_len(sg) >> PAGE_SHIFT;
569 last_da = sg_dma_address(sg) + sg_dma_len(sg);
570 while ((sg = sg_next(sg)) && sg_dma_address(sg) == last_da) {
571 window->num_pages[i] +=
572 (sg_dma_len(sg) >> PAGE_SHIFT);
573 last_da = window->dma_addr[i] +
574 sg_dma_len(sg);
575 }
576 window->nr_contig_chunks++;
577 }
578 return 0;
579}
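
After dma_map_sg(), the loop above folds scatterlist entries whose DMA address starts exactly where the previous entry ended into one contiguous chunk. The same coalescing on a plain array, as a runnable sketch:

#include <stdio.h>

/* Editorial sketch: coalescing contiguous ranges as scif_iommu_map()
 * does for dma_map_sg() output; addrs/lens stand in for the sg list.
 */
int main(void)
{
	unsigned long addrs[] = { 0x1000, 0x2000, 0x3000, 0x8000, 0x9000 };
	unsigned long lens[]  = { 0x1000, 0x1000, 0x1000, 0x1000, 0x1000 };
	int n = 5, i = 0, chunks = 0;

	while (i < n) {
		unsigned long start = addrs[i], end = addrs[i] + lens[i];

		/* absorb entries beginning exactly where the last ended */
		while (++i < n && addrs[i] == end)
			end += lens[i];
		/* prints 0x1000..0x4000 and 0x8000..0xa000: two chunks */
		printf("chunk %d: 0x%lx..0x%lx\n", chunks++, start, end);
	}
	return 0;
}
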
580
581/**
582 * scif_map_window:
583 * @remote_dev: SCIF remote device
584 * @window: self registration window
585 *
586 * Map pages of a window into the aperture/PCI.
587 * Also determine addresses required for DMA.
588 */
589int
590scif_map_window(struct scif_dev *remote_dev, struct scif_window *window)
591{
592 int i, j, k, err = 0, nr_contig_pages;
593 scif_pinned_pages_t pin;
594 phys_addr_t phys_prev, phys_curr;
595
596 might_sleep();
597
598 pin = window->pinned_pages;
599
600 if (intel_iommu_enabled && !scifdev_self(remote_dev))
601 return scif_iommu_map(remote_dev, window);
602
603 for (i = 0, j = 0; i < window->nr_pages; i += nr_contig_pages, j++) {
604 phys_prev = page_to_phys(pin->pages[i]);
605 nr_contig_pages = 1;
606
607 /* Detect physically contiguous chunks */
608 for (k = i + 1; k < window->nr_pages; k++) {
609 phys_curr = page_to_phys(pin->pages[k]);
610 if (phys_curr != (phys_prev + PAGE_SIZE))
611 break;
612 phys_prev = phys_curr;
613 nr_contig_pages++;
614 }
615 window->num_pages[j] = nr_contig_pages;
616 window->nr_contig_chunks++;
617 if (scif_is_mgmt_node()) {
618 /*
619 * Management node has to deal with SMPT on X100 and
620 * hence the DMA mapping is required
621 */
622 err = scif_map_single(&window->dma_addr[j],
623 phys_to_virt(page_to_phys(
624 pin->pages[i])),
625 remote_dev,
626 nr_contig_pages << PAGE_SHIFT);
627 if (err)
628 return err;
629 } else {
630 window->dma_addr[j] = page_to_phys(pin->pages[i]);
631 }
632 }
633 return err;
634}
635
636/**
637 * scif_send_scif_unregister:
638 * @ep: end point
639 * @window: self registration window
640 *
641 * Send a SCIF_UNREGISTER message.
642 */
643static int scif_send_scif_unregister(struct scif_endpt *ep,
644 struct scif_window *window)
645{
646 struct scifmsg msg;
647
648 msg.uop = SCIF_UNREGISTER;
649 msg.src = ep->port;
650 msg.payload[0] = window->alloc_handle.vaddr;
651 msg.payload[1] = (u64)window;
652 return scif_nodeqp_send(ep->remote_dev, &msg);
653}
654
655/**
656 * scif_unregister_window:
657 * @window: self registration window
658 *
659 * Send an unregistration request and wait for a response.
660 */
661int scif_unregister_window(struct scif_window *window)
662{
663 int err = 0;
664 struct scif_endpt *ep = (struct scif_endpt *)window->ep;
665 bool send_msg = false;
666
667 might_sleep();
668 switch (window->unreg_state) {
669 case OP_IDLE:
670 {
671 window->unreg_state = OP_IN_PROGRESS;
672 send_msg = true;
673 /* fall through */
674 }
675 case OP_IN_PROGRESS:
676 {
677 scif_get_window(window, 1);
678 mutex_unlock(&ep->rma_info.rma_lock);
679 if (send_msg) {
680 err = scif_send_scif_unregister(ep, window);
681 if (err) {
682 window->unreg_state = OP_COMPLETED;
683 goto done;
684 }
685 } else {
686 /* Return ENXIO since unregistration is in progress */
687 mutex_lock(&ep->rma_info.rma_lock);
688 return -ENXIO;
689 }
690retry:
691 /* Wait for a SCIF_UNREGISTER_(N)ACK message */
692 err = wait_event_timeout(window->unregwq,
693 window->unreg_state != OP_IN_PROGRESS,
694 SCIF_NODE_ALIVE_TIMEOUT);
695 if (!err && scifdev_alive(ep))
696 goto retry;
697 if (!err) {
698 err = -ENODEV;
699 window->unreg_state = OP_COMPLETED;
700 dev_err(scif_info.mdev.this_device,
701 "%s %d err %d\n", __func__, __LINE__, err);
702 }
703 if (err > 0)
704 err = 0;
705done:
706 mutex_lock(&ep->rma_info.rma_lock);
707 scif_put_window(window, 1);
708 break;
709 }
710 case OP_FAILED:
711 {
712 if (!scifdev_alive(ep)) {
713 err = -ENODEV;
714 window->unreg_state = OP_COMPLETED;
715 }
716 break;
717 }
718 case OP_COMPLETED:
719 break;
720 default:
721 err = -ENODEV;
722 }
723
724 if (window->unreg_state == OP_COMPLETED && window->ref_count)
725 scif_put_window(window, window->nr_pages);
726
727 if (!window->ref_count) {
728 atomic_inc(&ep->rma_info.tw_refcount);
729 list_del_init(&window->list);
730 scif_free_window_offset(ep, window, window->offset);
731 mutex_unlock(&ep->rma_info.rma_lock);
732 if ((!!(window->pinned_pages->map_flags & SCIF_MAP_KERNEL)) &&
733 scifdev_alive(ep)) {
734 scif_drain_dma_intr(ep->remote_dev->sdev,
735 ep->rma_info.dma_chan);
736 } else {
737 if (!__scif_dec_pinned_vm_lock(window->mm,
738 window->nr_pages, 1)) {
739 __scif_release_mm(window->mm);
740 window->mm = NULL;
741 }
742 }
743 scif_queue_for_cleanup(window, &scif_info.rma);
744 mutex_lock(&ep->rma_info.rma_lock);
745 }
746 return err;
747}
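
scif_unregister_window() uses the bounded-wait idiom that recurs throughout this file: wait_event_timeout() with SCIF_NODE_ALIVE_TIMEOUT, retried while the peer node is still alive and turned into -ENODEV once it is not. The pattern in isolation, as a sketch in which peer_alive() is a hypothetical stand-in for scifdev_alive():

#include <linux/wait.h>
#include <linux/errno.h>

/* Editorial sketch of the retry-while-alive wait used above. */
static int wait_for_peer(wait_queue_head_t *wq, bool *done,
			 bool (*peer_alive)(void))
{
	long ret;

retry:
	ret = wait_event_timeout(*wq, *done, HZ);
	if (!ret && peer_alive())
		goto retry;	/* timed out, but the peer is up: wait more */
	return ret ? 0 : -ENODEV;	/* peer died before completing */
}
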
748
749/**
750 * scif_send_alloc_request:
751 * @ep: end point
752 * @window: self registration window
753 *
754 * Send a remote window allocation request
755 */
756static int scif_send_alloc_request(struct scif_endpt *ep,
757 struct scif_window *window)
758{
759 struct scifmsg msg;
760 struct scif_allocmsg *alloc = &window->alloc_handle;
761
762 /* Set up the Alloc Handle */
763 alloc->state = OP_IN_PROGRESS;
764 init_waitqueue_head(&alloc->allocwq);
765
766 /* Send out an allocation request */
767 msg.uop = SCIF_ALLOC_REQ;
768 msg.payload[1] = window->nr_pages;
769 msg.payload[2] = (u64)&window->alloc_handle;
770 return _scif_nodeqp_send(ep->remote_dev, &msg);
771}
772
773/**
774 * scif_prep_remote_window:
775 * @ep: end point
776 * @window: self registration window
777 *
778 * Send a remote window allocation request, wait for an allocation response,
779 * and prepare the remote window by copying over the page lists
780 */
781static int scif_prep_remote_window(struct scif_endpt *ep,
782 struct scif_window *window)
783{
784 struct scifmsg msg;
785 struct scif_window *remote_window;
786 struct scif_allocmsg *alloc = &window->alloc_handle;
787 dma_addr_t *dma_phys_lookup, *tmp, *num_pages_lookup, *tmp1;
788 int i = 0, j = 0;
789 int nr_contig_chunks, loop_nr_contig_chunks;
790 int remaining_nr_contig_chunks, nr_lookup;
791 int err, map_err;
792
793 map_err = scif_map_window(ep->remote_dev, window);
794 if (map_err)
795 dev_err(&ep->remote_dev->sdev->dev,
796 "%s %d map_err %d\n", __func__, __LINE__, map_err);
797 remaining_nr_contig_chunks = window->nr_contig_chunks;
798 nr_contig_chunks = window->nr_contig_chunks;
799retry:
800 /* Wait for a SCIF_ALLOC_GNT/REJ message */
801 err = wait_event_timeout(alloc->allocwq,
802 alloc->state != OP_IN_PROGRESS,
803 SCIF_NODE_ALIVE_TIMEOUT);
804 mutex_lock(&ep->rma_info.rma_lock);
805 /* Synchronize with the thread waking up allocwq */
806 mutex_unlock(&ep->rma_info.rma_lock);
807 if (!err && scifdev_alive(ep))
808 goto retry;
809
810 if (!err)
811 err = -ENODEV;
812
813 if (err > 0)
814 err = 0;
815 else
816 return err;
817
818 /* Bail out. The remote end rejected this request */
819 if (alloc->state == OP_FAILED)
820 return -ENOMEM;
821
822 if (map_err) {
823 dev_err(&ep->remote_dev->sdev->dev,
824 "%s %d err %d\n", __func__, __LINE__, map_err);
825 msg.uop = SCIF_FREE_VIRT;
826 msg.src = ep->port;
827 msg.payload[0] = ep->remote_ep;
828 msg.payload[1] = window->alloc_handle.vaddr;
829 msg.payload[2] = (u64)window;
830 msg.payload[3] = SCIF_REGISTER;
831 spin_lock(&ep->lock);
832 if (ep->state == SCIFEP_CONNECTED)
833 err = _scif_nodeqp_send(ep->remote_dev, &msg);
834 else
835 err = -ENOTCONN;
836 spin_unlock(&ep->lock);
837 return err;
838 }
839
840 remote_window = scif_ioremap(alloc->phys_addr, sizeof(*window),
841 ep->remote_dev);
842
843 /* Compute the number of lookup entries. 21 == 2MB Shift */
844 nr_lookup = ALIGN(nr_contig_chunks, SCIF_NR_ADDR_IN_PAGE)
845 >> ilog2(SCIF_NR_ADDR_IN_PAGE);
846
847 dma_phys_lookup =
848 scif_ioremap(remote_window->dma_addr_lookup.offset,
849 nr_lookup *
850 sizeof(*remote_window->dma_addr_lookup.lookup),
851 ep->remote_dev);
852 num_pages_lookup =
853 scif_ioremap(remote_window->num_pages_lookup.offset,
854 nr_lookup *
855 sizeof(*remote_window->num_pages_lookup.lookup),
856 ep->remote_dev);
857
858 while (remaining_nr_contig_chunks) {
859 loop_nr_contig_chunks = min_t(int, remaining_nr_contig_chunks,
860 (int)SCIF_NR_ADDR_IN_PAGE);
861 /* #1/2 - Copy physical addresses over to the remote side */
862
863 /* #2/2 - Copy DMA addresses (addresses that are fed into the
864 * DMA engine) We transfer bus addresses which are then
865 * converted into a MIC physical address on the remote
866 * side if it is a MIC, if the remote node is a mgmt node we
867 * transfer the MIC physical address
868 */
869 tmp = scif_ioremap(dma_phys_lookup[j],
870 loop_nr_contig_chunks *
871 sizeof(*window->dma_addr),
872 ep->remote_dev);
873 tmp1 = scif_ioremap(num_pages_lookup[j],
874 loop_nr_contig_chunks *
875 sizeof(*window->num_pages),
876 ep->remote_dev);
877 if (scif_is_mgmt_node()) {
878 memcpy_toio((void __force __iomem *)tmp,
879 &window->dma_addr[i], loop_nr_contig_chunks
880 * sizeof(*window->dma_addr));
881 memcpy_toio((void __force __iomem *)tmp1,
882 &window->num_pages[i], loop_nr_contig_chunks
883 * sizeof(*window->num_pages));
884 } else {
885 if (scifdev_is_p2p(ep->remote_dev)) {
886 /*
887 * add remote node's base address for this node
888 * to convert it into a MIC address
889 */
890 int m;
891 dma_addr_t dma_addr;
892
893 for (m = 0; m < loop_nr_contig_chunks; m++) {
894 dma_addr = window->dma_addr[i + m] +
895 ep->remote_dev->base_addr;
896 writeq(dma_addr,
897 (void __force __iomem *)&tmp[m]);
898 }
899 memcpy_toio((void __force __iomem *)tmp1,
900 &window->num_pages[i],
901 loop_nr_contig_chunks
902 * sizeof(*window->num_pages));
903 } else {
904 /* Mgmt node or loopback - transfer DMA
905 * addresses as is; this is the same as a
906 * MIC physical address (we use the dma_addr
907 * and not the phys_addr array since the
908 * phys_addr is only set up if there is a mmap()
909 * request from the mgmt node)
910 */
911 memcpy_toio((void __force __iomem *)tmp,
912 &window->dma_addr[i],
913 loop_nr_contig_chunks *
914 sizeof(*window->dma_addr));
915 memcpy_toio((void __force __iomem *)tmp1,
916 &window->num_pages[i],
917 loop_nr_contig_chunks *
918 sizeof(*window->num_pages));
919 }
920 }
921 remaining_nr_contig_chunks -= loop_nr_contig_chunks;
922 i += loop_nr_contig_chunks;
923 j++;
924 scif_iounmap(tmp, loop_nr_contig_chunks *
925 sizeof(*window->dma_addr), ep->remote_dev);
926 scif_iounmap(tmp1, loop_nr_contig_chunks *
927 sizeof(*window->num_pages), ep->remote_dev);
928 }
929
930 /* Prepare the remote window for the peer */
931 remote_window->peer_window = (u64)window;
932 remote_window->offset = window->offset;
933 remote_window->prot = window->prot;
934 remote_window->nr_contig_chunks = nr_contig_chunks;
935 remote_window->ep = ep->remote_ep;
936 scif_iounmap(num_pages_lookup,
937 nr_lookup *
938 sizeof(*remote_window->num_pages_lookup.lookup),
939 ep->remote_dev);
940 scif_iounmap(dma_phys_lookup,
941 nr_lookup *
942 sizeof(*remote_window->dma_addr_lookup.lookup),
943 ep->remote_dev);
944 scif_iounmap(remote_window, sizeof(*remote_window), ep->remote_dev);
945 window->peer_window = alloc->vaddr;
946 return err;
947}
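
Because the peer's window lives behind a PCIe BAR, scif_prep_remote_window() fills it through scif_ioremap()ed pointers with memcpy_toio() and writeq() instead of plain stores. The bare shape of that idiom, as a sketch whose base_fixup mirrors the p2p rebasing branch above:

#include <linux/io.h>
#include <linux/types.h>

/* Editorial sketch: writing a table into a BAR-backed mapping. */
static void copy_table_to_peer(u64 __iomem *dst, const u64 *table,
			       int n, u64 base_fixup)
{
	int i;

	if (!base_fixup) {
		/* values go out unchanged: one bulk MMIO copy */
		memcpy_toio(dst, table, n * sizeof(*table));
		return;
	}
	/* each value needs rebasing first: element-wise writeq() */
	for (i = 0; i < n; i++)
		writeq(table[i] + base_fixup, &dst[i]);
}
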
948
949/**
950 * scif_send_scif_register:
951 * @ep: end point
952 * @window: self registration window
953 *
954 * Send a SCIF_REGISTER message if EP is connected and wait for a
955 * SCIF_REGISTER_(N)ACK message else send a SCIF_FREE_VIRT
956 * message so that the peer can free its remote window allocated earlier.
957 */
958static int scif_send_scif_register(struct scif_endpt *ep,
959 struct scif_window *window)
960{
961 int err = 0;
962 struct scifmsg msg;
963
964 msg.src = ep->port;
965 msg.payload[0] = ep->remote_ep;
966 msg.payload[1] = window->alloc_handle.vaddr;
967 msg.payload[2] = (u64)window;
968 spin_lock(&ep->lock);
969 if (ep->state == SCIFEP_CONNECTED) {
970 msg.uop = SCIF_REGISTER;
971 window->reg_state = OP_IN_PROGRESS;
972 err = _scif_nodeqp_send(ep->remote_dev, &msg);
973 spin_unlock(&ep->lock);
974 if (!err) {
975retry:
976 /* Wait for a SCIF_REGISTER_(N)ACK message */
977 err = wait_event_timeout(window->regwq,
978 window->reg_state !=
979 OP_IN_PROGRESS,
980 SCIF_NODE_ALIVE_TIMEOUT);
981 if (!err && scifdev_alive(ep))
982 goto retry;
983 err = !err ? -ENODEV : 0;
984 if (window->reg_state == OP_FAILED)
985 err = -ENOTCONN;
986 }
987 } else {
988 msg.uop = SCIF_FREE_VIRT;
989 msg.payload[3] = SCIF_REGISTER;
990 err = _scif_nodeqp_send(ep->remote_dev, &msg);
991 spin_unlock(&ep->lock);
992 if (!err)
993 err = -ENOTCONN;
994 }
995 return err;
996}
997
998/**
999 * scif_get_window_offset:
1000 * @ep: end point descriptor
1001 * @flags: map flags; SCIF_MAP_FIXED makes the @offset hint mandatory
1002 * @offset: offset hint
1003 * @num_pages: number of pages
1004 * @out_offset: computed offset returned by reference.
1005 *
1006 * Compute/Claim a new offset for this EP.
1007 */
1008int scif_get_window_offset(struct scif_endpt *ep, int flags, s64 offset,
1009 int num_pages, s64 *out_offset)
1010{
1011 s64 page_index;
1012 struct iova *iova_ptr;
1013 int err = 0;
1014
1015 if (flags & SCIF_MAP_FIXED) {
1016 page_index = SCIF_IOVA_PFN(offset);
1017 iova_ptr = reserve_iova(&ep->rma_info.iovad, page_index,
1018 page_index + num_pages - 1);
1019 if (!iova_ptr)
1020 err = -EADDRINUSE;
1021 } else {
1022 iova_ptr = alloc_iova(&ep->rma_info.iovad, num_pages,
1023 SCIF_DMA_63BIT_PFN - 1, 0);
1024 if (!iova_ptr)
1025 err = -ENOMEM;
1026 }
1027 if (!err)
1028 *out_offset = (iova_ptr->pfn_lo) << PAGE_SHIFT;
1029 return err;
1030}
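
scif_get_window_offset() treats registered offsets as an IOVA space: SCIF_MAP_FIXED reserves the exact requested range, anything else gets a free range from the allocator. A hedged usage sketch of the same <linux/iova.h> calls (LIMIT_PFN is made up; the driver uses SCIF_DMA_63BIT_PFN - 1):

#include <linux/iova.h>
#include <linux/errno.h>

/* Editorial sketch: fixed vs. dynamic offset allocation. */
#define LIMIT_PFN ((1UL << 51) - 1)	/* assumed upper PFN bound */

static int pick_offset(struct iova_domain *iovad, bool fixed,
		       unsigned long pfn, unsigned long num_pages,
		       s64 *out_offset)
{
	struct iova *iv;

	if (fixed)	/* claim exactly [pfn, pfn + num_pages) */
		iv = reserve_iova(iovad, pfn, pfn + num_pages - 1);
	else		/* any free range below LIMIT_PFN */
		iv = alloc_iova(iovad, num_pages, LIMIT_PFN, 0);
	if (!iv)
		return fixed ? -EADDRINUSE : -ENOMEM;
	*out_offset = (s64)iv->pfn_lo << PAGE_SHIFT;
	return 0;
}
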
1031
1032/**
1033 * scif_free_window_offset:
1034 * @ep: end point descriptor
1035 * @window: registration window
1036 * @offset: Offset to be freed
1037 *
1038 * Free offset for this EP. The caller is supposed to grab
1039 * the RMA mutex before calling this API.
1040 */
1041void scif_free_window_offset(struct scif_endpt *ep,
1042 struct scif_window *window, s64 offset)
1043{
1044 if ((window && !window->offset_freed) || !window) {
1045 free_iova(&ep->rma_info.iovad, offset >> PAGE_SHIFT);
1046 if (window)
1047 window->offset_freed = true;
1048 }
1049}
1050
1051/**
1052 * scif_alloc_req: Respond to SCIF_ALLOC_REQ interrupt message
1053 * @msg: Interrupt message
1054 *
1055 * Remote side is requesting a memory allocation.
1056 */
1057void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg)
1058{
1059 int err;
1060 struct scif_window *window = NULL;
1061 int nr_pages = msg->payload[1];
1062
1063 window = scif_create_remote_window(scifdev, nr_pages);
1064 if (!window) {
1065 err = -ENOMEM;
1066 goto error;
1067 }
1068
1069 /* The peer's allocation request is granted */
1070 msg->uop = SCIF_ALLOC_GNT;
1071 msg->payload[0] = (u64)window;
1072 msg->payload[1] = window->mapped_offset;
1073 err = scif_nodeqp_send(scifdev, msg);
1074 if (err)
1075 scif_destroy_remote_window(window);
1076 return;
1077error:
1078 /* The peer's allocation request is rejected */
1079 dev_err(&scifdev->sdev->dev,
1080 "%s %d error %d alloc_ptr %p nr_pages 0x%x\n",
1081 __func__, __LINE__, err, window, nr_pages);
1082 msg->uop = SCIF_ALLOC_REJ;
1083 scif_nodeqp_send(scifdev, msg);
1084}
1085
1086/**
1087 * scif_alloc_gnt_rej: Respond to SCIF_ALLOC_GNT/REJ interrupt message
1088 * @msg: Interrupt message
1089 *
1090 * Remote side responded to a memory allocation.
1091 */
1092void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg)
1093{
1094 struct scif_allocmsg *handle = (struct scif_allocmsg *)msg->payload[2];
1095 struct scif_window *window = container_of(handle, struct scif_window,
1096 alloc_handle);
1097 struct scif_endpt *ep = (struct scif_endpt *)window->ep;
1098
1099 mutex_lock(&ep->rma_info.rma_lock);
1100 handle->vaddr = msg->payload[0];
1101 handle->phys_addr = msg->payload[1];
1102 if (msg->uop == SCIF_ALLOC_GNT)
1103 handle->state = OP_COMPLETED;
1104 else
1105 handle->state = OP_FAILED;
1106 wake_up(&handle->allocwq);
1107 mutex_unlock(&ep->rma_info.rma_lock);
1108}
1109
1110/**
1111 * scif_free_virt: Respond to SCIF_FREE_VIRT interrupt message
1112 * @msg: Interrupt message
1113 *
1114 * Free up memory kmalloc'd earlier.
1115 */
1116void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg)
1117{
1118 struct scif_window *window = (struct scif_window *)msg->payload[1];
1119
1120 scif_destroy_remote_window(window);
1121}
1122
1123static void
1124scif_fixup_aper_base(struct scif_dev *dev, struct scif_window *window)
1125{
1126 int j;
1127 struct scif_hw_dev *sdev = dev->sdev;
1128 phys_addr_t apt_base = 0;
1129
1130 /*
1131 * Add the aperture base if the DMA address is not card relative
1132 * since the DMA addresses need to be an offset into the bar
1133 */
1134 if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
1135 sdev->aper && !sdev->card_rel_da)
1136 apt_base = sdev->aper->pa;
1137 else
1138 return;
1139
1140 for (j = 0; j < window->nr_contig_chunks; j++) {
1141 if (window->num_pages[j])
1142 window->dma_addr[j] += apt_base;
1143 else
1144 break;
1145 }
1146}
1147
1148/**
1149 * scif_recv_reg: Respond to SCIF_REGISTER interrupt message
1150 * @msg: Interrupt message
1151 *
1152 * Update remote window list with a new registered window.
1153 */
1154void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg)
1155{
1156 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
1157 struct scif_window *window =
1158 (struct scif_window *)msg->payload[1];
1159
1160 mutex_lock(&ep->rma_info.rma_lock);
1161 spin_lock(&ep->lock);
1162 if (ep->state == SCIFEP_CONNECTED) {
1163 msg->uop = SCIF_REGISTER_ACK;
1164 scif_nodeqp_send(ep->remote_dev, msg);
1165 scif_fixup_aper_base(ep->remote_dev, window);
1166 /* No further failures expected. Insert new window */
1167 scif_insert_window(window, &ep->rma_info.remote_reg_list);
1168 } else {
1169 msg->uop = SCIF_REGISTER_NACK;
1170 scif_nodeqp_send(ep->remote_dev, msg);
1171 }
1172 spin_unlock(&ep->lock);
1173 mutex_unlock(&ep->rma_info.rma_lock);
1174 /* free up any lookup resources now that page lists are transferred */
1175 scif_destroy_remote_lookup(ep->remote_dev, window);
1176 /*
1177 * If the window could not be inserted (NACK sent), it
1178 * still needs to be destroyed here.
1179 */
1180 if (msg->uop == SCIF_REGISTER_NACK)
1181 scif_destroy_remote_window(window);
1182}
1183
1184/**
1185 * scif_recv_unreg: Respond to SCIF_UNREGISTER interrupt message
1186 * @msg: Interrupt message
1187 *
1188 * Remove window from remote registration list;
1189 */
1190void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg)
1191{
1192 struct scif_rma_req req;
1193 struct scif_window *window = NULL;
1194 struct scif_window *recv_window =
1195 (struct scif_window *)msg->payload[0];
1196 struct scif_endpt *ep;
1197 int del_window = 0;
1198
1199 ep = (struct scif_endpt *)recv_window->ep;
1200 req.out_window = &window;
1201 req.offset = recv_window->offset;
1202 req.prot = 0;
1203 req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
1204 req.type = SCIF_WINDOW_FULL;
1205 req.head = &ep->rma_info.remote_reg_list;
1206 msg->payload[0] = ep->remote_ep;
1207
1208 mutex_lock(&ep->rma_info.rma_lock);
1209 /* Does a valid window exist? */
1210 if (scif_query_window(&req)) {
1211 dev_err(&scifdev->sdev->dev,
1212 "%s %d -ENXIO\n", __func__, __LINE__);
1213 msg->uop = SCIF_UNREGISTER_ACK;
1214 goto error;
1215 }
1216 if (window) {
1217 if (window->ref_count)
1218 scif_put_window(window, window->nr_pages);
1219 else
1220 dev_err(&scifdev->sdev->dev,
1221 "%s %d ref count should be +ve\n",
1222 __func__, __LINE__);
1223 window->unreg_state = OP_COMPLETED;
1224 if (!window->ref_count) {
1225 msg->uop = SCIF_UNREGISTER_ACK;
1226 atomic_inc(&ep->rma_info.tw_refcount);
1227 ep->rma_info.async_list_del = 1;
1228 list_del_init(&window->list);
1229 del_window = 1;
1230 } else {
1231 /* NACK! There are valid references to this window */
1232 msg->uop = SCIF_UNREGISTER_NACK;
1233 }
1234 } else {
1235 /* The window did not make its way to the list at all. ACK */
1236 msg->uop = SCIF_UNREGISTER_ACK;
1237 scif_destroy_remote_window(recv_window);
1238 }
1239error:
1240 mutex_unlock(&ep->rma_info.rma_lock);
1241 if (del_window)
1242 scif_drain_dma_intr(ep->remote_dev->sdev,
1243 ep->rma_info.dma_chan);
1244 scif_nodeqp_send(ep->remote_dev, msg);
1245 if (del_window)
1246 scif_queue_for_cleanup(window, &scif_info.rma);
1247}
1248
1249/**
1250 * scif_recv_reg_ack: Respond to SCIF_REGISTER_ACK interrupt message
1251 * @msg: Interrupt message
1252 *
1253 * Wake up the window waiting to complete registration.
1254 */
1255void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
1256{
1257 struct scif_window *window =
1258 (struct scif_window *)msg->payload[2];
1259 struct scif_endpt *ep = (struct scif_endpt *)window->ep;
1260
1261 mutex_lock(&ep->rma_info.rma_lock);
1262 window->reg_state = OP_COMPLETED;
1263 wake_up(&window->regwq);
1264 mutex_unlock(&ep->rma_info.rma_lock);
1265}
1266
1267/**
1268 * scif_recv_reg_nack: Respond to SCIF_REGISTER_NACK interrupt message
1269 * @msg: Interrupt message
1270 *
1271 * Wake up the window waiting to inform it that registration
1272 * cannot be completed.
1273 */
1274void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
1275{
1276 struct scif_window *window =
1277 (struct scif_window *)msg->payload[2];
1278 struct scif_endpt *ep = (struct scif_endpt *)window->ep;
1279
1280 mutex_lock(&ep->rma_info.rma_lock);
1281 window->reg_state = OP_FAILED;
1282 wake_up(&window->regwq);
1283 mutex_unlock(&ep->rma_info.rma_lock);
1284}
1285
1286/**
1287 * scif_recv_unreg_ack: Respond to SCIF_UNREGISTER_ACK interrupt message
1288 * @msg: Interrupt message
1289 *
1290 * Wake up the window waiting to complete unregistration.
1291 */
1292void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
1293{
1294 struct scif_window *window =
1295 (struct scif_window *)msg->payload[1];
1296 struct scif_endpt *ep = (struct scif_endpt *)window->ep;
1297
1298 mutex_lock(&ep->rma_info.rma_lock);
1299 window->unreg_state = OP_COMPLETED;
1300 wake_up(&window->unregwq);
1301 mutex_unlock(&ep->rma_info.rma_lock);
1302}
1303
1304/**
1305 * scif_recv_unreg_nack: Respond to SCIF_UNREGISTER_NACK interrupt message
1306 * @msg: Interrupt message
1307 *
1308 * Wake up the window waiting to inform it that unregistration
1309 * cannot be completed immediately.
1310 */
1311void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
1312{
1313 struct scif_window *window =
1314 (struct scif_window *)msg->payload[1];
1315 struct scif_endpt *ep = (struct scif_endpt *)window->ep;
1316
1317 mutex_lock(&ep->rma_info.rma_lock);
1318 window->unreg_state = OP_FAILED;
1319 wake_up(&window->unregwq);
1320 mutex_unlock(&ep->rma_info.rma_lock);
1321}
1322
1323int __scif_pin_pages(void *addr, size_t len, int *out_prot,
1324 int map_flags, scif_pinned_pages_t *pages)
1325{
1326 struct scif_pinned_pages *pinned_pages;
1327 int nr_pages, err = 0, i;
1328 bool vmalloc_addr = false;
1329 bool try_upgrade = false;
1330 int prot = *out_prot;
1331 int ulimit = 0;
1332 struct mm_struct *mm = NULL;
1333
1334 /* Unsupported flags */
1335 if (map_flags & ~(SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT))
1336 return -EINVAL;
1337 ulimit = !!(map_flags & SCIF_MAP_ULIMIT);
1338
1339 /* Unsupported protection requested */
1340 if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
1341 return -EINVAL;
1342
1343 /* addr/len must be page aligned. len should be non-zero */
1344 if (!len ||
1345 (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
1346 (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
1347 return -EINVAL;
1348
1349 might_sleep();
1350
1351 nr_pages = len >> PAGE_SHIFT;
1352
1353 /* Allocate a set of pinned pages */
1354 pinned_pages = scif_create_pinned_pages(nr_pages, prot);
1355 if (!pinned_pages)
1356 return -ENOMEM;
1357
1358 if (map_flags & SCIF_MAP_KERNEL) {
1359 if (is_vmalloc_addr(addr))
1360 vmalloc_addr = true;
1361
1362 for (i = 0; i < nr_pages; i++) {
1363 if (vmalloc_addr)
1364 pinned_pages->pages[i] =
1365 vmalloc_to_page(addr + (i * PAGE_SIZE));
1366 else
1367 pinned_pages->pages[i] =
1368 virt_to_page(addr + (i * PAGE_SIZE));
1369 }
1370 pinned_pages->nr_pages = nr_pages;
1371 pinned_pages->map_flags = SCIF_MAP_KERNEL;
1372 } else {
1373 /*
1374 * SCIF supports registration caching. If a registration has
1375 * been requested with read only permissions, then we try
1376 * to pin the pages with RW permissions so that a subsequent
1377 * transfer with RW permission can hit the cache instead of
1378 * invalidating it. If the upgrade fails with RW then we
1379 * revert to R permission and retry
1380 */
1381 if (prot == SCIF_PROT_READ)
1382 try_upgrade = true;
1383 prot |= SCIF_PROT_WRITE;
1384retry:
1385 mm = current->mm;
1386 down_write(&mm->mmap_sem);
1387 if (ulimit) {
1388 err = __scif_check_inc_pinned_vm(mm, nr_pages);
1389 if (err) {
1390 up_write(&mm->mmap_sem);
1391 pinned_pages->nr_pages = 0;
1392 goto error_unmap;
1393 }
1394 }
1395
1396 pinned_pages->nr_pages = get_user_pages(
1397 current,
1398 mm,
1399 (u64)addr,
1400 nr_pages,
1401 !!(prot & SCIF_PROT_WRITE),
1402 0,
1403 pinned_pages->pages,
1404 NULL);
1405 up_write(&mm->mmap_sem);
1406 if (nr_pages != pinned_pages->nr_pages) {
1407 if (try_upgrade) {
1408 if (ulimit)
1409 __scif_dec_pinned_vm_lock(mm,
1410 nr_pages, 0);
1411 /* Roll back any pinned pages */
1412 for (i = 0; i < pinned_pages->nr_pages; i++) {
1413 if (pinned_pages->pages[i])
1414 put_page(
1415 pinned_pages->pages[i]);
1416 }
1417 prot &= ~SCIF_PROT_WRITE;
1418 try_upgrade = false;
1419 goto retry;
1420 }
1421 }
1422 pinned_pages->map_flags = 0;
1423 }
1424
1425 if (pinned_pages->nr_pages < nr_pages) {
1426 err = -EFAULT;
1427 pinned_pages->nr_pages = nr_pages;
1428 goto dec_pinned;
1429 }
1430
1431 *out_prot = prot;
1432 atomic_set(&pinned_pages->ref_count, 1);
1433 *pages = pinned_pages;
1434 return err;
1435dec_pinned:
1436 if (ulimit)
1437 __scif_dec_pinned_vm_lock(mm, nr_pages, 0);
1438 /* Something went wrong! Rollback */
1439error_unmap:
1440 pinned_pages->nr_pages = nr_pages;
1441 scif_destroy_pinned_pages(pinned_pages);
1442 *pages = NULL;
1443 dev_dbg(scif_info.mdev.this_device,
1444 "%s %d err %d len 0x%lx\n", __func__, __LINE__, err, len);
1445 return err;
1446}
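
The registration-caching comment inside __scif_pin_pages() explains the opportunistic read-write pin with read-only fallback. Stripped of mmap_sem and ulimit handling, the strategy reduces to the sketch below; try_pin()/unpin() are hypothetical stand-ins for get_user_pages() and put_page():

#include <linux/errno.h>
#include <linux/types.h>

#define MY_PROT_READ  1	/* mirrors SCIF_PROT_READ */
#define MY_PROT_WRITE 2	/* mirrors SCIF_PROT_WRITE */

int try_pin(void *addr, int nr_pages, int write);	/* hypothetical */
void unpin(void *addr, int nr_pinned);			/* hypothetical */

/* Editorial sketch of the RW-upgrade pin strategy above. */
static int pin_with_upgrade(void *addr, int nr_pages, int *prot)
{
	bool upgraded = (*prot == MY_PROT_READ);
	int got;

	if (upgraded)
		*prot |= MY_PROT_WRITE;	/* try RW to keep the reg cache warm */
retry:
	got = try_pin(addr, nr_pages, *prot & MY_PROT_WRITE);
	if (got != nr_pages && upgraded) {
		unpin(addr, got);	/* roll back the partial pin */
		*prot &= ~MY_PROT_WRITE;
		upgraded = false;
		goto retry;		/* retry with read-only */
	}
	return got == nr_pages ? 0 : -EFAULT;
}
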
1447
1448int scif_pin_pages(void *addr, size_t len, int prot,
1449 int map_flags, scif_pinned_pages_t *pages)
1450{
1451 return __scif_pin_pages(addr, len, &prot, map_flags, pages);
1452}
1453EXPORT_SYMBOL_GPL(scif_pin_pages);
1454
1455int scif_unpin_pages(scif_pinned_pages_t pinned_pages)
1456{
1457 int err = 0, ret;
1458
1459 if (!pinned_pages || SCIFEP_MAGIC != pinned_pages->magic)
1460 return -EINVAL;
1461
1462 ret = atomic_sub_return(1, &pinned_pages->ref_count);
1463 if (ret < 0) {
1464 dev_err(scif_info.mdev.this_device,
1465 "%s %d scif_unpin_pages called without pinning? rc %d\n",
1466 __func__, __LINE__, ret);
1467 return -EINVAL;
1468 }
1469 /*
1470 * Destroy the window if the ref count for this set of pinned
1471 * pages has dropped to zero. If it is positive then there is
1472 * a valid registered window which is backed by these pages and
1473 * it will be destroyed once all such windows are unregistered.
1474 */
1475 if (!ret)
1476 err = scif_destroy_pinned_pages(pinned_pages);
1477
1478 return err;
1479}
1480EXPORT_SYMBOL_GPL(scif_unpin_pages);
1481
1482static inline void
1483scif_insert_local_window(struct scif_window *window, struct scif_endpt *ep)
1484{
1485 mutex_lock(&ep->rma_info.rma_lock);
1486 scif_insert_window(window, &ep->rma_info.reg_list);
1487 mutex_unlock(&ep->rma_info.rma_lock);
1488}
1489
1490off_t scif_register_pinned_pages(scif_epd_t epd,
1491 scif_pinned_pages_t pinned_pages,
1492 off_t offset, int map_flags)
1493{
1494 struct scif_endpt *ep = (struct scif_endpt *)epd;
1495 s64 computed_offset;
1496 struct scif_window *window;
1497 int err;
1498 size_t len;
1499 struct device *spdev;
1500
1501 /* Unsupported flags */
1502 if (map_flags & ~SCIF_MAP_FIXED)
1503 return -EINVAL;
1504
1505 len = pinned_pages->nr_pages << PAGE_SHIFT;
1506
1507 /*
1508 * Offset is not page aligned/negative or offset+len
1509 * wraps around with SCIF_MAP_FIXED.
1510 */
1511 if ((map_flags & SCIF_MAP_FIXED) &&
1512 ((ALIGN(offset, PAGE_SIZE) != offset) ||
1513 (offset < 0) ||
1514 (offset + (off_t)len < offset)))
1515 return -EINVAL;
1516
1517 might_sleep();
1518
1519 err = scif_verify_epd(ep);
1520 if (err)
1521 return err;
1522 /*
1523 * It is an error to pass pinned_pages to scif_register_pinned_pages()
1524 * after calling scif_unpin_pages().
1525 */
1526 if (!atomic_add_unless(&pinned_pages->ref_count, 1, 0))
1527 return -EINVAL;
1528
1529 /* Compute the offset for this registration */
1530 err = scif_get_window_offset(ep, map_flags, offset,
1531 len, &computed_offset);
1532 if (err) {
1533 atomic_sub(1, &pinned_pages->ref_count);
1534 return err;
1535 }
1536
1537 /* Allocate and prepare self registration window */
1538 window = scif_create_window(ep, pinned_pages->nr_pages,
1539 computed_offset, false);
1540 if (!window) {
1541 atomic_sub(1, &pinned_pages->ref_count);
1542 scif_free_window_offset(ep, NULL, computed_offset);
1543 return -ENOMEM;
1544 }
1545
1546 window->pinned_pages = pinned_pages;
1547 window->nr_pages = pinned_pages->nr_pages;
1548 window->prot = pinned_pages->prot;
1549
1550 spdev = scif_get_peer_dev(ep->remote_dev);
1551 if (IS_ERR(spdev)) {
1552 err = PTR_ERR(spdev);
1553 scif_destroy_window(ep, window);
1554 return err;
1555 }
1556 err = scif_send_alloc_request(ep, window);
1557 if (err) {
1558 dev_err(&ep->remote_dev->sdev->dev,
1559 "%s %d err %d\n", __func__, __LINE__, err);
1560 goto error_unmap;
1561 }
1562
1563 /* Prepare the remote registration window */
1564 err = scif_prep_remote_window(ep, window);
1565 if (err) {
1566 dev_err(&ep->remote_dev->sdev->dev,
1567 "%s %d err %d\n", __func__, __LINE__, err);
1568 goto error_unmap;
1569 }
1570
1571 /* Tell the peer about the new window */
1572 err = scif_send_scif_register(ep, window);
1573 if (err) {
1574 dev_err(&ep->remote_dev->sdev->dev,
1575 "%s %d err %d\n", __func__, __LINE__, err);
1576 goto error_unmap;
1577 }
1578
1579 scif_put_peer_dev(spdev);
1580 /* No further failures expected. Insert new window */
1581 scif_insert_local_window(window, ep);
1582 return computed_offset;
1583error_unmap:
1584 scif_destroy_window(ep, window);
1585 scif_put_peer_dev(spdev);
1586 dev_err(&ep->remote_dev->sdev->dev,
1587 "%s %d err %d\n", __func__, __LINE__, err);
1588 return err;
1589}
1590EXPORT_SYMBOL_GPL(scif_register_pinned_pages);
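
/*
 * Editor's sketch (not part of this file): one pinned-page set registered
 * with two endpoints. Each registration above takes its own ref_count
 * reference, so the pages outlive scif_unpin_pages() until both windows are
 * unregistered. `ep0`/`ep1` are assumed connected endpoints; `pp` was
 * pinned as in the earlier sketch.
 */
	off_t off0, off1;

	off0 = scif_register_pinned_pages(ep0, pp, 0, 0);
	off1 = scif_register_pinned_pages(ep1, pp, 0x100000, SCIF_MAP_FIXED);
	/* a negative return is -errno; otherwise it is the registered offset */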
1591
1592off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
1593 int prot, int map_flags)
1594{
1595 scif_pinned_pages_t pinned_pages;
1596 off_t err;
1597 struct scif_endpt *ep = (struct scif_endpt *)epd;
1598 s64 computed_offset;
1599 struct scif_window *window;
1600 struct mm_struct *mm = NULL;
1601 struct device *spdev;
1602
1603 dev_dbg(scif_info.mdev.this_device,
1604 "SCIFAPI register: ep %p addr %p len 0x%lx offset 0x%lx prot 0x%x map_flags 0x%x\n",
1605 epd, addr, len, offset, prot, map_flags);
1606 /* Unsupported flags */
1607 if (map_flags & ~(SCIF_MAP_FIXED | SCIF_MAP_KERNEL))
1608 return -EINVAL;
1609
1610 /*
1611 * Reject if, with SCIF_MAP_FIXED, the offset is not page aligned,
1612 * is negative, or offset + len wraps around.
1613 */
1614 if ((map_flags & SCIF_MAP_FIXED) &&
1615 ((ALIGN(offset, PAGE_SIZE) != offset) ||
1616 (offset < 0) ||
1617 (offset + (off_t)len < offset)))
1618 return -EINVAL;
1619
1620 /* Unsupported protection requested */
1621 if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
1622 return -EINVAL;
1623
1624 /* addr/len must be page aligned. len must be non-zero */
1625 if (!len || (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
1626 (ALIGN(len, PAGE_SIZE) != len))
1627 return -EINVAL;
1628
1629 might_sleep();
1630
1631 err = scif_verify_epd(ep);
1632 if (err)
1633 return err;
1634
1635 /* Compute the offset for this registration */
1636 err = scif_get_window_offset(ep, map_flags, offset,
1637 len >> PAGE_SHIFT, &computed_offset);
1638 if (err)
1639 return err;
1640
1641 spdev = scif_get_peer_dev(ep->remote_dev);
1642 if (IS_ERR(spdev)) {
1643 err = PTR_ERR(spdev);
1644 scif_free_window_offset(ep, NULL, computed_offset);
1645 return err;
1646 }
1647 /* Allocate and prepare self registration window */
1648 window = scif_create_window(ep, len >> PAGE_SHIFT,
1649 computed_offset, false);
1650 if (!window) {
1651 scif_free_window_offset(ep, NULL, computed_offset);
1652 scif_put_peer_dev(spdev);
1653 return -ENOMEM;
1654 }
1655
1656 window->nr_pages = len >> PAGE_SHIFT;
1657
1658 err = scif_send_alloc_request(ep, window);
1659 if (err) {
1660 scif_destroy_incomplete_window(ep, window);
1661 scif_put_peer_dev(spdev);
1662 return err;
1663 }
1664
1665 if (!(map_flags & SCIF_MAP_KERNEL)) {
1666 mm = __scif_acquire_mm();
1667 map_flags |= SCIF_MAP_ULIMIT;
1668 }
1669 /* Pin down the pages */
1670 err = __scif_pin_pages(addr, len, &prot,
1671 map_flags & (SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT),
1672 &pinned_pages);
1673 if (err) {
1674 scif_destroy_incomplete_window(ep, window);
1675 __scif_release_mm(mm);
1676 goto error;
1677 }
1678
1679 window->pinned_pages = pinned_pages;
1680 window->prot = pinned_pages->prot;
1681 window->mm = mm;
1682
1683 /* Prepare the remote registration window */
1684 err = scif_prep_remote_window(ep, window);
1685 if (err) {
1686 dev_err(&ep->remote_dev->sdev->dev,
1687 "%s %d err %ld\n", __func__, __LINE__, err);
1688 goto error_unmap;
1689 }
1690
1691 /* Tell the peer about the new window */
1692 err = scif_send_scif_register(ep, window);
1693 if (err) {
1694 dev_err(&ep->remote_dev->sdev->dev,
1695 "%s %d err %ld\n", __func__, __LINE__, err);
1696 goto error_unmap;
1697 }
1698
1699 scif_put_peer_dev(spdev);
1700 /* No further failures expected. Insert new window */
1701 scif_insert_local_window(window, ep);
1702 dev_dbg(&ep->remote_dev->sdev->dev,
1703 "SCIFAPI register: ep %p addr %p len 0x%lx computed_offset 0x%llx\n",
1704 epd, addr, len, computed_offset);
1705 return computed_offset;
1706error_unmap:
1707 scif_destroy_window(ep, window);
1708error:
1709 scif_put_peer_dev(spdev);
1710 dev_err(&ep->remote_dev->sdev->dev,
1711 "%s %d err %ld\n", __func__, __LINE__, err);
1712 return err;
1713}
1714EXPORT_SYMBOL_GPL(scif_register);
1715
1716int
1717scif_unregister(scif_epd_t epd, off_t offset, size_t len)
1718{
1719 struct scif_endpt *ep = (struct scif_endpt *)epd;
1720 struct scif_window *window = NULL;
1721 struct scif_rma_req req;
1722 int nr_pages, err;
1723 struct device *spdev;
1724
1725 dev_dbg(scif_info.mdev.this_device,
1726 "SCIFAPI unregister: ep %p offset 0x%lx len 0x%lx\n",
1727 ep, offset, len);
1728 /* len must be page aligned and non-zero */
1729 if (!len ||
1730 (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
1731 return -EINVAL;
1732
1733 /* Offset is not page aligned or offset+len wraps around */
1734 if ((ALIGN(offset, PAGE_SIZE) != offset) ||
1735 (offset + (off_t)len < offset))
1736 return -EINVAL;
1737
1738 err = scif_verify_epd(ep);
1739 if (err)
1740 return err;
1741
1742 might_sleep();
1743 nr_pages = len >> PAGE_SHIFT;
1744
1745 req.out_window = &window;
1746 req.offset = offset;
1747 req.prot = 0;
1748 req.nr_bytes = len;
1749 req.type = SCIF_WINDOW_FULL;
1750 req.head = &ep->rma_info.reg_list;
1751
1752 spdev = scif_get_peer_dev(ep->remote_dev);
1753 if (IS_ERR(spdev)) {
1754 err = PTR_ERR(spdev);
1755 return err;
1756 }
1757 mutex_lock(&ep->rma_info.rma_lock);
1758 /* Does a valid window exist? */
1759 err = scif_query_window(&req);
1760 if (err) {
1761 dev_err(&ep->remote_dev->sdev->dev,
1762 "%s %d err %d\n", __func__, __LINE__, err);
1763 goto error;
1764 }
1765 /* Unregister all the windows in this range */
1766 err = scif_rma_list_unregister(window, offset, nr_pages);
1767 if (err)
1768 dev_err(&ep->remote_dev->sdev->dev,
1769 "%s %d err %d\n", __func__, __LINE__, err);
1770error:
1771 mutex_unlock(&ep->rma_info.rma_lock);
1772 scif_put_peer_dev(spdev);
1773 return err;
1774}
1775EXPORT_SYMBOL_GPL(scif_unregister);
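
/*
 * Editor's sketch (not part of this file): a register/unregister round trip
 * over the two APIs above. Assumes `epd` is a connected endpoint and `buf`
 * a page-aligned kernel buffer of `len` bytes; the name is illustrative.
 */
static int example_register_cycle(scif_epd_t epd, void *buf, size_t len)
{
	off_t off;

	off = scif_register(epd, buf, len, 0,
			    SCIF_PROT_READ | SCIF_PROT_WRITE, SCIF_MAP_KERNEL);
	if (off < 0)
		return (int)off;	/* -errno */
	/* ... peer RMAs against [off, off + len) happen here ... */
	return scif_unregister(epd, off, len);
}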
diff --git a/drivers/misc/mic/scif/scif_rma.h b/drivers/misc/mic/scif/scif_rma.h
new file mode 100644
index 000000000000..fa6722279196
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rma.h
@@ -0,0 +1,464 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2015 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * BSD LICENSE
21 *
22 * Copyright(c) 2015 Intel Corporation.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name of Intel Corporation nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 *
50 * Intel SCIF driver.
51 *
52 */
53#ifndef SCIF_RMA_H
54#define SCIF_RMA_H
55
56#include <linux/dma_remapping.h>
57#include <linux/mmu_notifier.h>
58
59#include "../bus/scif_bus.h"
60
61/* If this bit is set then the mark is a remote fence mark */
62#define SCIF_REMOTE_FENCE_BIT 31
63/* Magic value used to indicate a remote fence request */
64#define SCIF_REMOTE_FENCE BIT_ULL(SCIF_REMOTE_FENCE_BIT)
65
66#define SCIF_MAX_UNALIGNED_BUF_SIZE (1024 * 1024ULL)
67#define SCIF_KMEM_UNALIGNED_BUF_SIZE (SCIF_MAX_UNALIGNED_BUF_SIZE + \
68 (L1_CACHE_BYTES << 1))
69
70#define SCIF_IOVA_START_PFN (1)
71#define SCIF_IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
72#define SCIF_DMA_64BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(64))
73#define SCIF_DMA_63BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(63))
74
75/*
76 * struct scif_endpt_rma_info - Per Endpoint Remote Memory Access Information
77 *
78 * @reg_list: List of registration windows for self
79 * @remote_reg_list: List of registration windows for peer
80 * @iovad: Offset generator
81 * @rma_lock: Synchronizes access to self/remote list and also protects the
82 * window from being destroyed while RMAs are in progress.
83 * @tc_lock: Synchronizes access to temporary cached windows list
84 * for SCIF Registration Caching.
85 * @mmn_lock: Synchronizes access to the list of MMU notifiers registered
86 * @tw_refcount: Keeps track of number of outstanding temporary registered
87 * windows created by scif_vreadfrom/scif_vwriteto which have
88 * not been destroyed.
89 * @tcw_refcount: Same as tw_refcount but for temporary cached windows
90 * @tcw_total_pages: Same as tcw_refcount but in terms of pages pinned
91 * @mmn_list: List of MMU notifiers so that the windows can be destroyed when required
92 * @fence_refcount: Keeps track of number of outstanding remote fence
93 * requests which have been received by the peer.
94 * @dma_chan: DMA channel used for all DMA transfers for this endpoint.
95 * @async_list_del: Detect asynchronous list entry deletion
96 * @vma_list: List of vmas with remote memory mappings
97 * @markwq: Wait queue used for scif_fence_mark/scif_fence_wait
98 */
99struct scif_endpt_rma_info {
100 struct list_head reg_list;
101 struct list_head remote_reg_list;
102 struct iova_domain iovad;
103 struct mutex rma_lock;
104 spinlock_t tc_lock;
105 struct mutex mmn_lock;
106 atomic_t tw_refcount;
107 atomic_t tcw_refcount;
108 atomic_t tcw_total_pages;
109 struct list_head mmn_list;
110 atomic_t fence_refcount;
111 struct dma_chan *dma_chan;
112 int async_list_del;
113 struct list_head vma_list;
114 wait_queue_head_t markwq;
115};
116
117/*
118 * struct scif_fence_info - used for tracking fence requests
119 *
120 * @state: State of this transfer
121 * @comp: Completion on which fences wait
122 * @dma_mark: Used for storing the DMA mark
123 */
124struct scif_fence_info {
125 enum scif_msg_state state;
126 struct completion comp;
127 int dma_mark;
128};
129
130/*
131 * struct scif_remote_fence_info - used for tracking remote fence requests
132 *
133 * @msg: List of SCIF node QP fence messages
134 * @list: Link to list of remote fence requests
135 */
136struct scif_remote_fence_info {
137 struct scifmsg msg;
138 struct list_head list;
139};
140
141/*
142 * Specifies whether an RMA operation can span across partial windows, a single
143 * window or multiple contiguous windows. Mmaps can span across partial windows.
144 * Unregistration can span across complete windows. scif_get_pages() can span a
145 * single window. A window can also be of type self or peer.
146 */
147enum scif_window_type {
148 SCIF_WINDOW_PARTIAL,
149 SCIF_WINDOW_SINGLE,
150 SCIF_WINDOW_FULL,
151 SCIF_WINDOW_SELF,
152 SCIF_WINDOW_PEER
153};
154
155/* The number of physical addresses that can be stored in a PAGE. */
156#define SCIF_NR_ADDR_IN_PAGE (0x1000 >> 3)
157
158/*
159 * struct scif_rma_lookup - RMA lookup data structure for page list transfers
160 *
161 * Store an array of lookup offsets. Each offset in this array maps
162 * one 4K page containing 512 physical addresses i.e. 2MB. 512 such
163 * offsets in a 4K page will correspond to 1GB of registered address space.
164 *
165 * @lookup: Array of offsets
166 * @offset: DMA offset of lookup array
167 */
168struct scif_rma_lookup {
169 dma_addr_t *lookup;
170 dma_addr_t offset;
171};
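
/*
 * Editor's note (not part of this file): the sizing arithmetic above,
 * spelled out. A 4K lookup page holds 4096 / 8 = 512 dma_addr_t entries,
 * covering 512 * 4K = 2MB; a 4K page of 512 such offsets therefore spans
 * 512 * 2MB = 1GB. A hypothetical index computation for a byte offset
 * `off` into a registered range:
 *
 *	page = off >> 21;			lookup page (2MB each)
 *	slot = (off >> PAGE_SHIFT) & 511;	entry within that page
 */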
172
173/*
174 * struct scif_pinned_pages - A set of pinned pages obtained with
175 * scif_pin_pages() which could be part of multiple registered
176 * windows across different end points.
177 *
178 * @nr_pages: Number of pages, defined as an s64 instead of an int
179 * to avoid sign extension with buffers >= 2GB
180 * @prot: read/write protections
181 * @map_flags: Flags specified during the pin operation
182 * @ref_count: Reference count bumped in terms of number of pages
183 * @magic: A magic value
184 * @pages: Array of pointers to struct page populated with get_user_pages(..)
185 */
186struct scif_pinned_pages {
187 s64 nr_pages;
188 int prot;
189 int map_flags;
190 atomic_t ref_count;
191 u64 magic;
192 struct page **pages;
193};
194
195/*
196 * struct scif_status - Stores DMA status update information
197 *
198 * @src_dma_addr: Source buffer DMA address
199 * @val: src location for value to be written to the destination
200 * @ep: SCIF endpoint
201 */
202struct scif_status {
203 dma_addr_t src_dma_addr;
204 u64 val;
205 struct scif_endpt *ep;
206};
207
208/*
209 * struct scif_window - Registration Window for Self and Remote
210 *
211 * @nr_pages: Number of pages, defined as an s64 instead of an int
212 * to avoid sign extension with buffers >= 2GB
213 * @nr_contig_chunks: Number of contiguous physical chunks
214 * @prot: read/write protections
215 * @ref_count: reference count in terms of number of pages
216 * @magic: Cookie to detect corruption
217 * @offset: registered offset
218 * @va_for_temp: virtual address that this window represents
219 * @dma_mark: Used to determine if all DMAs against the window are done
220 * @ep: Pointer to EP. Useful for passing EP around with messages to
221 *      avoid expensive list traversals.
222 * @list: link to list of windows for the endpoint
223 * @type: self or peer window
224 * @peer_window: Pointer to peer window. Useful for sending messages to peer
225 * without requiring an extra list traversal
226 * @unreg_state: unregistration state
227 * @offset_freed: True if the offset has been freed
228 * @temp: True for temporary windows created via scif_vreadfrom/scif_vwriteto
229 * @mm: memory descriptor for the task_struct which initiated the RMA
230 * @st: scatter gather table for DMA mappings with IOMMU enabled
231 * @pinned_pages: The set of pinned_pages backing this window
232 * @alloc_handle: Handle for sending ALLOC_REQ
233 * @regwq: Wait Queue for a registration (N)ACK
234 * @reg_state: Registration state
235 * @unregwq: Wait Queue for an unregistration (N)ACK
236 * @dma_addr_lookup: Lookup for physical addresses used for DMA
237 * @nr_lookup: Number of entries in lookup
238 * @mapped_offset: Offset used to map the window by the peer
239 * @dma_addr: Array of physical addresses used for Mgmt node & MIC initiated DMA
240 * @num_pages: Array specifying number of pages for each physical address
241 */
242struct scif_window {
243 s64 nr_pages;
244 int nr_contig_chunks;
245 int prot;
246 int ref_count;
247 u64 magic;
248 s64 offset;
249 unsigned long va_for_temp;
250 int dma_mark;
251 u64 ep;
252 struct list_head list;
253 enum scif_window_type type;
254 u64 peer_window;
255 enum scif_msg_state unreg_state;
256 bool offset_freed;
257 bool temp;
258 struct mm_struct *mm;
259 struct sg_table *st;
260 union {
261 struct {
262 struct scif_pinned_pages *pinned_pages;
263 struct scif_allocmsg alloc_handle;
264 wait_queue_head_t regwq;
265 enum scif_msg_state reg_state;
266 wait_queue_head_t unregwq;
267 };
268 struct {
269 struct scif_rma_lookup dma_addr_lookup;
270 struct scif_rma_lookup num_pages_lookup;
271 int nr_lookup;
272 dma_addr_t mapped_offset;
273 };
274 };
275 dma_addr_t *dma_addr;
276 u64 *num_pages;
277} __packed;
278
279/*
280 * struct scif_mmu_notif - SCIF mmu notifier information
281 *
282 * @ep_mmu_notifier: MMU notifier operations
283 * @tc_reg_list: List of temp registration windows for self
284 * @mm: memory descriptor for the task_struct which initiated the RMA
285 * @ep: SCIF endpoint
286 * @list: link to list of MMU notifier information
287 */
288struct scif_mmu_notif {
289#ifdef CONFIG_MMU_NOTIFIER
290 struct mmu_notifier ep_mmu_notifier;
291#endif
292 struct list_head tc_reg_list;
293 struct mm_struct *mm;
294 struct scif_endpt *ep;
295 struct list_head list;
296};
297
298enum scif_rma_dir {
299 SCIF_LOCAL_TO_REMOTE,
300 SCIF_REMOTE_TO_LOCAL
301};
302
303extern struct kmem_cache *unaligned_cache;
304/* Initialize RMA for this EP */
305void scif_rma_ep_init(struct scif_endpt *ep);
306/* Check if epd can be uninitialized */
307int scif_rma_ep_can_uninit(struct scif_endpt *ep);
308/* Obtain a new offset. Callee must grab RMA lock */
309int scif_get_window_offset(struct scif_endpt *ep, int flags,
310 s64 offset, int nr_pages, s64 *out_offset);
311/* Free offset. Callee must grab RMA lock */
312void scif_free_window_offset(struct scif_endpt *ep,
313 struct scif_window *window, s64 offset);
314/* Create self registration window */
315struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
316 s64 offset, bool temp);
317/* Destroy self registration window.*/
318int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window);
319void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window);
320/* Map pages of self window to Aperture/PCI */
321int scif_map_window(struct scif_dev *remote_dev,
322 struct scif_window *window);
323/* Unregister a self window */
324int scif_unregister_window(struct scif_window *window);
325/* Destroy remote registration window */
326void
327scif_destroy_remote_window(struct scif_window *window);
328/* remove valid remote memory mappings from process address space */
329void scif_zap_mmaps(int node);
330/* Query if any applications have remote memory mappings */
331bool scif_rma_do_apps_have_mmaps(int node);
332/* Cleanup remote registration lists for zombie endpoints */
333void scif_cleanup_rma_for_zombies(int node);
334/* Reserve a DMA channel for a particular endpoint */
335int scif_reserve_dma_chan(struct scif_endpt *ep);
336/* Setup a DMA mark for an endpoint */
337int _scif_fence_mark(scif_epd_t epd, int *mark);
338int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
339 enum scif_window_type type);
340void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg);
341void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg);
342void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg);
343void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg);
344void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg);
345void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
346void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
347void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
348void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
349void scif_recv_munmap(struct scif_dev *scifdev, struct scifmsg *msg);
350void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg);
351void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg);
352void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg);
353void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg);
354void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg);
355void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg);
356void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg);
357void scif_mmu_notif_handler(struct work_struct *work);
358void scif_rma_handle_remote_fences(void);
359void scif_rma_destroy_windows(void);
360void scif_rma_destroy_tcw_invalid(void);
361int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan);
362
363struct scif_window_iter {
364 s64 offset;
365 int index;
366};
367
368static inline void
369scif_init_window_iter(struct scif_window *window, struct scif_window_iter *iter)
370{
371 iter->offset = window->offset;
372 iter->index = 0;
373}
374
375dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
376 size_t *nr_bytes,
377 struct scif_window_iter *iter);
378static inline
379dma_addr_t __scif_off_to_dma_addr(struct scif_window *window, s64 off)
380{
381 return scif_off_to_dma_addr(window, off, NULL, NULL);
382}
383
384static inline bool scif_unaligned(off_t src_offset, off_t dst_offset)
385{
386 src_offset = src_offset & (L1_CACHE_BYTES - 1);
387 dst_offset = dst_offset & (L1_CACHE_BYTES - 1);
388 return !(src_offset == dst_offset);
389}
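
/*
 * Editor's note (not part of this file): scif_unaligned() tests for a
 * cacheline-relative mismatch, not absolute misalignment. With 64-byte
 * cachelines, offsets 0x40 and 0x1040 compare as aligned (both are
 * 0 mod 64) while 0x40 and 0x44 do not, forcing the unaligned copy path.
 */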
390
391/*
392 * scif_zalloc:
393 * @size: Size of the allocation request.
394 *
395 * Helper API which attempts to allocate zeroed pages via
396 * __get_free_pages(..) first and then falls back on
397 * vzalloc(..) if that fails.
398 */
399static inline void *scif_zalloc(size_t size)
400{
401 void *ret = NULL;
402 size_t align = ALIGN(size, PAGE_SIZE);
403
404 if (align && get_order(align) < MAX_ORDER)
405 ret = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
406 get_order(align));
407 return ret ? ret : vzalloc(align);
408}
409
410/*
411 * scif_free:
412 * @addr: Address to be freed.
413 * @size: Size of the allocation.
414 * Helper API which frees memory allocated via scif_zalloc().
415 */
416static inline void scif_free(void *addr, size_t size)
417{
418 size_t align = ALIGN(size, PAGE_SIZE);
419
420 if (is_vmalloc_addr(addr))
421 vfree(addr);
422 else
423 free_pages((unsigned long)addr, get_order(align));
424}
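
/*
 * Editor's sketch (not part of this file): the two helpers above must be
 * used as a pair, since scif_free() picks vfree() vs free_pages() based on
 * the address. An illustrative allocation of a page-pointer array:
 *
 *	struct page **pages = scif_zalloc(nr_pages * sizeof(*pages));
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	scif_free(pages, nr_pages * sizeof(*pages));
 */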
425
426static inline void scif_get_window(struct scif_window *window, int nr_pages)
427{
428 window->ref_count += nr_pages;
429}
430
431static inline void scif_put_window(struct scif_window *window, int nr_pages)
432{
433 window->ref_count -= nr_pages;
434}
435
436static inline void scif_set_window_ref(struct scif_window *window, int nr_pages)
437{
438 window->ref_count = nr_pages;
439}
440
441static inline void
442scif_queue_for_cleanup(struct scif_window *window, struct list_head *list)
443{
444 spin_lock(&scif_info.rmalock);
445 list_add_tail(&window->list, list);
446 spin_unlock(&scif_info.rmalock);
447 schedule_work(&scif_info.misc_work);
448}
449
450static inline void __scif_rma_destroy_tcw_helper(struct scif_window *window)
451{
452 list_del_init(&window->list);
453 scif_queue_for_cleanup(window, &scif_info.rma_tc);
454}
455
456static inline bool scif_is_iommu_enabled(void)
457{
458#ifdef CONFIG_INTEL_IOMMU
459 return intel_iommu_enabled;
460#else
461 return false;
462#endif
463}
464#endif /* SCIF_RMA_H */
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c
new file mode 100644
index 000000000000..e1ef8daedd5a
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rma_list.c
@@ -0,0 +1,291 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include "scif_main.h"
19#include <linux/mmu_notifier.h>
20#include <linux/highmem.h>
21
22/*
23 * scif_insert_tcw:
24 *
25 * Insert a temp window into the temp registration list sorted by va_for_temp.
26 * RMA lock must be held.
27 */
28void scif_insert_tcw(struct scif_window *window, struct list_head *head)
29{
30 struct scif_window *curr = NULL;
31 struct scif_window *prev = list_entry(head, struct scif_window, list);
32 struct list_head *item;
33
34 INIT_LIST_HEAD(&window->list);
35 /* Compare with tail and if the entry is new tail add it to the end */
36 if (!list_empty(head)) {
37 curr = list_entry(head->prev, struct scif_window, list);
38 if (curr->va_for_temp < window->va_for_temp) {
39 list_add_tail(&window->list, head);
40 return;
41 }
42 }
43 list_for_each(item, head) {
44 curr = list_entry(item, struct scif_window, list);
45 if (curr->va_for_temp > window->va_for_temp)
46 break;
47 prev = curr;
48 }
49 list_add(&window->list, &prev->list);
50}
51
52/*
53 * scif_insert_window:
54 *
55 * Insert a window into the self registration list sorted by offset.
56 * RMA lock must be held.
57 */
58void scif_insert_window(struct scif_window *window, struct list_head *head)
59{
60 struct scif_window *curr = NULL, *prev = NULL;
61 struct list_head *item;
62
63 INIT_LIST_HEAD(&window->list);
64 list_for_each(item, head) {
65 curr = list_entry(item, struct scif_window, list);
66 if (curr->offset > window->offset)
67 break;
68 prev = curr;
69 }
70 if (!prev)
71 list_add(&window->list, head);
72 else
73 list_add(&window->list, &prev->list);
74 scif_set_window_ref(window, window->nr_pages);
75}
76
77/*
78 * scif_query_tcw:
79 *
80 * Query the temp cached registration list of ep for an overlapping window.
81 * In case of a permission mismatch, destroy the previous window. If the
82 * permissions match and the overlap is partial, destroy the window but
83 * return the new range. RMA lock must be held.
84 */
85int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req)
86{
87 struct list_head *item, *temp, *head = req->head;
88 struct scif_window *window;
89 u64 start_va_window, start_va_req = req->va_for_temp;
90 u64 end_va_window, end_va_req = start_va_req + req->nr_bytes;
91
92 if (!req->nr_bytes)
93 return -EINVAL;
94 /*
95 * Avoid traversing the entire list to find out that there
96 * is no entry that matches
97 */
98 if (!list_empty(head)) {
99 window = list_last_entry(head, struct scif_window, list);
100 end_va_window = window->va_for_temp +
101 (window->nr_pages << PAGE_SHIFT);
102 if (start_va_req > end_va_window)
103 return -ENXIO;
104 }
105 list_for_each_safe(item, temp, head) {
106 window = list_entry(item, struct scif_window, list);
107 start_va_window = window->va_for_temp;
108 end_va_window = window->va_for_temp +
109 (window->nr_pages << PAGE_SHIFT);
110 if (start_va_req < start_va_window &&
111 end_va_req < start_va_window)
112 break;
113 if (start_va_req >= end_va_window)
114 continue;
115 if ((window->prot & req->prot) == req->prot) {
116 if (start_va_req >= start_va_window &&
117 end_va_req <= end_va_window) {
118 *req->out_window = window;
119 return 0;
120 }
121 /* expand window */
122 if (start_va_req < start_va_window) {
123 req->nr_bytes +=
124 start_va_window - start_va_req;
125 req->va_for_temp = start_va_window;
126 }
127 if (end_va_req >= end_va_window)
128 req->nr_bytes += end_va_window - end_va_req;
129 }
130 /* Destroy the old window to create a new one */
131 __scif_rma_destroy_tcw_helper(window);
132 break;
133 }
134 return -ENXIO;
135}
136
137/*
138 * scif_query_window:
139 *
140 * Query the registration list and check if a valid contiguous
141 * range of windows exist.
142 * RMA lock must be held.
143 */
144int scif_query_window(struct scif_rma_req *req)
145{
146 struct list_head *item;
147 struct scif_window *window;
148 s64 end_offset, offset = req->offset;
149 u64 tmp_min, nr_bytes_left = req->nr_bytes;
150
151 if (!req->nr_bytes)
152 return -EINVAL;
153
154 list_for_each(item, req->head) {
155 window = list_entry(item, struct scif_window, list);
156 end_offset = window->offset +
157 (window->nr_pages << PAGE_SHIFT);
158 if (offset < window->offset)
159 /* Offset not found! */
160 return -ENXIO;
161 if (offset >= end_offset)
162 continue;
163 /* Check read/write protections. */
164 if ((window->prot & req->prot) != req->prot)
165 return -EPERM;
166 if (nr_bytes_left == req->nr_bytes)
167 /* Store the first window */
168 *req->out_window = window;
169 tmp_min = min((u64)end_offset - offset, nr_bytes_left);
170 nr_bytes_left -= tmp_min;
171 offset += tmp_min;
172 /*
173 * Range requested encompasses
174 * multiple windows contiguously.
175 */
176 if (!nr_bytes_left) {
177 /* Done for partial window */
178 if (req->type == SCIF_WINDOW_PARTIAL ||
179 req->type == SCIF_WINDOW_SINGLE)
180 return 0;
181 /* Extra logic for full windows */
182 if (offset == end_offset)
183 /* Spanning multiple whole windows */
184 return 0;
185 /* Not spanning multiple whole windows */
186 return -ENXIO;
187 }
188 if (req->type == SCIF_WINDOW_SINGLE)
189 break;
190 }
191 dev_err(scif_info.mdev.this_device,
192 "%s %d ENXIO\n", __func__, __LINE__);
193 return -ENXIO;
194}
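
/*
 * Editor's sketch (not part of this file): how a caller typically fills a
 * scif_rma_req before scif_query_window(); compare the caller in
 * scif_unregister() earlier in this patch. The fields shown are the ones
 * struct scif_rma_req defines.
 *
 *	struct scif_window *window = NULL;
 *	struct scif_rma_req req = {
 *		.out_window = &window,
 *		.offset     = offset,
 *		.prot       = 0,		(0 skips the prot check)
 *		.nr_bytes   = len,
 *		.type       = SCIF_WINDOW_FULL,
 *		.head       = &ep->rma_info.reg_list,
 *	};
 *	err = scif_query_window(&req);	(0, -EINVAL, -EPERM or -ENXIO)
 */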
195
196/*
197 * scif_rma_list_unregister:
198 *
199 * Traverse the self registration list starting from window:
200 * 1) Call scif_unregister_window(..)
201 * RMA lock must be held.
202 */
203int scif_rma_list_unregister(struct scif_window *window,
204 s64 offset, int nr_pages)
205{
206 struct scif_endpt *ep = (struct scif_endpt *)window->ep;
207 struct list_head *head = &ep->rma_info.reg_list;
208 s64 end_offset;
209 int err = 0;
210 int loop_nr_pages;
211 struct scif_window *_window;
212
213 list_for_each_entry_safe_from(window, _window, head, list) {
214 end_offset = window->offset + (window->nr_pages << PAGE_SHIFT);
215 loop_nr_pages = min((int)((end_offset - offset) >> PAGE_SHIFT),
216 nr_pages);
217 err = scif_unregister_window(window);
218 if (err)
219 return err;
220 nr_pages -= loop_nr_pages;
221 offset += (loop_nr_pages << PAGE_SHIFT);
222 if (!nr_pages)
223 break;
224 }
225 return 0;
226}
227
228/*
229 * scif_unmap_all_windows:
230 *
231 * Traverse all the windows in the self registration list and:
232 * 1) Delete any DMA mappings created
233 */
234void scif_unmap_all_windows(scif_epd_t epd)
235{
236 struct list_head *item, *tmp;
237 struct scif_window *window;
238 struct scif_endpt *ep = (struct scif_endpt *)epd;
239 struct list_head *head = &ep->rma_info.reg_list;
240
241 mutex_lock(&ep->rma_info.rma_lock);
242 list_for_each_safe(item, tmp, head) {
243 window = list_entry(item, struct scif_window, list);
244 scif_unmap_window(ep->remote_dev, window);
245 }
246 mutex_unlock(&ep->rma_info.rma_lock);
247}
248
249/*
250 * scif_unregister_all_windows:
251 *
252 * Traverse all the windows in the self registration list and:
253 * 1) Call scif_unregister_window(..)
254 * RMA lock must be held.
255 */
256int scif_unregister_all_windows(scif_epd_t epd)
257{
258 struct list_head *item, *tmp;
259 struct scif_window *window;
260 struct scif_endpt *ep = (struct scif_endpt *)epd;
261 struct list_head *head = &ep->rma_info.reg_list;
262 int err = 0;
263
264 mutex_lock(&ep->rma_info.rma_lock);
265retry:
266 item = NULL;
267 tmp = NULL;
268 list_for_each_safe(item, tmp, head) {
269 window = list_entry(item, struct scif_window, list);
270 ep->rma_info.async_list_del = 0;
271 err = scif_unregister_window(window);
272 if (err)
273 dev_err(scif_info.mdev.this_device,
274 "%s %d err %d\n",
275 __func__, __LINE__, err);
276 /*
277 * Need to restart list traversal if there has been
278 * an asynchronous list entry deletion.
279 */
280 if (ACCESS_ONCE(ep->rma_info.async_list_del))
281 goto retry;
282 }
283 mutex_unlock(&ep->rma_info.rma_lock);
284 if (!list_empty(&ep->rma_info.mmn_list)) {
285 spin_lock(&scif_info.rmalock);
286 list_add_tail(&ep->mmu_list, &scif_info.mmu_notif_cleanup);
287 spin_unlock(&scif_info.rmalock);
288 schedule_work(&scif_info.mmu_notif_work);
289 }
290 return err;
291}
diff --git a/drivers/misc/mic/scif/scif_rma_list.h b/drivers/misc/mic/scif/scif_rma_list.h
new file mode 100644
index 000000000000..7d58d1d551b0
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rma_list.h
@@ -0,0 +1,57 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#ifndef SCIF_RMA_LIST_H
19#define SCIF_RMA_LIST_H
20
21/*
22 * struct scif_rma_req - Self Registration list RMA Request query
23 *
24 * @out_window: Returns the window if found
25 * @offset: Starting offset
26 * @nr_bytes: number of bytes
27 * @prot: protection requested i.e. read or write or both
28 * @type: Specify single, partial or multiple windows
29 * @head: Head of list on which to search
30 * @va_for_temp: VA for searching temporary cached windows
31 */
32struct scif_rma_req {
33 struct scif_window **out_window;
34 union {
35 s64 offset;
36 unsigned long va_for_temp;
37 };
38 size_t nr_bytes;
39 int prot;
40 enum scif_window_type type;
41 struct list_head *head;
42};
43
44/* Insert */
45void scif_insert_window(struct scif_window *window, struct list_head *head);
46void scif_insert_tcw(struct scif_window *window,
47 struct list_head *head);
48/* Query */
49int scif_query_window(struct scif_rma_req *request);
50int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *request);
51/* Called from close to unregister all self windows */
52int scif_unregister_all_windows(scif_epd_t epd);
53void scif_unmap_all_windows(scif_epd_t epd);
54/* Traverse list and unregister */
55int scif_rma_list_unregister(struct scif_window *window, s64 offset,
56 int nr_pages);
57#endif /* SCIF_RMA_LIST_H */
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 2f30badc6ffd..1ee8e82ba710 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -196,12 +196,6 @@ void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
 	start_instruction(tfh);
 }
 
-void tfh_restart(struct gru_tlb_fault_handle *tfh)
-{
-	tfh->opc = TFHOP_RESTART;
-	start_instruction(tfh);
-}
-
 void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
 {
 	tfh->opc = TFHOP_USER_POLLING_MODE;
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index 3f998b924d8f..3d7bd36a1c89 100644
--- a/drivers/misc/sgi-gru/gruhandles.h
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -524,7 +524,6 @@ int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
 		int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
 void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
 		int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
-void tfh_restart(struct gru_tlb_fault_handle *tfh);
 void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh);
 void tfh_exception(struct gru_tlb_fault_handle *tfh);
 
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index a3700a56b8ff..313da3150262 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -78,11 +78,10 @@ static int gru_dump_tfm(struct gru_state *gru,
 		void __user *ubuf, void __user *ubufend)
 {
 	struct gru_tlb_fault_map *tfm;
-	int i, ret, bytes;
+	int i;
 
-	bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
-	if (bytes > ubufend - ubuf)
-		ret = -EFBIG;
+	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
+		return -EFBIG;
 
 	for (i = 0; i < GRU_NUM_TFM; i++) {
 		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
@@ -99,11 +98,10 @@ static int gru_dump_tgh(struct gru_state *gru,
 		void __user *ubuf, void __user *ubufend)
 {
 	struct gru_tlb_global_handle *tgh;
-	int i, ret, bytes;
+	int i;
 
-	bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
-	if (bytes > ubufend - ubuf)
-		ret = -EFBIG;
+	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
+		return -EFBIG;
 
 	for (i = 0; i < GRU_NUM_TGH; i++) {
 		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
@@ -196,7 +194,7 @@ int gru_dump_chiplet_request(unsigned long arg)
 		return -EFAULT;
 
 	/* Currently, only dump by gid is implemented */
-	if (req.gid >= gru_max_gids || req.gid < 0)
+	if (req.gid >= gru_max_gids)
 		return -EINVAL;
 
 	gru = GID_TO_GRU(req.gid);
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 913de07e577c..967b9dd24fe9 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -160,7 +160,12 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
 	down_write(&bs->bs_kgts_sema);
 
 	if (!bs->bs_kgts) {
-		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
+		do {
+			bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
+			if (!IS_ERR(bs->bs_kgts))
+				break;
+			msleep(1);
+		} while (true);
 		bs->bs_kgts->ts_user_blade_id = blade_id;
 	}
 	kgts = bs->bs_kgts;
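
/*
 * Editor's sketch (not part of this patch): the retry pattern the hunk
 * above introduces, in isolation - loop on a failable allocation with a
 * 1 ms sleep until it succeeds. Acceptable only in a context that may
 * block, as here under bs_kgts_sema. `alloc_thing()` is illustrative.
 *
 *	do {
 *		t = alloc_thing();
 *		if (!IS_ERR(t))
 *			break;
 *		msleep(1);
 *	} while (true);
 */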
@@ -429,8 +434,8 @@ int gru_get_cb_exception_detail(void *cb,
 	return 0;
 }
 
-char *gru_get_cb_exception_detail_str(int ret, void *cb,
-		char *buf, int size)
+static char *gru_get_cb_exception_detail_str(int ret, void *cb,
+		char *buf, int size)
 {
 	struct gru_control_block_status *gen = (void *)cb;
 	struct control_block_extended_exc_detail excdet;
@@ -505,7 +510,7 @@ int gru_wait_proc(void *cb)
 	return ret;
 }
 
-void gru_abort(int ret, void *cb, char *str)
+static void gru_abort(int ret, void *cb, char *str)
 {
 	char buf[GRU_EXC_STR_SIZE];
 
@@ -997,7 +1002,6 @@ static int quicktest1(unsigned long arg)
 {
 	struct gru_message_queue_desc mqd;
 	void *p, *mq;
-	unsigned long *dw;
 	int i, ret = -EIO;
 	char mes[GRU_CACHE_LINE_BYTES], *m;
 
@@ -1007,7 +1011,6 @@ static int quicktest1(unsigned long arg)
 		return -ENOMEM;
 	mq = ALIGNUP(p, 1024);
 	memset(mes, 0xee, sizeof(mes));
-	dw = mq;
 
 	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
 	for (i = 0; i < 6; i++) {
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index ae16c8cb4f3e..1525870f460a 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -930,6 +930,7 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct gru_thread_state *gts;
 	unsigned long paddr, vaddr;
+	unsigned long expires;
 
 	vaddr = (unsigned long)vmf->virtual_address;
 	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
@@ -954,7 +955,8 @@ again:
 		mutex_unlock(&gts->ts_ctxlock);
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
-		if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
+		expires = gts->ts_steal_jiffies + GRU_STEAL_DELAY;
+		if (time_before(expires, jiffies))
 			gru_steal_context(gts);
 		goto again;
 	}
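
/*
 * Editor's note (not part of this patch): open-coded jiffies comparisons
 * such as `a + DELAY < jiffies` misbehave when jiffies wraps; the
 * time_before()/time_after() macros from <linux/jiffies.h> compare with
 * wraparound-safe signed arithmetic. Generic illustrative use:
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(100);
 *	...
 *	if (time_after(jiffies, timeout))
 *		(at least 100 ms have elapsed, wrap-safe)
 */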
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 2129274ef7ab..e936d43895d2 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -306,19 +306,20 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
 		atomic_inc(&gms->ms_refcnt);
 	} else {
 		gms = kzalloc(sizeof(*gms), GFP_KERNEL);
-		if (gms) {
-			STAT(gms_alloc);
-			spin_lock_init(&gms->ms_asid_lock);
-			gms->ms_notifier.ops = &gru_mmuops;
-			atomic_set(&gms->ms_refcnt, 1);
-			init_waitqueue_head(&gms->ms_wait_queue);
-			err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
-			if (err)
-				goto error;
-		}
+		if (!gms)
+			return ERR_PTR(-ENOMEM);
+		STAT(gms_alloc);
+		spin_lock_init(&gms->ms_asid_lock);
+		gms->ms_notifier.ops = &gru_mmuops;
+		atomic_set(&gms->ms_refcnt, 1);
+		init_waitqueue_head(&gms->ms_wait_queue);
+		err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
+		if (err)
+			goto error;
 	}
-	gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
-		atomic_read(&gms->ms_refcnt));
+	if (gms)
+		gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
+			atomic_read(&gms->ms_refcnt));
 	return gms;
 error:
 	kfree(gms);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 431e1dd528bc..736dae715dbf 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -28,20 +28,144 @@
 
 #define SRAM_GRANULARITY	32
 
+struct sram_partition {
+	void __iomem *base;
+
+	struct gen_pool *pool;
+	struct bin_attribute battr;
+	struct mutex lock;
+};
+
 struct sram_dev {
 	struct device *dev;
 	void __iomem *virt_base;
 
 	struct gen_pool *pool;
 	struct clk *clk;
+
+	struct sram_partition *partition;
+	u32 partitions;
 };
 
 struct sram_reserve {
 	struct list_head list;
 	u32 start;
 	u32 size;
+	bool export;
+	bool pool;
+	const char *label;
 };
 
+static ssize_t sram_read(struct file *filp, struct kobject *kobj,
+			 struct bin_attribute *attr,
+			 char *buf, loff_t pos, size_t count)
+{
+	struct sram_partition *part;
+
+	part = container_of(attr, struct sram_partition, battr);
+
+	mutex_lock(&part->lock);
+	memcpy_fromio(buf, part->base + pos, count);
+	mutex_unlock(&part->lock);
+
+	return count;
+}
+
+static ssize_t sram_write(struct file *filp, struct kobject *kobj,
+			  struct bin_attribute *attr,
+			  char *buf, loff_t pos, size_t count)
+{
+	struct sram_partition *part;
+
+	part = container_of(attr, struct sram_partition, battr);
+
+	mutex_lock(&part->lock);
+	memcpy_toio(part->base + pos, buf, count);
+	mutex_unlock(&part->lock);
+
+	return count;
+}
+
+static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block,
+			 phys_addr_t start, struct sram_partition *part)
+{
+	int ret;
+
+	part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
+					  NUMA_NO_NODE, block->label);
+	if (IS_ERR(part->pool))
+		return PTR_ERR(part->pool);
+
+	ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,
+				block->size, NUMA_NO_NODE);
+	if (ret < 0) {
+		dev_err(sram->dev, "failed to register subpool: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
+			   phys_addr_t start, struct sram_partition *part)
+{
+	sysfs_bin_attr_init(&part->battr);
+	part->battr.attr.name = devm_kasprintf(sram->dev, GFP_KERNEL,
+					       "%llx.sram",
+					       (unsigned long long)start);
+	if (!part->battr.attr.name)
+		return -ENOMEM;
+
+	part->battr.attr.mode = S_IRUSR | S_IWUSR;
+	part->battr.read = sram_read;
+	part->battr.write = sram_write;
+	part->battr.size = block->size;
+
+	return device_create_bin_file(sram->dev, &part->battr);
+}
+
+static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
+			      phys_addr_t start)
+{
+	int ret;
+	struct sram_partition *part = &sram->partition[sram->partitions];
+
+	mutex_init(&part->lock);
+	part->base = sram->virt_base + block->start;
+
+	if (block->pool) {
+		ret = sram_add_pool(sram, block, start, part);
+		if (ret)
+			return ret;
+	}
+	if (block->export) {
+		ret = sram_add_export(sram, block, start, part);
+		if (ret)
+			return ret;
+	}
+	sram->partitions++;
+
+	return 0;
+}
+
+static void sram_free_partitions(struct sram_dev *sram)
+{
+	struct sram_partition *part;
+
+	if (!sram->partitions)
+		return;
+
+	part = &sram->partition[sram->partitions - 1];
+	for (; sram->partitions; sram->partitions--, part--) {
+		if (part->battr.size)
+			device_remove_bin_file(sram->dev, &part->battr);
+
+		if (part->pool &&
+		    gen_pool_avail(part->pool) < gen_pool_size(part->pool))
+			dev_err(sram->dev, "removed pool while SRAM allocated\n");
+	}
+}
+
 static int sram_reserve_cmp(void *priv, struct list_head *a,
 			    struct list_head *b)
 {
@@ -57,7 +181,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
 	unsigned long size, cur_start, cur_size;
 	struct sram_reserve *rblocks, *block;
 	struct list_head reserve_list;
-	unsigned int nblocks;
+	unsigned int nblocks, exports = 0;
+	const char *label;
 	int ret = 0;
 
 	INIT_LIST_HEAD(&reserve_list);
@@ -69,7 +194,7 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
 	 * after the reserved blocks from the dt are processed.
 	 */
 	nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
-	rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
+	rblocks = kzalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
 	if (!rblocks)
 		return -ENOMEM;
 
@@ -82,7 +207,6 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
 			dev_err(sram->dev,
 				"could not get address for node %s\n",
 				child->full_name);
-			of_node_put(child);
 			goto err_chunks;
 		}
 
@@ -91,7 +215,6 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
91 "reserved block %s outside the sram area\n", 215 "reserved block %s outside the sram area\n",
92 child->full_name); 216 child->full_name);
93 ret = -EINVAL; 217 ret = -EINVAL;
94 of_node_put(child);
95 goto err_chunks; 218 goto err_chunks;
96 } 219 }
97 220
@@ -99,11 +222,42 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
 		block->size = resource_size(&child_res);
 		list_add_tail(&block->list, &reserve_list);
 
-		dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
-			block->start, block->start + block->size);
+		if (of_find_property(child, "export", NULL))
+			block->export = true;
+
+		if (of_find_property(child, "pool", NULL))
+			block->pool = true;
+
+		if ((block->export || block->pool) && block->size) {
+			exports++;
+
+			label = NULL;
+			ret = of_property_read_string(child, "label", &label);
+			if (ret && ret != -EINVAL) {
+				dev_err(sram->dev,
+					"%s has invalid label name\n",
+					child->full_name);
+				goto err_chunks;
+			}
+			if (!label)
+				label = child->name;
+
+			block->label = devm_kstrdup(sram->dev,
+						    label, GFP_KERNEL);
+			if (!block->label)
+				goto err_chunks;
+
+			dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
+				block->export ? "exported " : "", block->label,
+				block->start, block->start + block->size);
+		} else {
+			dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
+				block->start, block->start + block->size);
+		}
 
 		block++;
 	}
+	child = NULL;
 
 	/* the last chunk marks the end of the region */
 	rblocks[nblocks - 1].start = size;
@@ -112,8 +266,17 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
 
 	list_sort(NULL, &reserve_list, sram_reserve_cmp);
 
-	cur_start = 0;
+	if (exports) {
+		sram->partition = devm_kzalloc(sram->dev,
+				       exports * sizeof(*sram->partition),
+				       GFP_KERNEL);
+		if (!sram->partition) {
+			ret = -ENOMEM;
+			goto err_chunks;
+		}
+	}
 
+	cur_start = 0;
 	list_for_each_entry(block, &reserve_list, list) {
 		/* can only happen if sections overlap */
 		if (block->start < cur_start) {
@@ -121,9 +284,19 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
121 "block at 0x%x starts after current offset 0x%lx\n", 284 "block at 0x%x starts after current offset 0x%lx\n",
122 block->start, cur_start); 285 block->start, cur_start);
123 ret = -EINVAL; 286 ret = -EINVAL;
287 sram_free_partitions(sram);
124 goto err_chunks; 288 goto err_chunks;
125 } 289 }
126 290
291 if ((block->export || block->pool) && block->size) {
292 ret = sram_add_partition(sram, block,
293 res->start + block->start);
294 if (ret) {
295 sram_free_partitions(sram);
296 goto err_chunks;
297 }
298 }
299
127 /* current start is in a reserved block, so continue after it */ 300 /* current start is in a reserved block, so continue after it */
128 if (block->start == cur_start) { 301 if (block->start == cur_start) {
129 cur_start = block->start + block->size; 302 cur_start = block->start + block->size;
@@ -143,14 +316,19 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
 		ret = gen_pool_add_virt(sram->pool,
 				(unsigned long)sram->virt_base + cur_start,
 				res->start + cur_start, cur_size, -1);
-		if (ret < 0)
+		if (ret < 0) {
+			sram_free_partitions(sram);
 			goto err_chunks;
+		}
 
 		/* next allocation after this reserved block */
 		cur_start = block->start + block->size;
 	}
 
 err_chunks:
+	if (child)
+		of_node_put(child);
+
 	kfree(rblocks);
 
 	return ret;
@@ -213,6 +391,8 @@ static int sram_remove(struct platform_device *pdev)
 {
 	struct sram_dev *sram = platform_get_drvdata(pdev);
 
+	sram_free_partitions(sram);
+
 	if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
 		dev_err(sram->dev, "removed while SRAM allocated\n");
 
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index c8c6a363069c..6e3af8b42cdd 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -460,6 +460,13 @@ static void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
  * - TTY layer when write's finished
  * - st_write (in context of the protocol stack)
  */
+static void work_fn_write_wakeup(struct work_struct *work)
+{
+	struct st_data_s *st_gdata = container_of(work, struct st_data_s,
+						  work_write_wakeup);
+
+	st_tx_wakeup((void *)st_gdata);
+}
 void st_tx_wakeup(struct st_data_s *st_data)
 {
 	struct sk_buff *skb;
@@ -812,8 +819,12 @@ static void st_tty_wakeup(struct tty_struct *tty)
 	/* don't do an wakeup for now */
 	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
 
-	/* call our internal wakeup */
-	st_tx_wakeup((void *)st_gdata);
+	/*
+	 * schedule the internal wakeup instead of calling directly to
+	 * avoid lockup (port->lock needed in tty->ops->write is
+	 * already taken here
+	 */
+	schedule_work(&st_gdata->work_write_wakeup);
 }
 
 static void st_tty_flush_buffer(struct tty_struct *tty)
@@ -881,6 +892,9 @@ int st_core_init(struct st_data_s **core_data)
 		pr_err("unable to un-register ldisc");
 		return err;
 	}
+
+	INIT_WORK(&st_gdata->work_write_wakeup, work_fn_write_wakeup);
+
 	*core_data = st_gdata;
 	return 0;
 }
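
/*
 * Editor's sketch (not part of this patch): the deadlock-avoidance pattern
 * the three hunks above apply - defer work that would retake a lock already
 * held in the current call chain to a workqueue. Names are illustrative.
 *
 *	INIT_WORK(&data->wakeup_work, wakeup_work_fn);	(setup, once)
 *	...
 *	schedule_work(&data->wakeup_work);	(from the locked path, instead
 *						 of calling the handler directly)
 */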
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index ffb56340d0c7..89300870fefb 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1,7 +1,7 @@
 /*
  * VMware Balloon driver.
  *
- * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2000-2014, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -37,16 +37,19 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/vmalloc.h>
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/workqueue.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
 #include <asm/hypervisor.h>
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.3.0.0-k");
+MODULE_VERSION("1.5.0.0-k");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
@@ -57,12 +60,6 @@ MODULE_LICENSE("GPL");
57 */ 60 */
58 61
59/* 62/*
60 * Rate of allocating memory when there is no memory pressure
61 * (driver performs non-sleeping allocations).
62 */
63#define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U
64
65/*
66 * Rates of memory allocation when guest experiences memory pressure 63 * Rates of memory allocation when guest experiences memory pressure
67 * (driver performs sleeping allocations). 64 * (driver performs sleeping allocations).
68 */ 65 */
@@ -71,13 +68,6 @@ MODULE_LICENSE("GPL");
71#define VMW_BALLOON_RATE_ALLOC_INC 16U 68#define VMW_BALLOON_RATE_ALLOC_INC 16U
72 69
73/* 70/*
74 * Rates for releasing pages while deflating balloon.
75 */
76#define VMW_BALLOON_RATE_FREE_MIN 512U
77#define VMW_BALLOON_RATE_FREE_MAX 16384U
78#define VMW_BALLOON_RATE_FREE_INC 16U
79
80/*
81 * When guest is under memory pressure, use a reduced page allocation 71 * When guest is under memory pressure, use a reduced page allocation
82 * rate for next several cycles. 72 * rate for next several cycles.
83 */ 73 */
@@ -99,9 +89,6 @@ MODULE_LICENSE("GPL");
99 */ 89 */
100#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER) 90#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
101 91
102/* Maximum number of page allocations without yielding processor */
103#define VMW_BALLOON_YIELD_THRESHOLD 1024
104
105/* Maximum number of refused pages we accumulate during inflation cycle */ 92/* Maximum number of refused pages we accumulate during inflation cycle */
106#define VMW_BALLOON_MAX_REFUSED 16 93#define VMW_BALLOON_MAX_REFUSED 16
107 94
@@ -116,17 +103,45 @@ enum vmwballoon_capabilities {
116 /* 103 /*
117 * Bit 0 is reserved and not associated to any capability. 104 * Bit 0 is reserved and not associated to any capability.
118 */ 105 */
119 VMW_BALLOON_BASIC_CMDS = (1 << 1), 106 VMW_BALLOON_BASIC_CMDS = (1 << 1),
120 VMW_BALLOON_BATCHED_CMDS = (1 << 2) 107 VMW_BALLOON_BATCHED_CMDS = (1 << 2),
108 VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3),
109 VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4),
121}; 110};
122 111
123#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS) 112#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS \
113 | VMW_BALLOON_BATCHED_CMDS \
114 | VMW_BALLOON_BATCHED_2M_CMDS \
115 | VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
116
117#define VMW_BALLOON_2M_SHIFT (9)
118#define VMW_BALLOON_NUM_PAGE_SIZES (2)
119
120/*
121 * Backdoor commands availability:
122 *
 123 * START, GET_TARGET and GUEST_ID are always available.
 124 *
 125 * VMW_BALLOON_BASIC_CMDS:
 126 * LOCK and UNLOCK commands,
 127 * VMW_BALLOON_BATCHED_CMDS:
 128 * BATCHED_LOCK and BATCHED_UNLOCK commands,
 129 * VMW_BALLOON_BATCHED_2M_CMDS:
 130 * BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
 131 * VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 132 * VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
133 */
134#define VMW_BALLOON_CMD_START 0
135#define VMW_BALLOON_CMD_GET_TARGET 1
136#define VMW_BALLOON_CMD_LOCK 2
137#define VMW_BALLOON_CMD_UNLOCK 3
138#define VMW_BALLOON_CMD_GUEST_ID 4
139#define VMW_BALLOON_CMD_BATCHED_LOCK 6
140#define VMW_BALLOON_CMD_BATCHED_UNLOCK 7
141#define VMW_BALLOON_CMD_BATCHED_2M_LOCK 8
142#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK 9
143#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET 10
124 144
125#define VMW_BALLOON_CMD_START 0
126#define VMW_BALLOON_CMD_GET_TARGET 1
127#define VMW_BALLOON_CMD_LOCK 2
128#define VMW_BALLOON_CMD_UNLOCK 3
129#define VMW_BALLOON_CMD_GUEST_ID 4
130 145
131/* error codes */ 146/* error codes */
132#define VMW_BALLOON_SUCCESS 0 147#define VMW_BALLOON_SUCCESS 0
@@ -142,18 +157,60 @@ enum vmwballoon_capabilities {
142 157
143#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000) 158#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
144 159
145#define VMWARE_BALLOON_CMD(cmd, data, result) \ 160/* Batch page description */
161
162/*
163 * Layout of a page in the batch page:
164 *
165 * +-------------+----------+--------+
166 * | | | |
167 * | Page number | Reserved | Status |
168 * | | | |
169 * +-------------+----------+--------+
170 * 64 PAGE_SHIFT 6 0
171 *
172 * The reserved field should be set to 0.
173 */
174#define VMW_BALLOON_BATCH_MAX_PAGES (PAGE_SIZE / sizeof(u64))
175#define VMW_BALLOON_BATCH_STATUS_MASK ((1UL << 5) - 1)
176#define VMW_BALLOON_BATCH_PAGE_MASK (~((1UL << PAGE_SHIFT) - 1))
177
178struct vmballoon_batch_page {
179 u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
180};
181
182static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
183{
184 return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
185}
186
187static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
188 int idx)
189{
190 return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
191}
192
193static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
194 u64 pa)
195{
196 batch->pages[idx] = pa;
197}
198
199
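Each 64-bit batch entry packs the page's address above PAGE_SHIFT and the per-page status in the low bits, per the layout comment above. A hedged, userspace-style sketch of the same packing (the DEMO_* names are illustrative, and PAGE_SHIFT is assumed to be 12):

    #include <stdint.h>

    #define DEMO_PAGE_SHIFT  12
    #define DEMO_STATUS_MASK ((1UL << 5) - 1)
    #define DEMO_PAGE_MASK   (~((1UL << DEMO_PAGE_SHIFT) - 1))

    /* encode: frame number shifted up, status/reserved bits zeroed */
    static uint64_t demo_entry_encode(uint64_t pfn)
    {
            return pfn << DEMO_PAGE_SHIFT;
    }

    /* decode the fields the hypervisor fills in on completion */
    static uint64_t demo_entry_pa(uint64_t entry)
    {
            return entry & DEMO_PAGE_MASK;
    }

    static int demo_entry_status(uint64_t entry)
    {
            return (int)(entry & DEMO_STATUS_MASK);
    }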
200#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result) \
146({ \ 201({ \
147 unsigned long __status, __dummy1, __dummy2; \ 202 unsigned long __status, __dummy1, __dummy2, __dummy3; \
148 __asm__ __volatile__ ("inl %%dx" : \ 203 __asm__ __volatile__ ("inl %%dx" : \
149 "=a"(__status), \ 204 "=a"(__status), \
150 "=c"(__dummy1), \ 205 "=c"(__dummy1), \
151 "=d"(__dummy2), \ 206 "=d"(__dummy2), \
152 "=b"(result) : \ 207 "=b"(result), \
208 "=S" (__dummy3) : \
153 "0"(VMW_BALLOON_HV_MAGIC), \ 209 "0"(VMW_BALLOON_HV_MAGIC), \
154 "1"(VMW_BALLOON_CMD_##cmd), \ 210 "1"(VMW_BALLOON_CMD_##cmd), \
155 "2"(VMW_BALLOON_HV_PORT), \ 211 "2"(VMW_BALLOON_HV_PORT), \
156 "3"(data) : \ 212 "3"(arg1), \
213 "4" (arg2) : \
157 "memory"); \ 214 "memory"); \
158 if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START) \ 215 if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START) \
159 result = __dummy1; \ 216 result = __dummy1; \
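With the added second argument and the "=S" (%esi) output, every backdoor call now carries two parameters. As the GET_TARGET caller later in this patch shows, an invocation looks roughly like this (fragment; limit and b come from the surrounding function):

    unsigned long status, target, dummy = 0;

    status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
    if (vmballoon_check_status(b, status))
            b->target = target;  /* host's desired balloon size */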
@@ -164,27 +221,30 @@ enum vmwballoon_capabilities {
164#ifdef CONFIG_DEBUG_FS 221#ifdef CONFIG_DEBUG_FS
165struct vmballoon_stats { 222struct vmballoon_stats {
166 unsigned int timer; 223 unsigned int timer;
224 unsigned int doorbell;
167 225
168 /* allocation statistics */ 226 /* allocation statistics */
169 unsigned int alloc; 227 unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
170 unsigned int alloc_fail; 228 unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
171 unsigned int sleep_alloc; 229 unsigned int sleep_alloc;
172 unsigned int sleep_alloc_fail; 230 unsigned int sleep_alloc_fail;
173 unsigned int refused_alloc; 231 unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
174 unsigned int refused_free; 232 unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
175 unsigned int free; 233 unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];
176 234
177 /* monitor operations */ 235 /* monitor operations */
178 unsigned int lock; 236 unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
179 unsigned int lock_fail; 237 unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
180 unsigned int unlock; 238 unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
181 unsigned int unlock_fail; 239 unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
182 unsigned int target; 240 unsigned int target;
183 unsigned int target_fail; 241 unsigned int target_fail;
184 unsigned int start; 242 unsigned int start;
185 unsigned int start_fail; 243 unsigned int start_fail;
186 unsigned int guest_type; 244 unsigned int guest_type;
187 unsigned int guest_type_fail; 245 unsigned int guest_type_fail;
246 unsigned int doorbell_set;
247 unsigned int doorbell_unset;
188}; 248};
189 249
190#define STATS_INC(stat) (stat)++ 250#define STATS_INC(stat) (stat)++
@@ -192,14 +252,30 @@ struct vmballoon_stats {
192#define STATS_INC(stat) 252#define STATS_INC(stat)
193#endif 253#endif
194 254
195struct vmballoon { 255struct vmballoon;
196 256
257struct vmballoon_ops {
258 void (*add_page)(struct vmballoon *b, int idx, struct page *p);
259 int (*lock)(struct vmballoon *b, unsigned int num_pages,
260 bool is_2m_pages, unsigned int *target);
261 int (*unlock)(struct vmballoon *b, unsigned int num_pages,
262 bool is_2m_pages, unsigned int *target);
263};
264
265struct vmballoon_page_size {
197 /* list of reserved physical pages */ 266 /* list of reserved physical pages */
198 struct list_head pages; 267 struct list_head pages;
199 268
200 /* transient list of non-balloonable pages */ 269 /* transient list of non-balloonable pages */
201 struct list_head refused_pages; 270 struct list_head refused_pages;
202 unsigned int n_refused_pages; 271 unsigned int n_refused_pages;
272};
273
274struct vmballoon {
275 struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];
276
277 /* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
278 unsigned supported_page_sizes;
203 279
204 /* balloon size in pages */ 280 /* balloon size in pages */
205 unsigned int size; 281 unsigned int size;
@@ -210,11 +286,18 @@ struct vmballoon {
210 286
211 /* adjustment rates (pages per second) */ 287 /* adjustment rates (pages per second) */
212 unsigned int rate_alloc; 288 unsigned int rate_alloc;
213 unsigned int rate_free;
214 289
215 /* slowdown page allocations for next few cycles */ 290 /* slowdown page allocations for next few cycles */
216 unsigned int slow_allocation_cycles; 291 unsigned int slow_allocation_cycles;
217 292
293 unsigned long capabilities;
294
295 struct vmballoon_batch_page *batch_page;
296 unsigned int batch_max_pages;
297 struct page *page;
298
299 const struct vmballoon_ops *ops;
300
218#ifdef CONFIG_DEBUG_FS 301#ifdef CONFIG_DEBUG_FS
219 /* statistics */ 302 /* statistics */
220 struct vmballoon_stats stats; 303 struct vmballoon_stats stats;
@@ -226,6 +309,8 @@ struct vmballoon {
226 struct sysinfo sysinfo; 309 struct sysinfo sysinfo;
227 310
228 struct delayed_work dwork; 311 struct delayed_work dwork;
312
313 struct vmci_handle vmci_doorbell;
229}; 314};
230 315
231static struct vmballoon balloon; 316static struct vmballoon balloon;
@@ -234,20 +319,38 @@ static struct vmballoon balloon;
234 * Send "start" command to the host, communicating supported version 319 * Send "start" command to the host, communicating supported version
235 * of the protocol. 320 * of the protocol.
236 */ 321 */
237static bool vmballoon_send_start(struct vmballoon *b) 322static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
238{ 323{
239 unsigned long status, capabilities; 324 unsigned long status, capabilities, dummy = 0;
325 bool success;
240 326
241 STATS_INC(b->stats.start); 327 STATS_INC(b->stats.start);
242 328
243 status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_CAPABILITIES, 329 status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);
244 capabilities);
245 if (status == VMW_BALLOON_SUCCESS)
246 return true;
247 330
248 pr_debug("%s - failed, hv returns %ld\n", __func__, status); 331 switch (status) {
249 STATS_INC(b->stats.start_fail); 332 case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
250 return false; 333 b->capabilities = capabilities;
334 success = true;
335 break;
336 case VMW_BALLOON_SUCCESS:
337 b->capabilities = VMW_BALLOON_BASIC_CMDS;
338 success = true;
339 break;
340 default:
341 success = false;
342 }
343
344 if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
345 b->supported_page_sizes = 2;
346 else
347 b->supported_page_sizes = 1;
348
349 if (!success) {
350 pr_debug("%s - failed, hv returns %ld\n", __func__, status);
351 STATS_INC(b->stats.start_fail);
352 }
353 return success;
251} 354}
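The START handshake now negotiates capabilities: the guest advertises what it supports, and the host answers either with SUCCESS_WITH_CAPABILITIES plus the subset it accepts, or with plain SUCCESS meaning basic commands only. Condensed from the function above:

    /* host reply                            -> b->capabilities
     * VMW_BALLOON_SUCCESS_WITH_CAPABILITIES    subset echoed by host
     * VMW_BALLOON_SUCCESS                      VMW_BALLOON_BASIC_CMDS
     * anything else                            start failed, retried later
     */
    b->supported_page_sizes =
            (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) ? 2 : 1;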
252 355
253static bool vmballoon_check_status(struct vmballoon *b, unsigned long status) 356static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
@@ -273,9 +376,10 @@ static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
273 */ 376 */
274static bool vmballoon_send_guest_id(struct vmballoon *b) 377static bool vmballoon_send_guest_id(struct vmballoon *b)
275{ 378{
276 unsigned long status, dummy; 379 unsigned long status, dummy = 0;
277 380
278 status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy); 381 status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
382 dummy);
279 383
280 STATS_INC(b->stats.guest_type); 384 STATS_INC(b->stats.guest_type);
281 385
@@ -287,6 +391,14 @@ static bool vmballoon_send_guest_id(struct vmballoon *b)
287 return false; 391 return false;
288} 392}
289 393
394static u16 vmballoon_page_size(bool is_2m_page)
395{
396 if (is_2m_page)
397 return 1 << VMW_BALLOON_2M_SHIFT;
398
399 return 1;
400}
401
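vmballoon_page_size() converts a balloon entry into its cost in 4k pages: 1 << VMW_BALLOON_2M_SHIFT == 512 for a 2m entry (512 * 4k = 2m), and 1 otherwise. The driver's size accounting leans on this throughout, e.g.:

    b->size += vmballoon_page_size(true);   /* one 2m page:  += 512 */
    b->size += vmballoon_page_size(false);  /* one 4k page:  += 1 */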
290/* 402/*
291 * Retrieve desired balloon size from the host. 403 * Retrieve desired balloon size from the host.
292 */ 404 */
@@ -295,6 +407,7 @@ static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
295 unsigned long status; 407 unsigned long status;
296 unsigned long target; 408 unsigned long target;
297 unsigned long limit; 409 unsigned long limit;
410 unsigned long dummy = 0;
298 u32 limit32; 411 u32 limit32;
299 412
300 /* 413 /*
@@ -313,7 +426,7 @@ static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
313 /* update stats */ 426 /* update stats */
314 STATS_INC(b->stats.target); 427 STATS_INC(b->stats.target);
315 428
316 status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target); 429 status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
317 if (vmballoon_check_status(b, status)) { 430 if (vmballoon_check_status(b, status)) {
318 *new_target = target; 431 *new_target = target;
319 return true; 432 return true;
@@ -330,23 +443,46 @@ static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
330 * check the return value and maybe submit a different page. 443 * check the return value and maybe submit a different page.
331 */ 444 */
332static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, 445static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
333 unsigned int *hv_status) 446 unsigned int *hv_status, unsigned int *target)
334{ 447{
335 unsigned long status, dummy; 448 unsigned long status, dummy = 0;
336 u32 pfn32; 449 u32 pfn32;
337 450
338 pfn32 = (u32)pfn; 451 pfn32 = (u32)pfn;
339 if (pfn32 != pfn) 452 if (pfn32 != pfn)
340 return -1; 453 return -1;
341 454
342 STATS_INC(b->stats.lock); 455 STATS_INC(b->stats.lock[false]);
343 456
344 *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy); 457 *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
345 if (vmballoon_check_status(b, status)) 458 if (vmballoon_check_status(b, status))
346 return 0; 459 return 0;
347 460
348 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); 461 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
349 STATS_INC(b->stats.lock_fail); 462 STATS_INC(b->stats.lock_fail[false]);
463 return 1;
464}
465
466static int vmballoon_send_batched_lock(struct vmballoon *b,
467 unsigned int num_pages, bool is_2m_pages, unsigned int *target)
468{
469 unsigned long status;
470 unsigned long pfn = page_to_pfn(b->page);
471
472 STATS_INC(b->stats.lock[is_2m_pages]);
473
474 if (is_2m_pages)
475 status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
476 *target);
477 else
478 status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
479 *target);
480
481 if (vmballoon_check_status(b, status))
482 return 0;
483
484 pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
485 STATS_INC(b->stats.lock_fail[is_2m_pages]);
350 return 1; 486 return 1;
351} 487}
352 488
@@ -354,26 +490,66 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
354 * Notify the host that guest intends to release given page back into 490 * Notify the host that guest intends to release given page back into
355 * the pool of available (to the guest) pages. 491 * the pool of available (to the guest) pages.
356 */ 492 */
357static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn) 493static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
494 unsigned int *target)
358{ 495{
359 unsigned long status, dummy; 496 unsigned long status, dummy = 0;
360 u32 pfn32; 497 u32 pfn32;
361 498
362 pfn32 = (u32)pfn; 499 pfn32 = (u32)pfn;
363 if (pfn32 != pfn) 500 if (pfn32 != pfn)
364 return false; 501 return false;
365 502
366 STATS_INC(b->stats.unlock); 503 STATS_INC(b->stats.unlock[false]);
367 504
368 status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy); 505 status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
369 if (vmballoon_check_status(b, status)) 506 if (vmballoon_check_status(b, status))
370 return true; 507 return true;
371 508
372 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); 509 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
373 STATS_INC(b->stats.unlock_fail); 510 STATS_INC(b->stats.unlock_fail[false]);
511 return false;
512}
513
514static bool vmballoon_send_batched_unlock(struct vmballoon *b,
515 unsigned int num_pages, bool is_2m_pages, unsigned int *target)
516{
517 unsigned long status;
518 unsigned long pfn = page_to_pfn(b->page);
519
520 STATS_INC(b->stats.unlock[is_2m_pages]);
521
522 if (is_2m_pages)
523 status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
524 *target);
525 else
526 status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
527 *target);
528
529 if (vmballoon_check_status(b, status))
530 return true;
531
532 pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
533 STATS_INC(b->stats.unlock_fail[is_2m_pages]);
374 return false; 534 return false;
375} 535}
376 536
537static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
538{
539 if (is_2m_page)
540 return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);
541
542 return alloc_page(flags);
543}
544
545static void vmballoon_free_page(struct page *page, bool is_2m_page)
546{
547 if (is_2m_page)
548 __free_pages(page, VMW_BALLOON_2M_SHIFT);
549 else
550 __free_page(page);
551}
552
377/* 553/*
378 * Quickly release all pages allocated for the balloon. This function is 554 * Quickly release all pages allocated for the balloon. This function is
379 * called when host decides to "reset" balloon for one reason or another. 555 * called when host decides to "reset" balloon for one reason or another.
@@ -383,35 +559,31 @@ static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
383static void vmballoon_pop(struct vmballoon *b) 559static void vmballoon_pop(struct vmballoon *b)
384{ 560{
385 struct page *page, *next; 561 struct page *page, *next;
386 unsigned int count = 0; 562 unsigned is_2m_pages;
387 563
388 list_for_each_entry_safe(page, next, &b->pages, lru) { 564 for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
389 list_del(&page->lru); 565 is_2m_pages++) {
390 __free_page(page); 566 struct vmballoon_page_size *page_size =
391 STATS_INC(b->stats.free); 567 &b->page_sizes[is_2m_pages];
392 b->size--; 568 u16 size_per_page = vmballoon_page_size(is_2m_pages);
393 569
394 if (++count >= b->rate_free) { 570 list_for_each_entry_safe(page, next, &page_size->pages, lru) {
395 count = 0; 571 list_del(&page->lru);
572 vmballoon_free_page(page, is_2m_pages);
573 STATS_INC(b->stats.free[is_2m_pages]);
574 b->size -= size_per_page;
396 cond_resched(); 575 cond_resched();
397 } 576 }
398 } 577 }
399}
400 578
401/* 579 if (b->batch_page) {
402 * Perform standard reset sequence by popping the balloon (in case it 580 vunmap(b->batch_page);
403 * is not empty) and then restarting protocol. This operation normally 581 b->batch_page = NULL;
404 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command. 582 }
405 */
406static void vmballoon_reset(struct vmballoon *b)
407{
408 /* free all pages, skipping monitor unlock */
409 vmballoon_pop(b);
410 583
411 if (vmballoon_send_start(b)) { 584 if (b->page) {
412 b->reset_required = false; 585 __free_page(b->page);
413 if (!vmballoon_send_guest_id(b)) 586 b->page = NULL;
414 pr_err("failed to send guest ID to the host\n");
415 } 587 }
416} 588}
417 589
@@ -420,17 +592,23 @@ static void vmballoon_reset(struct vmballoon *b)
420 * refuse list, those refused pages are then released at the end of the 592
421 * inflation cycle. 593 * inflation cycle.
422 */ 594 */
423static int vmballoon_lock_page(struct vmballoon *b, struct page *page) 595static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
596 bool is_2m_pages, unsigned int *target)
424{ 597{
425 int locked, hv_status; 598 int locked, hv_status;
599 struct page *page = b->page;
600 struct vmballoon_page_size *page_size = &b->page_sizes[false];
601
 602 /* is_2m_pages can never be true here, as 2m pages support implies batching */
426 603
427 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status); 604 locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
605 target);
428 if (locked > 0) { 606 if (locked > 0) {
429 STATS_INC(b->stats.refused_alloc); 607 STATS_INC(b->stats.refused_alloc[false]);
430 608
431 if (hv_status == VMW_BALLOON_ERROR_RESET || 609 if (hv_status == VMW_BALLOON_ERROR_RESET ||
432 hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) { 610 hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
433 __free_page(page); 611 vmballoon_free_page(page, false);
434 return -EIO; 612 return -EIO;
435 } 613 }
436 614
@@ -439,17 +617,17 @@ static int vmballoon_lock_page(struct vmballoon *b, struct page *page)
439 * and retry allocation, unless we already accumulated 617 * and retry allocation, unless we already accumulated
440 * too many of them, in which case take a breather. 618 * too many of them, in which case take a breather.
441 */ 619 */
442 if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) { 620 if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
443 b->n_refused_pages++; 621 page_size->n_refused_pages++;
444 list_add(&page->lru, &b->refused_pages); 622 list_add(&page->lru, &page_size->refused_pages);
445 } else { 623 } else {
446 __free_page(page); 624 vmballoon_free_page(page, false);
447 } 625 }
448 return -EIO; 626 return -EIO;
449 } 627 }
450 628
451 /* track allocated page */ 629 /* track allocated page */
452 list_add(&page->lru, &b->pages); 630 list_add(&page->lru, &page_size->pages);
453 631
454 /* update balloon size */ 632 /* update balloon size */
455 b->size++; 633 b->size++;
@@ -457,21 +635,81 @@ static int vmballoon_lock_page(struct vmballoon *b, struct page *page)
457 return 0; 635 return 0;
458} 636}
459 637
638static int vmballoon_lock_batched_page(struct vmballoon *b,
639 unsigned int num_pages, bool is_2m_pages, unsigned int *target)
640{
641 int locked, i;
642 u16 size_per_page = vmballoon_page_size(is_2m_pages);
643
644 locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
645 target);
646 if (locked > 0) {
647 for (i = 0; i < num_pages; i++) {
648 u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
649 struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
650
651 vmballoon_free_page(p, is_2m_pages);
652 }
653
654 return -EIO;
655 }
656
657 for (i = 0; i < num_pages; i++) {
658 u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
659 struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
660 struct vmballoon_page_size *page_size =
661 &b->page_sizes[is_2m_pages];
662
663 locked = vmballoon_batch_get_status(b->batch_page, i);
664
665 switch (locked) {
666 case VMW_BALLOON_SUCCESS:
667 list_add(&p->lru, &page_size->pages);
668 b->size += size_per_page;
669 break;
670 case VMW_BALLOON_ERROR_PPN_PINNED:
671 case VMW_BALLOON_ERROR_PPN_INVALID:
672 if (page_size->n_refused_pages
673 < VMW_BALLOON_MAX_REFUSED) {
674 list_add(&p->lru, &page_size->refused_pages);
675 page_size->n_refused_pages++;
676 break;
677 }
678 /* Fallthrough */
679 case VMW_BALLOON_ERROR_RESET:
680 case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
681 vmballoon_free_page(p, is_2m_pages);
682 break;
683 default:
684 /* This should never happen */
685 WARN_ON_ONCE(true);
686 }
687 }
688
689 return 0;
690}
691
460/* 692/*
461 * Release the page allocated for the balloon. Note that we first notify 693 * Release the page allocated for the balloon. Note that we first notify
462 * the host so it can make sure the page will be available for the guest 694 * the host so it can make sure the page will be available for the guest
463 * to use, if needed. 695 * to use, if needed.
464 */ 696 */
465static int vmballoon_release_page(struct vmballoon *b, struct page *page) 697static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
698 bool is_2m_pages, unsigned int *target)
466{ 699{
467 if (!vmballoon_send_unlock_page(b, page_to_pfn(page))) 700 struct page *page = b->page;
468 return -EIO; 701 struct vmballoon_page_size *page_size = &b->page_sizes[false];
702
 703 /* is_2m_pages can never be true here, as 2m pages support implies batching */
469 704
470 list_del(&page->lru); 705 if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
706 list_add(&page->lru, &page_size->pages);
707 return -EIO;
708 }
471 709
472 /* deallocate page */ 710 /* deallocate page */
473 __free_page(page); 711 vmballoon_free_page(page, false);
474 STATS_INC(b->stats.free); 712 STATS_INC(b->stats.free[false]);
475 713
476 /* update balloon size */ 714 /* update balloon size */
477 b->size--; 715 b->size--;
@@ -479,21 +717,76 @@ static int vmballoon_release_page(struct vmballoon *b, struct page *page)
479 return 0; 717 return 0;
480} 718}
481 719
720static int vmballoon_unlock_batched_page(struct vmballoon *b,
721 unsigned int num_pages, bool is_2m_pages,
722 unsigned int *target)
723{
724 int locked, i, ret = 0;
725 bool hv_success;
726 u16 size_per_page = vmballoon_page_size(is_2m_pages);
727
728 hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
729 target);
730 if (!hv_success)
731 ret = -EIO;
732
733 for (i = 0; i < num_pages; i++) {
734 u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
735 struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
736 struct vmballoon_page_size *page_size =
737 &b->page_sizes[is_2m_pages];
738
739 locked = vmballoon_batch_get_status(b->batch_page, i);
740 if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
741 /*
742 * That page wasn't successfully unlocked by the
 743 * hypervisor; re-add it to the list of pages owned by
744 * the balloon driver.
745 */
746 list_add(&p->lru, &page_size->pages);
747 } else {
748 /* deallocate page */
749 vmballoon_free_page(p, is_2m_pages);
750 STATS_INC(b->stats.free[is_2m_pages]);
751
752 /* update balloon size */
753 b->size -= size_per_page;
754 }
755 }
756
757 return ret;
758}
759
482/* 760/*
483 * Release pages that were allocated while attempting to inflate the 761 * Release pages that were allocated while attempting to inflate the
484 * balloon but were refused by the host for one reason or another. 762 * balloon but were refused by the host for one reason or another.
485 */ 763 */
486static void vmballoon_release_refused_pages(struct vmballoon *b) 764static void vmballoon_release_refused_pages(struct vmballoon *b,
765 bool is_2m_pages)
487{ 766{
488 struct page *page, *next; 767 struct page *page, *next;
768 struct vmballoon_page_size *page_size =
769 &b->page_sizes[is_2m_pages];
489 770
490 list_for_each_entry_safe(page, next, &b->refused_pages, lru) { 771 list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
491 list_del(&page->lru); 772 list_del(&page->lru);
492 __free_page(page); 773 vmballoon_free_page(page, is_2m_pages);
493 STATS_INC(b->stats.refused_free); 774 STATS_INC(b->stats.refused_free[is_2m_pages]);
494 } 775 }
495 776
496 b->n_refused_pages = 0; 777 page_size->n_refused_pages = 0;
778}
779
780static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
781{
782 b->page = p;
783}
784
785static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
786 struct page *p)
787{
788 vmballoon_batch_set_pa(b->batch_page, idx,
789 (u64)page_to_pfn(p) << PAGE_SHIFT);
497} 790}
498 791
499/* 792/*
@@ -503,12 +796,12 @@ static void vmballoon_release_refused_pages(struct vmballoon *b)
503 */ 796 */
504static void vmballoon_inflate(struct vmballoon *b) 797static void vmballoon_inflate(struct vmballoon *b)
505{ 798{
506 unsigned int goal; 799 unsigned rate;
507 unsigned int rate;
508 unsigned int i;
509 unsigned int allocations = 0; 800 unsigned int allocations = 0;
801 unsigned int num_pages = 0;
510 int error = 0; 802 int error = 0;
511 gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP; 803 gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
804 bool is_2m_pages;
512 805
513 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); 806 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
514 807
@@ -527,27 +820,50 @@ static void vmballoon_inflate(struct vmballoon *b)
527 * slow down page allocations considerably. 820
528 */ 821 */
529 822
530 goal = b->target - b->size;
531 /* 823 /*
532 * Start with no sleep allocation rate which may be higher 824 * Start with no sleep allocation rate which may be higher
533 * than sleeping allocation rate. 825 * than sleeping allocation rate.
534 */ 826 */
535 rate = b->slow_allocation_cycles ? 827 if (b->slow_allocation_cycles) {
536 b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX; 828 rate = b->rate_alloc;
829 is_2m_pages = false;
830 } else {
831 rate = UINT_MAX;
832 is_2m_pages =
833 b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
834 }
537 835
538 pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n", 836 pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
539 __func__, goal, rate, b->rate_alloc); 837 __func__, b->target - b->size, rate, b->rate_alloc);
540 838
541 for (i = 0; i < goal; i++) { 839 while (!b->reset_required &&
840 b->size + num_pages * vmballoon_page_size(is_2m_pages)
841 < b->target) {
542 struct page *page; 842 struct page *page;
543 843
544 if (flags == VMW_PAGE_ALLOC_NOSLEEP) 844 if (flags == VMW_PAGE_ALLOC_NOSLEEP)
545 STATS_INC(b->stats.alloc); 845 STATS_INC(b->stats.alloc[is_2m_pages]);
546 else 846 else
547 STATS_INC(b->stats.sleep_alloc); 847 STATS_INC(b->stats.sleep_alloc);
548 848
549 page = alloc_page(flags); 849 page = vmballoon_alloc_page(flags, is_2m_pages);
550 if (!page) { 850 if (!page) {
851 STATS_INC(b->stats.alloc_fail[is_2m_pages]);
852
853 if (is_2m_pages) {
854 b->ops->lock(b, num_pages, true, &b->target);
855
856 /*
857 * ignore errors from locking as we now switch
858 * to 4k pages and we might get different
859 * errors.
860 */
861
862 num_pages = 0;
863 is_2m_pages = false;
864 continue;
865 }
866
551 if (flags == VMW_PAGE_ALLOC_CANSLEEP) { 867 if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
552 /* 868 /*
553 * CANSLEEP page allocation failed, so guest 869 * CANSLEEP page allocation failed, so guest
@@ -559,7 +875,6 @@ static void vmballoon_inflate(struct vmballoon *b)
559 STATS_INC(b->stats.sleep_alloc_fail); 875 STATS_INC(b->stats.sleep_alloc_fail);
560 break; 876 break;
561 } 877 }
562 STATS_INC(b->stats.alloc_fail);
563 878
564 /* 879 /*
565 * NOSLEEP page allocation failed, so the guest is 880 * NOSLEEP page allocation failed, so the guest is
@@ -571,7 +886,7 @@ static void vmballoon_inflate(struct vmballoon *b)
571 */ 886 */
572 b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES; 887 b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
573 888
574 if (i >= b->rate_alloc) 889 if (allocations >= b->rate_alloc)
575 break; 890 break;
576 891
577 flags = VMW_PAGE_ALLOC_CANSLEEP; 892 flags = VMW_PAGE_ALLOC_CANSLEEP;
@@ -580,34 +895,40 @@ static void vmballoon_inflate(struct vmballoon *b)
580 continue; 895 continue;
581 } 896 }
582 897
583 error = vmballoon_lock_page(b, page); 898 b->ops->add_page(b, num_pages++, page);
584 if (error) 899 if (num_pages == b->batch_max_pages) {
585 break; 900 error = b->ops->lock(b, num_pages, is_2m_pages,
586 901 &b->target);
587 if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) { 902 num_pages = 0;
588 cond_resched(); 903 if (error)
589 allocations = 0; 904 break;
590 } 905 }
591 906
592 if (i >= rate) { 907 cond_resched();
908
909 if (allocations >= rate) {
593 /* We allocated enough pages, let's take a break. */ 910 /* We allocated enough pages, let's take a break. */
594 break; 911 break;
595 } 912 }
596 } 913 }
597 914
915 if (num_pages > 0)
916 b->ops->lock(b, num_pages, is_2m_pages, &b->target);
917
598 /* 918 /*
599 * We reached our goal without failures so try increasing 919 * We reached our goal without failures so try increasing
600 * allocation rate. 920 * allocation rate.
601 */ 921 */
602 if (error == 0 && i >= b->rate_alloc) { 922 if (error == 0 && allocations >= b->rate_alloc) {
603 unsigned int mult = i / b->rate_alloc; 923 unsigned int mult = allocations / b->rate_alloc;
604 924
605 b->rate_alloc = 925 b->rate_alloc =
606 min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC, 926 min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
607 VMW_BALLOON_RATE_ALLOC_MAX); 927 VMW_BALLOON_RATE_ALLOC_MAX);
608 } 928 }
609 929
610 vmballoon_release_refused_pages(b); 930 vmballoon_release_refused_pages(b, true);
931 vmballoon_release_refused_pages(b, false);
611} 932}
612 933
613/* 934/*
@@ -615,35 +936,176 @@ static void vmballoon_inflate(struct vmballoon *b)
615 */ 936 */
616static void vmballoon_deflate(struct vmballoon *b) 937static void vmballoon_deflate(struct vmballoon *b)
617{ 938{
618 struct page *page, *next; 939 unsigned is_2m_pages;
619 unsigned int i = 0;
620 unsigned int goal;
621 int error;
622 940
623 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target); 941 pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
624 942
625 /* limit deallocation rate */ 943 /* free pages to reach target */
626 goal = min(b->size - b->target, b->rate_free); 944 for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
945 is_2m_pages++) {
946 struct page *page, *next;
947 unsigned int num_pages = 0;
948 struct vmballoon_page_size *page_size =
949 &b->page_sizes[is_2m_pages];
950
951 list_for_each_entry_safe(page, next, &page_size->pages, lru) {
952 if (b->reset_required ||
953 (b->target > 0 &&
954 b->size - num_pages
955 * vmballoon_page_size(is_2m_pages)
956 < b->target + vmballoon_page_size(true)))
957 break;
958
959 list_del(&page->lru);
960 b->ops->add_page(b, num_pages++, page);
627 961
628 pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free); 962 if (num_pages == b->batch_max_pages) {
963 int error;
629 964
630 /* free pages to reach target */ 965 error = b->ops->unlock(b, num_pages,
631 list_for_each_entry_safe(page, next, &b->pages, lru) { 966 is_2m_pages, &b->target);
632 error = vmballoon_release_page(b, page); 967 num_pages = 0;
633 if (error) { 968 if (error)
634 /* quickly decrease rate in case of error */ 969 return;
635 b->rate_free = max(b->rate_free / 2, 970 }
636 VMW_BALLOON_RATE_FREE_MIN); 971
637 return; 972 cond_resched();
638 } 973 }
639 974
640 if (++i >= goal) 975 if (num_pages > 0)
641 break; 976 b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
977 }
978}
979
980static const struct vmballoon_ops vmballoon_basic_ops = {
981 .add_page = vmballoon_add_page,
982 .lock = vmballoon_lock_page,
983 .unlock = vmballoon_unlock_page
984};
985
986static const struct vmballoon_ops vmballoon_batched_ops = {
987 .add_page = vmballoon_add_batched_page,
988 .lock = vmballoon_lock_batched_page,
989 .unlock = vmballoon_unlock_batched_page
990};
991
992static bool vmballoon_init_batching(struct vmballoon *b)
993{
994 b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
995 if (!b->page)
996 return false;
997
998 b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
999 if (!b->batch_page) {
1000 __free_page(b->page);
1001 return false;
1002 }
1003
1004 return true;
1005}
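vmballoon_init_batching() pairs a real page with a vmap()ed kernel virtual address: the driver writes batch entries through the mapping while page_to_pfn(b->page) supplies the frame number handed to the hypervisor in the batched commands above. A stripped-down sketch with illustrative names:

    struct page *pg = alloc_page(GFP_KERNEL);
    u64 *entries;

    if (!pg)
            return false;

    /* CPU-visible mapping used to fill in the batch entries ... */
    entries = vmap(&pg, 1, VM_MAP, PAGE_KERNEL);
    if (!entries) {
            __free_page(pg);
            return false;
    }

    /* ... while the hypervisor is told the frame number, e.g.
     * VMWARE_BALLOON_CMD(BATCHED_LOCK, page_to_pfn(pg), n, target) */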
1006
1007/*
1008 * Receive notification and resize balloon
1009 */
1010static void vmballoon_doorbell(void *client_data)
1011{
1012 struct vmballoon *b = client_data;
1013
1014 STATS_INC(b->stats.doorbell);
1015
1016 mod_delayed_work(system_freezable_wq, &b->dwork, 0);
1017}
1018
1019/*
1020 * Clean up vmci doorbell
1021 */
1022static void vmballoon_vmci_cleanup(struct vmballoon *b)
1023{
1024 int error;
1025
1026 VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
1027 VMCI_INVALID_ID, error);
1028 STATS_INC(b->stats.doorbell_unset);
1029
1030 if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
1031 vmci_doorbell_destroy(b->vmci_doorbell);
1032 b->vmci_doorbell = VMCI_INVALID_HANDLE;
1033 }
1034}
1035
1036/*
 1037 * Initialize the vmci doorbell, to get notified as soon as the balloon changes
1038 */
1039static int vmballoon_vmci_init(struct vmballoon *b)
1040{
1041 int error = 0;
1042
1043 if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
1044 error = vmci_doorbell_create(&b->vmci_doorbell,
1045 VMCI_FLAG_DELAYED_CB,
1046 VMCI_PRIVILEGE_FLAG_RESTRICTED,
1047 vmballoon_doorbell, b);
1048
1049 if (error == VMCI_SUCCESS) {
1050 VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
1051 b->vmci_doorbell.context,
1052 b->vmci_doorbell.resource, error);
1053 STATS_INC(b->stats.doorbell_set);
1054 }
1055 }
1056
1057 if (error != 0) {
1058 vmballoon_vmci_cleanup(b);
1059
1060 return -EIO;
642 } 1061 }
643 1062
644 /* slowly increase rate if there were no errors */ 1063 return 0;
645 b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC, 1064}
646 VMW_BALLOON_RATE_FREE_MAX); 1065
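The doorbell lifecycle is: create a VMCI doorbell with a delayed (process-context) callback, then hand its handle to the host via VMCI_DOORBELL_SET; teardown points the host at an invalid handle first and destroys the doorbell afterwards. Condensed from the two functions above, with error handling elided:

    /* setup */
    vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
                         VMCI_PRIVILEGE_FLAG_RESTRICTED,
                         vmballoon_doorbell, b);
    VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
                       b->vmci_doorbell.resource, error);

    /* teardown */
    VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
                       VMCI_INVALID_ID, error);
    vmci_doorbell_destroy(b->vmci_doorbell);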
1066/*
1067 * Perform standard reset sequence by popping the balloon (in case it
1068 * is not empty) and then restarting protocol. This operation normally
1069 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
1070 */
1071static void vmballoon_reset(struct vmballoon *b)
1072{
1073 int error;
1074
1075 vmballoon_vmci_cleanup(b);
1076
1077 /* free all pages, skipping monitor unlock */
1078 vmballoon_pop(b);
1079
1080 if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
1081 return;
1082
1083 if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
1084 b->ops = &vmballoon_batched_ops;
1085 b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
1086 if (!vmballoon_init_batching(b)) {
1087 /*
 1088 * We failed to initialize batching; inform the monitor
1089 * about it by sending a null capability.
1090 *
1091 * The guest will retry in one second.
1092 */
1093 vmballoon_send_start(b, 0);
1094 return;
1095 }
1096 } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
1097 b->ops = &vmballoon_basic_ops;
1098 b->batch_max_pages = 1;
1099 }
1100
1101 b->reset_required = false;
1102
1103 error = vmballoon_vmci_init(b);
1104 if (error)
1105 pr_err("failed to initialize vmci doorbell\n");
1106
1107 if (!vmballoon_send_guest_id(b))
1108 pr_err("failed to send guest ID to the host\n");
647} 1109}
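Reset picks the operations table from the negotiated capabilities, so inflate and deflate stay agnostic of the transport: they feed pages to ops->add_page() and flush every batch_max_pages entries (512 in batched mode, 1 in basic mode). From vmballoon_inflate() above:

    b->ops->add_page(b, num_pages++, page);
    if (num_pages == b->batch_max_pages) {
            error = b->ops->lock(b, num_pages, is_2m_pages,
                                 &b->target);
            num_pages = 0;
    }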
648 1110
649/* 1111/*
@@ -664,13 +1126,14 @@ static void vmballoon_work(struct work_struct *work)
664 if (b->slow_allocation_cycles > 0) 1126 if (b->slow_allocation_cycles > 0)
665 b->slow_allocation_cycles--; 1127 b->slow_allocation_cycles--;
666 1128
667 if (vmballoon_send_get_target(b, &target)) { 1129 if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
668 /* update target, adjust size */ 1130 /* update target, adjust size */
669 b->target = target; 1131 b->target = target;
670 1132
671 if (b->size < target) 1133 if (b->size < target)
672 vmballoon_inflate(b); 1134 vmballoon_inflate(b);
673 else if (b->size > target) 1135 else if (target == 0 ||
1136 b->size > target + vmballoon_page_size(true))
674 vmballoon_deflate(b); 1137 vmballoon_deflate(b);
675 } 1138 }
676 1139
@@ -692,6 +1155,14 @@ static int vmballoon_debug_show(struct seq_file *f, void *offset)
692 struct vmballoon *b = f->private; 1155 struct vmballoon *b = f->private;
693 struct vmballoon_stats *stats = &b->stats; 1156 struct vmballoon_stats *stats = &b->stats;
694 1157
1158 /* format capabilities info */
1159 seq_printf(f,
1160 "balloon capabilities: %#4x\n"
1161 "used capabilities: %#4lx\n"
1162 "is resetting: %c\n",
1163 VMW_BALLOON_CAPABILITIES, b->capabilities,
1164 b->reset_required ? 'y' : 'n');
1165
695 /* format size info */ 1166 /* format size info */
696 seq_printf(f, 1167 seq_printf(f,
697 "target: %8d pages\n" 1168 "target: %8d pages\n"
@@ -700,35 +1171,48 @@ static int vmballoon_debug_show(struct seq_file *f, void *offset)
700 1171
701 /* format rate info */ 1172 /* format rate info */
702 seq_printf(f, 1173 seq_printf(f,
703 "rateNoSleepAlloc: %8d pages/sec\n" 1174 "rateSleepAlloc: %8d pages/sec\n",
704 "rateSleepAlloc: %8d pages/sec\n" 1175 b->rate_alloc);
705 "rateFree: %8d pages/sec\n",
706 VMW_BALLOON_NOSLEEP_ALLOC_MAX,
707 b->rate_alloc, b->rate_free);
708 1176
709 seq_printf(f, 1177 seq_printf(f,
710 "\n" 1178 "\n"
711 "timer: %8u\n" 1179 "timer: %8u\n"
1180 "doorbell: %8u\n"
712 "start: %8u (%4u failed)\n" 1181 "start: %8u (%4u failed)\n"
713 "guestType: %8u (%4u failed)\n" 1182 "guestType: %8u (%4u failed)\n"
1183 "2m-lock: %8u (%4u failed)\n"
714 "lock: %8u (%4u failed)\n" 1184 "lock: %8u (%4u failed)\n"
1185 "2m-unlock: %8u (%4u failed)\n"
715 "unlock: %8u (%4u failed)\n" 1186 "unlock: %8u (%4u failed)\n"
716 "target: %8u (%4u failed)\n" 1187 "target: %8u (%4u failed)\n"
1188 "prim2mAlloc: %8u (%4u failed)\n"
717 "primNoSleepAlloc: %8u (%4u failed)\n" 1189 "primNoSleepAlloc: %8u (%4u failed)\n"
718 "primCanSleepAlloc: %8u (%4u failed)\n" 1190 "primCanSleepAlloc: %8u (%4u failed)\n"
1191 "prim2mFree: %8u\n"
719 "primFree: %8u\n" 1192 "primFree: %8u\n"
1193 "err2mAlloc: %8u\n"
720 "errAlloc: %8u\n" 1194 "errAlloc: %8u\n"
721 "errFree: %8u\n", 1195 "err2mFree: %8u\n"
1196 "errFree: %8u\n"
1197 "doorbellSet: %8u\n"
1198 "doorbellUnset: %8u\n",
722 stats->timer, 1199 stats->timer,
1200 stats->doorbell,
723 stats->start, stats->start_fail, 1201 stats->start, stats->start_fail,
724 stats->guest_type, stats->guest_type_fail, 1202 stats->guest_type, stats->guest_type_fail,
725 stats->lock, stats->lock_fail, 1203 stats->lock[true], stats->lock_fail[true],
726 stats->unlock, stats->unlock_fail, 1204 stats->lock[false], stats->lock_fail[false],
1205 stats->unlock[true], stats->unlock_fail[true],
1206 stats->unlock[false], stats->unlock_fail[false],
727 stats->target, stats->target_fail, 1207 stats->target, stats->target_fail,
728 stats->alloc, stats->alloc_fail, 1208 stats->alloc[true], stats->alloc_fail[true],
1209 stats->alloc[false], stats->alloc_fail[false],
729 stats->sleep_alloc, stats->sleep_alloc_fail, 1210 stats->sleep_alloc, stats->sleep_alloc_fail,
730 stats->free, 1211 stats->free[true],
731 stats->refused_alloc, stats->refused_free); 1212 stats->free[false],
1213 stats->refused_alloc[true], stats->refused_alloc[false],
1214 stats->refused_free[true], stats->refused_free[false],
1215 stats->doorbell_set, stats->doorbell_unset);
732 1216
733 return 0; 1217 return 0;
734} 1218}
@@ -782,7 +1266,7 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
782static int __init vmballoon_init(void) 1266static int __init vmballoon_init(void)
783{ 1267{
784 int error; 1268 int error;
785 1269 unsigned is_2m_pages;
786 /* 1270 /*
787 * Check if we are running on VMware's hypervisor and bail out 1271 * Check if we are running on VMware's hypervisor and bail out
788 * if we are not. 1272 * if we are not.
@@ -790,32 +1274,26 @@ static int __init vmballoon_init(void)
790 if (x86_hyper != &x86_hyper_vmware) 1274 if (x86_hyper != &x86_hyper_vmware)
791 return -ENODEV; 1275 return -ENODEV;
792 1276
793 INIT_LIST_HEAD(&balloon.pages); 1277 for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
794 INIT_LIST_HEAD(&balloon.refused_pages); 1278 is_2m_pages++) {
1279 INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
1280 INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
1281 }
795 1282
796 /* initialize rates */ 1283 /* initialize rates */
797 balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX; 1284 balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
798 balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;
799 1285
800 INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); 1286 INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
801 1287
802 /*
803 * Start balloon.
804 */
805 if (!vmballoon_send_start(&balloon)) {
806 pr_err("failed to send start command to the host\n");
807 return -EIO;
808 }
809
810 if (!vmballoon_send_guest_id(&balloon)) {
811 pr_err("failed to send guest ID to the host\n");
812 return -EIO;
813 }
814
815 error = vmballoon_debugfs_init(&balloon); 1288 error = vmballoon_debugfs_init(&balloon);
816 if (error) 1289 if (error)
817 return error; 1290 return error;
818 1291
1292 balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
1293 balloon.batch_page = NULL;
1294 balloon.page = NULL;
1295 balloon.reset_required = true;
1296
819 queue_delayed_work(system_freezable_wq, &balloon.dwork, 0); 1297 queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
820 1298
821 return 0; 1299 return 0;
@@ -824,6 +1302,7 @@ module_init(vmballoon_init);
824 1302
825static void __exit vmballoon_exit(void) 1303static void __exit vmballoon_exit(void)
826{ 1304{
1305 vmballoon_vmci_cleanup(&balloon);
827 cancel_delayed_work_sync(&balloon.dwork); 1306 cancel_delayed_work_sync(&balloon.dwork);
828 1307
829 vmballoon_debugfs_exit(&balloon); 1308 vmballoon_debugfs_exit(&balloon);
@@ -833,7 +1312,7 @@ static void __exit vmballoon_exit(void)
833 * Reset connection before deallocating memory to avoid potential for 1312 * Reset connection before deallocating memory to avoid potential for
834 * additional spurious resets from guest touching deallocated pages. 1313 * additional spurious resets from guest touching deallocated pages.
835 */ 1314 */
836 vmballoon_send_start(&balloon); 1315 vmballoon_send_start(&balloon, 0);
837 vmballoon_pop(&balloon); 1316 vmballoon_pop(&balloon);
838} 1317}
839module_exit(vmballoon_exit); 1318module_exit(vmballoon_exit);
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
index 822665245588..8a4b6bbe1bee 100644
--- a/drivers/misc/vmw_vmci/vmci_datagram.c
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -276,11 +276,10 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
276 } 276 }
277 277
278 /* We make a copy to enqueue. */ 278 /* We make a copy to enqueue. */
279 new_dg = kmalloc(dg_size, GFP_KERNEL); 279 new_dg = kmemdup(dg, dg_size, GFP_KERNEL);
280 if (new_dg == NULL) 280 if (new_dg == NULL)
281 return VMCI_ERROR_NO_MEM; 281 return VMCI_ERROR_NO_MEM;
282 282
283 memcpy(new_dg, dg, dg_size);
284 retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg); 283 retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
285 if (retval < VMCI_SUCCESS) { 284 if (retval < VMCI_SUCCESS) {
286 kfree(new_dg); 285 kfree(new_dg);
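kmemdup() is the idiomatic replacement for an open-coded allocate-then-copy pair; the two forms below are equivalent (same GFP flags, NULL on allocation failure):

    /* before */
    new_dg = kmalloc(dg_size, GFP_KERNEL);
    if (new_dg == NULL)
            return VMCI_ERROR_NO_MEM;
    memcpy(new_dg, dg, dg_size);

    /* after */
    new_dg = kmemdup(dg, dg_size, GFP_KERNEL);
    if (new_dg == NULL)
            return VMCI_ERROR_NO_MEM;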
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 754a9bb0f58d..83deda4bb4d6 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -118,7 +118,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
118 cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION; 118 cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
119 119
120 MEI_DUMP_NFC_HDR("version", &cmd.hdr); 120 MEI_DUMP_NFC_HDR("version", &cmd.hdr);
121 r = mei_cl_send(phy->device, (u8 *)&cmd, sizeof(struct mei_nfc_cmd)); 121 r = mei_cldev_send(phy->cldev, (u8 *)&cmd, sizeof(struct mei_nfc_cmd));
122 if (r < 0) { 122 if (r < 0) {
123 pr_err("Could not send IF version cmd\n"); 123 pr_err("Could not send IF version cmd\n");
124 return r; 124 return r;
@@ -132,7 +132,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
132 if (!reply) 132 if (!reply)
133 return -ENOMEM; 133 return -ENOMEM;
134 134
135 bytes_recv = mei_cl_recv(phy->device, (u8 *)reply, if_version_length); 135 bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length);
136 if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) { 136 if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
137 pr_err("Could not read IF version\n"); 137 pr_err("Could not read IF version\n");
138 r = -EIO; 138 r = -EIO;
@@ -186,13 +186,14 @@ static int mei_nfc_connect(struct nfc_mei_phy *phy)
186 connect->vendor_id = phy->vendor_id; 186 connect->vendor_id = phy->vendor_id;
187 187
188 MEI_DUMP_NFC_HDR("connect request", &cmd->hdr); 188 MEI_DUMP_NFC_HDR("connect request", &cmd->hdr);
189 r = mei_cl_send(phy->device, (u8 *)cmd, connect_length); 189 r = mei_cldev_send(phy->cldev, (u8 *)cmd, connect_length);
190 if (r < 0) { 190 if (r < 0) {
191 pr_err("Could not send connect cmd %d\n", r); 191 pr_err("Could not send connect cmd %d\n", r);
192 goto err; 192 goto err;
193 } 193 }
194 194
195 bytes_recv = mei_cl_recv(phy->device, (u8 *)reply, connect_resp_length); 195 bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply,
196 connect_resp_length);
196 if (bytes_recv < 0) { 197 if (bytes_recv < 0) {
197 r = bytes_recv; 198 r = bytes_recv;
198 pr_err("Could not read connect response %d\n", r); 199 pr_err("Could not read connect response %d\n", r);
@@ -238,7 +239,7 @@ static int mei_nfc_send(struct nfc_mei_phy *phy, u8 *buf, size_t length)
238 MEI_DUMP_NFC_HDR("send", hdr); 239 MEI_DUMP_NFC_HDR("send", hdr);
239 240
240 memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length); 241 memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
241 err = mei_cl_send(phy->device, mei_buf, length + MEI_NFC_HEADER_SIZE); 242 err = mei_cldev_send(phy->cldev, mei_buf, length + MEI_NFC_HEADER_SIZE);
242 if (err < 0) 243 if (err < 0)
243 goto out; 244 goto out;
244 245
@@ -278,7 +279,7 @@ static int mei_nfc_recv(struct nfc_mei_phy *phy, u8 *buf, size_t length)
278 struct mei_nfc_hdr *hdr; 279 struct mei_nfc_hdr *hdr;
279 int received_length; 280 int received_length;
280 281
281 received_length = mei_cl_recv(phy->device, buf, length); 282 received_length = mei_cldev_recv(phy->cldev, buf, length);
282 if (received_length < 0) 283 if (received_length < 0)
283 return received_length; 284 return received_length;
284 285
@@ -296,7 +297,7 @@ static int mei_nfc_recv(struct nfc_mei_phy *phy, u8 *buf, size_t length)
296} 297}
297 298
298 299
299static void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, 300static void nfc_mei_event_cb(struct mei_cl_device *cldev, u32 events,
300 void *context) 301 void *context)
301{ 302{
302 struct nfc_mei_phy *phy = context; 303 struct nfc_mei_phy *phy = context;
@@ -337,7 +338,7 @@ static int nfc_mei_phy_enable(void *phy_id)
337 if (phy->powered == 1) 338 if (phy->powered == 1)
338 return 0; 339 return 0;
339 340
340 r = mei_cl_enable_device(phy->device); 341 r = mei_cldev_enable(phy->cldev);
341 if (r < 0) { 342 if (r < 0) {
342 pr_err("Could not enable device %d\n", r); 343 pr_err("Could not enable device %d\n", r);
343 return r; 344 return r;
@@ -355,7 +356,7 @@ static int nfc_mei_phy_enable(void *phy_id)
355 goto err; 356 goto err;
356 } 357 }
357 358
358 r = mei_cl_register_event_cb(phy->device, BIT(MEI_CL_EVENT_RX), 359 r = mei_cldev_register_event_cb(phy->cldev, BIT(MEI_CL_EVENT_RX),
359 nfc_mei_event_cb, phy); 360 nfc_mei_event_cb, phy);
360 if (r) { 361 if (r) {
361 pr_err("Event cb registration failed %d\n", r); 362 pr_err("Event cb registration failed %d\n", r);
@@ -368,7 +369,7 @@ static int nfc_mei_phy_enable(void *phy_id)
368 369
369err: 370err:
370 phy->powered = 0; 371 phy->powered = 0;
371 mei_cl_disable_device(phy->device); 372 mei_cldev_disable(phy->cldev);
372 return r; 373 return r;
373} 374}
374 375
@@ -378,7 +379,7 @@ static void nfc_mei_phy_disable(void *phy_id)
378 379
379 pr_info("%s\n", __func__); 380 pr_info("%s\n", __func__);
380 381
381 mei_cl_disable_device(phy->device); 382 mei_cldev_disable(phy->cldev);
382 383
383 phy->powered = 0; 384 phy->powered = 0;
384} 385}
@@ -390,7 +391,7 @@ struct nfc_phy_ops mei_phy_ops = {
390}; 391};
391EXPORT_SYMBOL_GPL(mei_phy_ops); 392EXPORT_SYMBOL_GPL(mei_phy_ops);
392 393
393struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device) 394struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *cldev)
394{ 395{
395 struct nfc_mei_phy *phy; 396 struct nfc_mei_phy *phy;
396 397
@@ -398,9 +399,9 @@ struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device)
398 if (!phy) 399 if (!phy)
399 return NULL; 400 return NULL;
400 401
401 phy->device = device; 402 phy->cldev = cldev;
402 init_waitqueue_head(&phy->send_wq); 403 init_waitqueue_head(&phy->send_wq);
403 mei_cl_set_drvdata(device, phy); 404 mei_cldev_set_drvdata(cldev, phy);
404 405
405 return phy; 406 return phy;
406} 407}
@@ -408,7 +409,7 @@ EXPORT_SYMBOL_GPL(nfc_mei_phy_alloc);
408 409
409void nfc_mei_phy_free(struct nfc_mei_phy *phy) 410void nfc_mei_phy_free(struct nfc_mei_phy *phy)
410{ 411{
411 mei_cl_disable_device(phy->device); 412 mei_cldev_disable(phy->cldev);
412 kfree(phy); 413 kfree(phy);
413} 414}
414EXPORT_SYMBOL_GPL(nfc_mei_phy_free); 415EXPORT_SYMBOL_GPL(nfc_mei_phy_free);
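The NFC changes are a mechanical move to the renamed mei_cldev_* client API; the old helpers map one-to-one, as this patch's call sites show:

    /* old                            new
     * mei_cl_send()                  mei_cldev_send()
     * mei_cl_recv()                  mei_cldev_recv()
     * mei_cl_enable_device()         mei_cldev_enable()
     * mei_cl_disable_device()        mei_cldev_disable()
     * mei_cl_register_event_cb()     mei_cldev_register_event_cb()
     * mei_cl_set_drvdata()           mei_cldev_set_drvdata()
     * mei_cl_get_drvdata()           mei_cldev_get_drvdata()
     * mei_cl_driver_register()       mei_cldev_driver_register()
     * mei_cl_driver_unregister()     mei_cldev_driver_unregister()
     */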
diff --git a/drivers/nfc/mei_phy.h b/drivers/nfc/mei_phy.h
index fbfa3e61738f..acd3a1fc69e6 100644
--- a/drivers/nfc/mei_phy.h
+++ b/drivers/nfc/mei_phy.h
@@ -13,7 +13,7 @@
13/** 13/**
14 * struct nfc_mei_phy 14 * struct nfc_mei_phy
15 * 15 *
16 * @device: mei device 16 * @cldev: mei client device
17 * @hdev: nfc hci device 17 * @hdev: nfc hci device
18 18
19 * @send_wq: send completion wait queue 19 * @send_wq: send completion wait queue
@@ -28,7 +28,7 @@
28 * and prevents normal operation. 28 * and prevents normal operation.
29 */ 29 */
30struct nfc_mei_phy { 30struct nfc_mei_phy {
31 struct mei_cl_device *device; 31 struct mei_cl_device *cldev;
32 struct nfc_hci_dev *hdev; 32 struct nfc_hci_dev *hdev;
33 33
34 wait_queue_head_t send_wq; 34 wait_queue_head_t send_wq;
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index f9f5fc97cdd7..3092501f26c4 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -29,7 +29,7 @@
29 29
30#define MICROREAD_DRIVER_NAME "microread" 30#define MICROREAD_DRIVER_NAME "microread"
31 31
32static int microread_mei_probe(struct mei_cl_device *device, 32static int microread_mei_probe(struct mei_cl_device *cldev,
33 const struct mei_cl_device_id *id) 33 const struct mei_cl_device_id *id)
34{ 34{
35 struct nfc_mei_phy *phy; 35 struct nfc_mei_phy *phy;
@@ -37,7 +37,7 @@ static int microread_mei_probe(struct mei_cl_device *device,
37 37
38 pr_info("Probing NFC microread\n"); 38 pr_info("Probing NFC microread\n");
39 39
40 phy = nfc_mei_phy_alloc(device); 40 phy = nfc_mei_phy_alloc(cldev);
41 if (!phy) { 41 if (!phy) {
42 pr_err("Cannot allocate memory for microread mei phy.\n"); 42 pr_err("Cannot allocate memory for microread mei phy.\n");
43 return -ENOMEM; 43 return -ENOMEM;
@@ -55,9 +55,9 @@ static int microread_mei_probe(struct mei_cl_device *device,
55 return 0; 55 return 0;
56} 56}
57 57
58static int microread_mei_remove(struct mei_cl_device *device) 58static int microread_mei_remove(struct mei_cl_device *cldev)
59{ 59{
60 struct nfc_mei_phy *phy = mei_cl_get_drvdata(device); 60 struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
61 61
62 microread_remove(phy->hdev); 62 microread_remove(phy->hdev);
63 63
@@ -67,7 +67,7 @@ static int microread_mei_remove(struct mei_cl_device *device)
67} 67}
68 68
69static struct mei_cl_device_id microread_mei_tbl[] = { 69static struct mei_cl_device_id microread_mei_tbl[] = {
70 { MICROREAD_DRIVER_NAME, MEI_NFC_UUID}, 70 { MICROREAD_DRIVER_NAME, MEI_NFC_UUID, MEI_CL_VERSION_ANY},
71 71
72 /* required last entry */ 72 /* required last entry */
73 { } 73 { }
@@ -88,7 +88,7 @@ static int microread_mei_init(void)
88 88
89 pr_debug(DRIVER_DESC ": %s\n", __func__); 89 pr_debug(DRIVER_DESC ": %s\n", __func__);
90 90
91 r = mei_cl_driver_register(&microread_driver); 91 r = mei_cldev_driver_register(&microread_driver);
92 if (r) { 92 if (r) {
93 pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); 93 pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
94 return r; 94 return r;
@@ -99,7 +99,7 @@ static int microread_mei_init(void)
99 99
100static void microread_mei_exit(void) 100static void microread_mei_exit(void)
101{ 101{
102 mei_cl_driver_unregister(&microread_driver); 102 mei_cldev_driver_unregister(&microread_driver);
103} 103}
104 104
105module_init(microread_mei_init); 105module_init(microread_mei_init);
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 101a37e12efa..46d0eb24eef9 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -27,7 +27,7 @@
27 27
28#define PN544_DRIVER_NAME "pn544" 28#define PN544_DRIVER_NAME "pn544"
29 29
30static int pn544_mei_probe(struct mei_cl_device *device, 30static int pn544_mei_probe(struct mei_cl_device *cldev,
31 const struct mei_cl_device_id *id) 31 const struct mei_cl_device_id *id)
32{ 32{
33 struct nfc_mei_phy *phy; 33 struct nfc_mei_phy *phy;
@@ -35,7 +35,7 @@ static int pn544_mei_probe(struct mei_cl_device *device,
35 35
36 pr_info("Probing NFC pn544\n"); 36 pr_info("Probing NFC pn544\n");
37 37
38 phy = nfc_mei_phy_alloc(device); 38 phy = nfc_mei_phy_alloc(cldev);
39 if (!phy) { 39 if (!phy) {
40 pr_err("Cannot allocate memory for pn544 mei phy.\n"); 40 pr_err("Cannot allocate memory for pn544 mei phy.\n");
41 return -ENOMEM; 41 return -ENOMEM;
@@ -53,9 +53,9 @@ static int pn544_mei_probe(struct mei_cl_device *device,
53 return 0; 53 return 0;
54} 54}
55 55
56static int pn544_mei_remove(struct mei_cl_device *device) 56static int pn544_mei_remove(struct mei_cl_device *cldev)
57{ 57{
58 struct nfc_mei_phy *phy = mei_cl_get_drvdata(device); 58 struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
59 59
60 pr_info("Removing pn544\n"); 60 pr_info("Removing pn544\n");
61 61
@@ -67,7 +67,7 @@ static int pn544_mei_remove(struct mei_cl_device *device)
67} 67}
68 68
69static struct mei_cl_device_id pn544_mei_tbl[] = { 69static struct mei_cl_device_id pn544_mei_tbl[] = {
70 { PN544_DRIVER_NAME, MEI_NFC_UUID}, 70 { PN544_DRIVER_NAME, MEI_NFC_UUID, MEI_CL_VERSION_ANY},
71 71
72 /* required last entry */ 72 /* required last entry */
73 { } 73 { }
@@ -88,7 +88,7 @@ static int pn544_mei_init(void)
88 88
89 pr_debug(DRIVER_DESC ": %s\n", __func__); 89 pr_debug(DRIVER_DESC ": %s\n", __func__);
90 90
91 r = mei_cl_driver_register(&pn544_driver); 91 r = mei_cldev_driver_register(&pn544_driver);
92 if (r) { 92 if (r) {
93 pr_err(PN544_DRIVER_NAME ": driver registration failed\n"); 93 pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
94 return r; 94 return r;
@@ -99,7 +99,7 @@ static int pn544_mei_init(void)
99 99
100static void pn544_mei_exit(void) 100static void pn544_mei_exit(void)
101{ 101{
102 mei_cl_driver_unregister(&pn544_driver); 102 mei_cldev_driver_unregister(&pn544_driver);
103} 103}
104 104
105module_init(pn544_mei_init); 105module_init(pn544_mei_init);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 8db297821f78..bc4ea585b42e 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -14,6 +14,28 @@ menuconfig NVMEM
14 14
15if NVMEM 15if NVMEM
16 16
17config NVMEM_IMX_OCOTP
18 tristate "i.MX6 On-Chip OTP Controller support"
19 depends on SOC_IMX6
20 help
21 This is a driver for the On-Chip OTP Controller (OCOTP) available on
22 i.MX6 SoCs, providing access to 4 Kbits of one-time programmable
23 eFuses.
24
25 This driver can also be built as a module. If so, the module
26 will be called nvmem-imx-ocotp.
27
28config NVMEM_MXS_OCOTP
29 tristate "Freescale MXS On-Chip OTP Memory Support"
30 depends on ARCH_MXS || COMPILE_TEST
31 help
 32	  If you say Y here, you will get read-only access to the
 33	  One Time Programmable memory pages that are stored
 34	  on the Freescale i.MX23/i.MX28 processors.
35
36 This driver can also be built as a module. If so, the module
37 will be called nvmem-mxs-ocotp.
38
17config QCOM_QFPROM 39config QCOM_QFPROM
18 tristate "QCOM QFPROM Support" 40 tristate "QCOM QFPROM Support"
19 depends on ARCH_QCOM || COMPILE_TEST 41 depends on ARCH_QCOM || COMPILE_TEST
@@ -25,6 +47,16 @@ config QCOM_QFPROM
25 This driver can also be built as a module. If so, the module 47 This driver can also be built as a module. If so, the module
26 will be called nvmem_qfprom. 48 will be called nvmem_qfprom.
27 49
50config ROCKCHIP_EFUSE
51 tristate "Rockchip eFuse Support"
52 depends on ARCH_ROCKCHIP || COMPILE_TEST
53 help
 54	  This is a simple driver to dump specified values of Rockchip SoCs
 55	  from the eFuse, such as cpu-leakage.
56
57 This driver can also be built as a module. If so, the module
58 will be called nvmem_rockchip_efuse.
59
28config NVMEM_SUNXI_SID 60config NVMEM_SUNXI_SID
29 tristate "Allwinner SoCs SID support" 61 tristate "Allwinner SoCs SID support"
30 depends on ARCH_SUNXI 62 depends on ARCH_SUNXI
@@ -36,4 +68,14 @@ config NVMEM_SUNXI_SID
36 This driver can also be built as a module. If so, the module 68 This driver can also be built as a module. If so, the module
37 will be called nvmem_sunxi_sid. 69 will be called nvmem_sunxi_sid.
38 70
71config NVMEM_VF610_OCOTP
72 tristate "VF610 SoC OCOTP support"
73 depends on SOC_VF610 || COMPILE_TEST
74 help
75 This is a driver for the 'OCOTP' peripheral available on Vybrid
76 devices like VF5xx and VF6xx.
77
 78	  This driver can also be built as a module. If so, the module will
 79	  be called nvmem-vf610-ocotp.
80
39endif 81endif
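
The four new entries above each register an nvmem provider; other kernel code reaches a provider through the consumer API in include/linux/nvmem-consumer.h. A minimal consumer sketch, assuming the "imx-ocotp" device name from the nvmem_config in the imx-ocotp driver added below; the function name and the offset of 0x0 are illustrative only:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/nvmem-consumer.h>

	/* Read one 32-bit fuse word from the i.MX6 OCOTP nvmem device.
	 * The offset (0x0) and calling context are illustrative only. */
	static int example_read_fuse(struct device *dev, u32 *out)
	{
		struct nvmem_device *nvmem;
		int ret;

		nvmem = nvmem_device_get(dev, "imx-ocotp");
		if (IS_ERR(nvmem))
			return PTR_ERR(nvmem);

		/* nvmem_device_read() returns the number of bytes read
		 * on success, or a negative errno. */
		ret = nvmem_device_read(nvmem, 0x0, sizeof(*out), out);
		nvmem_device_put(nvmem);

		return ret < 0 ? ret : 0;
	}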
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 4328b930ad9a..95dde3f8f085 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -6,7 +6,15 @@ obj-$(CONFIG_NVMEM) += nvmem_core.o
6nvmem_core-y := core.o 6nvmem_core-y := core.o
7 7
8# Devices 8# Devices
9obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
10nvmem-imx-ocotp-y := imx-ocotp.o
11obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
12nvmem-mxs-ocotp-y := mxs-ocotp.o
9obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o 13obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
10nvmem_qfprom-y := qfprom.o 14nvmem_qfprom-y := qfprom.o
15obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
16nvmem_rockchip_efuse-y := rockchip-efuse.o
11obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o 17obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
12nvmem_sunxi_sid-y := sunxi_sid.o 18nvmem_sunxi_sid-y := sunxi_sid.o
19obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
20nvmem-vf610-ocotp-y := vf610-ocotp.o
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
new file mode 100644
index 000000000000..b7971d410b60
--- /dev/null
+++ b/drivers/nvmem/imx-ocotp.c
@@ -0,0 +1,154 @@
1/*
2 * i.MX6 OCOTP fusebox driver
3 *
4 * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
5 *
6 * Based on the barebox ocotp driver,
7 * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>,
8 * Orex Computed Radiography
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * http://www.opensource.org/licenses/gpl-license.html
15 * http://www.gnu.org/copyleft/gpl.html
16 */
17
18#include <linux/device.h>
19#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/nvmem-provider.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/platform_device.h>
25#include <linux/regmap.h>
26#include <linux/slab.h>
27
28struct ocotp_priv {
29 struct device *dev;
30 void __iomem *base;
31 unsigned int nregs;
32};
33
34static int imx_ocotp_read(void *context, const void *reg, size_t reg_size,
35 void *val, size_t val_size)
36{
37 struct ocotp_priv *priv = context;
38 unsigned int offset = *(u32 *)reg;
39 unsigned int count;
40 int i;
41 u32 index;
42
43 index = offset >> 2;
44 count = val_size >> 2;
45
46 if (count > (priv->nregs - index))
47 count = priv->nregs - index;
48
49 for (i = index; i < (index + count); i++) {
50 *(u32 *)val = readl(priv->base + 0x400 + i * 0x10);
51 val += 4;
52 }
53
54 return (i - index) * 4;
55}
56
57static int imx_ocotp_write(void *context, const void *data, size_t count)
58{
59 /* Not implemented */
60 return 0;
61}
62
63static struct regmap_bus imx_ocotp_bus = {
64 .read = imx_ocotp_read,
65 .write = imx_ocotp_write,
66 .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
67 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
68};
69
70static bool imx_ocotp_writeable_reg(struct device *dev, unsigned int reg)
71{
72 return false;
73}
74
75static struct regmap_config imx_ocotp_regmap_config = {
76 .reg_bits = 32,
77 .val_bits = 32,
78 .reg_stride = 4,
79 .writeable_reg = imx_ocotp_writeable_reg,
80 .name = "imx-ocotp",
81};
82
83static struct nvmem_config imx_ocotp_nvmem_config = {
84 .name = "imx-ocotp",
85 .read_only = true,
86 .owner = THIS_MODULE,
87};
88
89static const struct of_device_id imx_ocotp_dt_ids[] = {
90 { .compatible = "fsl,imx6q-ocotp", (void *)128 },
91 { .compatible = "fsl,imx6sl-ocotp", (void *)32 },
92 { .compatible = "fsl,imx6sx-ocotp", (void *)128 },
93 { },
94};
95MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
96
97static int imx_ocotp_probe(struct platform_device *pdev)
98{
99 const struct of_device_id *of_id;
100 struct device *dev = &pdev->dev;
101 struct resource *res;
102 struct regmap *regmap;
103 struct ocotp_priv *priv;
104 struct nvmem_device *nvmem;
105
106 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
107 if (!priv)
108 return -ENOMEM;
109
110 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
111 priv->base = devm_ioremap_resource(dev, res);
112 if (IS_ERR(priv->base))
113 return PTR_ERR(priv->base);
114
115 of_id = of_match_device(imx_ocotp_dt_ids, dev);
116 priv->nregs = (unsigned int)of_id->data;
117 imx_ocotp_regmap_config.max_register = 4 * priv->nregs - 4;
118
119 regmap = devm_regmap_init(dev, &imx_ocotp_bus, priv,
120 &imx_ocotp_regmap_config);
121 if (IS_ERR(regmap)) {
122 dev_err(dev, "regmap init failed\n");
123 return PTR_ERR(regmap);
124 }
125 imx_ocotp_nvmem_config.dev = dev;
126 nvmem = nvmem_register(&imx_ocotp_nvmem_config);
127 if (IS_ERR(nvmem))
128 return PTR_ERR(nvmem);
129
130 platform_set_drvdata(pdev, nvmem);
131
132 return 0;
133}
134
135static int imx_ocotp_remove(struct platform_device *pdev)
136{
137 struct nvmem_device *nvmem = platform_get_drvdata(pdev);
138
139 return nvmem_unregister(nvmem);
140}
141
142static struct platform_driver imx_ocotp_driver = {
143 .probe = imx_ocotp_probe,
144 .remove = imx_ocotp_remove,
145 .driver = {
146 .name = "imx_ocotp",
147 .of_match_table = imx_ocotp_dt_ids,
148 },
149};
150module_platform_driver(imx_ocotp_driver);
151
152MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
153MODULE_DESCRIPTION("i.MX6 OCOTP fuse box driver");
154MODULE_LICENSE("GPL v2");
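
For reference, imx_ocotp_read() above maps a byte offset into the fuse shadow-register file: the word index is offset >> 2, and word i is read from base + 0x400 + i * 0x10, since the shadow registers are spaced 0x10 apart. Worked example: a request for 8 bytes at offset 0x8 gives index = 2 and count = 2, so the words at base + 0x420 and base + 0x430 are returned.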
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
new file mode 100644
index 000000000000..8ba19bba3156
--- /dev/null
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -0,0 +1,257 @@
1/*
2 * Freescale MXS On-Chip OTP driver
3 *
4 * Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com>
5 *
6 * Based on the driver from Huang Shijie and Christoph G. Baumann
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19#include <linux/clk.h>
20#include <linux/delay.h>
21#include <linux/device.h>
22#include <linux/err.h>
23#include <linux/io.h>
24#include <linux/module.h>
25#include <linux/nvmem-provider.h>
26#include <linux/of_device.h>
27#include <linux/platform_device.h>
28#include <linux/regmap.h>
29#include <linux/slab.h>
30#include <linux/stmp_device.h>
31
32/* OCOTP registers and bits */
33
34#define BM_OCOTP_CTRL_RD_BANK_OPEN BIT(12)
35#define BM_OCOTP_CTRL_ERROR BIT(9)
36#define BM_OCOTP_CTRL_BUSY BIT(8)
37
38#define OCOTP_TIMEOUT 10000
39#define OCOTP_DATA_OFFSET 0x20
40
41struct mxs_ocotp {
42 struct clk *clk;
43 void __iomem *base;
44 struct nvmem_device *nvmem;
45};
46
47static int mxs_ocotp_wait(struct mxs_ocotp *otp)
48{
49 int timeout = OCOTP_TIMEOUT;
50 unsigned int status = 0;
51
52 while (timeout--) {
53 status = readl(otp->base);
54
55 if (!(status & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR)))
56 break;
57
58 cpu_relax();
59 }
60
61 if (status & BM_OCOTP_CTRL_BUSY)
62 return -EBUSY;
63 else if (status & BM_OCOTP_CTRL_ERROR)
64 return -EIO;
65
66 return 0;
67}
68
69static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
70 void *val, size_t val_size)
71{
72 struct mxs_ocotp *otp = context;
73 unsigned int offset = *(u32 *)reg;
74 u32 *buf = val;
75 int ret;
76
77 ret = clk_enable(otp->clk);
78 if (ret)
79 return ret;
80
81 writel(BM_OCOTP_CTRL_ERROR, otp->base + STMP_OFFSET_REG_CLR);
82
83 ret = mxs_ocotp_wait(otp);
84 if (ret)
85 goto disable_clk;
86
87 /* open OCOTP banks for read */
88 writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_SET);
89
90 /* approximately wait 33 hclk cycles */
91 udelay(1);
92
93 ret = mxs_ocotp_wait(otp);
94 if (ret)
95 goto close_banks;
96
97 while (val_size) {
98 if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
99 /* fill up non-data register */
100 *buf = 0;
101 } else {
102 *buf = readl(otp->base + offset);
103 }
104
105 buf++;
106 val_size--;
107 offset += reg_size;
108 }
109
110close_banks:
111 /* close banks for power saving */
112 writel(BM_OCOTP_CTRL_RD_BANK_OPEN, otp->base + STMP_OFFSET_REG_CLR);
113
114disable_clk:
115 clk_disable(otp->clk);
116
117 return ret;
118}
119
120static int mxs_ocotp_write(void *context, const void *data, size_t count)
121{
122 /* We don't want to support writing */
123 return 0;
124}
125
126static bool mxs_ocotp_writeable_reg(struct device *dev, unsigned int reg)
127{
128 return false;
129}
130
131static struct nvmem_config ocotp_config = {
132 .name = "mxs-ocotp",
133 .owner = THIS_MODULE,
134};
135
136static const struct regmap_range imx23_ranges[] = {
137 regmap_reg_range(OCOTP_DATA_OFFSET, 0x210),
138};
139
140static const struct regmap_access_table imx23_access = {
141 .yes_ranges = imx23_ranges,
142 .n_yes_ranges = ARRAY_SIZE(imx23_ranges),
143};
144
145static const struct regmap_range imx28_ranges[] = {
146 regmap_reg_range(OCOTP_DATA_OFFSET, 0x290),
147};
148
149static const struct regmap_access_table imx28_access = {
150 .yes_ranges = imx28_ranges,
151 .n_yes_ranges = ARRAY_SIZE(imx28_ranges),
152};
153
154static struct regmap_bus mxs_ocotp_bus = {
155 .read = mxs_ocotp_read,
156 .write = mxs_ocotp_write, /* make regmap_init() happy */
157 .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
158 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
159};
160
161static struct regmap_config mxs_ocotp_config = {
162 .reg_bits = 32,
163 .val_bits = 32,
164 .reg_stride = 16,
165 .writeable_reg = mxs_ocotp_writeable_reg,
166};
167
168static const struct of_device_id mxs_ocotp_match[] = {
169 { .compatible = "fsl,imx23-ocotp", .data = &imx23_access },
170 { .compatible = "fsl,imx28-ocotp", .data = &imx28_access },
171 { /* sentinel */},
172};
173MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
174
175static int mxs_ocotp_probe(struct platform_device *pdev)
176{
177 struct device *dev = &pdev->dev;
178 struct mxs_ocotp *otp;
179 struct resource *res;
180 const struct of_device_id *match;
181 struct regmap *regmap;
182 const struct regmap_access_table *access;
183 int ret;
184
185 match = of_match_device(dev->driver->of_match_table, dev);
186 if (!match || !match->data)
187 return -EINVAL;
188
189 otp = devm_kzalloc(dev, sizeof(*otp), GFP_KERNEL);
190 if (!otp)
191 return -ENOMEM;
192
193 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
194 otp->base = devm_ioremap_resource(dev, res);
195 if (IS_ERR(otp->base))
196 return PTR_ERR(otp->base);
197
198 otp->clk = devm_clk_get(&pdev->dev, NULL);
199 if (IS_ERR(otp->clk))
200 return PTR_ERR(otp->clk);
201
202 ret = clk_prepare(otp->clk);
203 if (ret < 0) {
204 dev_err(dev, "failed to prepare clk: %d\n", ret);
205 return ret;
206 }
207
208 access = match->data;
209 mxs_ocotp_config.rd_table = access;
210 mxs_ocotp_config.max_register = access->yes_ranges[0].range_max;
211
212 regmap = devm_regmap_init(dev, &mxs_ocotp_bus, otp, &mxs_ocotp_config);
213 if (IS_ERR(regmap)) {
214 dev_err(dev, "regmap init failed\n");
215 ret = PTR_ERR(regmap);
216 goto err_clk;
217 }
218
219 ocotp_config.dev = dev;
220 otp->nvmem = nvmem_register(&ocotp_config);
221 if (IS_ERR(otp->nvmem)) {
222 ret = PTR_ERR(otp->nvmem);
223 goto err_clk;
224 }
225
226 platform_set_drvdata(pdev, otp);
227
228 return 0;
229
230err_clk:
231 clk_unprepare(otp->clk);
232
233 return ret;
234}
235
236static int mxs_ocotp_remove(struct platform_device *pdev)
237{
238 struct mxs_ocotp *otp = platform_get_drvdata(pdev);
239
240 clk_unprepare(otp->clk);
241
242 return nvmem_unregister(otp->nvmem);
243}
244
245static struct platform_driver mxs_ocotp_driver = {
246 .probe = mxs_ocotp_probe,
247 .remove = mxs_ocotp_remove,
248 .driver = {
249 .name = "mxs-ocotp",
250 .of_match_table = mxs_ocotp_match,
251 },
252};
253
254module_platform_driver(mxs_ocotp_driver);
255MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
256MODULE_DESCRIPTION("driver for OCOTP in i.MX23/i.MX28");
257MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
new file mode 100644
index 000000000000..f55213424222
--- /dev/null
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -0,0 +1,186 @@
1/*
2 * Rockchip eFuse Driver
3 *
4 * Copyright (c) 2015 Rockchip Electronics Co. Ltd.
5 * Author: Caesar Wang <wxt@rock-chips.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 */
16
17#include <linux/platform_device.h>
18#include <linux/nvmem-provider.h>
19#include <linux/slab.h>
20#include <linux/regmap.h>
21#include <linux/device.h>
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/delay.h>
25#include <linux/of.h>
26#include <linux/clk.h>
27
28#define EFUSE_A_SHIFT 6
29#define EFUSE_A_MASK 0x3ff
30#define EFUSE_PGENB BIT(3)
31#define EFUSE_LOAD BIT(2)
32#define EFUSE_STROBE BIT(1)
33#define EFUSE_CSB BIT(0)
34
35#define REG_EFUSE_CTRL 0x0000
36#define REG_EFUSE_DOUT 0x0004
37
38struct rockchip_efuse_context {
39 struct device *dev;
40 void __iomem *base;
41 struct clk *efuse_clk;
42};
43
44static int rockchip_efuse_write(void *context, const void *data, size_t count)
45{
46 /* Nothing TBD, Read-Only */
47 return 0;
48}
49
50static int rockchip_efuse_read(void *context,
51 const void *reg, size_t reg_size,
52 void *val, size_t val_size)
53{
54 unsigned int offset = *(u32 *)reg;
55 struct rockchip_efuse_context *_context = context;
56 void __iomem *base = _context->base;
57 struct clk *clk = _context->efuse_clk;
58 u8 *buf = val;
59 int ret;
60
61 ret = clk_prepare_enable(clk);
62 if (ret < 0) {
63 dev_err(_context->dev, "failed to prepare/enable efuse clk\n");
64 return ret;
65 }
66
67 writel(EFUSE_LOAD | EFUSE_PGENB, base + REG_EFUSE_CTRL);
68 udelay(1);
69 while (val_size) {
70 writel(readl(base + REG_EFUSE_CTRL) &
71 (~(EFUSE_A_MASK << EFUSE_A_SHIFT)),
72 base + REG_EFUSE_CTRL);
73 writel(readl(base + REG_EFUSE_CTRL) |
74 ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT),
75 base + REG_EFUSE_CTRL);
76 udelay(1);
77 writel(readl(base + REG_EFUSE_CTRL) |
78 EFUSE_STROBE, base + REG_EFUSE_CTRL);
79 udelay(1);
80 *buf++ = readb(base + REG_EFUSE_DOUT);
81 writel(readl(base + REG_EFUSE_CTRL) &
82 (~EFUSE_STROBE), base + REG_EFUSE_CTRL);
83 udelay(1);
84
85 val_size -= 1;
86 offset += 1;
87 }
88
89 /* Switch to standby mode */
90 writel(EFUSE_PGENB | EFUSE_CSB, base + REG_EFUSE_CTRL);
91
92 clk_disable_unprepare(clk);
93
94 return 0;
95}
96
97static struct regmap_bus rockchip_efuse_bus = {
98 .read = rockchip_efuse_read,
99 .write = rockchip_efuse_write,
100 .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
101 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
102};
103
104static struct regmap_config rockchip_efuse_regmap_config = {
105 .reg_bits = 32,
106 .reg_stride = 1,
107 .val_bits = 8,
108};
109
110static struct nvmem_config econfig = {
111 .name = "rockchip-efuse",
112 .owner = THIS_MODULE,
113 .read_only = true,
114};
115
116static const struct of_device_id rockchip_efuse_match[] = {
117 { .compatible = "rockchip,rockchip-efuse",},
118 { /* sentinel */},
119};
120MODULE_DEVICE_TABLE(of, rockchip_efuse_match);
121
122static int rockchip_efuse_probe(struct platform_device *pdev)
123{
124 struct device *dev = &pdev->dev;
125 struct resource *res;
126 struct nvmem_device *nvmem;
127 struct regmap *regmap;
128 void __iomem *base;
129 struct clk *clk;
130 struct rockchip_efuse_context *context;
131
132 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
133 base = devm_ioremap_resource(dev, res);
134 if (IS_ERR(base))
135 return PTR_ERR(base);
136
137 context = devm_kzalloc(dev, sizeof(struct rockchip_efuse_context),
138 GFP_KERNEL);
139 if (IS_ERR(context))
140 return PTR_ERR(context);
141
142 clk = devm_clk_get(dev, "pclk_efuse");
143 if (IS_ERR(clk))
144 return PTR_ERR(clk);
145
146 context->dev = dev;
147 context->base = base;
148 context->efuse_clk = clk;
149
150 rockchip_efuse_regmap_config.max_register = resource_size(res) - 1;
151
152 regmap = devm_regmap_init(dev, &rockchip_efuse_bus,
153 context, &rockchip_efuse_regmap_config);
154 if (IS_ERR(regmap)) {
155 dev_err(dev, "regmap init failed\n");
156 return PTR_ERR(regmap);
157 }
158 econfig.dev = dev;
159 nvmem = nvmem_register(&econfig);
160 if (IS_ERR(nvmem))
161 return PTR_ERR(nvmem);
162
163 platform_set_drvdata(pdev, nvmem);
164
165 return 0;
166}
167
168static int rockchip_efuse_remove(struct platform_device *pdev)
169{
170 struct nvmem_device *nvmem = platform_get_drvdata(pdev);
171
172 return nvmem_unregister(nvmem);
173}
174
175static struct platform_driver rockchip_efuse_driver = {
176 .probe = rockchip_efuse_probe,
177 .remove = rockchip_efuse_remove,
178 .driver = {
179 .name = "rockchip-efuse",
180 .of_match_table = rockchip_efuse_match,
181 },
182};
183
184module_platform_driver(rockchip_efuse_driver);
185MODULE_DESCRIPTION("rockchip_efuse driver");
186MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c
new file mode 100644
index 000000000000..8641319efeda
--- /dev/null
+++ b/drivers/nvmem/vf610-ocotp.c
@@ -0,0 +1,302 @@
1/*
2 * Copyright (C) 2015 Toradex AG.
3 *
4 * Author: Sanchayan Maity <sanchayan.maity@toradex.com>
5 *
6 * Based on the barebox ocotp driver,
7 * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>
8 * Orex Computed Radiography
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 and
12 * only version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/clk.h>
21#include <linux/delay.h>
22#include <linux/device.h>
23#include <linux/io.h>
24#include <linux/module.h>
25#include <linux/nvmem-provider.h>
26#include <linux/of.h>
27#include <linux/platform_device.h>
28#include <linux/regmap.h>
29#include <linux/slab.h>
30
31/* OCOTP Register Offsets */
32#define OCOTP_CTRL_REG 0x00
33#define OCOTP_CTRL_SET 0x04
34#define OCOTP_CTRL_CLR 0x08
35#define OCOTP_TIMING 0x10
36#define OCOTP_DATA 0x20
37#define OCOTP_READ_CTRL_REG 0x30
38#define OCOTP_READ_FUSE_DATA 0x40
39
40/* OCOTP Register bits and masks */
41#define OCOTP_CTRL_WR_UNLOCK 16
42#define OCOTP_CTRL_WR_UNLOCK_KEY 0x3E77
43#define OCOTP_CTRL_WR_UNLOCK_MASK GENMASK(31, 16)
44#define OCOTP_CTRL_ADDR 0
45#define OCOTP_CTRL_ADDR_MASK GENMASK(6, 0)
46#define OCOTP_CTRL_RELOAD_SHADOWS BIT(10)
47#define OCOTP_CTRL_ERR BIT(9)
48#define OCOTP_CTRL_BUSY BIT(8)
49
50#define OCOTP_TIMING_STROBE_READ 16
51#define OCOTP_TIMING_STROBE_READ_MASK GENMASK(21, 16)
52#define OCOTP_TIMING_RELAX 12
53#define OCOTP_TIMING_RELAX_MASK GENMASK(15, 12)
54#define OCOTP_TIMING_STROBE_PROG 0
55#define OCOTP_TIMING_STROBE_PROG_MASK GENMASK(11, 0)
56
57#define OCOTP_READ_CTRL_READ_FUSE 0x1
58
59#define VF610_OCOTP_TIMEOUT 100000
60
61#define BF(value, field) (((value) << field) & field##_MASK)
62
63#define DEF_RELAX 20
64
65static const int base_to_fuse_addr_mappings[][2] = {
66 {0x400, 0x00},
67 {0x410, 0x01},
68 {0x420, 0x02},
69 {0x450, 0x05},
70 {0x4F0, 0x0F},
71 {0x600, 0x20},
72 {0x610, 0x21},
73 {0x620, 0x22},
74 {0x630, 0x23},
75 {0x640, 0x24},
76 {0x650, 0x25},
77 {0x660, 0x26},
78 {0x670, 0x27},
79 {0x6F0, 0x2F},
80 {0x880, 0x38},
81 {0x890, 0x39},
82 {0x8A0, 0x3A},
83 {0x8B0, 0x3B},
84 {0x8C0, 0x3C},
85 {0x8D0, 0x3D},
86 {0x8E0, 0x3E},
87 {0x8F0, 0x3F},
88 {0xC80, 0x78},
89 {0xC90, 0x79},
90 {0xCA0, 0x7A},
91 {0xCB0, 0x7B},
92 {0xCC0, 0x7C},
93 {0xCD0, 0x7D},
94 {0xCE0, 0x7E},
95 {0xCF0, 0x7F},
96};
97
98struct vf610_ocotp {
99 void __iomem *base;
100 struct clk *clk;
101 struct device *dev;
102 struct nvmem_device *nvmem;
103 int timing;
104};
105
106static int vf610_ocotp_wait_busy(void __iomem *base)
107{
108 int timeout = VF610_OCOTP_TIMEOUT;
109
110 while ((readl(base) & OCOTP_CTRL_BUSY) && --timeout)
111 udelay(10);
112
113 if (!timeout) {
114 writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
115 return -ETIMEDOUT;
116 }
117
118 udelay(10);
119
120 return 0;
121}
122
123static int vf610_ocotp_calculate_timing(struct vf610_ocotp *ocotp_dev)
124{
125 u32 clk_rate;
126 u32 relax, strobe_read, strobe_prog;
127 u32 timing;
128
129 clk_rate = clk_get_rate(ocotp_dev->clk);
130
 131	/* Refer to the OTP read/write timing parameters section in the TRM */
132 relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
133 strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
134 strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
135
136 timing = BF(relax, OCOTP_TIMING_RELAX);
137 timing |= BF(strobe_read, OCOTP_TIMING_STROBE_READ);
138 timing |= BF(strobe_prog, OCOTP_TIMING_STROBE_PROG);
139
140 return timing;
141}
142
143static int vf610_get_fuse_address(int base_addr_offset)
144{
145 int i;
146
147 for (i = 0; i < ARRAY_SIZE(base_to_fuse_addr_mappings); i++) {
148 if (base_to_fuse_addr_mappings[i][0] == base_addr_offset)
149 return base_to_fuse_addr_mappings[i][1];
150 }
151
152 return -EINVAL;
153}
154
155static int vf610_ocotp_write(void *context, const void *data, size_t count)
156{
157 return 0;
158}
159
160static int vf610_ocotp_read(void *context,
161 const void *off, size_t reg_size,
162 void *val, size_t val_size)
163{
164 struct vf610_ocotp *ocotp = context;
165 void __iomem *base = ocotp->base;
166 unsigned int offset = *(u32 *)off;
167 u32 reg, *buf = val;
168 int fuse_addr;
169 int ret;
170
171 while (val_size > 0) {
172 fuse_addr = vf610_get_fuse_address(offset);
173 if (fuse_addr > 0) {
174 writel(ocotp->timing, base + OCOTP_TIMING);
175 ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
176 if (ret)
177 return ret;
178
179 reg = readl(base + OCOTP_CTRL_REG);
180 reg &= ~OCOTP_CTRL_ADDR_MASK;
181 reg &= ~OCOTP_CTRL_WR_UNLOCK_MASK;
182 reg |= BF(fuse_addr, OCOTP_CTRL_ADDR);
183 writel(reg, base + OCOTP_CTRL_REG);
184
185 writel(OCOTP_READ_CTRL_READ_FUSE,
186 base + OCOTP_READ_CTRL_REG);
187 ret = vf610_ocotp_wait_busy(base + OCOTP_CTRL_REG);
188 if (ret)
189 return ret;
190
191 if (readl(base) & OCOTP_CTRL_ERR) {
192 dev_dbg(ocotp->dev, "Error reading from fuse address %x\n",
193 fuse_addr);
194 writel(OCOTP_CTRL_ERR, base + OCOTP_CTRL_CLR);
195 }
196
197 /*
198 * In case of error, we do not abort and expect to read
199 * 0xBADABADA as mentioned by the TRM. We just read this
200 * value and return.
201 */
202 *buf = readl(base + OCOTP_READ_FUSE_DATA);
203 } else {
204 *buf = 0;
205 }
206
207 buf++;
208 val_size--;
209 offset += reg_size;
210 }
211
212 return 0;
213}
214
215static struct regmap_bus vf610_ocotp_bus = {
216 .read = vf610_ocotp_read,
217 .write = vf610_ocotp_write,
218 .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
219 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
220};
221
222static struct regmap_config ocotp_regmap_config = {
223 .reg_bits = 32,
224 .val_bits = 32,
225 .reg_stride = 4,
226};
227
228static struct nvmem_config ocotp_config = {
229 .name = "ocotp",
230 .owner = THIS_MODULE,
231};
232
233static const struct of_device_id ocotp_of_match[] = {
234 { .compatible = "fsl,vf610-ocotp", },
235 {/* sentinel */},
236};
237MODULE_DEVICE_TABLE(of, ocotp_of_match);
238
239static int vf610_ocotp_remove(struct platform_device *pdev)
240{
241 struct vf610_ocotp *ocotp_dev = platform_get_drvdata(pdev);
242
243 return nvmem_unregister(ocotp_dev->nvmem);
244}
245
246static int vf610_ocotp_probe(struct platform_device *pdev)
247{
248 struct device *dev = &pdev->dev;
249 struct resource *res;
250 struct regmap *regmap;
251 struct vf610_ocotp *ocotp_dev;
252
253 ocotp_dev = devm_kzalloc(&pdev->dev,
254 sizeof(struct vf610_ocotp), GFP_KERNEL);
255 if (!ocotp_dev)
256 return -ENOMEM;
257
258 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
259 ocotp_dev->base = devm_ioremap_resource(dev, res);
260 if (IS_ERR(ocotp_dev->base))
261 return PTR_ERR(ocotp_dev->base);
262
263 ocotp_dev->clk = devm_clk_get(dev, NULL);
264 if (IS_ERR(ocotp_dev->clk)) {
265 dev_err(dev, "failed getting clock, err = %ld\n",
266 PTR_ERR(ocotp_dev->clk));
267 return PTR_ERR(ocotp_dev->clk);
268 }
269
270 ocotp_regmap_config.max_register = resource_size(res);
271 regmap = devm_regmap_init(dev,
272 &vf610_ocotp_bus, ocotp_dev, &ocotp_regmap_config);
273 if (IS_ERR(regmap)) {
274 dev_err(dev, "regmap init failed\n");
275 return PTR_ERR(regmap);
276 }
277 ocotp_config.dev = dev;
278
279 ocotp_dev->nvmem = nvmem_register(&ocotp_config);
280 if (IS_ERR(ocotp_dev->nvmem))
281 return PTR_ERR(ocotp_dev->nvmem);
282
283 ocotp_dev->dev = dev;
284 platform_set_drvdata(pdev, ocotp_dev);
285
286 ocotp_dev->timing = vf610_ocotp_calculate_timing(ocotp_dev);
287
288 return 0;
289}
290
291static struct platform_driver vf610_ocotp_driver = {
292 .probe = vf610_ocotp_probe,
293 .remove = vf610_ocotp_remove,
294 .driver = {
295 .name = "vf610-ocotp",
296 .of_match_table = ocotp_of_match,
297 },
298};
299module_platform_driver(vf610_ocotp_driver);
300MODULE_AUTHOR("Sanchayan Maity <sanchayan.maity@toradex.com>");
301MODULE_DESCRIPTION("Vybrid OCOTP driver");
302MODULE_LICENSE("GPL v2");
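
The value programmed into OCOTP_TIMING by the probe path comes from vf610_ocotp_calculate_timing(), which converts the TRM's nanosecond parameters into OCOTP clock cycles. A worked example, assuming an illustrative 66 MHz OCOTP clock (integer division throughout):

	relax       = 66000000 / (1000000000 / 20)    - 1                = 0
	strobe_prog = 66000000 / (1000000000 / 10000) + 2 * (20 + 1) - 1 = 701
	strobe_read = 66000000 / (1000000000 / 40)    + 2 * (20 + 1) - 1 = 43

BF() then shifts each value into its OCOTP_TIMING field and masks it.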
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 0decee6c556e..489ea1098c96 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -468,12 +468,10 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
468 if ((length < 2) || (length > 255)) 468 if ((length < 2) || (length > 255))
469 continue; 469 continue;
470 470
471 new = kmalloc(sizeof(char) * length, GFP_KERNEL); 471 new = kstrdup(tmp, GFP_KERNEL);
472 if (!new) 472 if (!new)
473 continue; 473 continue;
474 474
475 new = strncpy(new, tmp, length);
476
477 tmp = p_dev->prod_id[i]; 475 tmp = p_dev->prod_id[i];
478 p_dev->prod_id[i] = new; 476 p_dev->prod_id[i] = new;
479 kfree(tmp); 477 kfree(tmp);
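
The replaced code allocated length bytes and used strncpy(), which does not NUL-terminate the copy when the source fills the buffer; kstrdup() sizes the allocation from the string itself and always copies the terminator. Roughly, and simplified from the real mm/util.c implementation:

	/* Simplified sketch of kstrdup(); the kernel version also
	 * handles a NULL source string. */
	char *kstrdup_sketch(const char *s, gfp_t gfp)
	{
		size_t len = strlen(s) + 1;	/* include the NUL terminator */
		char *buf = kmalloc(len, gfp);

		if (buf)
			memcpy(buf, s, len);
		return buf;
	}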
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index fb36810ae89a..be822f7a9ce6 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -168,11 +168,6 @@ struct pmic_arb_ver_ops {
168 u32 (*irq_clear)(u8 n); 168 u32 (*irq_clear)(u8 n);
169}; 169};
170 170
171static inline u32 pmic_arb_base_read(struct spmi_pmic_arb_dev *dev, u32 offset)
172{
173 return readl_relaxed(dev->rd_base + offset);
174}
175
176static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev, 171static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
177 u32 offset, u32 val) 172 u32 offset, u32 val)
178{ 173{
@@ -193,7 +188,7 @@ static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
193 */ 188 */
194static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc) 189static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
195{ 190{
196 u32 data = pmic_arb_base_read(dev, reg); 191 u32 data = __raw_readl(dev->rd_base + reg);
197 memcpy(buf, &data, (bc & 3) + 1); 192 memcpy(buf, &data, (bc & 3) + 1);
198} 193}
199 194
@@ -208,7 +203,7 @@ pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
208{ 203{
209 u32 data = 0; 204 u32 data = 0;
210 memcpy(&data, buf, (bc & 3) + 1); 205 memcpy(&data, buf, (bc & 3) + 1);
211 pmic_arb_base_write(dev, reg, data); 206 __raw_writel(data, dev->wr_base + reg);
212} 207}
213 208
214static int pmic_arb_wait_for_done(struct spmi_controller *ctrl, 209static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
@@ -365,7 +360,7 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
365 opc = PMIC_ARB_OP_EXT_WRITE; 360 opc = PMIC_ARB_OP_EXT_WRITE;
366 else if (opc >= 0x30 && opc <= 0x37) 361 else if (opc >= 0x30 && opc <= 0x37)
367 opc = PMIC_ARB_OP_EXT_WRITEL; 362 opc = PMIC_ARB_OP_EXT_WRITEL;
368 else if (opc >= 0x80 && opc <= 0xFF) 363 else if (opc >= 0x80)
369 opc = PMIC_ARB_OP_ZERO_WRITE; 364 opc = PMIC_ARB_OP_ZERO_WRITE;
370 else 365 else
371 return -EINVAL; 366 return -EINVAL;
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 11467e17bdd8..6b3da1bb0d63 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -560,12 +560,13 @@ EXPORT_SYMBOL_GPL(spmi_controller_remove);
560 * This API will register the client driver with the SPMI framework. 560 * This API will register the client driver with the SPMI framework.
561 * It is typically called from the driver's module-init function. 561 * It is typically called from the driver's module-init function.
562 */ 562 */
563int spmi_driver_register(struct spmi_driver *sdrv) 563int __spmi_driver_register(struct spmi_driver *sdrv, struct module *owner)
564{ 564{
565 sdrv->driver.bus = &spmi_bus_type; 565 sdrv->driver.bus = &spmi_bus_type;
566 sdrv->driver.owner = owner;
566 return driver_register(&sdrv->driver); 567 return driver_register(&sdrv->driver);
567} 568}
568EXPORT_SYMBOL_GPL(spmi_driver_register); 569EXPORT_SYMBOL_GPL(__spmi_driver_register);
569 570
570static void __exit spmi_exit(void) 571static void __exit spmi_exit(void)
571{ 572{
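
Capturing the module owner in __spmi_driver_register() follows the usual bus-core pattern; the matching include/linux/spmi.h change (not part of this hunk) presumably adds a wrapper so existing callers keep working, along these lines:

	/* Sketch of the conventional wrapper; the real macro in
	 * include/linux/spmi.h may differ in detail. */
	#define spmi_driver_register(sdrv) \
		__spmi_driver_register(sdrv, THIS_MODULE)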
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 8196581f54c2..bcc1fc027311 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -524,6 +524,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
524 524
525 event_count = atomic_read(&idev->event); 525 event_count = atomic_read(&idev->event);
526 if (event_count != listener->event_count) { 526 if (event_count != listener->event_count) {
527 __set_current_state(TASK_RUNNING);
527 if (copy_to_user(buf, &event_count, count)) 528 if (copy_to_user(buf, &event_count, count))
528 retval = -EFAULT; 529 retval = -EFAULT;
529 else { 530 else {
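
uio_read() open-codes its wait loop, so the task is still in TASK_INTERRUPTIBLE when the event count is found to have changed; copy_to_user() may fault and sleep, which is illegal (and warns) in a non-running task state. Setting TASK_RUNNING first makes the subsequent sleep safe. The shape of the pattern, heavily condensed from uio_read():

	/* Condensed sketch; error handling and the retry loop are elided. */
	set_current_state(TASK_INTERRUPTIBLE);
	event_count = atomic_read(&idev->event);
	if (event_count != listener->event_count) {
		__set_current_state(TASK_RUNNING);	/* may sleep below */
		if (copy_to_user(buf, &event_count, count))
			retval = -EFAULT;
	}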
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index 2bcf80c159c1..b46323d9dc18 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -470,6 +470,7 @@ static const struct of_device_id uio_fsl_elbc_gpcm_match[] = {
470 { .compatible = "fsl,elbc-gpcm-uio", }, 470 { .compatible = "fsl,elbc-gpcm-uio", },
471 {} 471 {}
472}; 472};
473MODULE_DEVICE_TABLE(of, uio_fsl_elbc_gpcm_match);
473 474
474static struct platform_driver uio_fsl_elbc_gpcm_driver = { 475static struct platform_driver uio_fsl_elbc_gpcm_driver = {
475 .driver = { 476 .driver = {
diff --git a/drivers/vme/vme_bridge.h b/drivers/vme/vme_bridge.h
index 397578a73883..b59cbee231dd 100644
--- a/drivers/vme/vme_bridge.h
+++ b/drivers/vme/vme_bridge.h
@@ -1,6 +1,8 @@
1#ifndef _VME_BRIDGE_H_ 1#ifndef _VME_BRIDGE_H_
2#define _VME_BRIDGE_H_ 2#define _VME_BRIDGE_H_
3 3
4#include <linux/vme.h>
5
4#define VME_CRCSR_BUF_SIZE (508*1024) 6#define VME_CRCSR_BUF_SIZE (508*1024)
5/* 7/*
6 * Resource structures 8 * Resource structures
@@ -91,7 +93,7 @@ struct vme_callback {
91 93
92struct vme_irq { 94struct vme_irq {
93 int count; 95 int count;
94 struct vme_callback callback[255]; 96 struct vme_callback callback[VME_NUM_STATUSID];
95}; 97};
96 98
97/* Allow 16 characters for name (including null character) */ 99/* Allow 16 characters for name (including null character) */
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index e7d448963a24..0e2f43bccf1f 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -17,6 +17,7 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/of.h>
20 21
21#include "../w1.h" 22#include "../w1.h"
22#include "../w1_int.h" 23#include "../w1_int.h"
@@ -27,21 +28,23 @@
27#define OMAP_HDQ_TX_DATA 0x04 28#define OMAP_HDQ_TX_DATA 0x04
28#define OMAP_HDQ_RX_DATA 0x08 29#define OMAP_HDQ_RX_DATA 0x08
29#define OMAP_HDQ_CTRL_STATUS 0x0c 30#define OMAP_HDQ_CTRL_STATUS 0x0c
30#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6) 31#define OMAP_HDQ_CTRL_STATUS_SINGLE BIT(7)
31#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5) 32#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK BIT(6)
32#define OMAP_HDQ_CTRL_STATUS_GO (1<<4) 33#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE BIT(5)
33#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2) 34#define OMAP_HDQ_CTRL_STATUS_GO BIT(4)
34#define OMAP_HDQ_CTRL_STATUS_DIR (1<<1) 35#define OMAP_HDQ_CTRL_STATUS_PRESENCE BIT(3)
35#define OMAP_HDQ_CTRL_STATUS_MODE (1<<0) 36#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION BIT(2)
37#define OMAP_HDQ_CTRL_STATUS_DIR BIT(1)
36#define OMAP_HDQ_INT_STATUS 0x10 38#define OMAP_HDQ_INT_STATUS 0x10
37#define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2) 39#define OMAP_HDQ_INT_STATUS_TXCOMPLETE BIT(2)
38#define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1) 40#define OMAP_HDQ_INT_STATUS_RXCOMPLETE BIT(1)
39#define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0) 41#define OMAP_HDQ_INT_STATUS_TIMEOUT BIT(0)
40#define OMAP_HDQ_SYSCONFIG 0x14 42#define OMAP_HDQ_SYSCONFIG 0x14
41#define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1) 43#define OMAP_HDQ_SYSCONFIG_SOFTRESET BIT(1)
42#define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0) 44#define OMAP_HDQ_SYSCONFIG_AUTOIDLE BIT(0)
45#define OMAP_HDQ_SYSCONFIG_NOIDLE 0x0
43#define OMAP_HDQ_SYSSTATUS 0x18 46#define OMAP_HDQ_SYSSTATUS 0x18
44#define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0) 47#define OMAP_HDQ_SYSSTATUS_RESETDONE BIT(0)
45 48
46#define OMAP_HDQ_FLAG_CLEAR 0 49#define OMAP_HDQ_FLAG_CLEAR 0
47#define OMAP_HDQ_FLAG_SET 1 50#define OMAP_HDQ_FLAG_SET 1
@@ -67,6 +70,10 @@ struct hdq_data {
 67 * the data write or read. 70 * the data write or read.
68 */ 71 */
69 int init_trans; 72 int init_trans;
73 int rrw;
74 /* mode: 0-HDQ 1-W1 */
75 int mode;
76
70}; 77};
71 78
72static int omap_hdq_probe(struct platform_device *pdev); 79static int omap_hdq_probe(struct platform_device *pdev);
@@ -74,6 +81,7 @@ static int omap_hdq_remove(struct platform_device *pdev);
74 81
75static const struct of_device_id omap_hdq_dt_ids[] = { 82static const struct of_device_id omap_hdq_dt_ids[] = {
76 { .compatible = "ti,omap3-1w" }, 83 { .compatible = "ti,omap3-1w" },
84 { .compatible = "ti,am4372-hdq" },
77 {} 85 {}
78}; 86};
79MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids); 87MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
@@ -90,15 +98,12 @@ static struct platform_driver omap_hdq_driver = {
90static u8 omap_w1_read_byte(void *_hdq); 98static u8 omap_w1_read_byte(void *_hdq);
91static void omap_w1_write_byte(void *_hdq, u8 byte); 99static void omap_w1_write_byte(void *_hdq, u8 byte);
92static u8 omap_w1_reset_bus(void *_hdq); 100static u8 omap_w1_reset_bus(void *_hdq);
93static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
94 u8 search_type, w1_slave_found_callback slave_found);
95 101
96 102
97static struct w1_bus_master omap_w1_master = { 103static struct w1_bus_master omap_w1_master = {
98 .read_byte = omap_w1_read_byte, 104 .read_byte = omap_w1_read_byte,
99 .write_byte = omap_w1_write_byte, 105 .write_byte = omap_w1_write_byte,
100 .reset_bus = omap_w1_reset_bus, 106 .reset_bus = omap_w1_reset_bus,
101 .search = omap_w1_search_bus,
102}; 107};
103 108
104/* HDQ register I/O routines */ 109/* HDQ register I/O routines */
@@ -122,6 +127,15 @@ static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
122 return new_val; 127 return new_val;
123} 128}
124 129
130static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
131 u32 mask)
132{
133 u32 ie;
134
135 ie = readl(hdq_data->hdq_base + offset);
136 writel(ie & mask, hdq_data->hdq_base + offset);
137}
138
125/* 139/*
126 * Wait for one or more bits in flag change. 140 * Wait for one or more bits in flag change.
127 * HDQ_FLAG_SET: wait until any bit in the flag is set. 141 * HDQ_FLAG_SET: wait until any bit in the flag is set.
@@ -229,13 +243,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq)
229 return IRQ_HANDLED; 243 return IRQ_HANDLED;
230} 244}
231 245
232/* HDQ Mode: always return success */ 246/* W1 search callback function in HDQ mode */
233static u8 omap_w1_reset_bus(void *_hdq)
234{
235 return 0;
236}
237
238/* W1 search callback function */
239static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, 247static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
240 u8 search_type, w1_slave_found_callback slave_found) 248 u8 search_type, w1_slave_found_callback slave_found)
241{ 249{
@@ -262,9 +270,10 @@ static int _omap_hdq_reset(struct hdq_data *hdq_data)
262 int ret; 270 int ret;
263 u8 tmp_status; 271 u8 tmp_status;
264 272
265 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET); 273 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
274 OMAP_HDQ_SYSCONFIG_SOFTRESET);
266 /* 275 /*
267 * Select HDQ mode & enable clocks. 276 * Select HDQ/1W mode & enable clocks.
268 * It is observed that INT flags can't be cleared via a read and GO/INIT 277 * It is observed that INT flags can't be cleared via a read and GO/INIT
269 * won't return to zero if interrupt is disabled. So we always enable 278 * won't return to zero if interrupt is disabled. So we always enable
270 * interrupt. 279 * interrupt.
@@ -282,7 +291,8 @@ static int _omap_hdq_reset(struct hdq_data *hdq_data)
282 else { 291 else {
283 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, 292 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
284 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | 293 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
285 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); 294 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
295 hdq_data->mode);
286 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, 296 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
287 OMAP_HDQ_SYSCONFIG_AUTOIDLE); 297 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
288 } 298 }
@@ -334,6 +344,18 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
334 ret = -ETIMEDOUT; 344 ret = -ETIMEDOUT;
335 goto out; 345 goto out;
336 } 346 }
347
348 /*
 349	 * check that the presence-detect bit gets
 350	 * set, showing that the slave is responding
351 */
352 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
353 OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
354 dev_dbg(hdq_data->dev, "Presence bit not set\n");
355 ret = -ETIMEDOUT;
356 goto out;
357 }
358
337 /* 359 /*
338 * wait for both INIT and GO bits rerurn to zero. 360 * wait for both INIT and GO bits rerurn to zero.
339 * zero wait time expected for interrupt mode. 361 * zero wait time expected for interrupt mode.
@@ -368,6 +390,8 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
368 goto out; 390 goto out;
369 } 391 }
370 392
393 hdq_data->hdq_irqstatus = 0;
394
371 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { 395 if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
372 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 396 hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
373 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, 397 OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
@@ -400,7 +424,7 @@ rtn:
400 424
401} 425}
402 426
403/* Enable clocks and set the controller to HDQ mode */ 427/* Enable clocks and set the controller to HDQ/1W mode */
404static int omap_hdq_get(struct hdq_data *hdq_data) 428static int omap_hdq_get(struct hdq_data *hdq_data)
405{ 429{
406 int ret = 0; 430 int ret = 0;
@@ -422,7 +446,7 @@ static int omap_hdq_get(struct hdq_data *hdq_data)
422 446
423 pm_runtime_get_sync(hdq_data->dev); 447 pm_runtime_get_sync(hdq_data->dev);
424 448
425 /* make sure HDQ is out of reset */ 449 /* make sure HDQ/1W is out of reset */
426 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) & 450 if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
427 OMAP_HDQ_SYSSTATUS_RESETDONE)) { 451 OMAP_HDQ_SYSSTATUS_RESETDONE)) {
428 ret = _omap_hdq_reset(hdq_data); 452 ret = _omap_hdq_reset(hdq_data);
@@ -430,12 +454,13 @@ static int omap_hdq_get(struct hdq_data *hdq_data)
430 /* back up the count */ 454 /* back up the count */
431 hdq_data->hdq_usecount--; 455 hdq_data->hdq_usecount--;
432 } else { 456 } else {
433 /* select HDQ mode & enable clocks */ 457 /* select HDQ/1W mode & enable clocks */
434 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, 458 hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
435 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | 459 OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
436 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); 460 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
461 hdq_data->mode);
437 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, 462 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
438 OMAP_HDQ_SYSCONFIG_AUTOIDLE); 463 OMAP_HDQ_SYSCONFIG_NOIDLE);
439 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); 464 hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
440 } 465 }
441 } 466 }
@@ -456,6 +481,8 @@ static int omap_hdq_put(struct hdq_data *hdq_data)
456 if (ret < 0) 481 if (ret < 0)
457 return -EINTR; 482 return -EINTR;
458 483
484 hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
485 OMAP_HDQ_SYSCONFIG_AUTOIDLE);
459 if (0 == hdq_data->hdq_usecount) { 486 if (0 == hdq_data->hdq_usecount) {
460 dev_dbg(hdq_data->dev, "attempt to decrement use count" 487 dev_dbg(hdq_data->dev, "attempt to decrement use count"
461 " when it is zero"); 488 " when it is zero");
@@ -471,6 +498,100 @@ static int omap_hdq_put(struct hdq_data *hdq_data)
471 return ret; 498 return ret;
472} 499}
473 500
501/*
502 * W1 triplet callback function - used for searching ROM addresses.
503 * Registered only when controller is in 1-wire mode.
504 */
505static u8 omap_w1_triplet(void *_hdq, u8 bdir)
506{
507 u8 id_bit, comp_bit;
508 int err;
509 u8 ret = 0x3; /* no slaves responded */
510 struct hdq_data *hdq_data = _hdq;
511 u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
512 OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
513 u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;
514
515 omap_hdq_get(_hdq);
516
517 err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
518 if (err < 0) {
519 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
520 goto rtn;
521 }
522
523 hdq_data->hdq_irqstatus = 0;
524 /* read id_bit */
525 hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
526 ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
527 err = wait_event_timeout(hdq_wait_queue,
528 (hdq_data->hdq_irqstatus
529 & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
530 OMAP_HDQ_TIMEOUT);
531 if (err == 0) {
532 dev_dbg(hdq_data->dev, "RX wait elapsed\n");
533 goto out;
534 }
535 id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
536
537 hdq_data->hdq_irqstatus = 0;
538 /* read comp_bit */
539 hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
540 ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
541 err = wait_event_timeout(hdq_wait_queue,
542 (hdq_data->hdq_irqstatus
543 & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
544 OMAP_HDQ_TIMEOUT);
545 if (err == 0) {
546 dev_dbg(hdq_data->dev, "RX wait elapsed\n");
547 goto out;
548 }
549 comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
550
551 if (id_bit && comp_bit) {
552 ret = 0x03; /* no slaves responded */
553 goto out;
554 }
555 if (!id_bit && !comp_bit) {
556 /* Both bits are valid, take the direction given */
557 ret = bdir ? 0x04 : 0;
558 } else {
559 /* Only one bit is valid, take that direction */
560 bdir = id_bit;
561 ret = id_bit ? 0x05 : 0x02;
562 }
563
564 /* write bdir bit */
565 hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
566 hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
567 err = wait_event_timeout(hdq_wait_queue,
568 (hdq_data->hdq_irqstatus
569 & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
570 OMAP_HDQ_TIMEOUT);
571 if (err == 0) {
572 dev_dbg(hdq_data->dev, "TX wait elapsed\n");
573 goto out;
574 }
575
576 hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
577 OMAP_HDQ_CTRL_STATUS_SINGLE);
578
579out:
580 mutex_unlock(&hdq_data->hdq_mutex);
581rtn:
582 omap_hdq_put(_hdq);
583 return ret;
584}
585
586/* reset callback */
587static u8 omap_w1_reset_bus(void *_hdq)
588{
589 omap_hdq_get(_hdq);
590 omap_hdq_break(_hdq);
591 omap_hdq_put(_hdq);
592 return 0;
593}
594
474/* Read a byte of data from the device */ 595/* Read a byte of data from the device */
475static u8 omap_w1_read_byte(void *_hdq) 596static u8 omap_w1_read_byte(void *_hdq)
476{ 597{
@@ -478,6 +599,10 @@ static u8 omap_w1_read_byte(void *_hdq)
478 u8 val = 0; 599 u8 val = 0;
479 int ret; 600 int ret;
480 601
602 /* First write to initialize the transfer */
603 if (hdq_data->init_trans == 0)
604 omap_hdq_get(hdq_data);
605
481 ret = hdq_read_byte(hdq_data, &val); 606 ret = hdq_read_byte(hdq_data, &val);
482 if (ret) { 607 if (ret) {
483 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 608 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
@@ -491,6 +616,10 @@ static u8 omap_w1_read_byte(void *_hdq)
491 return -1; 616 return -1;
492 } 617 }
493 618
619 hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
620 ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
621 hdq_data->hdq_usecount = 0;
622
494 /* Write followed by a read, release the module */ 623 /* Write followed by a read, release the module */
495 if (hdq_data->init_trans) { 624 if (hdq_data->init_trans) {
496 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 625 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
@@ -517,6 +646,14 @@ static void omap_w1_write_byte(void *_hdq, u8 byte)
517 if (hdq_data->init_trans == 0) 646 if (hdq_data->init_trans == 0)
518 omap_hdq_get(hdq_data); 647 omap_hdq_get(hdq_data);
519 648
649 /*
650 * We need to reset the slave before
651 * issuing the SKIP ROM command, else
652 * the slave will not work.
653 */
654 if (byte == W1_SKIP_ROM)
655 omap_hdq_break(hdq_data);
656
520 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); 657 ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
521 if (ret < 0) { 658 if (ret < 0) {
522 dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); 659 dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
@@ -551,6 +688,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
551 struct resource *res; 688 struct resource *res;
552 int ret, irq; 689 int ret, irq;
553 u8 rev; 690 u8 rev;
691 const char *mode;
554 692
555 hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL); 693 hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
556 if (!hdq_data) { 694 if (!hdq_data) {
@@ -567,10 +705,21 @@ static int omap_hdq_probe(struct platform_device *pdev)
567 return PTR_ERR(hdq_data->hdq_base); 705 return PTR_ERR(hdq_data->hdq_base);
568 706
569 hdq_data->hdq_usecount = 0; 707 hdq_data->hdq_usecount = 0;
708 hdq_data->rrw = 0;
570 mutex_init(&hdq_data->hdq_mutex); 709 mutex_init(&hdq_data->hdq_mutex);
571 710
572 pm_runtime_enable(&pdev->dev); 711 pm_runtime_enable(&pdev->dev);
573 pm_runtime_get_sync(&pdev->dev); 712 ret = pm_runtime_get_sync(&pdev->dev);
713 if (ret < 0) {
714 dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
715 goto err_w1;
716 }
717
718 ret = _omap_hdq_reset(hdq_data);
719 if (ret) {
720 dev_dbg(&pdev->dev, "reset failed\n");
721 return -EINVAL;
722 }
574 723
575 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION); 724 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
576 dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n", 725 dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
@@ -594,6 +743,15 @@ static int omap_hdq_probe(struct platform_device *pdev)
594 743
595 pm_runtime_put_sync(&pdev->dev); 744 pm_runtime_put_sync(&pdev->dev);
596 745
746 ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
747 if (ret < 0 || !strcmp(mode, "hdq")) {
748 hdq_data->mode = 0;
749 omap_w1_master.search = omap_w1_search_bus;
750 } else {
751 hdq_data->mode = 1;
752 omap_w1_master.triplet = omap_w1_triplet;
753 }
754
597 omap_w1_master.data = hdq_data; 755 omap_w1_master.data = hdq_data;
598 756
599 ret = w1_add_master_device(&omap_w1_master); 757 ret = w1_add_master_device(&omap_w1_master);
@@ -635,8 +793,8 @@ static int omap_hdq_remove(struct platform_device *pdev)
635module_platform_driver(omap_hdq_driver); 793module_platform_driver(omap_hdq_driver);
636 794
637module_param(w1_id, int, S_IRUSR); 795module_param(w1_id, int, S_IRUSR);
638MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection"); 796MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
639 797
640MODULE_AUTHOR("Texas Instruments"); 798MODULE_AUTHOR("Texas Instruments");
641MODULE_DESCRIPTION("HDQ driver Library"); 799MODULE_DESCRIPTION("HDQ-1W driver Library");
642MODULE_LICENSE("GPL"); 800MODULE_LICENSE("GPL");
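
For reference, omap_w1_triplet() above implements the standard 1-Wire search step: read the ID bit, read its complement, then write the chosen direction bit. The outcomes decode as follows (return encoding: bit 0 = id_bit, bit 1 = comp_bit, bit 2 = direction taken):

	id_bit  comp_bit  meaning                      write  return
	  0       0       both branches populated      bdir   0x04 (bdir=1) or 0x00
	  0       1       only the 0-branch responds   0      0x02
	  1       0       only the 1-branch responds   1      0x05
	  1       1       no slave responded           -      0x03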
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 8f7848c62811..a373ae69d9f6 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -198,11 +198,9 @@ static int w1_gpio_remove(struct platform_device *pdev)
198 return 0; 198 return 0;
199} 199}
200 200
201#ifdef CONFIG_PM 201static int __maybe_unused w1_gpio_suspend(struct device *dev)
202
203static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
204{ 202{
205 struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev); 203 struct w1_gpio_platform_data *pdata = dev_get_platdata(dev);
206 204
207 if (pdata->enable_external_pullup) 205 if (pdata->enable_external_pullup)
208 pdata->enable_external_pullup(0); 206 pdata->enable_external_pullup(0);
@@ -210,9 +208,9 @@ static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
210 return 0; 208 return 0;
211} 209}
212 210
213static int w1_gpio_resume(struct platform_device *pdev) 211static int __maybe_unused w1_gpio_resume(struct device *dev)
214{ 212{
215 struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev); 213 struct w1_gpio_platform_data *pdata = dev_get_platdata(dev);
216 214
217 if (pdata->enable_external_pullup) 215 if (pdata->enable_external_pullup)
218 pdata->enable_external_pullup(1); 216 pdata->enable_external_pullup(1);
@@ -220,20 +218,16 @@ static int w1_gpio_resume(struct platform_device *pdev)
220 return 0; 218 return 0;
221} 219}
222 220
223#else 221static SIMPLE_DEV_PM_OPS(w1_gpio_pm_ops, w1_gpio_suspend, w1_gpio_resume);
224#define w1_gpio_suspend NULL
225#define w1_gpio_resume NULL
226#endif
227 222
228static struct platform_driver w1_gpio_driver = { 223static struct platform_driver w1_gpio_driver = {
229 .driver = { 224 .driver = {
230 .name = "w1-gpio", 225 .name = "w1-gpio",
226 .pm = &w1_gpio_pm_ops,
231 .of_match_table = of_match_ptr(w1_gpio_dt_ids), 227 .of_match_table = of_match_ptr(w1_gpio_dt_ids),
232 }, 228 },
233 .probe = w1_gpio_probe, 229 .probe = w1_gpio_probe,
234 .remove = w1_gpio_remove, 230 .remove = w1_gpio_remove,
235 .suspend = w1_gpio_suspend,
236 .resume = w1_gpio_resume,
237}; 231};
238 232
239module_platform_driver(w1_gpio_driver); 233module_platform_driver(w1_gpio_driver);
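
Switching to SIMPLE_DEV_PM_OPS drops the #ifdef CONFIG_PM plumbing: when CONFIG_PM_SLEEP is disabled the macro emits an empty dev_pm_ops and never references the callbacks, which is why they are tagged __maybe_unused rather than conditionally compiled. A simplified sketch of the CONFIG_PM_SLEEP=y expansion (the real macro in include/linux/pm.h wires the same pair into the callbacks via SET_SYSTEM_SLEEP_PM_OPS):

	static const struct dev_pm_ops w1_gpio_pm_ops = {
		.suspend  = w1_gpio_suspend,
		.resume   = w1_gpio_resume,
		.freeze   = w1_gpio_suspend,
		.thaw     = w1_gpio_resume,
		.poweroff = w1_gpio_suspend,
		.restore  = w1_gpio_resume,
	};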
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 47249a30eae3..20f766afa4c7 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -91,8 +91,7 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
91 err = device_register(&dev->dev); 91 err = device_register(&dev->dev);
92 if (err) { 92 if (err) {
93 pr_err("Failed to register master device. err=%d\n", err); 93 pr_err("Failed to register master device. err=%d\n", err);
94 memset(dev, 0, sizeof(struct w1_master)); 94 put_device(&dev->dev);
95 kfree(dev);
96 dev = NULL; 95 dev = NULL;
97 } 96 }
98 97
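
Once device_register() has been called, even when it fails, the device core may hold references and the kobject release path owns the memory; freeing it directly (let alone the old memset() first) can free an object that is still referenced. The documented error path is to drop the reference and let the release callback do the freeing, as the hunk above now does:

	/* Canonical error path after device_register(), sketched: */
	err = device_register(&dev->dev);
	if (err) {
		pr_err("Failed to register master device. err=%d\n", err);
		put_device(&dev->dev);	/* release() performs the kfree() */
		dev = NULL;
	}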
diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h
index 7b2000cead43..c40f665e2712 100644
--- a/include/dt-bindings/mfd/arizona.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -107,5 +107,7 @@
107#define ARIZONA_ACCDET_MODE_MIC 0 107#define ARIZONA_ACCDET_MODE_MIC 0
108#define ARIZONA_ACCDET_MODE_HPL 1 108#define ARIZONA_ACCDET_MODE_HPL 1
109#define ARIZONA_ACCDET_MODE_HPR 2 109#define ARIZONA_ACCDET_MODE_HPR 2
110#define ARIZONA_ACCDET_MODE_HPM 4
111#define ARIZONA_ACCDET_MODE_ADC 7
110 112
111#endif 113#endif
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index c69e1b932809..a7cabfa23b55 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -207,7 +207,7 @@ struct coresight_ops_link {
207 * Operations available for sources. 207 * Operations available for sources.
208 * @trace_id: returns the value of the component's trace ID as known 208 * @trace_id: returns the value of the component's trace ID as known
209 to the HW. 209 to the HW.
210 * @enable: enables tracing from a source. 210 * @enable: enables tracing for a source.
211 * @disable: disables tracing for a source. 211 * @disable: disables tracing for a source.
212 */ 212 */
213struct coresight_ops_source { 213struct coresight_ops_source {
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 7ea9184eaa13..c47c68e535e8 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -645,6 +645,7 @@ enum dmaengine_alignment {
645 * The function takes a buffer of size buf_len. The callback function will 645 * The function takes a buffer of size buf_len. The callback function will
646 * be called after period_len bytes have been transferred. 646 * be called after period_len bytes have been transferred.
647 * @device_prep_interleaved_dma: Transfer expression in a generic way. 647 * @device_prep_interleaved_dma: Transfer expression in a generic way.
648 * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
648 * @device_config: Pushes a new configuration to a channel, return 0 or an error 649 * @device_config: Pushes a new configuration to a channel, return 0 or an error
649 * code 650 * code
650 * @device_pause: Pauses any transfer happening on a channel. Returns 651 * @device_pause: Pauses any transfer happening on a channel. Returns
@@ -727,6 +728,9 @@ struct dma_device {
727 struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( 728 struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
728 struct dma_chan *chan, struct dma_interleaved_template *xt, 729 struct dma_chan *chan, struct dma_interleaved_template *xt,
729 unsigned long flags); 730 unsigned long flags);
731 struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
732 struct dma_chan *chan, dma_addr_t dst, u64 data,
733 unsigned long flags);
730 734
731 int (*device_config)(struct dma_chan *chan, 735 int (*device_config)(struct dma_chan *chan,
732 struct dma_slave_config *config); 736 struct dma_slave_config *config);
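
device_prep_dma_imm_data is a new optional op that DMAs 8 bytes of immediate data to a destination address. A hedged sketch of how a client could drive it (the helper name and error handling are illustrative; the op must be NULL-checked since it is optional):

#include <linux/dmaengine.h>

static int send_imm_data(struct dma_chan *chan, dma_addr_t dst, u64 data)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        if (!chan->device->device_prep_dma_imm_data)
                return -EOPNOTSUPP;

        tx = chan->device->device_prep_dma_imm_data(chan, dst, data,
                                                    DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        cookie = dmaengine_submit(tx);
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);      /* kick off the transfer */
        return 0;
}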
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index c0f8c4fc5d45..7abf674c388c 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -31,32 +31,42 @@
31/* 31/*
32 * Define the unique id of supported external connectors 32 * Define the unique id of supported external connectors
33 */ 33 */
34#define EXTCON_NONE 0 34#define EXTCON_NONE 0
35 35
36#define EXTCON_USB 1 /* USB connector */ 36/* USB external connector */
37#define EXTCON_USB_HOST 2 37#define EXTCON_USB 1
38 38#define EXTCON_USB_HOST 2
39#define EXTCON_TA 3 /* Charger connector */ 39
40#define EXTCON_FAST_CHARGER 4 40/* Charging external connector */
41#define EXTCON_SLOW_CHARGER 5 41#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */
42#define EXTCON_CHARGE_DOWNSTREAM 6 42#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */
43 43#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */
44#define EXTCON_LINE_IN 7 /* Audio/Video connector */ 44#define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */
45#define EXTCON_LINE_OUT 8 45#define EXTCON_CHG_USB_FAST 9
46#define EXTCON_MICROPHONE 9 46#define EXTCON_CHG_USB_SLOW 10
47#define EXTCON_HEADPHONE 10 47
48#define EXTCON_HDMI 11 48/* Jack external connector */
49#define EXTCON_MHL 12 49#define EXTCON_JACK_MICROPHONE 20
50#define EXTCON_DVI 13 50#define EXTCON_JACK_HEADPHONE 21
51#define EXTCON_VGA 14 51#define EXTCON_JACK_LINE_IN 22
52#define EXTCON_SPDIF_IN 15 52#define EXTCON_JACK_LINE_OUT 23
53#define EXTCON_SPDIF_OUT 16 53#define EXTCON_JACK_VIDEO_IN 24
54#define EXTCON_VIDEO_IN 17 54#define EXTCON_JACK_VIDEO_OUT 25
55#define EXTCON_VIDEO_OUT 18 55#define EXTCON_JACK_SPDIF_IN 26 /* Sony Philips Digital InterFace */
56 56#define EXTCON_JACK_SPDIF_OUT 27
57#define EXTCON_DOCK 19 /* Misc connector */ 57
58#define EXTCON_JIG 20 58/* Display external connector */
59#define EXTCON_MECHANICAL 21 59#define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */
60#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */
61#define EXTCON_DISP_DVI 42 /* Digital Visual Interface */
62#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
63
64/* Miscellaneous external connector */
65#define EXTCON_DOCK 60
66#define EXTCON_JIG 61
67#define EXTCON_MECHANICAL 62
68
69#define EXTCON_NUM 63
60 70
61struct extcon_cable; 71struct extcon_cable;
62 72
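
The extcon IDs above are renumbered into functional groups (charger, jack, display, miscellaneous) with numbering gaps left for future connectors, and EXTCON_NUM bounds the ID space. A hedged sketch of a provider reporting attach state with the id-based setters of this era (extcon_set_cable_state_(); edev would come from devm_extcon_dev_allocate() fed the table below — names outside this hunk are assumptions):

#include <linux/extcon.h>

static const unsigned int my_cable_ids[] = {
        EXTCON_USB,
        EXTCON_CHG_USB_SDP,
        EXTCON_NONE,                /* table terminator */
};

static void report_vbus(struct extcon_dev *edev, bool attached)
{
        /* report both the data connection and the charger type */
        extcon_set_cable_state_(edev, EXTCON_USB, attached);
        extcon_set_cable_state_(edev, EXTCON_CHG_USB_SDP, attached);
}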
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
index 0b17ad43fbfc..7cacafb78b09 100644
--- a/include/linux/extcon/extcon-gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * External connector (extcon) class generic GPIO driver 2 * Single-state GPIO extcon driver based on extcon class
3 * 3 *
4 * Copyright (C) 2012 Samsung Electronics 4 * Copyright (C) 2012 Samsung Electronics
5 * Author: MyungJoo Ham <myungjoo.ham@samsung.com> 5 * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -16,43 +16,31 @@
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 * 19 */
20*/
21#ifndef __EXTCON_GPIO_H__ 20#ifndef __EXTCON_GPIO_H__
22#define __EXTCON_GPIO_H__ __FILE__ 21#define __EXTCON_GPIO_H__ __FILE__
23 22
24#include <linux/extcon.h> 23#include <linux/extcon.h>
25 24
26/** 25/**
27 * struct gpio_extcon_platform_data - A simple GPIO-controlled extcon device. 26 * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device.
28 * @name: The name of this GPIO extcon device. 27 * @extcon_id: The unique id of specific external connector.
29 * @gpio: Corresponding GPIO. 28 * @gpio: Corresponding GPIO.
30 * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0 29 * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0
31 * If true, low state of gpio means active. 30 * If true, low state of gpio means active.
32 * If false, high state of gpio means active. 31 * If false, high state of gpio means active.
33 * @debounce: Debounce time for GPIO IRQ in ms. 32 * @debounce: Debounce time for GPIO IRQ in ms.
34 * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). 33 * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
35 * @state_on: print_state is overriden with state_on if attached.
36 * If NULL, default method of extcon class is used.
37 * @state_off: print_state is overriden with state_off if detached.
38 * If NUll, default method of extcon class is used.
39 * @check_on_resume: Boolean describing whether to check the state of gpio 34 * @check_on_resume: Boolean describing whether to check the state of gpio
40 * while resuming from sleep. 35 * while resuming from sleep.
41 *
42 * Note that in order for state_on or state_off to be valid, both state_on
43 * and state_off should be not NULL. If at least one of them is NULL,
44 * the print_state is not overriden.
45 */ 36 */
46struct gpio_extcon_platform_data { 37struct gpio_extcon_pdata {
47 const char *name; 38 unsigned int extcon_id;
48 unsigned gpio; 39 unsigned gpio;
49 bool gpio_active_low; 40 bool gpio_active_low;
50 unsigned long debounce; 41 unsigned long debounce;
51 unsigned long irq_flags; 42 unsigned long irq_flags;
52 43
53 /* if NULL, "0" or "1" will be printed */
54 const char *state_on;
55 const char *state_off;
56 bool check_on_resume; 44 bool check_on_resume;
57}; 45};
58 46
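
With this pdata rework, a board identifies the connector by extcon_id instead of a free-form name and custom state strings. An illustrative platform-data snippet (GPIO number and timings are made up):

#include <linux/extcon.h>
#include <linux/extcon/extcon-gpio.h>
#include <linux/interrupt.h>

static struct gpio_extcon_pdata my_usb_detect_pdata = {
        .extcon_id       = EXTCON_USB,
        .gpio            = 42,          /* hypothetical detect line */
        .gpio_active_low = false,
        .debounce        = 20,          /* ms */
        .irq_flags       = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
        .check_on_resume = true,
};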
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
new file mode 100644
index 000000000000..0940bf45e2f2
--- /dev/null
+++ b/include/linux/fpga/fpga-mgr.h
@@ -0,0 +1,127 @@
1/*
2 * FPGA Framework
3 *
4 * Copyright (C) 2013-2015 Altera Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/mutex.h>
19#include <linux/platform_device.h>
20
21#ifndef _LINUX_FPGA_MGR_H
22#define _LINUX_FPGA_MGR_H
23
24struct fpga_manager;
25
26/**
27 * enum fpga_mgr_states - fpga framework states
28 * @FPGA_MGR_STATE_UNKNOWN: can't determine state
29 * @FPGA_MGR_STATE_POWER_OFF: FPGA power is off
30 * @FPGA_MGR_STATE_POWER_UP: FPGA reports power is up
31 * @FPGA_MGR_STATE_RESET: FPGA in reset state
32 * @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress
33 * @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed
34 * @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming
35 * @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
36 * @FPGA_MGR_STATE_WRITE: writing image to FPGA
37 * @FPGA_MGR_STATE_WRITE_ERR: Error while writing FPGA
38 * @FPGA_MGR_STATE_WRITE_COMPLETE: Doing post programming steps
39 * @FPGA_MGR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE
40 * @FPGA_MGR_STATE_OPERATING: FPGA is programmed and operating
41 */
42enum fpga_mgr_states {
43 /* default FPGA states */
44 FPGA_MGR_STATE_UNKNOWN,
45 FPGA_MGR_STATE_POWER_OFF,
46 FPGA_MGR_STATE_POWER_UP,
47 FPGA_MGR_STATE_RESET,
48
49 /* getting an image for loading */
50 FPGA_MGR_STATE_FIRMWARE_REQ,
51 FPGA_MGR_STATE_FIRMWARE_REQ_ERR,
52
53 /* write sequence: init, write, complete */
54 FPGA_MGR_STATE_WRITE_INIT,
55 FPGA_MGR_STATE_WRITE_INIT_ERR,
56 FPGA_MGR_STATE_WRITE,
57 FPGA_MGR_STATE_WRITE_ERR,
58 FPGA_MGR_STATE_WRITE_COMPLETE,
59 FPGA_MGR_STATE_WRITE_COMPLETE_ERR,
60
61 /* fpga is programmed and operating */
62 FPGA_MGR_STATE_OPERATING,
63};
64
65/*
66 * FPGA Manager flags
67 * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
68 */
69#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
70
71/**
72 * struct fpga_manager_ops - ops for low level fpga manager drivers
73 * @state: returns an enum value of the FPGA's state
 74 * @write_init: prepare the FPGA to receive configuration data
75 * @write: write count bytes of configuration data to the FPGA
76 * @write_complete: set FPGA to operating state after writing is done
77 * @fpga_remove: optional: Set FPGA into a specific state during driver remove
78 *
79 * fpga_manager_ops are the low level functions implemented by a specific
80 * fpga manager driver. The optional ones are tested for NULL before being
81 * called, so leaving them out is fine.
82 */
83struct fpga_manager_ops {
84 enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
85 int (*write_init)(struct fpga_manager *mgr, u32 flags,
86 const char *buf, size_t count);
87 int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
88 int (*write_complete)(struct fpga_manager *mgr, u32 flags);
89 void (*fpga_remove)(struct fpga_manager *mgr);
90};
91
92/**
93 * struct fpga_manager - fpga manager structure
94 * @name: name of low level fpga manager
95 * @dev: fpga manager device
96 * @ref_mutex: only allows one reference to fpga manager
97 * @state: state of fpga manager
98 * @mops: pointer to struct of fpga manager ops
 99 * @priv: low level driver private data
100 */
101struct fpga_manager {
102 const char *name;
103 struct device dev;
104 struct mutex ref_mutex;
105 enum fpga_mgr_states state;
106 const struct fpga_manager_ops *mops;
107 void *priv;
108};
109
110#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
111
112int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
113 const char *buf, size_t count);
114
115int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
116 const char *image_name);
117
118struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
119
120void fpga_mgr_put(struct fpga_manager *mgr);
121
122int fpga_mgr_register(struct device *dev, const char *name,
123 const struct fpga_manager_ops *mops, void *priv);
124
125void fpga_mgr_unregister(struct device *dev);
126
127#endif /*_LINUX_FPGA_MGR_H */
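
A skeleton low-level driver for the new FPGA manager API: the ops are invoked in write_init -> write -> write_complete order when an image is loaded, and all my_* names plus the empty bodies are placeholders for device-specific logic, not part of this diff:

#include <linux/fpga/fpga-mgr.h>
#include <linux/module.h>

static enum fpga_mgr_states my_state(struct fpga_manager *mgr)
{
        return FPGA_MGR_STATE_UNKNOWN;  /* query the hardware here */
}

static int my_write_init(struct fpga_manager *mgr, u32 flags,
                         const char *buf, size_t count)
{
        /* enter programming mode; honour FPGA_MGR_PARTIAL_RECONFIG
         * from flags if the device supports it */
        return 0;
}

static int my_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
        /* push count bytes of the image to the device */
        return 0;
}

static int my_write_complete(struct fpga_manager *mgr, u32 flags)
{
        /* wait for the device to report it is operating */
        return 0;
}

static const struct fpga_manager_ops my_ops = {
        .state          = my_state,
        .write_init     = my_write_init,
        .write          = my_write,
        .write_complete = my_write_complete,
};

static int my_probe(struct platform_device *pdev)
{
        return fpga_mgr_register(&pdev->dev, "My FPGA Manager",
                                 &my_ops, NULL);
}

static int my_remove(struct platform_device *pdev)
{
        fpga_mgr_unregister(&pdev->dev);
        return 0;
}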
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 0962b2ca628a..e746919530f5 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -8,8 +8,8 @@
8struct mei_cl_device; 8struct mei_cl_device;
9struct mei_device; 9struct mei_device;
10 10
11typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, 11typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev,
12 u32 events, void *context); 12 u32 events, void *context);
13 13
14/** 14/**
15 * struct mei_cl_device - MEI device handle 15 * struct mei_cl_device - MEI device handle
@@ -45,7 +45,7 @@ struct mei_cl_device {
45 char name[MEI_CL_NAME_SIZE]; 45 char name[MEI_CL_NAME_SIZE];
46 46
47 struct work_struct event_work; 47 struct work_struct event_work;
48 mei_cl_event_cb_t event_cb; 48 mei_cldev_event_cb_t event_cb;
49 void *event_context; 49 void *event_context;
50 unsigned long events_mask; 50 unsigned long events_mask;
51 unsigned long events; 51 unsigned long events;
@@ -62,33 +62,37 @@ struct mei_cl_driver {
62 62
63 const struct mei_cl_device_id *id_table; 63 const struct mei_cl_device_id *id_table;
64 64
65 int (*probe)(struct mei_cl_device *dev, 65 int (*probe)(struct mei_cl_device *cldev,
66 const struct mei_cl_device_id *id); 66 const struct mei_cl_device_id *id);
67 int (*remove)(struct mei_cl_device *dev); 67 int (*remove)(struct mei_cl_device *cldev);
68}; 68};
69 69
70int __mei_cl_driver_register(struct mei_cl_driver *driver, 70int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
71 struct module *owner); 71 struct module *owner);
72#define mei_cl_driver_register(driver) \ 72#define mei_cldev_driver_register(cldrv) \
73 __mei_cl_driver_register(driver, THIS_MODULE) 73 __mei_cldev_driver_register(cldrv, THIS_MODULE)
74 74
75void mei_cl_driver_unregister(struct mei_cl_driver *driver); 75void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
76 76
77ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length); 77ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
78ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); 78ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
79 79
80int mei_cl_register_event_cb(struct mei_cl_device *device, 80int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
81 unsigned long event_mask, 81 unsigned long event_mask,
82 mei_cl_event_cb_t read_cb, void *context); 82 mei_cldev_event_cb_t read_cb, void *context);
83 83
84#define MEI_CL_EVENT_RX 0 84#define MEI_CL_EVENT_RX 0
85#define MEI_CL_EVENT_TX 1 85#define MEI_CL_EVENT_TX 1
86#define MEI_CL_EVENT_NOTIF 2 86#define MEI_CL_EVENT_NOTIF 2
87 87
88void *mei_cl_get_drvdata(const struct mei_cl_device *device); 88const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
89void mei_cl_set_drvdata(struct mei_cl_device *device, void *data); 89u8 mei_cldev_ver(const struct mei_cl_device *cldev);
90 90
91int mei_cl_enable_device(struct mei_cl_device *device); 91void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev);
92int mei_cl_disable_device(struct mei_cl_device *device); 92void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data);
93
94int mei_cldev_enable(struct mei_cl_device *cldev);
95int mei_cldev_disable(struct mei_cl_device *cldev);
96bool mei_cldev_enabled(struct mei_cl_device *cldev);
93 97
94#endif /* _LINUX_MEI_CL_BUS_H */ 98#endif /* _LINUX_MEI_CL_BUS_H */
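
The MEI client bus API is renamed from mei_cl_* to mei_cldev_*, and the device id gains a protocol version. A hedged skeleton of a client driver against the renamed API (the UUID and my_* names are made up; MEI_CL_VERSION_ANY matches any protocol version):

#include <linux/mei_cl_bus.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/uuid.h>

#define MY_UUID UUID_LE(0x12345678, 0x9abc, 0xdef0, \
                        0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0)

static const struct mei_cl_device_id my_id_table[] = {
        { .name = "my_client", .uuid = MY_UUID,
          .version = MEI_CL_VERSION_ANY },
        { }
};
MODULE_DEVICE_TABLE(mei, my_id_table);

static int my_probe(struct mei_cl_device *cldev,
                    const struct mei_cl_device_id *id)
{
        return mei_cldev_enable(cldev);
}

static int my_remove(struct mei_cl_device *cldev)
{
        return mei_cldev_disable(cldev);
}

static struct mei_cl_driver my_driver = {
        .id_table = my_id_table,
        .name     = "my_client",
        .probe    = my_probe,
        .remove   = my_remove,
};

static int __init my_init(void)
{
        return mei_cldev_driver_register(&my_driver);
}
module_init(my_init);

static void __exit my_exit(void)
{
        mei_cldev_driver_unregister(&my_driver);
}
module_exit(my_exit);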
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 1dc385850ba2..57b45caaea80 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -124,6 +124,9 @@ struct arizona_pdata {
124 /** Channel to use for headphone detection */ 124 /** Channel to use for headphone detection */
125 unsigned int hpdet_channel; 125 unsigned int hpdet_channel;
126 126
127 /** Use software comparison to determine mic presence */
128 bool micd_software_compare;
129
127 /** Extra debounce timeout used during initial mic detection (ms) */ 130 /** Extra debounce timeout used during initial mic detection (ms) */
128 unsigned int micd_detect_debounce; 131 unsigned int micd_detect_debounce;
129 132
@@ -181,6 +184,9 @@ struct arizona_pdata {
181 184
182 /** GPIO for primary IRQ (used for edge triggered emulation) */ 185 /** GPIO for primary IRQ (used for edge triggered emulation) */
183 int irq_gpio; 186 int irq_gpio;
187
188 /** General purpose switch control */
189 unsigned int gpsw;
184}; 190};
185 191
186#endif 192#endif
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index fdd70b3c7418..c7c11c900196 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -242,6 +242,7 @@
242#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 242#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
243#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 243#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
244#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 244#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
245#define ARIZONA_HP_TEST_CTRL_1 0x4A4
245#define ARIZONA_SPK_CTRL_2 0x4B5 246#define ARIZONA_SPK_CTRL_2 0x4B5
246#define ARIZONA_SPK_CTRL_3 0x4B6 247#define ARIZONA_SPK_CTRL_3 0x4B6
247#define ARIZONA_DAC_COMP_1 0x4DC 248#define ARIZONA_DAC_COMP_1 0x4DC
@@ -2359,9 +2360,9 @@
2359#define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */ 2360#define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */
2360#define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */ 2361#define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */
2361#define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */ 2362#define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */
2362#define ARIZONA_ACCDET_MODE_MASK 0x0003 /* ACCDET_MODE - [1:0] */ 2363#define ARIZONA_ACCDET_MODE_MASK 0x0007 /* ACCDET_MODE - [2:0] */
2363#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [1:0] */ 2364#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [2:0] */
2364#define ARIZONA_ACCDET_MODE_WIDTH 2 /* ACCDET_MODE - [1:0] */ 2365#define ARIZONA_ACCDET_MODE_WIDTH 3 /* ACCDET_MODE - [2:0] */
2365 2366
2366/* 2367/*
2367 * R667 (0x29B) - Headphone Detect 1 2368 * R667 (0x29B) - Headphone Detect 1
@@ -3702,6 +3703,13 @@
3702#define ARIZONA_HP3_SC_ENA_WIDTH 1 /* HP3_SC_ENA */ 3703#define ARIZONA_HP3_SC_ENA_WIDTH 1 /* HP3_SC_ENA */
3703 3704
3704/* 3705/*
3706 * R1188 (0x4A4) HP Test Ctrl 1
3707 */
3708#define ARIZONA_HP1_TST_CAP_SEL_MASK 0x0003 /* HP1_TST_CAP_SEL - [1:0] */
3709#define ARIZONA_HP1_TST_CAP_SEL_SHIFT 0 /* HP1_TST_CAP_SEL - [1:0] */
3710#define ARIZONA_HP1_TST_CAP_SEL_WIDTH 2 /* HP1_TST_CAP_SEL - [1:0] */
3711
3712/*
3705 * R1244 (0x4DC) - DAC comp 1 3713 * R1244 (0x4DC) - DAC comp 1
3706 */ 3714 */
3707#define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */ 3715#define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */
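
ACCDET_MODE grows from a 2-bit to a 3-bit field so the new HPM (4) and ADC (7) modes from dt-bindings/mfd/arizona.h fit under the widened mask. A hedged regmap snippet selecting ADC mode (assuming ARIZONA_ACCESSORY_DETECT_MODE_1 is the parent register of these field defines; the function name is illustrative):

#include <linux/mfd/arizona/registers.h>
#include <linux/regmap.h>
#include <dt-bindings/mfd/arizona.h>

static int set_accdet_adc_mode(struct regmap *map)
{
        /* update only the 3-bit mode field, leaving the rest intact */
        return regmap_update_bits(map, ARIZONA_ACCESSORY_DETECT_MODE_1,
                                  ARIZONA_ACCDET_MODE_MASK,
                                  ARIZONA_ACCDET_MODE_ADC <<
                                  ARIZONA_ACCDET_MODE_SHIFT);
}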
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
index d5b5f76d57ef..27d7c95fd0da 100644
--- a/include/linux/mic_bus.h
+++ b/include/linux/mic_bus.h
@@ -91,7 +91,8 @@ struct mbus_hw_ops {
91 91
92struct mbus_device * 92struct mbus_device *
93mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, 93mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
94 struct mbus_hw_ops *hw_ops, void __iomem *mmio_va); 94 struct mbus_hw_ops *hw_ops, int index,
95 void __iomem *mmio_va);
95void mbus_unregister_device(struct mbus_device *mbdev); 96void mbus_unregister_device(struct mbus_device *mbdev);
96 97
97int mbus_register_driver(struct mbus_driver *drv); 98int mbus_register_driver(struct mbus_driver *drv);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 688997a24aad..6975cbf1435b 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -601,15 +601,13 @@ struct ipack_device_id {
601 601
602#define MEI_CL_MODULE_PREFIX "mei:" 602#define MEI_CL_MODULE_PREFIX "mei:"
603#define MEI_CL_NAME_SIZE 32 603#define MEI_CL_NAME_SIZE 32
604#define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" 604#define MEI_CL_VERSION_ANY 0xff
605#define MEI_CL_UUID_ARGS(_u) \
606 _u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \
607 _u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15]
608 605
609/** 606/**
610 * struct mei_cl_device_id - MEI client device identifier 607 * struct mei_cl_device_id - MEI client device identifier
611 * @name: helper name 608 * @name: helper name
612 * @uuid: client uuid 609 * @uuid: client uuid
610 * @version: client protocol version
613 * @driver_info: information used by the driver. 611 * @driver_info: information used by the driver.
614 * 612 *
615 * identifies mei client device by uuid and name 613 * identifies mei client device by uuid and name
@@ -617,6 +615,7 @@ struct ipack_device_id {
617struct mei_cl_device_id { 615struct mei_cl_device_id {
618 char name[MEI_CL_NAME_SIZE]; 616 char name[MEI_CL_NAME_SIZE];
619 uuid_le uuid; 617 uuid_le uuid;
618 __u8 version;
620 kernel_ulong_t driver_info; 619 kernel_ulong_t driver_info;
621}; 620};
622 621
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
deleted file mode 100644
index fe722c1fb61d..000000000000
--- a/include/linux/msm_mdp.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/* include/linux/msm_mdp.h
2 *
3 * Copyright (C) 2007 Google Incorporated
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef _MSM_MDP_H_
15#define _MSM_MDP_H_
16
17#include <linux/types.h>
18
19#define MSMFB_IOCTL_MAGIC 'm'
20#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
21#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
22
23enum {
24 MDP_RGB_565, /* RGB 565 planar */
25 MDP_XRGB_8888, /* RGB 888 padded */
26 MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */
27 MDP_ARGB_8888, /* ARGB 888 */
28 MDP_RGB_888, /* RGB 888 planar */
29 MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */
30 MDP_YCRYCB_H2V1, /* YCrYCb interleave */
31 MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */
32 MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */
33 MDP_RGBA_8888, /* ARGB 888 */
34 MDP_BGRA_8888, /* ABGR 888 */
35 MDP_RGBX_8888, /* RGBX 888 */
36 MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */
37};
38
39enum {
40 PMEM_IMG,
41 FB_IMG,
42};
43
44/* flag values */
45#define MDP_ROT_NOP 0
46#define MDP_FLIP_LR 0x1
47#define MDP_FLIP_UD 0x2
48#define MDP_ROT_90 0x4
49#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
50#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
51#define MDP_DITHER 0x8
52#define MDP_BLUR 0x10
53
54#define MDP_TRANSP_NOP 0xffffffff
55#define MDP_ALPHA_NOP 0xff
56
57struct mdp_rect {
58 u32 x, y, w, h;
59};
60
61struct mdp_img {
62 u32 width, height, format, offset;
63 int memory_id; /* the file descriptor */
64};
65
66struct mdp_blit_req {
67 struct mdp_img src;
68 struct mdp_img dst;
69 struct mdp_rect src_rect;
70 struct mdp_rect dst_rect;
71 u32 alpha, transp_mask, flags;
72};
73
74struct mdp_blit_req_list {
75 u32 count;
76 struct mdp_blit_req req[];
77};
78
79#endif /* _MSM_MDP_H_ */
diff --git a/include/linux/scif.h b/include/linux/scif.h
index 44f4f3898bbe..49a35d6edc94 100644
--- a/include/linux/scif.h
+++ b/include/linux/scif.h
@@ -55,6 +55,7 @@
55 55
56#include <linux/types.h> 56#include <linux/types.h>
57#include <linux/poll.h> 57#include <linux/poll.h>
58#include <linux/device.h>
58#include <linux/scif_ioctl.h> 59#include <linux/scif_ioctl.h>
59 60
60#define SCIF_ACCEPT_SYNC 1 61#define SCIF_ACCEPT_SYNC 1
@@ -92,6 +93,70 @@ enum {
92#define SCIF_PORT_RSVD 1088 93#define SCIF_PORT_RSVD 1088
93 94
94typedef struct scif_endpt *scif_epd_t; 95typedef struct scif_endpt *scif_epd_t;
96typedef struct scif_pinned_pages *scif_pinned_pages_t;
97
98/**
99 * struct scif_range - SCIF registered range used in kernel mode
100 * @cookie: cookie used internally by SCIF
101 * @nr_pages: number of pages of PAGE_SIZE
102 * @prot_flags: R/W protection
103 * @phys_addr: Array of bus addresses
104 * @va: Array of kernel virtual addresses backed by the pages in the phys_addr
105 * array. The va is populated only when called on the host for a remote
106 * SCIF connection on MIC. This is required to support the use case of DMA
107 * between MIC and another device which is not a SCIF node, e.g., an IB or
108 * Ethernet NIC.
109 */
110struct scif_range {
111 void *cookie;
112 int nr_pages;
113 int prot_flags;
114 dma_addr_t *phys_addr;
115 void __iomem **va;
116};
117
118/**
119 * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll
120 * @epd: SCIF endpoint
121 * @events: requested events
122 * @revents: returned events
123 */
124struct scif_pollepd {
125 scif_epd_t epd;
126 short events;
127 short revents;
128};
129
130/**
131 * scif_peer_dev - representation of a peer SCIF device
132 *
133 * Peer devices show up as PCIe devices for the mgmt node but not the cards.
134 * The mgmt node discovers all the cards on the PCIe bus and informs the other
135 * cards about their peers. Upon notification of a peer a node adds a peer
136 * device to the peer bus to maintain symmetry in the way devices are
137 * discovered across all nodes in the SCIF network.
138 *
139 * @dev: underlying device
140 * @dnode: The destination node which this device will communicate with.
141 */
142struct scif_peer_dev {
143 struct device dev;
144 u8 dnode;
145};
146
147/**
148 * scif_client - representation of a SCIF client
149 * @name: client name
150 * @probe: client method called when a peer device is registered
151 * @remove: client method called when a peer device is unregistered
152 * @si: subsys_interface used internally for implementing SCIF clients
153 */
154struct scif_client {
155 const char *name;
156 void (*probe)(struct scif_peer_dev *spdev);
157 void (*remove)(struct scif_peer_dev *spdev);
158 struct subsys_interface si;
159};
95 160
96#define SCIF_OPEN_FAILED ((scif_epd_t)-1) 161#define SCIF_OPEN_FAILED ((scif_epd_t)-1)
97#define SCIF_REGISTER_FAILED ((off_t)-1) 162#define SCIF_REGISTER_FAILED ((off_t)-1)
@@ -345,7 +410,6 @@ int scif_close(scif_epd_t epd);
345 * Errors: 410 * Errors:
346 * EBADF, ENOTTY - epd is not a valid endpoint descriptor 411 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
347 * ECONNRESET - Connection reset by peer 412 * ECONNRESET - Connection reset by peer
348 * EFAULT - An invalid address was specified for a parameter
349 * EINVAL - flags is invalid, or len is negative 413 * EINVAL - flags is invalid, or len is negative
350 * ENODEV - The remote node is lost or exited, but is not currently in the 414 * ENODEV - The remote node is lost or exited, but is not currently in the
351 * network since it may have crashed 415 * network since it may have crashed
@@ -398,7 +462,6 @@ int scif_send(scif_epd_t epd, void *msg, int len, int flags);
398 * EAGAIN - The destination node is returning from a low power state 462 * EAGAIN - The destination node is returning from a low power state
399 * EBADF, ENOTTY - epd is not a valid endpoint descriptor 463 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
400 * ECONNRESET - Connection reset by peer 464 * ECONNRESET - Connection reset by peer
401 * EFAULT - An invalid address was specified for a parameter
402 * EINVAL - flags is invalid, or len is negative 465 * EINVAL - flags is invalid, or len is negative
403 * ENODEV - The remote node is lost or exited, but is not currently in the 466 * ENODEV - The remote node is lost or exited, but is not currently in the
404 * network since it may have crashed 467 * network since it may have crashed
@@ -461,9 +524,6 @@ int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
461 * SCIF_PROT_READ - allow read operations from the window 524 * SCIF_PROT_READ - allow read operations from the window
462 * SCIF_PROT_WRITE - allow write operations to the window 525 * SCIF_PROT_WRITE - allow write operations to the window
463 * 526 *
464 * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a
465 * fixed offset.
466 *
467 * Return: 527 * Return:
468 * Upon successful completion, scif_register() returns the offset at which the 528 * Upon successful completion, scif_register() returns the offset at which the
469 * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that 529 * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that
@@ -476,7 +536,6 @@ int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
476 * EAGAIN - The mapping could not be performed due to lack of resources 536 * EAGAIN - The mapping could not be performed due to lack of resources
477 * EBADF, ENOTTY - epd is not a valid endpoint descriptor 537 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
478 * ECONNRESET - Connection reset by peer 538 * ECONNRESET - Connection reset by peer
479 * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
480 * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is 539 * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is
481 * set in flags, and offset is not a multiple of the page size, or addr is not a 540 * set in flags, and offset is not a multiple of the page size, or addr is not a
482 * multiple of the page size, or len is not a multiple of the page size, or is 541 * multiple of the page size, or len is not a multiple of the page size, or is
@@ -759,7 +818,6 @@ int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t
759 * EACCES - Attempt to write to a read-only range 818 * EACCES - Attempt to write to a read-only range
760 * EBADF, ENOTTY - epd is not a valid endpoint descriptor 819 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
761 * ECONNRESET - Connection reset by peer 820 * ECONNRESET - Connection reset by peer
762 * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
763 * EINVAL - rma_flags is invalid 821 * EINVAL - rma_flags is invalid
764 * ENODEV - The remote node is lost or exited, but is not currently in the 822 * ENODEV - The remote node is lost or exited, but is not currently in the
765 * network since it may have crashed 823 * network since it may have crashed
@@ -840,7 +898,6 @@ int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset,
840 * EACCES - Attempt to write to a read-only range 898 * EACCES - Attempt to write to a read-only range
841 * EBADF, ENOTTY - epd is not a valid endpoint descriptor 899 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
842 * ECONNRESET - Connection reset by peer 900 * ECONNRESET - Connection reset by peer
843 * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
844 * EINVAL - rma_flags is invalid 901 * EINVAL - rma_flags is invalid
845 * ENODEV - The remote node is lost or exited, but is not currently in the 902 * ENODEV - The remote node is lost or exited, but is not currently in the
846 * network since it may have crashed 903 * network since it may have crashed
@@ -984,10 +1041,299 @@ int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff,
984 * online nodes in the SCIF network including 'self'; otherwise in user mode 1041 * online nodes in the SCIF network including 'self'; otherwise in user mode
985 * -1 is returned and errno is set to indicate the error; in kernel mode no 1042 * -1 is returned and errno is set to indicate the error; in kernel mode no
986 * errors are returned. 1043 * errors are returned.
1044 */
1045int scif_get_node_ids(u16 *nodes, int len, u16 *self);
1046
1047/**
1048 * scif_pin_pages() - Pin a set of pages
1049 * @addr: Virtual address of range to pin
1050 * @len: Length of range to pin
1051 * @prot_flags: Page protection flags
1052 * @map_flags: Page classification flags
1053 * @pinned_pages: Handle to pinned pages
1054 *
1055 * scif_pin_pages() pins (locks in physical memory) the physical pages which
1056 * back the range of virtual address pages starting at addr and continuing for
1057 * len bytes. addr and len are constrained to be multiples of the page size. A
1058 * successful scif_pin_pages() call returns a handle to pinned_pages which may
1059 * be used in subsequent calls to scif_register_pinned_pages().
1060 *
1061 * The pages will remain pinned as long as there is a reference against the
1062 * scif_pinned_pages_t value returned by scif_pin_pages() and until
1063 * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A
1064 * reference is added to a scif_pinned_pages_t value each time a window is
1065 * created by calling scif_register_pinned_pages() and passing the
1066 * scif_pinned_pages_t value. A reference is removed from a
1067 * scif_pinned_pages_t value each time such a window is deleted.
1068 *
1069 * Subsequent operations which change the memory pages to which virtual
1070 * addresses are mapped (such as mmap(), munmap()) have no effect on the
1071 * scif_pinned_pages_t value or windows created against it.
1072 *
1073 * If the process will fork(), it is recommended that the registered
1074 * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
1075 * problems due to copy-on-write semantics.
1076 *
1077 * The prot_flags argument is formed by OR'ing together one or more of the
1078 * following values.
1079 * SCIF_PROT_READ - allow read operations against the pages
1080 * SCIF_PROT_WRITE - allow write operations against the pages
1081 * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a
1082 * kernel space address. By default, addr is interpreted as a user space
1083 * address.
1084 *
1085 * Return:
1086 * Upon successful completion, scif_pin_pages() returns 0; otherwise the
1087 * negative of one of the following errors is returned.
987 * 1088 *
988 * Errors: 1089 * Errors:
989 * EFAULT - Bad address 1090 * EINVAL - prot_flags is invalid, map_flags is invalid, or offset is negative
1091 * ENOMEM - Not enough space
990 */ 1092 */
991int scif_get_node_ids(u16 *nodes, int len, u16 *self); 1093int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags,
1094 scif_pinned_pages_t *pinned_pages);
1095
1096/**
1097 * scif_unpin_pages() - Unpin a set of pages
1098 * @pinned_pages: Handle to pinned pages to be unpinned
1099 *
1100 * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new
1101 * windows against pinned_pages. The physical pages represented by pinned_pages
1102 * will remain pinned until all windows previously registered against
1103 * pinned_pages are deleted (the window is scif_unregister()'d and all
1104 * references to the window are removed; see scif_unregister()).
1105 *
1106 * pinned_pages must have been obtained from a previous call to scif_pin_pages().
1107 * After calling scif_unpin_pages(), it is an error to pass pinned_pages to
1108 * scif_register_pinned_pages().
1109 *
1110 * Return:
1111 * Upon successful completion, scif_unpin_pages() returns 0; otherwise the
1112 * negative of one of the following errors is returned.
1113 *
1114 * Errors:
1115 * EINVAL - pinned_pages is not valid
1116 */
1117int scif_unpin_pages(scif_pinned_pages_t pinned_pages);
1118
1119/**
1120 * scif_register_pinned_pages() - Mark a memory region for remote access.
1121 * @epd: endpoint descriptor
1122 * @pinned_pages: Handle to pinned pages
1123 * @offset: Registered address space offset
1124 * @map_flags: Flags which control where pages are mapped
1125 *
1126 * The scif_register_pinned_pages() function opens a window, a range of whole
1127 * pages of the registered address space of the endpoint epd, starting at
1128 * offset po. The value of po, further described below, is a function of the
1129 * parameters offset and pinned_pages, and the value of map_flags. Each page of
1130 * the window represents a corresponding physical memory page of the range
1131 * represented by pinned_pages; the length of the window is the same as the
1132 * length of range represented by pinned_pages. A successful
1133 * scif_register_pinned_pages() call returns po as the return value.
1134 *
1135 * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
1136 * exactly, and offset is constrained to be a multiple of the page size. The
1137 * mapping established by scif_register_pinned_pages() will not replace any
1138 * existing registration; an error is returned if any page of the new window
1139 * would intersect an existing window.
1140 *
1141 * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
1142 * implementation-defined manner to arrive at po. The po so chosen will be an
1143 * area of the registered address space that the implementation deems suitable
1144 * for a mapping of the required size. An offset value of 0 is interpreted as
1145 * granting the implementation complete freedom in selecting po, subject to
1146 * constraints described below. A non-zero value of offset is taken to be a
1147 * suggestion of an offset near which the mapping should be placed. When the
1148 * implementation selects a value for po, it does not replace any extant
1149 * window. In all cases, po will be a multiple of the page size.
1150 *
1151 * The physical pages which are so represented by a window are available for
1152 * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(),
1153 * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
1154 * physical pages represented by the window will not be reused by the memory
1155 * subsystem for any other purpose. Note that the same physical page may be
1156 * represented by multiple windows.
1157 *
1158 * Windows created by scif_register_pinned_pages() are unregistered by
1159 * scif_unregister().
1160 *
1161 * The map_flags argument can be set to SCIF_MAP_FIXED, in which case
1162 * offset is interpreted as a fixed offset.
1163 *
1164 * Return:
1165 * Upon successful completion, scif_register_pinned_pages() returns the offset
1166 * at which the mapping was placed (po); otherwise the negative of one of the
1167 * following errors is returned.
1168 *
1169 * Errors:
1170 * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window
1171 * would intersect an existing window
1172 * EAGAIN - The mapping could not be performed due to lack of resources
1173 * ECONNRESET - Connection reset by peer
1174 * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and
1175 * offset is not a multiple of the page size, or offset is negative
1176 * ENODEV - The remote node is lost or exited, but is not currently in the
1177 * network since it may have crashed
1178 * ENOMEM - Not enough space
1179 * ENOTCONN - The endpoint is not connected
1180 */
1181off_t scif_register_pinned_pages(scif_epd_t epd,
1182 scif_pinned_pages_t pinned_pages,
1183 off_t offset, int map_flags);
1184
1185/**
1186 * scif_get_pages() - Add references to remote registered pages
1187 * @epd: endpoint descriptor
1188 * @offset: remote registered offset
1189 * @len: length of range of pages
1190 * @pages: returned scif_range structure
1191 *
1192 * scif_get_pages() returns the addresses of the physical pages represented by
1193 * those pages of the registered address space of the peer of epd, starting at
1194 * offset and continuing for len bytes. offset and len are constrained to be
1195 * multiples of the page size.
1196 *
1197 * All of the pages in the specified range [offset, offset + len - 1] must be
1198 * within a single window of the registered address space of the peer of epd.
1199 *
1200 * The addresses are returned as a virtually contiguous array pointed to by the
1201 * phys_addr component of the scif_range structure whose address is returned in
1202 * pages. The nr_pages component of scif_range is the length of the array. The
1203 * prot_flags component of scif_range holds the protection flag value passed
1204 * when the pages were registered.
1205 *
1206 * Each physical page whose address is returned by scif_get_pages() remains
1207 * available and will not be released for reuse until the scif_range structure
1208 * is returned in a call to scif_put_pages(). The scif_range structure returned
1209 * by scif_get_pages() must be unmodified.
1210 *
1211 * It is an error to call scif_close() on an endpoint on which a scif_range
1212 * structure of that endpoint has not been returned to scif_put_pages().
1213 *
1214 * Return:
1215 * Upon successful completion, scif_get_pages() returns 0; otherwise the
1216 * negative of one of the following errors is returned.
1217 * Errors:
1218 * ECONNRESET - Connection reset by peer.
1219 * EINVAL - offset is not a multiple of the page size, or offset is negative, or
1220 * len is not a multiple of the page size
1221 * ENODEV - The remote node is lost or exited, but is not currently in the
1222 * network since it may have crashed
1223 * ENOTCONN - The endpoint is not connected
1224 * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid
1225 * for the registered address space of the peer epd
1226 */
1227int scif_get_pages(scif_epd_t epd, off_t offset, size_t len,
1228 struct scif_range **pages);
1229
1230/**
1231 * scif_put_pages() - Remove references from remote registered pages
1232 * @pages: pages to be returned
1233 *
1234 * scif_put_pages() releases a scif_range structure previously obtained by
1235 * calling scif_get_pages(). The physical pages represented by pages may
1236 * be reused when the window which represented those pages is unregistered.
1237 * Therefore, those pages must not be accessed after calling scif_put_pages().
1238 *
1239 * Return:
1240 * Upon successful completion, scif_put_pages() returns 0; otherwise the
1241 * negative of one of the following errors is returned.
1242 * Errors:
1243 * EINVAL - pages does not point to a valid scif_range structure, or
1244 * the scif_range structure pointed to by pages was already returned
1245 * ENODEV - The remote node is lost or exited, but is not currently in the
1246 * network since it may have crashed
1247 * ENOTCONN - The endpoint is not connected
1248 */
1249int scif_put_pages(struct scif_range *pages);
1250
1251/**
1252 * scif_poll() - Wait for some event on an endpoint
1253 * @epds: Array of endpoint descriptors
1254 * @nepds: Length of epds
1255 * @timeout: Upper limit on time for which scif_poll() will block
1256 *
1257 * scif_poll() waits for one of a set of endpoints to become ready to perform
1258 * an I/O operation.
1259 *
1260 * The epds argument specifies the endpoint descriptors to be examined and the
1261 * events of interest for each endpoint descriptor. epds is a pointer to an
1262 * array with one member for each open endpoint descriptor of interest.
1263 *
1264 * The number of items in the epds array is specified in nepds. The epd field
1265 * of scif_pollepd is an endpoint descriptor of an open endpoint. The field
1266 * events is a bitmask specifying the events which the application is
1267 * interested in. The field revents is an output parameter, filled by the
1268 * kernel with the events that actually occurred. The bits returned in revents
1269 * can include any of those specified in events, or one of the values POLLERR,
1270 * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events
1271 * field, and will be set in the revents field whenever the corresponding
1272 * condition is true.)
1273 *
1274 * If none of the events requested (and no error) has occurred for any of the
1275 * endpoint descriptors, then scif_poll() blocks until one of the events occurs.
1276 *
1277 * The timeout argument specifies an upper limit on the time for which
1278 * scif_poll() will block, in milliseconds. Specifying a negative value in
1279 * timeout means an infinite timeout.
1280 *
1281 * The following bits may be set in events and returned in revents.
1282 * POLLIN - Data may be received without blocking. For a connected
1283 * endpoint, this means that scif_recv() may be called without blocking. For a
1284 * listening endpoint, this means that scif_accept() may be called without
1285 * blocking.
1286 * POLLOUT - Data may be sent without blocking. For a connected endpoint, this
1287 * means that scif_send() may be called without blocking. POLLOUT may also be
1288 * used to block waiting for a non-blocking connect to complete. This bit value
1289 * has no meaning for a listening endpoint and is ignored if specified.
1290 *
1291 * The following bits are only returned in revents, and are ignored if set in
1292 * events.
1293 * POLLERR - An error occurred on the endpoint
1294 * POLLHUP - The connection to the peer endpoint was disconnected
1295 * POLLNVAL - The specified endpoint descriptor is invalid.
1296 *
1297 * Return:
1298 * Upon successful completion, scif_poll() returns a non-negative value. A
1299 * positive value indicates the total number of endpoint descriptors that have
1300 * been selected (that is, endpoint descriptors for which the revents member is
1301 * non-zero). A value of 0 indicates that the call timed out and no endpoint
1302 * descriptors have been selected. Otherwise in user mode -1 is returned and
1303 * errno is set to indicate the error; in kernel mode the negative of one of
1304 * the following errors is returned.
1305 *
1306 * Errors:
1307 * EINTR - A signal occurred before any requested event
1308 * EINVAL - The nepds argument is greater than {OPEN_MAX}
1309 * ENOMEM - There was no space to allocate file descriptor tables
1310 */
1311int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout);
1312
1313/**
1314 * scif_client_register() - Register a SCIF client
1315 * @client: client to be registered
1316 *
1317 * scif_client_register() registers a SCIF client. The probe() method
1318 * of the client is called when SCIF peer devices come online and the
1319 * remove() method is called when the peer devices disappear.
1320 *
1321 * Return:
1322 * Upon successful completion, scif_client_register() returns a non-negative
1323 * value. Otherwise the return value is the same as subsys_interface_register()
1324 * in the kernel.
1325 */
1326int scif_client_register(struct scif_client *client);
1327
1328/**
1329 * scif_client_unregister() - Unregister a SCIF client
1330 * @client: client to be unregistered
1331 *
1332 * scif_client_unregister() unregisters a SCIF client.
1333 *
1334 * Return:
1335 * None
1336 */
1337void scif_client_unregister(struct scif_client *client);
992 1338
993#endif /* __SCIF_H__ */ 1339#endif /* __SCIF_H__ */
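
A hedged sketch tying together the newly documented kernel-mode RMA calls: pin a page-aligned kernel buffer, expose it as a window, then tear it down. expose_buffer is a hypothetical helper and error handling is abbreviated; offsets and lengths must be page multiples, as the kerneldoc above states.

#include <linux/scif.h>

static int expose_buffer(scif_epd_t epd, void *buf, size_t len)
{
        scif_pinned_pages_t pp;
        off_t off;
        int err;

        /* SCIF_MAP_KERNEL: buf is a kernel-space address */
        err = scif_pin_pages(buf, len, SCIF_PROT_READ | SCIF_PROT_WRITE,
                             SCIF_MAP_KERNEL, &pp);
        if (err)
                return err;

        /* offset 0, no SCIF_MAP_FIXED: let SCIF pick the window offset */
        off = scif_register_pinned_pages(epd, pp, 0, 0);
        if (off < 0) {
                scif_unpin_pages(pp);
                return (int)off;
        }

        /* ... peer may now scif_readfrom()/scif_writeto() at off ... */

        err = scif_unregister(epd, off, len);
        scif_unpin_pages(pp);           /* pages freed once windows are gone */
        return err;
}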
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index f84212cd3b7d..1396a255d2a2 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -153,7 +153,9 @@ static inline struct spmi_driver *to_spmi_driver(struct device_driver *d)
153 return container_of(d, struct spmi_driver, driver); 153 return container_of(d, struct spmi_driver, driver);
154} 154}
155 155
156int spmi_driver_register(struct spmi_driver *sdrv); 156#define spmi_driver_register(sdrv) \
157 __spmi_driver_register(sdrv, THIS_MODULE)
158int __spmi_driver_register(struct spmi_driver *sdrv, struct module *owner);
157 159
158/** 160/**
159 * spmi_driver_unregister() - unregister an SPMI client driver 161 * spmi_driver_unregister() - unregister an SPMI client driver
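
spmi_driver_register() becomes a macro so that THIS_MODULE is recorded as the owner, matching what other buses do; call sites are unchanged. An illustrative client using the module_spmi_driver() convenience macro, which expands to the register/unregister pair (my_* names are made up):

#include <linux/module.h>
#include <linux/spmi.h>

static int my_probe(struct spmi_device *sdev)
{
        return 0;       /* device setup goes here */
}

static struct spmi_driver my_spmi_driver = {
        .probe = my_probe,
        .driver = {
                .name = "my-spmi-dev",
        },
};
module_spmi_driver(my_spmi_driver);
MODULE_LICENSE("GPL");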
diff --git a/include/linux/stm.h b/include/linux/stm.h
new file mode 100644
index 000000000000..9d0083d364e6
--- /dev/null
+++ b/include/linux/stm.h
@@ -0,0 +1,126 @@
1/*
 2 * System Trace Module (STM) infrastructure APIs
3 * Copyright (C) 2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef _STM_H_
16#define _STM_H_
17
18#include <linux/device.h>
19
20/**
21 * enum stp_packet_type - STP packets that an STM driver sends
22 */
23enum stp_packet_type {
24 STP_PACKET_DATA = 0,
25 STP_PACKET_FLAG,
26 STP_PACKET_USER,
27 STP_PACKET_MERR,
28 STP_PACKET_GERR,
29 STP_PACKET_TRIG,
30 STP_PACKET_XSYNC,
31};
32
33/**
34 * enum stp_packet_flags - STP packet modifiers
35 */
36enum stp_packet_flags {
37 STP_PACKET_MARKED = 0x1,
38 STP_PACKET_TIMESTAMPED = 0x2,
39};
40
41struct stp_policy;
42
43struct stm_device;
44
45/**
46 * struct stm_data - STM device description and callbacks
47 * @name: device name
48 * @stm: internal structure, only used by stm class code
49 * @sw_start: first STP master available to software
50 * @sw_end: last STP master available to software
51 * @sw_nchannels: number of STP channels per master
52 * @sw_mmiosz: size of one channel's IO space, for mmap, optional
53 * @packet: callback that sends an STP packet
54 * @mmio_addr: mmap callback, optional
55 * @link: called when a new stm_source gets linked to us, optional
56 * @unlink: likewise for unlinking, again optional
57 * @set_options: set device-specific options on a channel
58 *
59 * Fill out this structure before calling stm_register_device() to create
60 * an STM device and stm_unregister_device() to destroy it. It will also be
61 * passed back to @packet(), @mmio_addr(), @link(), @unlink() and @set_options()
62 * callbacks.
63 *
64 * Normally, an STM device will have a range of masters available to software
65 * and the rest being statically assigned to various hardware trace sources.
 66 * The former is defined by the range [@sw_start..@sw_end] of the device
67 * description. That is, the lowest master that can be allocated to software
 68 * writers is @sw_start and data from this writer will appear as @sw_start
69 * master in the STP stream.
70 */
71struct stm_data {
72 const char *name;
73 struct stm_device *stm;
74 unsigned int sw_start;
75 unsigned int sw_end;
76 unsigned int sw_nchannels;
77 unsigned int sw_mmiosz;
78 ssize_t (*packet)(struct stm_data *, unsigned int,
79 unsigned int, unsigned int,
80 unsigned int, unsigned int,
81 const unsigned char *);
82 phys_addr_t (*mmio_addr)(struct stm_data *, unsigned int,
83 unsigned int, unsigned int);
84 int (*link)(struct stm_data *, unsigned int,
85 unsigned int);
86 void (*unlink)(struct stm_data *, unsigned int,
87 unsigned int);
88 long (*set_options)(struct stm_data *, unsigned int,
89 unsigned int, unsigned int,
90 unsigned long);
91};
92
93int stm_register_device(struct device *parent, struct stm_data *stm_data,
94 struct module *owner);
95void stm_unregister_device(struct stm_data *stm_data);
96
97struct stm_source_device;
98
99/**
100 * struct stm_source_data - STM source device description and callbacks
101 * @name: device name, will be used for policy lookup
102 * @src: internal structure, only used by stm class code
103 * @nr_chans: number of channels to allocate
104 * @link: called when this source gets linked to an STM device
105 * @unlink: called when this source is about to get unlinked from its STM
106 *
107 * Fill in this structure before calling stm_source_register_device() to
108 * register a source device. Also pass it to unregister and write calls.
109 */
110struct stm_source_data {
111 const char *name;
112 struct stm_source_device *src;
113 unsigned int percpu;
114 unsigned int nr_chans;
115 int (*link)(struct stm_source_data *data);
116 void (*unlink)(struct stm_source_data *data);
117};
118
119int stm_source_register_device(struct device *parent,
120 struct stm_source_data *data);
121void stm_source_unregister_device(struct stm_source_data *data);
122
123int stm_source_write(struct stm_source_data *data, unsigned int chan,
124 const char *buf, size_t count);
125
126#endif /* _STM_H_ */
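
A minimal stm_source sketch against the new API: register a one-channel source, write through it once the STP policy links it to an STM device, and unregister on exit. Names are hypothetical, and a NULL parent is assumed acceptable here:

#include <linux/module.h>
#include <linux/stm.h>

static struct stm_source_data my_src = {
        .name     = "my_source",        /* used for policy lookup */
        .nr_chans = 1,
};

static int __init my_init(void)
{
        int err = stm_source_register_device(NULL, &my_src);

        if (err)
                return err;

        /* data only reaches the STP stream while we are linked */
        stm_source_write(&my_src, 0, "hello\n", 6);
        return 0;
}
module_init(my_init);

static void __exit my_exit(void)
{
        stm_source_unregister_device(&my_src);
}
module_exit(my_exit);
MODULE_LICENSE("GPL");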
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index d4217eff489f..0a0d56834c8e 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -158,6 +158,7 @@ struct st_data_s {
158 unsigned long ll_state; 158 unsigned long ll_state;
159 void *kim_data; 159 void *kim_data;
160 struct tty_struct *tty; 160 struct tty_struct *tty;
161 struct work_struct work_write_wakeup;
161}; 162};
162 163
163/* 164/*
diff --git a/include/linux/vme.h b/include/linux/vme.h
index c0131358f351..71e4a6dec5ac 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -81,6 +81,9 @@ struct vme_resource {
81 81
82extern struct bus_type vme_bus_type; 82extern struct bus_type vme_bus_type;
83 83
84/* Number of VME interrupt vectors */
85#define VME_NUM_STATUSID 256
86
84/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */ 87/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */
85#define VME_MAX_BRIDGES (sizeof(unsigned int)*8) 88#define VME_MAX_BRIDGES (sizeof(unsigned int)*8)
86#define VME_MAX_SLOTS 32 89#define VME_MAX_SLOTS 32
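
VME_NUM_STATUSID is 256 because an 8-bit status/ID carries 256 distinct values (0..255), per the shortlog fix. A trivial bounds check built on the new constant (helper name is illustrative):

#include <linux/types.h>
#include <linux/vme.h>

static bool statid_valid(int statid)
{
        /* valid status/IDs are 0..VME_NUM_STATUSID - 1 */
        return statid >= 0 && statid < VME_NUM_STATUSID;
}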
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
index 302a2ced373c..e9686372029d 100644
--- a/include/uapi/linux/mic_common.h
+++ b/include/uapi/linux/mic_common.h
@@ -75,12 +75,7 @@ struct mic_device_ctrl {
75 * struct mic_bootparam: Virtio device independent information in device page 75 * struct mic_bootparam: Virtio device independent information in device page
76 * 76 *
77 * @magic: A magic value used by the card to ensure it can see the host 77 * @magic: A magic value used by the card to ensure it can see the host
78 * @c2h_shutdown_db: Card to Host shutdown doorbell set by host
79 * @h2c_shutdown_db: Host to Card shutdown doorbell set by card
80 * @h2c_config_db: Host to Card Virtio config doorbell set by card 78 * @h2c_config_db: Host to Card Virtio config doorbell set by card
81 * @shutdown_status: Card shutdown status set by card
82 * @shutdown_card: Set to 1 by the host when a card shutdown is initiated
83 * @tot_nodes: Total number of nodes in the SCIF network
84 * @node_id: Unique id of the node 79 * @node_id: Unique id of the node
85 * @h2c_scif_db - Host to card SCIF doorbell set by card 80 * @h2c_scif_db - Host to card SCIF doorbell set by card
86 * @c2h_scif_db - Card to host SCIF doorbell set by host 81 * @c2h_scif_db - Card to host SCIF doorbell set by host
@@ -89,12 +84,7 @@ struct mic_device_ctrl {
89 */ 84 */
90struct mic_bootparam { 85struct mic_bootparam {
91 __le32 magic; 86 __le32 magic;
92 __s8 c2h_shutdown_db;
93 __s8 h2c_shutdown_db;
94 __s8 h2c_config_db; 87 __s8 h2c_config_db;
95 __u8 shutdown_status;
96 __u8 shutdown_card;
97 __u8 tot_nodes;
98 __u8 node_id; 88 __u8 node_id;
99 __u8 h2c_scif_db; 89 __u8 h2c_scif_db;
100 __u8 c2h_scif_db; 90 __u8 c2h_scif_db;
@@ -219,12 +209,12 @@ static inline unsigned mic_total_desc_size(struct mic_device_desc *desc)
219 * enum mic_states - MIC states. 209 * enum mic_states - MIC states.
220 */ 210 */
221enum mic_states { 211enum mic_states {
222 MIC_OFFLINE = 0, 212 MIC_READY = 0,
213 MIC_BOOTING,
223 MIC_ONLINE, 214 MIC_ONLINE,
224 MIC_SHUTTING_DOWN, 215 MIC_SHUTTING_DOWN,
216 MIC_RESETTING,
225 MIC_RESET_FAILED, 217 MIC_RESET_FAILED,
226 MIC_SUSPENDING,
227 MIC_SUSPENDED,
228 MIC_LAST 218 MIC_LAST
229}; 219};
230 220
diff --git a/include/uapi/linux/scif_ioctl.h b/include/uapi/linux/scif_ioctl.h
index 4a94d917cf99..d9048918be52 100644
--- a/include/uapi/linux/scif_ioctl.h
+++ b/include/uapi/linux/scif_ioctl.h
@@ -107,6 +107,82 @@ struct scifioctl_msg {
 };
 
 /**
+ * struct scifioctl_reg - used for SCIF_REG IOCTL
+ * @addr: starting virtual address
+ * @len: length of range
+ * @offset: offset of window
+ * @prot: read/write protection
+ * @flags: flags
+ * @out_offset: offset returned
+ */
+struct scifioctl_reg {
+	__u64 addr;
+	__u64 len;
+	__s64 offset;
+	__s32 prot;
+	__s32 flags;
+	__s64 out_offset;
+};
+
+/**
+ * struct scifioctl_unreg - used for SCIF_UNREG IOCTL
+ * @offset: start of range to unregister
+ * @len: length of range to unregister
+ */
+struct scifioctl_unreg {
+	__s64 offset;
+	__u64 len;
+};
+
+/**
+ * struct scifioctl_copy - used for SCIF DMA copy IOCTLs
+ *
+ * @loffset: offset in local registered address space to/from
+ *	which to copy
+ * @len: length of range to copy
+ * @roffset: offset in remote registered address space to/from
+ *	which to copy
+ * @addr: user virtual address to/from which to copy
+ * @flags: flags
+ *
+ * This structure is used for SCIF_READFROM, SCIF_WRITETO, SCIF_VREADFROM
+ * and SCIF_VWRITETO IOCTLs.
+ */
+struct scifioctl_copy {
+	__s64 loffset;
+	__u64 len;
+	__s64 roffset;
+	__u64 addr;
+	__s32 flags;
+};
+
+/**
+ * struct scifioctl_fence_mark - used for SCIF_FENCE_MARK IOCTL
+ * @flags: flags
+ * @mark: fence handle which is a pointer to a __s32
+ */
+struct scifioctl_fence_mark {
+	__s32 flags;
+	__u64 mark;
+};
+
+/**
+ * struct scifioctl_fence_signal - used for SCIF_FENCE_SIGNAL IOCTL
+ * @loff: local offset
+ * @lval: value to write to loffset
+ * @roff: remote offset
+ * @rval: value to write to roffset
+ * @flags: flags
+ */
+struct scifioctl_fence_signal {
+	__s64 loff;
+	__u64 lval;
+	__s64 roff;
+	__u64 rval;
+	__s32 flags;
+};
+
+/**
  * struct scifioctl_node_ids - used for SCIF_GET_NODEIDS IOCTL
  * @nodes: pointer to an array of node_ids
  * @self: ID of the current node
@@ -125,6 +201,15 @@ struct scifioctl_node_ids {
 #define SCIF_ACCEPTREG		_IOWR('s', 5, __u64)
 #define SCIF_SEND		_IOWR('s', 6, struct scifioctl_msg)
 #define SCIF_RECV		_IOWR('s', 7, struct scifioctl_msg)
+#define SCIF_REG		_IOWR('s', 8, struct scifioctl_reg)
+#define SCIF_UNREG		_IOWR('s', 9, struct scifioctl_unreg)
+#define SCIF_READFROM		_IOWR('s', 10, struct scifioctl_copy)
+#define SCIF_WRITETO		_IOWR('s', 11, struct scifioctl_copy)
+#define SCIF_VREADFROM		_IOWR('s', 12, struct scifioctl_copy)
+#define SCIF_VWRITETO		_IOWR('s', 13, struct scifioctl_copy)
 #define SCIF_GET_NODEIDS	_IOWR('s', 14, struct scifioctl_node_ids)
+#define SCIF_FENCE_MARK		_IOWR('s', 15, struct scifioctl_fence_mark)
+#define SCIF_FENCE_WAIT		_IOWR('s', 16, __s32)
+#define SCIF_FENCE_SIGNAL	_IOWR('s', 17, struct scifioctl_fence_signal)
 
 #endif /* SCIF_IOCTL_H */
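Taken together, the new structures and ioctl numbers expose SCIF registered-memory windows, DMA copies and fences to userspace. A hedged sketch of how SCIF_REG and SCIF_READFROM might be driven against an already-connected endpoint descriptor; endpoint setup and the protection-bit values are assumptions, not part of this patch:

/*
 * Minimal sketch: register a buffer at window offset loff, then
 * DMA-read len bytes from the peer's registered space at roff.
 * "fd" must be a connected SCIF endpoint (setup not shown); the
 * MY_SCIF_PROT_* values are assumed, mirroring PROT_READ/PROT_WRITE.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/scif_ioctl.h>

#define MY_SCIF_PROT_READ	1	/* assumed protection bits */
#define MY_SCIF_PROT_WRITE	2

static int scif_reg_and_read(int fd, void *buf, __u64 len,
			     __s64 loff, __s64 roff)
{
	struct scifioctl_reg reg;
	struct scifioctl_copy copy;

	memset(&reg, 0, sizeof(reg));
	reg.addr = (__u64)(unsigned long)buf;	/* starting virtual address */
	reg.len = len;				/* length of range */
	reg.offset = loff;			/* requested window offset */
	reg.prot = MY_SCIF_PROT_READ | MY_SCIF_PROT_WRITE;
	if (ioctl(fd, SCIF_REG, &reg) < 0) {
		perror("SCIF_REG");
		return -1;
	}

	memset(&copy, 0, sizeof(copy));
	copy.loffset = reg.out_offset;	/* window offset returned above */
	copy.len = len;
	copy.roffset = roff;		/* peer's registered offset */
	if (ioctl(fd, SCIF_READFROM, &copy) < 0) {
		perror("SCIF_READFROM");
		return -1;
	}
	return 0;
}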
diff --git a/include/uapi/linux/stm.h b/include/uapi/linux/stm.h
new file mode 100644
index 000000000000..626a8d3f63b5
--- /dev/null
+++ b/include/uapi/linux/stm.h
@@ -0,0 +1,50 @@
+/*
+ * System Trace Module (STM) userspace interfaces
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * STM class implements generic infrastructure for System Trace Module devices
+ * as defined in MIPI STPv2 specification.
+ */
+
+#ifndef _UAPI_LINUX_STM_H
+#define _UAPI_LINUX_STM_H
+
+#include <linux/types.h>
+
+/**
+ * struct stp_policy_id - identification for the STP policy
+ * @size: size of the structure including real id[] length
+ * @master: assigned master
+ * @channel: first assigned channel
+ * @width: number of requested channels
+ * @id: identification string
+ *
+ * User must calculate the total size of the structure and put it into
+ * @size field, fill out the @id and desired @width. In return, kernel
+ * fills out @master, @channel and @width.
+ */
+struct stp_policy_id {
+	__u32 size;
+	__u16 master;
+	__u16 channel;
+	__u16 width;
+	/* padding */
+	__u16 __reserved_0;
+	__u32 __reserved_1;
+	char id[0];
+};
+
+#define STP_POLICY_ID_SET	_IOWR('%', 0, struct stp_policy_id)
+#define STP_POLICY_ID_GET	_IOR('%', 1, struct stp_policy_id)
+#define STP_SET_OPTIONS		_IOW('%', 2, __u64)
+
+#endif /* _UAPI_LINUX_STM_H */
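The stp_policy_id kernel-doc spells out the contract: userspace sizes the structure to cover the id[] string, fills in @id and the desired @width, and the kernel returns the assigned @master and @channel. A minimal userspace sketch of that flow; the /dev/stm0 node name and policy-node string are assumptions for illustration:

/*
 * Sketch of the stp_policy_id size/id/width contract described above.
 * The device path and the policy-node id are hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/stm.h>

int main(void)
{
	const char *id = "my_trace_source";	/* hypothetical policy node */
	size_t sz = sizeof(struct stp_policy_id) + strlen(id) + 1;
	struct stp_policy_id *pid = calloc(1, sz);
	int fd;

	if (!pid)
		return 1;
	pid->size = sz;		/* total size including id[] */
	pid->width = 1;		/* request one channel */
	strcpy(pid->id, id);

	fd = open("/dev/stm0", O_RDWR);	/* assumed device node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, STP_POLICY_ID_SET, pid) == 0)
		printf("master %u, first channel %u, width %u\n",
		       pid->master, pid->channel, pid->width);
	close(fd);
	free(pid);
	return 0;
}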
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index e70fcd12eeeb..5a6edacc85d9 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -185,6 +185,7 @@ int main(void)
 	DEVID(mei_cl_device_id);
 	DEVID_FIELD(mei_cl_device_id, name);
 	DEVID_FIELD(mei_cl_device_id, uuid);
+	DEVID_FIELD(mei_cl_device_id, version);
 
 	DEVID(rio_device_id);
 	DEVID_FIELD(rio_device_id, did);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 5f2088209132..9bc2cfe0ee37 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -137,10 +137,12 @@ static inline void add_wildcard(char *str)
 static inline void add_uuid(char *str, uuid_le uuid)
 {
 	int len = strlen(str);
-	int i;
 
-	for (i = 0; i < 16; i++)
-		sprintf(str + len + (i << 1), "%02x", uuid.b[i]);
+	sprintf(str + len, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+		uuid.b[3], uuid.b[2], uuid.b[1], uuid.b[0],
+		uuid.b[5], uuid.b[4], uuid.b[7], uuid.b[6],
+		uuid.b[8], uuid.b[9], uuid.b[10], uuid.b[11],
+		uuid.b[12], uuid.b[13], uuid.b[14], uuid.b[15]);
 }
 
 /**
@@ -1200,16 +1202,18 @@ static int do_cpu_entry(const char *filename, void *symval, char *alias)
 }
 ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
 
-/* Looks like: mei:S:uuid */
+/* Looks like: mei:S:uuid:N:* */
 static int do_mei_entry(const char *filename, void *symval,
 			char *alias)
 {
 	DEF_FIELD_ADDR(symval, mei_cl_device_id, name);
 	DEF_FIELD_ADDR(symval, mei_cl_device_id, uuid);
+	DEF_FIELD(symval, mei_cl_device_id, version);
 
 	sprintf(alias, MEI_CL_MODULE_PREFIX);
 	sprintf(alias + strlen(alias), "%s:", (*name)[0] ? *name : "*");
 	add_uuid(alias, *uuid);
+	ADD(alias, ":", version != MEI_CL_VERSION_ANY, version);
 
 	strcat(alias, ":*");
 
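With the version field appended, MEI aliases now match the comment's mei:S:uuid:N:* shape, and add_uuid() emits the UUID in canonical grouped form, byte-swapping the first three little-endian fields. A standalone snippet reproducing just that formatting step, with example bytes of this author's choosing:

/*
 * Standalone illustration of the new add_uuid() formatting: a
 * little-endian 16-byte array printed in 8-4-4-4-12 grouping, with
 * the first three fields byte-swapped. Not the modpost code itself,
 * just the same sprintf logic extracted for demonstration.
 */
#include <stdio.h>

int main(void)
{
	unsigned char b[16] = {
		0x78, 0x56, 0x34, 0x12,	/* stored little-endian */
		0xcd, 0xab, 0x01, 0xef,
		0x10, 0x32, 0x54, 0x76, 0x98, 0xba, 0xdc, 0xfe
	};
	char str[64];

	sprintf(str, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		b[3], b[2], b[1], b[0],
		b[5], b[4], b[7], b[6],
		b[8], b[9], b[10], b[11],
		b[12], b[13], b[14], b[15]);
	puts(str);	/* prints 12345678-abcd-ef01-1032-547698badcfe */
	return 0;
}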
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 7de36df4eaa5..024a11ac8b97 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -11,47 +11,95 @@ echo ' '
 uname -a
 echo ' '
 
-gcc -dumpversion 2>&1| awk \
-'NR==1{print "Gnu C ", $1}'
-
-make --version 2>&1 | awk -F, '{print $1}' | awk \
-'/GNU Make/{print "Gnu make ",$NF}'
-
-echo "binutils $(ld -v | egrep -o '[0-9]+\.[0-9\.]+')"
-
-echo -n "util-linux "
-fdformat --version | awk '{print $NF}' | sed -e s/^util-linux-// -e s/\)$//
-
-echo -n "mount "
-mount --version | awk '{print $NF}' | sed -e s/^mount-// -e s/\)$//
-
-depmod -V 2>&1 | awk 'NR==1 {print "module-init-tools ",$NF}'
-
-tune2fs 2>&1 | grep "^tune2fs" | sed 's/,//' | awk \
-'NR==1 {print "e2fsprogs ", $2}'
-
-fsck.jfs -V 2>&1 | grep version | sed 's/,//' | awk \
-'NR==1 {print "jfsutils ", $3}'
-
-reiserfsck -V 2>&1 | grep ^reiserfsck | awk \
-'NR==1{print "reiserfsprogs ", $2}'
+gcc -dumpversion 2>&1 |
+awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("GNU C\t\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+make --version 2>&1 |
+awk '/GNU Make/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("GNU Make\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+ld -v 2>&1 |
+awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Binutils\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+mount --version 2>&1 |
+awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	$0 = substr($0,RSTART,RLENGTH)
+	printf("Util-linux\t\t%s\nMount\t\t\t%s\n",$0,$0)
+}'
+
+depmod -V 2>&1 |
+awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Module-init-tools\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+tune2fs 2>&1 |
+awk '/^tune2fs/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("E2fsprogs\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+fsck.jfs -V 2>&1 |
+awk '/version/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Jfsutils\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+reiserfsck -V 2>&1 |
+awk '/^reiserfsck/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Reiserfsprogs\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
 
 fsck.reiser4 -V 2>&1 | grep ^fsck.reiser4 | awk \
 'NR==1{print "reiser4progs ", $2}'
 
-xfs_db -V 2>&1 | grep version | awk \
-'NR==1{print "xfsprogs ", $3}'
+xfs_db -V 2>&1 |
+awk '/version/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Xfsprogs\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
 
-pccardctl -V 2>&1| grep pcmciautils | awk '{print "pcmciautils ", $2}'
+pccardctl -V 2>&1 |
+awk '/pcmciautils/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Pcmciautils\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
 
 cardmgr -V 2>&1| grep version | awk \
 'NR==1{print "pcmcia-cs ", $3}'
 
-quota -V 2>&1 | grep version | awk \
-'NR==1{print "quota-tools ", $NF}'
+quota -V 2>&1 |
+awk '/version/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Quota-tools\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
 
-pppd --version 2>&1| grep version | awk \
-'NR==1{print "PPP ", $3}'
+pppd --version 2>&1 |
+awk '/version/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("PPP\t\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
 
 isdnctrl 2>&1 | grep version | awk \
 'NR==1{print "isdn4k-utils ", $NF}'
@@ -59,40 +107,87 @@ isdnctrl 2>&1 | grep version | awk \
 showmount --version 2>&1 | grep nfs-utils | awk \
 'NR==1{print "nfs-utils ", $NF}'
 
-echo -n "Linux C Library "
-sed -n -e '/^.*\/libc-\([^/]*\)\.so$/{s//\1/;p;q}' < /proc/self/maps
-
-ldd -v > /dev/null 2>&1 && ldd -v || ldd --version |head -n 1 | awk \
-'NR==1{print "Dynamic linker (ldd) ", $NF}'
-
-ls -l /usr/lib/libg++.so /usr/lib/libstdc++.so 2>/dev/null | awk -F. \
-	'{print "Linux C++ Library " $4"."$5"."$6}'
-
-ps --version 2>&1 | grep version | awk \
-'NR==1{print "Procps ", $NF}'
-
-ifconfig --version 2>&1 | grep tools | awk \
-'NR==1{print "Net-tools ", $NF}'
-
-# Kbd needs 'loadkeys -h',
-loadkeys -h 2>&1 | awk \
-'(NR==1 && ($3 !~ /option/)) {print "Kbd ", $3}'
-
-# while console-tools needs 'loadkeys -V'.
-loadkeys -V 2>&1 | awk \
-'(NR==1 && ($2 ~ /console-tools/)) {print "Console-tools ", $3}'
+test -r /proc/self/maps &&
+sed '
+	/.*libc-\(.*\)\.so$/!d
+	s//Linux C Library\t\t\1/
+	q
+' /proc/self/maps
+
+ldd --version 2>&1 |
+awk '/^ldd/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Dynamic linker (ldd)\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+libcpp=`ldconfig -p 2>/dev/null |
+	awk '/(libg|stdc)[+]+\.so/ {
+		print $NF
+		exit
+	}
+'`
+test -r "$libcpp" &&
+ls -l $libcpp |
+sed '
+	s!.*so\.!!
+	s!^!Linux C++ Library\t!
+'
+ps --version 2>&1 |
+awk '/version/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Procps\t\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+ifconfig --version 2>&1 |
+awk '/tools/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Net-tools\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+loadkeys -V 2>&1 |
+awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	$0 = substr($0,RSTART,RLENGTH)
+	printf("Kbd\t\t\t%s\nConsole-tools\t\t%s\n",$0,$0)
+}'
 
 oprofiled --version 2>&1 | awk \
 '(NR==1 && ($2 == "oprofile")) {print "oprofile ", $3}'
 
-expr --v 2>&1 | awk 'NR==1{print "Sh-utils ", $NF}'
-
-udevinfo -V 2>&1 | grep version | awk '{print "udev ", $3}'
-
-iwconfig --version 2>&1 | awk \
-'(NR==1 && ($3 == "version")) {print "wireless-tools ",$4}'
-
-if [ -e /proc/modules ]; then
-	X=`cat /proc/modules | sed -e "s/ .*$//"`
-	echo "Modules Loaded "$X
-fi
+expr --v 2>&1 |
+awk '/^expr/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Sh-utils\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+udevadm --version 2>&1 |
+awk '/[0-9]+([.]?[0-9]+)+/ && !/not found$/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Udev\t\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+iwconfig --version 2>&1 |
+awk '/version/{
+	match($0, /[0-9]+([.]?[0-9]+)+/)
+	printf("Wireless-tools\t\t%s\n",
+		substr($0,RSTART,RLENGTH))
+}'
+
+test -e /proc/modules &&
+sort /proc/modules |
+sed '
+	s/ .*//
+	H
+${
+	g
+	s/^\n/Modules Loaded\t\t/
+	y/\n/ /
+	q
+}
+	d
+'
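The closing sed pipeline sorts /proc/modules, keeps only the module-name field of each line, and folds the names onto a single "Modules Loaded" line. For comparison, an equivalent C sketch (sorting omitted), under the assumption that /proc/modules is readable:

/*
 * C equivalent of the sed pipeline above: read /proc/modules, keep
 * the first whitespace-delimited field of each line (the module
 * name), and print all names on one "Modules Loaded" line. Unlike
 * the script, this version does not sort its input.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/modules", "r");
	char line[256];
	int first = 1;

	if (!f)
		return 0;	/* like the script: silent when absent */
	printf("Modules Loaded\t\t");
	while (fgets(line, sizeof(line), f)) {
		line[strcspn(line, " \n")] = '\0';	/* s/ .*// */
		printf(first ? "%s" : " %s", line);
		first = 0;
	}
	putchar('\n');
	fclose(f);
	return 0;
}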