aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-06-26 17:51:15 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-06-26 17:51:15 -0400
commitd87823813fe498fdd47894bd28e460a9dee8d771 (patch)
tree214eaf3babd0d61f08022fc1edd99a5128616548
parente382608254e06c8109f40044f5e693f2e04f3899 (diff)
parent3dc196eae1db548f05e53e5875ff87b8ff79f249 (diff)
Merge tag 'char-misc-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH: "Here's the big char/misc driver pull request for 4.2-rc1. Lots of mei, extcon, coresight, uio, mic, and other driver updates in here. Full details in the shortlog. All of these have been in linux-next for some time with no reported problems" * tag 'char-misc-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (176 commits) mei: me: wait for power gating exit confirmation mei: reset flow control on the last client disconnection MAINTAINERS: mei: add mei_cl_bus.h to maintained file list misc: sram: sort and clean up included headers misc: sram: move reserved block logic out of probe function misc: sram: add private struct device and virt_base members misc: sram: report correct SRAM pool size misc: sram: bump error message level on unclean driver unbinding misc: sram: fix device node reference leak on error misc: sram: fix enabled clock leak on error path misc: mic: Fix reported static checker warning misc: mic: Fix randconfig build error by including errno.h uio: pruss: Drop depends on ARCH_DAVINCI_DA850 from config uio: pruss: Add CONFIG_HAS_IOMEM dependence uio: pruss: Include <linux/sizes.h> extcon: Redefine the unique id of supported external connectors without 'enum extcon' type char:xilinx_hwicap:buffer_icap - change 1/0 to true/false for bool type variable in function buffer_icap_set_configuration(). Drivers: hv: vmbus: Allocate ring buffer memory in NUMA aware fashion parport: check exclusive access before register w1: use correct lock on error in w1_seq_show() ...
-rw-r--r--Documentation/ABI/stable/sysfs-bus-w111
-rw-r--r--Documentation/ABI/stable/sysfs-driver-w1_ds28ea006
-rw-r--r--Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x450
-rw-r--r--Documentation/ABI/testing/sysfs-bus-mei16
-rw-r--r--Documentation/devicetree/bindings/arm/coresight.txt12
-rw-r--r--Documentation/devicetree/bindings/mfd/arizona.txt6
-rw-r--r--Documentation/mic/mic_overview.txt28
-rwxr-xr-xDocumentation/mic/mpssd/mpss24
-rw-r--r--Documentation/mic/scif_overview.txt98
-rw-r--r--Documentation/w1/slaves/w1_therm11
-rw-r--r--Documentation/w1/w1.generic30
-rw-r--r--MAINTAINERS14
-rw-r--r--arch/um/os-Linux/drivers/ethertap_user.c2
-rw-r--r--arch/x86/Kconfig25
-rw-r--r--drivers/block/paride/paride.c57
-rw-r--r--drivers/block/paride/paride.h2
-rw-r--r--drivers/block/paride/pcd.c9
-rw-r--r--drivers/block/paride/pd.c12
-rw-r--r--drivers/block/paride/pf.c7
-rw-r--r--drivers/block/paride/pg.c8
-rw-r--r--drivers/block/paride/pt.c8
-rw-r--r--drivers/char/Kconfig8
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/misc.c23
-rw-r--r--drivers/char/msm_smd_pkt.c465
-rw-r--r--drivers/char/pcmcia/synclink_cs.c2
-rw-r--r--drivers/char/snsc.c4
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/char/xilinx_hwicap/buffer_icap.c6
-rw-r--r--drivers/char/xillybus/Kconfig2
-rw-r--r--drivers/extcon/Kconfig25
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-adc-jack.c15
-rw-r--r--drivers/extcon/extcon-arizona.c88
-rw-r--r--drivers/extcon/extcon-axp288.c381
-rw-r--r--drivers/extcon/extcon-gpio.c1
-rw-r--r--drivers/extcon/extcon-max14577.c60
-rw-r--r--drivers/extcon/extcon-max77693.c140
-rw-r--r--drivers/extcon/extcon-max77843.c89
-rw-r--r--drivers/extcon/extcon-max8997.c64
-rw-r--r--drivers/extcon/extcon-palmas.c23
-rw-r--r--drivers/extcon/extcon-rt8973a.c55
-rw-r--r--drivers/extcon/extcon-sm5502.c33
-rw-r--r--drivers/extcon/extcon-usb-gpio.c35
-rw-r--r--drivers/extcon/extcon.c316
-rw-r--r--drivers/hv/Makefile2
-rw-r--r--drivers/hv/channel.c27
-rw-r--r--drivers/hv/channel_mgmt.c156
-rw-r--r--drivers/hv/connection.c13
-rw-r--r--drivers/hv/hv_balloon.c4
-rw-r--r--drivers/hv/hv_fcopy.c287
-rw-r--r--drivers/hv/hv_kvp.c192
-rw-r--r--drivers/hv/hv_snapshot.c168
-rw-r--r--drivers/hv/hv_utils_transport.c276
-rw-r--r--drivers/hv/hv_utils_transport.h51
-rw-r--r--drivers/hv/hyperv_vmbus.h31
-rw-r--r--drivers/hv/vmbus_drv.c21
-rw-r--r--drivers/hwmon/Kconfig11
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c (renamed from drivers/char/i8k.c)156
-rw-r--r--drivers/hwtracing/coresight/Kconfig19
-rw-r--r--drivers/hwtracing/coresight/Makefile2
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c79
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c112
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c2702
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h391
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c61
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator-qcom.c215
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c71
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c60
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c2
-rw-r--r--drivers/i2c/busses/i2c-parport.c38
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/carma/Kconfig15
-rw-r--r--drivers/misc/carma/Makefile2
-rw-r--r--drivers/misc/carma/carma-fpga-program.c1182
-rw-r--r--drivers/misc/carma/carma-fpga.c1507
-rw-r--r--drivers/misc/mei/amthif.c28
-rw-r--r--drivers/misc/mei/bus.c150
-rw-r--r--drivers/misc/mei/client.c473
-rw-r--r--drivers/misc/mei/client.h114
-rw-r--r--drivers/misc/mei/debugfs.c15
-rw-r--r--drivers/misc/mei/hbm.c16
-rw-r--r--drivers/misc/mei/hw-me.c59
-rw-r--r--drivers/misc/mei/hw-txe.c33
-rw-r--r--drivers/misc/mei/init.c8
-rw-r--r--drivers/misc/mei/interrupt.c95
-rw-r--r--drivers/misc/mei/main.c57
-rw-r--r--drivers/misc/mei/mei_dev.h102
-rw-r--r--drivers/misc/mei/nfc.c223
-rw-r--r--drivers/misc/mei/pci-txe.c2
-rw-r--r--drivers/misc/mei/wd.c22
-rw-r--r--drivers/misc/mic/Kconfig40
-rw-r--r--drivers/misc/mic/Makefile3
-rw-r--r--drivers/misc/mic/bus/Makefile1
-rw-r--r--drivers/misc/mic/bus/scif_bus.c210
-rw-r--r--drivers/misc/mic/bus/scif_bus.h129
-rw-r--r--drivers/misc/mic/card/mic_device.c132
-rw-r--r--drivers/misc/mic/card/mic_device.h11
-rw-r--r--drivers/misc/mic/card/mic_x100.c61
-rw-r--r--drivers/misc/mic/card/mic_x100.h1
-rw-r--r--drivers/misc/mic/common/mic_dev.h3
-rw-r--r--drivers/misc/mic/host/mic_boot.c264
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c13
-rw-r--r--drivers/misc/mic/host/mic_device.h11
-rw-r--r--drivers/misc/mic/host/mic_intr.h3
-rw-r--r--drivers/misc/mic/host/mic_main.c6
-rw-r--r--drivers/misc/mic/host/mic_smpt.c7
-rw-r--r--drivers/misc/mic/host/mic_smpt.h1
-rw-r--r--drivers/misc/mic/host/mic_virtio.c6
-rw-r--r--drivers/misc/mic/host/mic_x100.c3
-rw-r--r--drivers/misc/mic/scif/Makefile15
-rw-r--r--drivers/misc/mic/scif/scif_api.c1276
-rw-r--r--drivers/misc/mic/scif/scif_debugfs.c85
-rw-r--r--drivers/misc/mic/scif/scif_epd.c353
-rw-r--r--drivers/misc/mic/scif/scif_epd.h160
-rw-r--r--drivers/misc/mic/scif/scif_fd.c303
-rw-r--r--drivers/misc/mic/scif/scif_main.c388
-rw-r--r--drivers/misc/mic/scif/scif_main.h254
-rw-r--r--drivers/misc/mic/scif/scif_map.h113
-rw-r--r--drivers/misc/mic/scif/scif_nm.c237
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c1312
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.h183
-rw-r--r--drivers/misc/mic/scif/scif_peer_bus.c124
-rw-r--r--drivers/misc/mic/scif/scif_peer_bus.h65
-rw-r--r--drivers/misc/mic/scif/scif_ports.c124
-rw-r--r--drivers/misc/mic/scif/scif_rb.c249
-rw-r--r--drivers/misc/mic/scif/scif_rb.h100
-rw-r--r--drivers/misc/sram.c137
-rw-r--r--drivers/misc/ti-st/st_kim.c3
-rw-r--r--drivers/nfc/mei_phy.c295
-rw-r--r--drivers/nfc/mei_phy.h38
-rw-r--r--drivers/nfc/microread/mei.c2
-rw-r--r--drivers/nfc/pn544/mei.c2
-rw-r--r--drivers/parport/parport_pc.c4
-rw-r--r--drivers/parport/procfs.c15
-rw-r--r--drivers/parport/share.c373
-rw-r--r--drivers/pcmcia/cs.c10
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c2
-rw-r--r--drivers/spmi/Kconfig1
-rw-r--r--drivers/staging/panel/panel.c14
-rw-r--r--drivers/uio/Kconfig2
-rw-r--r--drivers/uio/uio.c3
-rw-r--r--drivers/uio/uio_pruss.c1
-rw-r--r--drivers/usb/phy/phy-tahvo.c9
-rw-r--r--drivers/w1/masters/ds2482.c1
-rw-r--r--drivers/w1/slaves/w1_therm.c162
-rw-r--r--drivers/w1/w1.c17
-rw-r--r--include/dt-bindings/mfd/arizona.h4
-rw-r--r--include/linux/extcon.h134
-rw-r--r--include/linux/extcon/extcon-adc-jack.h5
-rw-r--r--include/linux/hyperv.h48
-rw-r--r--include/linux/mei_cl_bus.h38
-rw-r--r--include/linux/mfd/arizona/pdata.h3
-rw-r--r--include/linux/mfd/axp20x.h5
-rw-r--r--include/linux/mod_devicetable.h13
-rw-r--r--include/linux/parport.h43
-rw-r--r--include/linux/scif.h993
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/hyperv.h8
-rw-r--r--include/uapi/linux/mic_common.h12
-rw-r--r--include/uapi/linux/scif_ioctl.h130
-rw-r--r--lib/lz4/lz4_decompress.c12
-rwxr-xr-xscripts/checkkconfigsymbols.py34
-rw-r--r--scripts/mod/devicetable-offsets.c1
-rw-r--r--scripts/mod/file2alias.c21
-rw-r--r--tools/hv/hv_fcopy_daemon.c15
-rw-r--r--tools/hv/hv_kvp_daemon.c166
-rw-r--r--tools/hv/hv_vss_daemon.c149
172 files changed, 15485 insertions, 5653 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-w1 b/Documentation/ABI/stable/sysfs-bus-w1
new file mode 100644
index 000000000000..140d85b4ae92
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-w1
@@ -0,0 +1,11 @@
1What: /sys/bus/w1/devices/.../w1_master_timeout_us
2Date: April 2015
3Contact: Dmitry Khromov <dk@icelogic.net>
4Description: Bus scanning interval, microseconds component.
5 Some of 1-Wire devices commonly associated with physical access
6 control systems are attached/generate presence for as short as
7 100 ms - hence the tens-to-hundreds milliseconds scan intervals
8 are required.
9 see Documentation/w1/w1.generic for detailed information.
10Users: any user space application which wants to know bus scanning
11 interval
diff --git a/Documentation/ABI/stable/sysfs-driver-w1_ds28ea00 b/Documentation/ABI/stable/sysfs-driver-w1_ds28ea00
new file mode 100644
index 000000000000..e928def14f28
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-w1_ds28ea00
@@ -0,0 +1,6 @@
1What: /sys/bus/w1/devices/.../w1_seq
2Date: Apr 2015
3Contact: Matt Campbell <mattrcampbell@gmail.com>
4Description: Support for the DS28EA00 chain sequence function
5 see Documentation/w1/slaves/w1_therm for detailed information
6Users: any user space application which wants to communicate with DS28EA00
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
new file mode 100644
index 000000000000..2fe2e3dae487
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
@@ -0,0 +1,450 @@
1What: /sys/bus/coresight/devices/<memory_map>.etm/enable_source
2Date: April 2015
3KernelVersion: 4.01
4Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
5Description:	(RW) Enable/disable tracing on this specific trace entity.
6		Enabling a source implies the source has been configured
7		properly and a sink has been identified for it. The path
8 of coresight components linking the source to the sink is
9 configured and managed automatically by the coresight framework.
10
11What: /sys/bus/coresight/devices/<memory_map>.etm/cpu
12Date: April 2015
13KernelVersion: 4.01
14Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
15Description: (R) The CPU this tracing entity is associated with.
16
17What: /sys/bus/coresight/devices/<memory_map>.etm/nr_pe_cmp
18Date: April 2015
19KernelVersion: 4.01
20Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
21Description: (R) Indicates the number of PE comparator inputs that are
22 available for tracing.
23
24What: /sys/bus/coresight/devices/<memory_map>.etm/nr_addr_cmp
25Date: April 2015
26KernelVersion: 4.01
27Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
28Description: (R) Indicates the number of address comparator pairs that are
29 available for tracing.
30
31What: /sys/bus/coresight/devices/<memory_map>.etm/nr_cntr
32Date: April 2015
33KernelVersion: 4.01
34Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
35Description: (R) Indicates the number of counters that are available for
36 tracing.
37
38What: /sys/bus/coresight/devices/<memory_map>.etm/nr_ext_inp
39Date: April 2015
40KernelVersion: 4.01
41Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
42Description: (R) Indicates how many external inputs are implemented.
43
44What: /sys/bus/coresight/devices/<memory_map>.etm/numcidc
45Date: April 2015
46KernelVersion: 4.01
47Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
48Description: (R) Indicates the number of Context ID comparators that are
49 available for tracing.
50
51What: /sys/bus/coresight/devices/<memory_map>.etm/numvmidc
52Date: April 2015
53KernelVersion: 4.01
54Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
55Description: (R) Indicates the number of VMID comparators that are available
56 for tracing.
57
58What: /sys/bus/coresight/devices/<memory_map>.etm/nrseqstate
59Date: April 2015
60KernelVersion: 4.01
61Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
62Description: (R) Indicates the number of sequencer states that are
63 implemented.
64
65What: /sys/bus/coresight/devices/<memory_map>.etm/nr_resource
66Date: April 2015
67KernelVersion: 4.01
68Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
69Description: (R) Indicates the number of resource selection pairs that are
70 available for tracing.
71
72What: /sys/bus/coresight/devices/<memory_map>.etm/nr_ss_cmp
73Date: April 2015
74KernelVersion: 4.01
75Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
76Description: (R) Indicates the number of single-shot comparator controls that
77 are available for tracing.
78
79What: /sys/bus/coresight/devices/<memory_map>.etm/reset
80Date: April 2015
81KernelVersion: 4.01
82Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
83Description: (W) Cancels all configuration on a trace unit and set it back
84 to its boot configuration.
85
86What: /sys/bus/coresight/devices/<memory_map>.etm/mode
87Date: April 2015
88KernelVersion: 4.01
89Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
90Description: (RW) Controls various modes supported by this ETM, for example
91 P0 instruction tracing, branch broadcast, cycle counting and
92 context ID tracing.
93
94What: /sys/bus/coresight/devices/<memory_map>.etm/pe
95Date: April 2015
96KernelVersion: 4.01
97Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
98Description: (RW) Controls which PE to trace.
99
100What: /sys/bus/coresight/devices/<memory_map>.etm/event
101Date: April 2015
102KernelVersion: 4.01
103Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
104Description: (RW) Controls the tracing of arbitrary events from bank 0 to 3.
105
106What: /sys/bus/coresight/devices/<memory_map>.etm/event_instren
107Date: April 2015
108KernelVersion: 4.01
109Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
110Description: (RW) Controls the behavior of the events in bank 0 to 3.
111
112What: /sys/bus/coresight/devices/<memory_map>.etm/event_ts
113Date: April 2015
114KernelVersion: 4.01
115Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
116Description: (RW) Controls the insertion of global timestamps in the trace
117 streams.
118
119What: /sys/bus/coresight/devices/<memory_map>.etm/syncfreq
120Date: April 2015
121KernelVersion: 4.01
122Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
123Description: (RW) Controls how often trace synchronization requests occur.
124
125What: /sys/bus/coresight/devices/<memory_map>.etm/cyc_threshold
126Date: April 2015
127KernelVersion: 4.01
128Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
129Description: (RW) Sets the threshold value for cycle counting.
130
131What: /sys/bus/coresight/devices/<memory_map>.etm/bb_ctrl
132Date: April 2015
133KernelVersion: 4.01
134Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
135Description: (RW) Controls which regions in the memory map are enabled to
136 use branch broadcasting.
137
138What: /sys/bus/coresight/devices/<memory_map>.etm/event_vinst
139Date: April 2015
140KernelVersion: 4.01
141Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
142Description: (RW) Controls instruction trace filtering.
143
144What: /sys/bus/coresight/devices/<memory_map>.etm/s_exlevel_vinst
145Date: April 2015
146KernelVersion: 4.01
147Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
148Description: (RW) In Secure state, each bit controls whether instruction
149 tracing is enabled for the corresponding exception level.
150
151What: /sys/bus/coresight/devices/<memory_map>.etm/ns_exlevel_vinst
152Date: April 2015
153KernelVersion: 4.01
154Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
155Description: (RW) In non-secure state, each bit controls whether instruction
156 tracing is enabled for the corresponding exception level.
157
158What: /sys/bus/coresight/devices/<memory_map>.etm/addr_idx
159Date: April 2015
160KernelVersion: 4.01
161Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
162Description: (RW) Select which address comparator or pair (of comparators) to
163 work with.
164
165What: /sys/bus/coresight/devices/<memory_map>.etm/addr_instdatatype
166Date: April 2015
167KernelVersion: 4.01
168Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
169Description: (RW) Controls what type of comparison the trace unit performs.
170
171What: /sys/bus/coresight/devices/<memory_map>.etm/addr_single
172Date: April 2015
173KernelVersion: 4.01
174Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
175Description: (RW) Used to setup single address comparator values.
176
177What: /sys/bus/coresight/devices/<memory_map>.etm/addr_range
178Date: April 2015
179KernelVersion: 4.01
180Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
181Description: (RW) Used to setup address range comparator values.
182
183What: /sys/bus/coresight/devices/<memory_map>.etm/seq_idx
184Date: April 2015
185KernelVersion: 4.01
186Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
187Description:	(RW) Select which sequencer.
188
189What: /sys/bus/coresight/devices/<memory_map>.etm/seq_state
190Date: April 2015
191KernelVersion: 4.01
192Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
193Description: (RW) Use this to set, or read, the sequencer state.
194
195What: /sys/bus/coresight/devices/<memory_map>.etm/seq_event
196Date: April 2015
197KernelVersion: 4.01
198Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
199Description: (RW) Moves the sequencer state to a specific state.
200
201What: /sys/bus/coresight/devices/<memory_map>.etm/seq_reset_event
202Date: April 2015
203KernelVersion: 4.01
204Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
205Description: (RW) Moves the sequencer to state 0 when a programmed event
206 occurs.
207
208What: /sys/bus/coresight/devices/<memory_map>.etm/cntr_idx
209Date: April 2015
210KernelVersion: 4.01
211Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
212Description: (RW) Select which counter unit to work with.
213
214What: /sys/bus/coresight/devices/<memory_map>.etm/cntrldvr
215Date: April 2015
216KernelVersion: 4.01
217Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
218Description: (RW) This sets or returns the reload count value of the
219 specific counter.
220
221What: /sys/bus/coresight/devices/<memory_map>.etm/cntr_val
222Date: April 2015
223KernelVersion: 4.01
224Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
225Description: (RW) This sets or returns the current count value of the
226 specific counter.
227
228What: /sys/bus/coresight/devices/<memory_map>.etm/cntr_ctrl
229Date: April 2015
230KernelVersion: 4.01
231Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
232Description: (RW) Controls the operation of the selected counter.
233
234What: /sys/bus/coresight/devices/<memory_map>.etm/res_idx
235Date: April 2015
236KernelVersion: 4.01
237Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
238Description: (RW) Select which resource selection unit to work with.
239
240What: /sys/bus/coresight/devices/<memory_map>.etm/res_ctrl
241Date: April 2015
242KernelVersion: 4.01
243Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
244Description: (RW) Controls the selection of the resources in the trace unit.
245
246What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_idx
247Date: April 2015
248KernelVersion: 4.01
249Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
250Description: (RW) Select which context ID comparator to work with.
251
252What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_val
253Date: April 2015
254KernelVersion: 4.01
255Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
256Description: (RW) Get/Set the context ID comparator value to trigger on.
257
258What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_masks
259Date: April 2015
260KernelVersion: 4.01
261Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
262Description: (RW) Mask for all 8 context ID comparator value
263 registers (if implemented).
264
265What: /sys/bus/coresight/devices/<memory_map>.etm/vmid_idx
266Date: April 2015
267KernelVersion: 4.01
268Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
269Description: (RW) Select which virtual machine ID comparator to work with.
270
271What: /sys/bus/coresight/devices/<memory_map>.etm/vmid_val
272Date: April 2015
273KernelVersion: 4.01
274Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
275Description: (RW) Get/Set the virtual machine ID comparator value to
276 trigger on.
277
278What: /sys/bus/coresight/devices/<memory_map>.etm/vmid_masks
279Date: April 2015
280KernelVersion: 4.01
281Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
282Description: (RW) Mask for all 8 virtual machine ID comparator value
283 registers (if implemented).
284
285What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcoslsr
286Date: April 2015
287KernelVersion: 4.01
288Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
289Description: (R) Print the content of the OS Lock Status Register (0x304).
290		The value is taken directly from the HW.
291
292What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpdcr
293Date: April 2015
294KernelVersion: 4.01
295Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
296Description: (R) Print the content of the Power Down Control Register
297 (0x310). The value is taken directly from the HW.
298
299What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpdsr
300Date: April 2015
301KernelVersion: 4.01
302Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
303Description: (R) Print the content of the Power Down Status Register
304 (0x314). The value is taken directly from the HW.
305
306What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trclsr
307Date: April 2015
308KernelVersion: 4.01
309Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
310Description: (R) Print the content of the SW Lock Status Register
311 (0xFB4). The value is taken directly from the HW.
312
313What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcauthstatus
314Date: April 2015
315KernelVersion: 4.01
316Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
317Description: (R) Print the content of the Authentication Status Register
318 (0xFB8). The value is taken directly from the HW.
319
320What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcdevid
321Date: April 2015
322KernelVersion: 4.01
323Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
324Description: (R) Print the content of the Device ID Register
325 (0xFC8). The value is taken directly from the HW.
326
327What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcdevtype
328Date: April 2015
329KernelVersion: 4.01
330Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
331Description: (R) Print the content of the Device Type Register
332 (0xFCC). The value is taken directly from the HW.
333
334What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr0
335Date: April 2015
336KernelVersion: 4.01
337Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
338Description: (R) Print the content of the Peripheral ID0 Register
339 (0xFE0). The value is taken directly from the HW.
340
341What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr1
342Date: April 2015
343KernelVersion: 4.01
344Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
345Description: (R) Print the content of the Peripheral ID1 Register
346 (0xFE4). The value is taken directly from the HW.
347
348What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr2
349Date: April 2015
350KernelVersion: 4.01
351Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
352Description: (R) Print the content of the Peripheral ID2 Register
353 (0xFE8). The value is taken directly from the HW.
354
355What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr3
356Date: April 2015
357KernelVersion: 4.01
358Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
359Description: (R) Print the content of the Peripheral ID3 Register
360 (0xFEC). The value is taken directly from the HW.
361
362What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr0
363Date: April 2015
364KernelVersion: 4.01
365Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
366Description: (R) Returns the tracing capabilities of the trace unit (0x1E0).
367 The value is taken directly from the HW.
368
369What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr1
370Date: April 2015
371KernelVersion: 4.01
372Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
373Description: (R) Returns the tracing capabilities of the trace unit (0x1E4).
374 The value is taken directly from the HW.
375
376What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr2
377Date: April 2015
378KernelVersion: 4.01
379Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
380Description: (R) Returns the maximum size of the data value, data address,
381		VMID, context ID and instruction address in the trace unit
382 (0x1E8). The value is taken directly from the HW.
383
384What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr3
385Date: April 2015
386KernelVersion: 4.01
387Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
388Description: (R) Returns the value associated with various resources
389 available to the trace unit. See the Trace Macrocell
390 architecture specification for more details (0x1E8).
391 The value is taken directly from the HW.
392
393What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr4
394Date: April 2015
395KernelVersion: 4.01
396Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
397Description: (R) Returns how many resources the trace unit supports (0x1F0).
398 The value is taken directly from the HW.
399
400What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr5
401Date: April 2015
402KernelVersion: 4.01
403Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
404Description: (R) Returns how many resources the trace unit supports (0x1F4).
405 The value is taken directly from the HW.
406
407What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr8
408Date: April 2015
409KernelVersion: 4.01
410Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
411Description: (R) Returns the maximum speculation depth of the instruction
412 trace stream. (0x180). The value is taken directly from the HW.
413
414What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr9
415Date: April 2015
416KernelVersion: 4.01
417Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
418Description: (R) Returns the number of P0 right-hand keys that the trace unit
419 can use (0x184). The value is taken directly from the HW.
420
421What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr10
422Date: April 2015
423KernelVersion: 4.01
424Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
425Description: (R) Returns the number of P1 right-hand keys that the trace unit
426 can use (0x188). The value is taken directly from the HW.
427
428What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr11
429Date: April 2015
430KernelVersion: 4.01
431Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
432Description: (R) Returns the number of special P1 right-hand keys that the
433 trace unit can use (0x18C). The value is taken directly from
434 the HW.
435
436What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr12
437Date: April 2015
438KernelVersion: 4.01
439Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
440Description: (R) Returns the number of conditional P1 right-hand keys that
441 the trace unit can use (0x190). The value is taken directly
442 from the HW.
443
444What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr13
445Date: April 2015
446KernelVersion: 4.01
447Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
448Description: (R) Returns the number of special conditional P1 right-hand keys
449 that the trace unit can use (0x194). The value is taken
450 directly from the HW.
diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei
index 2066f0bbd453..20e4d1638bac 100644
--- a/Documentation/ABI/testing/sysfs-bus-mei
+++ b/Documentation/ABI/testing/sysfs-bus-mei
@@ -4,4 +4,18 @@ KernelVersion: 3.10
4Contact: Samuel Ortiz <sameo@linux.intel.com> 4Contact: Samuel Ortiz <sameo@linux.intel.com>
5 linux-mei@linux.intel.com 5 linux-mei@linux.intel.com
6Description: Stores the same MODALIAS value emitted by uevent 6Description: Stores the same MODALIAS value emitted by uevent
7 Format: mei:<mei device name> 7 Format: mei:<mei device name>:<device uuid>:
8
9What: /sys/bus/mei/devices/.../name
10Date: May 2015
11KernelVersion: 4.2
12Contact: Tomas Winkler <tomas.winkler@intel.com>
13Description: Stores mei client device name
14 Format: string
15
16What: /sys/bus/mei/devices/.../uuid
17Date: May 2015
18KernelVersion: 4.2
19Contact: Tomas Winkler <tomas.winkler@intel.com>
20Description: Stores mei client device uuid
21 Format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 88602b75418e..65a6db2271a2 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -17,15 +17,19 @@ its hardware characteristcs.
17 - "arm,coresight-tmc", "arm,primecell"; 17 - "arm,coresight-tmc", "arm,primecell";
18 - "arm,coresight-funnel", "arm,primecell"; 18 - "arm,coresight-funnel", "arm,primecell";
19 - "arm,coresight-etm3x", "arm,primecell"; 19 - "arm,coresight-etm3x", "arm,primecell";
20 - "qcom,coresight-replicator1x", "arm,primecell";
20 21
21 * reg: physical base address and length of the register 22 * reg: physical base address and length of the register
22 set(s) of the component. 23 set(s) of the component.
23 24
24 * clocks: the clock associated to this component. 25 * clocks: the clocks associated to this component.
25 26
26 * clock-names: the name of the clock as referenced by the code. 27 * clock-names: the name of the clocks referenced by the code.
27 Since we are using the AMBA framework, the name should be 28 Since we are using the AMBA framework, the name of the clock
28 "apb_pclk". 29 providing the interconnect should be "apb_pclk", and some
30 coresight blocks also have an additional clock "atclk", which
31 clocks the core of that coresight component. The latter clock
32 is optional.
29 33
30 * port or ports: The representation of the component's port 34 * port or ports: The representation of the component's port
31 layout using the generic DT graph presentation found in 35 layout using the generic DT graph presentation found in
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
index 64fa3b2de6cd..a8fee60dc20d 100644
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ b/Documentation/devicetree/bindings/mfd/arizona.txt
@@ -67,6 +67,12 @@ Optional properties:
67 present, the number of values should be less than or equal to the 67 present, the number of values should be less than or equal to the
68 number of inputs, unspecified inputs will use the chip default. 68 number of inputs, unspecified inputs will use the chip default.
69 69
70 - wlf,hpdet-channel : Headphone detection channel.
71 ARIZONA_ACCDET_MODE_HPL or 1 - Headphone detect mode is set to HPDETL
72 ARIZONA_ACCDET_MODE_HPR or 2 - Headphone detect mode is set to HPDETR
73 If this node is not mentioned or if the value is unknown, then
74 headphone detection mode is set to HPDETL.
75
70 - DCVDD-supply, MICVDD-supply : Power supplies, only need to be specified if 76 - DCVDD-supply, MICVDD-supply : Power supplies, only need to be specified if
71 they are being externally supplied. As covered in 77 they are being externally supplied. As covered in
72 Documentation/devicetree/bindings/regulator/regulator.txt 78 Documentation/devicetree/bindings/regulator/regulator.txt
diff --git a/Documentation/mic/mic_overview.txt b/Documentation/mic/mic_overview.txt
index 77c541802ad9..1a2f2c8ec59e 100644
--- a/Documentation/mic/mic_overview.txt
+++ b/Documentation/mic/mic_overview.txt
@@ -24,6 +24,10 @@ a virtual bus called mic bus is created and virtual dma devices are
24created on it by the host/card drivers. On host the channels are private 24created on it by the host/card drivers. On host the channels are private
25and used only by the host driver to transfer data for the virtio devices. 25and used only by the host driver to transfer data for the virtio devices.
26 26
27The Symmetric Communication Interface (SCIF (pronounced as skiff)) is a
28low level communications API across PCIe currently implemented for MIC.
29More details are available at scif_overview.txt.
30
27Here is a block diagram of the various components described above. The 31Here is a block diagram of the various components described above. The
28virtio backends are situated on the host rather than the card given better 32virtio backends are situated on the host rather than the card given better
29single threaded performance for the host compared to MIC, the ability of 33single threaded performance for the host compared to MIC, the ability of
@@ -47,18 +51,18 @@ the fact that the virtio block storage backend can only be on the host.
47 | | | Virtio over PCIe IOCTLs | 51 | | | Virtio over PCIe IOCTLs |
48 | | +--------------------------+ 52 | | +--------------------------+
49+-----------+ | | | +-----------+ 53+-----------+ | | | +-----------+
50| MIC DMA | | | | | MIC DMA | 54| MIC DMA | | +----------+ | +-----------+ | | MIC DMA |
51| Driver | | | | | Driver | 55| Driver | | | SCIF | | | SCIF | | | Driver |
52+-----------+ | | | +-----------+ 56+-----------+ | +----------+ | +-----------+ | +-----------+
53 | | | | | 57 | | | | | | |
54+---------------+ | | | +----------------+ 58+---------------+ | +-----+-----+ | +-----+-----+ | +---------------+
55|MIC virtual Bus| | | | |MIC virtual Bus | 59|MIC virtual Bus| | |SCIF HW Bus| | |SCIF HW BUS| | |MIC virtual Bus|
56+---------------+ | | | +----------------+ 60+---------------+ | +-----------+ | +-----+-----+ | +---------------+
57 | | | | | 61 | | | | | | |
58 | +--------------+ | +---------------+ | 62 | +--------------+ | | | +---------------+ |
59 | |Intel MIC | | |Intel MIC | | 63 | |Intel MIC | | | | |Intel MIC | |
60 +---|Card Driver | | |Host Driver | | 64 +---|Card Driver +----+ | | |Host Driver | |
61 +--------------+ | +---------------+-----+ 65 +--------------+ | +----+---------------+-----+
62 | | | 66 | | |
63 +-------------------------------------------------------------+ 67 +-------------------------------------------------------------+
64 | | 68 | |
diff --git a/Documentation/mic/mpssd/mpss b/Documentation/mic/mpssd/mpss
index cacbdb0aefb9..582aad4811ae 100755
--- a/Documentation/mic/mpssd/mpss
+++ b/Documentation/mic/mpssd/mpss
@@ -35,6 +35,7 @@
35 35
36exec=/usr/sbin/mpssd 36exec=/usr/sbin/mpssd
37sysfs="/sys/class/mic" 37sysfs="/sys/class/mic"
38mic_modules="mic_host mic_x100_dma scif"
38 39
39start() 40start()
40{ 41{
@@ -48,18 +49,15 @@ start()
48 fi 49 fi
49 50
50 echo -e $"Starting MPSS Stack" 51 echo -e $"Starting MPSS Stack"
51 echo -e $"Loading MIC_X100_DMA & MIC_HOST Modules" 52 echo -e $"Loading MIC drivers:" $mic_modules
52 53
53 for f in "mic_host" "mic_x100_dma" 54 modprobe -a $mic_modules
54 do 55 RETVAL=$?
55 modprobe $f 56 if [ $RETVAL -ne 0 ]; then
56 RETVAL=$? 57 failure
57 if [ $RETVAL -ne 0 ]; then 58 echo
58 failure 59 return $RETVAL
59 echo 60 fi
60 return $RETVAL
61 fi
62 done
63 61
64 # Start the daemon 62 # Start the daemon
65 echo -n $"Starting MPSSD " 63 echo -n $"Starting MPSSD "
@@ -170,8 +168,8 @@ unload()
170 stop 168 stop
171 169
172 sleep 5 170 sleep 5
173 echo -n $"Removing MIC_HOST & MIC_X100_DMA Modules: " 171 echo -n $"Removing MIC drivers:" $mic_modules
174 modprobe -r mic_host mic_x100_dma 172 modprobe -r $mic_modules
175 RETVAL=$? 173 RETVAL=$?
176 [ $RETVAL -ne 0 ] && failure || success 174 [ $RETVAL -ne 0 ] && failure || success
177 echo 175 echo
diff --git a/Documentation/mic/scif_overview.txt b/Documentation/mic/scif_overview.txt
new file mode 100644
index 000000000000..0a280d986731
--- /dev/null
+++ b/Documentation/mic/scif_overview.txt
@@ -0,0 +1,98 @@
1The Symmetric Communication Interface (SCIF (pronounced as skiff)) is a low
2level communications API across PCIe currently implemented for MIC. Currently
3SCIF provides inter-node communication within a single host platform, where a
4node is a MIC Coprocessor or Xeon based host. SCIF abstracts the details of
5communicating over the PCIe bus while providing an API that is symmetric
6across all the nodes in the PCIe network. An important design objective for SCIF
7is to deliver the maximum possible performance given the communication
8abilities of the hardware. SCIF has been used to implement an offload compiler
9runtime and OFED support for MPI implementations for MIC coprocessors.
10
11==== SCIF API Components ====
12The SCIF API has the following parts:
131. Connection establishment using a client server model
142. Byte stream messaging intended for short messages
153. Node enumeration to determine online nodes
164. Poll semantics for detection of incoming connections and messages
175. Memory registration to pin down pages
186. Remote memory mapping for low latency CPU accesses via mmap
197. Remote DMA (RDMA) for high bandwidth DMA transfers
208. Fence APIs for RDMA synchronization
21
22SCIF exposes the notion of a connection which can be used by peer processes on
23nodes in a SCIF PCIe "network" to share memory "windows" and to communicate. A
24process in a SCIF node initiates a SCIF connection to a peer process on a
25different node via a SCIF "endpoint". SCIF endpoints support messaging APIs
26which are similar to connection oriented socket APIs. Connected SCIF endpoints
27can also register local memory which is followed by data transfer using either
28DMA, CPU copies or remote memory mapping via mmap. SCIF supports both user and
29kernel mode clients which are functionally equivalent.
30
31==== SCIF Performance for MIC ====
32DMA bandwidth comparison between the TCP (over ethernet over PCIe) stack versus
33SCIF shows the performance advantages of SCIF for HPC applications and runtimes.
34
35 Comparison of TCP and SCIF based BW
36
37 Throughput (GB/sec)
38 8 + PCIe Bandwidth ******
39 + TCP ######
40 7 + ************************************** SCIF %%%%%%
41 | %%%%%%%%%%%%%%%%%%%
42 6 + %%%%
43 | %%
44 | %%%
45 5 + %%
46 | %%
47 4 + %%
48 | %%
49 3 + %%
50 | %
51 2 + %%
52 | %%
53 | %
54 1 +
55 + ######################################
56 0 +++---+++--+--+-+--+--+-++-+--+-++-+--+-++-+-
57 1 10 100 1000 10000 100000
58 Transfer Size (KBytes)
59
60SCIF allows memory sharing via mmap(..) between processes on different PCIe
61nodes and thus provides bare-metal PCIe latency. The round trip SCIF mmap
62latency from the host to an x100 MIC for an 8 byte message is 0.44 usecs.
63
64SCIF has a user space library which is a thin IOCTL wrapper providing a user
65space API similar to the kernel API in scif.h. The SCIF user space library
66is distributed @ https://software.intel.com/en-us/mic-developer
67
68Here is some pseudo code for an example of how two applications on two PCIe
69nodes would typically use the SCIF API:
70
71Process A (on node A) Process B (on node B)
72
73/* get online node information */
74scif_get_node_ids(..) scif_get_node_ids(..)
75scif_open(..) scif_open(..)
76scif_bind(..) scif_bind(..)
77scif_listen(..)
78scif_accept(..) scif_connect(..)
79/* SCIF connection established */
80
81/* Send and receive short messages */
82scif_send(..)/scif_recv(..) scif_send(..)/scif_recv(..)
83
84/* Register memory */
85scif_register(..) scif_register(..)
86
87/* RDMA */
88scif_readfrom(..)/scif_writeto(..) scif_readfrom(..)/scif_writeto(..)
89
90/* Fence DMAs */
91scif_fence_signal(..) scif_fence_signal(..)
92
93mmap(..) mmap(..)
94
95/* Access remote registered memory */
96
97/* Close the endpoints */
98scif_close(..) scif_close(..)
diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm
index cc62a95e4776..13411fe52f7f 100644
--- a/Documentation/w1/slaves/w1_therm
+++ b/Documentation/w1/slaves/w1_therm
@@ -11,12 +11,14 @@ Author: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11Description 11Description
12----------- 12-----------
13 13
14w1_therm provides basic temperature conversion for ds18*20 devices. 14w1_therm provides basic temperature conversion for ds18*20 devices, and the
15ds28ea00 device.
15supported family codes: 16supported family codes:
16W1_THERM_DS18S20 0x10 17W1_THERM_DS18S20 0x10
17W1_THERM_DS1822 0x22 18W1_THERM_DS1822 0x22
18W1_THERM_DS18B20 0x28 19W1_THERM_DS18B20 0x28
19W1_THERM_DS1825 0x3B 20W1_THERM_DS1825 0x3B
21W1_THERM_DS28EA00 0x42
20 22
21Support is provided through the sysfs w1_slave file. Each open and 23Support is provided through the sysfs w1_slave file. Each open and
22read sequence will initiate a temperature conversion then provide two 24read sequence will initiate a temperature conversion then provide two
@@ -48,3 +50,10 @@ resistor). The DS18b20 temperature sensor specification lists a
48maximum current draw of 1.5mA and that a 5k pullup resistor is not 50maximum current draw of 1.5mA and that a 5k pullup resistor is not
49sufficient. The strong pullup is designed to provide the additional 51sufficient. The strong pullup is designed to provide the additional
50current required. 52current required.
53
54The DS28EA00 provides an additional two pins for implementing a sequence
55detection algorithm. This feature allows you to determine the physical
56location of the chip in the 1-wire bus without needing pre-existing
57knowledge of the bus ordering. Support is provided through the sysfs
58w1_seq file. The file will contain a single line with an integer value
59representing the device index in the bus starting at 0.
diff --git a/Documentation/w1/w1.generic b/Documentation/w1/w1.generic
index b2033c64c7da..b3ffaf8cfab2 100644
--- a/Documentation/w1/w1.generic
+++ b/Documentation/w1/w1.generic
@@ -76,21 +76,24 @@ See struct w1_bus_master definition in w1.h for details.
76 76
77w1 master sysfs interface 77w1 master sysfs interface
78------------------------------------------------------------------ 78------------------------------------------------------------------
79<xx-xxxxxxxxxxxxx> - a directory for a found device. The format is family-serial 79<xx-xxxxxxxxxxxxx> - A directory for a found device. The format is family-serial
80bus - (standard) symlink to the w1 bus 80bus - (standard) symlink to the w1 bus
81driver - (standard) symlink to the w1 driver 81driver - (standard) symlink to the w1 driver
82w1_master_add - Manually register a slave device 82w1_master_add - (rw) manually register a slave device
83w1_master_attempts - the number of times a search was attempted 83w1_master_attempts - (ro) the number of times a search was attempted
84w1_master_max_slave_count 84w1_master_max_slave_count
85 - maximum number of slaves to search for at a time 85 - (rw) maximum number of slaves to search for at a time
86w1_master_name - the name of the device (w1_bus_masterX) 86w1_master_name - (ro) the name of the device (w1_bus_masterX)
87w1_master_pullup - 5V strong pullup 0 enabled, 1 disabled 87w1_master_pullup - (rw) 5V strong pullup 0 enabled, 1 disabled
88w1_master_remove - Manually remove a slave device 88w1_master_remove - (rw) manually remove a slave device
89w1_master_search - the number of searches left to do, -1=continual (default) 89w1_master_search - (rw) the number of searches left to do,
90 -1=continual (default)
90w1_master_slave_count 91w1_master_slave_count
91 - the number of slaves found 92 - (ro) the number of slaves found
92w1_master_slaves - the names of the slaves, one per line 93w1_master_slaves - (ro) the names of the slaves, one per line
93w1_master_timeout - the delay in seconds between searches 94w1_master_timeout - (ro) the delay in seconds between searches
95w1_master_timeout_us
96 - (ro) the delay in microseconds between searches
94 97
95If you have a w1 bus that never changes (you don't add or remove devices), 98If you have a w1 bus that never changes (you don't add or remove devices),
96you can set the module parameter search_count to a small positive number 99you can set the module parameter search_count to a small positive number
@@ -101,6 +104,11 @@ generally only make sense when searching is disabled, as a search will
101redetect manually removed devices that are present and timeout manually 104redetect manually removed devices that are present and timeout manually
102added devices that aren't on the bus. 105added devices that aren't on the bus.
103 106
107Bus searches occur at an interval, specified as a sum of timeout and
108timeout_us module parameters (either of which may be 0) for as long as
109w1_master_search remains greater than 0 or is -1. Each search attempt
110decrements w1_master_search by 1 (down to 0) and increments
111w1_master_attempts by 1.
104 112
105w1 slave sysfs interface 113w1 slave sysfs interface
106------------------------------------------------------------------ 114------------------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index cae04966af4b..161747bdecf3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -732,7 +732,7 @@ ANDROID DRIVERS
732M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 732M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
733M: Arve Hjønnevåg <arve@android.com> 733M: Arve Hjønnevåg <arve@android.com>
734M: Riley Andrews <riandrews@android.com> 734M: Riley Andrews <riandrews@android.com>
735T: git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git 735T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
736L: devel@driverdev.osuosl.org 736L: devel@driverdev.osuosl.org
737S: Supported 737S: Supported
738F: drivers/android/ 738F: drivers/android/
@@ -3207,9 +3207,9 @@ S: Maintained
3207F: drivers/platform/x86/dell-smo8800.c 3207F: drivers/platform/x86/dell-smo8800.c
3208 3208
3209DELL LAPTOP SMM DRIVER 3209DELL LAPTOP SMM DRIVER
3210M: Guenter Roeck <linux@roeck-us.net> 3210M: Pali Rohár <pali.rohar@gmail.com>
3211S: Maintained 3211S: Maintained
3212F: drivers/char/i8k.c 3212F: drivers/hwmon/dell-smm-hwmon.c
3213F: include/uapi/linux/i8k.h 3213F: include/uapi/linux/i8k.h
3214 3214
3215DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas) 3215DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas)
@@ -5444,6 +5444,7 @@ M: Tomas Winkler <tomas.winkler@intel.com>
5444L: linux-kernel@vger.kernel.org 5444L: linux-kernel@vger.kernel.org
5445S: Supported 5445S: Supported
5446F: include/uapi/linux/mei.h 5446F: include/uapi/linux/mei.h
5447F: include/linux/mei_cl_bus.h
5447F: drivers/misc/mei/* 5448F: drivers/misc/mei/*
5448F: Documentation/misc-devices/mei/* 5449F: Documentation/misc-devices/mei/*
5449 5450
@@ -7572,13 +7573,16 @@ S: Maintained
7572F: Documentation/mn10300/ 7573F: Documentation/mn10300/
7573F: arch/mn10300/ 7574F: arch/mn10300/
7574 7575
7575PARALLEL PORT SUPPORT 7576PARALLEL PORT SUBSYSTEM
7577M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
7578M: Sudip Mukherjee <sudip@vectorindia.org>
7576L: linux-parport@lists.infradead.org (subscribers-only) 7579L: linux-parport@lists.infradead.org (subscribers-only)
7577S: Orphan 7580S: Maintained
7578F: drivers/parport/ 7581F: drivers/parport/
7579F: include/linux/parport*.h 7582F: include/linux/parport*.h
7580F: drivers/char/ppdev.c 7583F: drivers/char/ppdev.c
7581F: include/uapi/linux/ppdev.h 7584F: include/uapi/linux/ppdev.h
7585F: Documentation/parport*.txt
7582 7586
7583PARAVIRT_OPS INTERFACE 7587PARAVIRT_OPS INTERFACE
7584M: Jeremy Fitzhardinge <jeremy@goop.org> 7588M: Jeremy Fitzhardinge <jeremy@goop.org>
diff --git a/arch/um/os-Linux/drivers/ethertap_user.c b/arch/um/os-Linux/drivers/ethertap_user.c
index b39b6696ac58..6d4918246ffe 100644
--- a/arch/um/os-Linux/drivers/ethertap_user.c
+++ b/arch/um/os-Linux/drivers/ethertap_user.c
@@ -105,7 +105,7 @@ static int etap_tramp(char *dev, char *gate, int control_me,
105 sprintf(data_fd_buf, "%d", data_remote); 105 sprintf(data_fd_buf, "%d", data_remote);
106 sprintf(version_buf, "%d", UML_NET_VERSION); 106 sprintf(version_buf, "%d", UML_NET_VERSION);
107 if (gate != NULL) { 107 if (gate != NULL) {
108 strcpy(gate_buf, gate); 108 strncpy(gate_buf, gate, 15);
109 args = setup_args; 109 args = setup_args;
110 } 110 }
111 else args = nosetup_args; 111 else args = nosetup_args;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8e0b76ad8350..4fcf0ade7e91 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1055,24 +1055,19 @@ config TOSHIBA
1055 Say N otherwise. 1055 Say N otherwise.
1056 1056
1057config I8K 1057config I8K
1058 tristate "Dell laptop support" 1058 tristate "Dell i8k legacy laptop support"
1059 select HWMON 1059 select HWMON
1060 select SENSORS_DELL_SMM
1060 ---help--- 1061 ---help---
1061 This adds a driver to safely access the System Management Mode 1062 This option enables legacy /proc/i8k userspace interface in hwmon
1062 of the CPU on the Dell Inspiron 8000. The System Management Mode 1063 dell-smm-hwmon driver. Character file /proc/i8k reports bios version,
1063 is used to read cpu temperature and cooling fan status and to 1064 temperature and allows controlling fan speeds of Dell laptops via
1064 control the fans on the I8K portables. 1065 System Management Mode. For old Dell laptops (like Dell Inspiron 8000)
1066 it reports also power and hotkey status. For fan speed control is
1067 needed userspace package i8kutils.
1065 1068
1066 This driver has been tested only on the Inspiron 8000 but it may 1069 Say Y if you intend to run this kernel on old Dell laptops or want to
1067 also work with other Dell laptops. You can force loading on other 1070 use userspace package i8kutils.
1068 models by passing the parameter `force=1' to the module. Use at
1069 your own risk.
1070
1071 For information on utilities to make use of this driver see the
1072 I8K Linux utilities web site at:
1073 <http://people.debian.org/~dz/i8k/>
1074
1075 Say Y if you intend to run this kernel on a Dell Inspiron 8000.
1076 Say N otherwise. 1071 Say N otherwise.
1077 1072
1078config X86_REBOOTFIXUPS 1073config X86_REBOOTFIXUPS
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c
index 48c50f11f63b..0e287993b778 100644
--- a/drivers/block/paride/paride.c
+++ b/drivers/block/paride/paride.c
@@ -30,6 +30,7 @@
30#include <linux/wait.h> 30#include <linux/wait.h>
31#include <linux/sched.h> /* TASK_* */ 31#include <linux/sched.h> /* TASK_* */
32#include <linux/parport.h> 32#include <linux/parport.h>
33#include <linux/slab.h>
33 34
34#include "paride.h" 35#include "paride.h"
35 36
@@ -244,17 +245,19 @@ void paride_unregister(PIP * pr)
244 245
245EXPORT_SYMBOL(paride_unregister); 246EXPORT_SYMBOL(paride_unregister);
246 247
247static int pi_register_parport(PIA * pi, int verbose) 248static int pi_register_parport(PIA *pi, int verbose, int unit)
248{ 249{
249 struct parport *port; 250 struct parport *port;
251 struct pardev_cb par_cb;
250 252
251 port = parport_find_base(pi->port); 253 port = parport_find_base(pi->port);
252 if (!port) 254 if (!port)
253 return 0; 255 return 0;
254 256 memset(&par_cb, 0, sizeof(par_cb));
255 pi->pardev = parport_register_device(port, 257 par_cb.wakeup = pi_wake_up;
256 pi->device, NULL, 258 par_cb.private = (void *)pi;
257 pi_wake_up, NULL, 0, (void *) pi); 259 pi->pardev = parport_register_dev_model(port, pi->device, &par_cb,
260 unit);
258 parport_put_port(port); 261 parport_put_port(port);
259 if (!pi->pardev) 262 if (!pi->pardev)
260 return 0; 263 return 0;
@@ -311,7 +314,7 @@ static int pi_probe_unit(PIA * pi, int unit, char *scratch, int verbose)
311 e = pi->proto->max_units; 314 e = pi->proto->max_units;
312 } 315 }
313 316
314 if (!pi_register_parport(pi, verbose)) 317 if (!pi_register_parport(pi, verbose, s))
315 return 0; 318 return 0;
316 319
317 if (pi->proto->test_port) { 320 if (pi->proto->test_port) {
@@ -432,3 +435,45 @@ int pi_init(PIA * pi, int autoprobe, int port, int mode,
432} 435}
433 436
434EXPORT_SYMBOL(pi_init); 437EXPORT_SYMBOL(pi_init);
438
439static int pi_probe(struct pardevice *par_dev)
440{
441 struct device_driver *drv = par_dev->dev.driver;
442 int len = strlen(drv->name);
443
444 if (strncmp(par_dev->name, drv->name, len))
445 return -ENODEV;
446
447 return 0;
448}
449
450void *pi_register_driver(char *name)
451{
452 struct parport_driver *parp_drv;
453 int ret;
454
455 parp_drv = kzalloc(sizeof(*parp_drv), GFP_KERNEL);
456 if (!parp_drv)
457 return NULL;
458
459 parp_drv->name = name;
460 parp_drv->probe = pi_probe;
461 parp_drv->devmodel = true;
462
463 ret = parport_register_driver(parp_drv);
464 if (ret) {
465 kfree(parp_drv);
466 return NULL;
467 }
468 return (void *)parp_drv;
469}
470EXPORT_SYMBOL(pi_register_driver);
471
472void pi_unregister_driver(void *_drv)
473{
474 struct parport_driver *drv = _drv;
475
476 parport_unregister_driver(drv);
477 kfree(drv);
478}
479EXPORT_SYMBOL(pi_unregister_driver);
diff --git a/drivers/block/paride/paride.h b/drivers/block/paride/paride.h
index 2bddbf45518b..ddb9e589da7f 100644
--- a/drivers/block/paride/paride.h
+++ b/drivers/block/paride/paride.h
@@ -165,6 +165,8 @@ typedef struct pi_protocol PIP;
165 165
166extern int paride_register( PIP * ); 166extern int paride_register( PIP * );
167extern void paride_unregister ( PIP * ); 167extern void paride_unregister ( PIP * );
168void *pi_register_driver(char *);
169void pi_unregister_driver(void *);
168 170
169#endif /* __DRIVERS_PARIDE_H__ */ 171#endif /* __DRIVERS_PARIDE_H__ */
170/* end of paride.h */ 172/* end of paride.h */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 3b7c9f1be484..93362362aa55 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -221,6 +221,7 @@ static int pcd_busy; /* request being processed ? */
221static int pcd_sector; /* address of next requested sector */ 221static int pcd_sector; /* address of next requested sector */
222static int pcd_count; /* number of blocks still to do */ 222static int pcd_count; /* number of blocks still to do */
223static char *pcd_buf; /* buffer for request in progress */ 223static char *pcd_buf; /* buffer for request in progress */
224static void *par_drv; /* reference of parport driver */
224 225
225/* kernel glue structures */ 226/* kernel glue structures */
226 227
@@ -690,6 +691,12 @@ static int pcd_detect(void)
690 printk("%s: %s version %s, major %d, nice %d\n", 691 printk("%s: %s version %s, major %d, nice %d\n",
691 name, name, PCD_VERSION, major, nice); 692 name, name, PCD_VERSION, major, nice);
692 693
694 par_drv = pi_register_driver(name);
695 if (!par_drv) {
696 pr_err("failed to register %s driver\n", name);
697 return -1;
698 }
699
693 k = 0; 700 k = 0;
694 if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */ 701 if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
695 cd = pcd; 702 cd = pcd;
@@ -723,6 +730,7 @@ static int pcd_detect(void)
723 printk("%s: No CD-ROM drive found\n", name); 730 printk("%s: No CD-ROM drive found\n", name);
724 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) 731 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
725 put_disk(cd->disk); 732 put_disk(cd->disk);
733 pi_unregister_driver(par_drv);
726 return -1; 734 return -1;
727} 735}
728 736
@@ -984,6 +992,7 @@ static void __exit pcd_exit(void)
984 } 992 }
985 blk_cleanup_queue(pcd_queue); 993 blk_cleanup_queue(pcd_queue);
986 unregister_blkdev(major, name); 994 unregister_blkdev(major, name);
995 pi_unregister_driver(par_drv);
987} 996}
988 997
989MODULE_LICENSE("GPL"); 998MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index dbb4da1cdca8..b9242d78283d 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -247,6 +247,8 @@ static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
247 "IDNF", "MC", "UNC", "???", "TMO" 247 "IDNF", "MC", "UNC", "???", "TMO"
248}; 248};
249 249
250static void *par_drv; /* reference of parport driver */
251
250static inline int status_reg(struct pd_unit *disk) 252static inline int status_reg(struct pd_unit *disk)
251{ 253{
252 return pi_read_regr(disk->pi, 1, 6); 254 return pi_read_regr(disk->pi, 1, 6);
@@ -872,6 +874,12 @@ static int pd_detect(void)
872 pd_drive_count++; 874 pd_drive_count++;
873 } 875 }
874 876
877 par_drv = pi_register_driver(name);
878 if (!par_drv) {
879 pr_err("failed to register %s driver\n", name);
880 return -1;
881 }
882
875 if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */ 883 if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
876 disk = pd; 884 disk = pd;
877 if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch, 885 if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
@@ -902,8 +910,10 @@ static int pd_detect(void)
902 found = 1; 910 found = 1;
903 } 911 }
904 } 912 }
905 if (!found) 913 if (!found) {
906 printk("%s: no valid drive found\n", name); 914 printk("%s: no valid drive found\n", name);
915 pi_unregister_driver(par_drv);
916 }
907 return found; 917 return found;
908} 918}
909 919
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 9a15fd3c9349..7a7d977a76c5 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -264,6 +264,7 @@ static int pf_cmd; /* current command READ/WRITE */
264static struct pf_unit *pf_current;/* unit of current request */ 264static struct pf_unit *pf_current;/* unit of current request */
265static int pf_mask; /* stopper for pseudo-int */ 265static int pf_mask; /* stopper for pseudo-int */
266static char *pf_buf; /* buffer for request in progress */ 266static char *pf_buf; /* buffer for request in progress */
267static void *par_drv; /* reference of parport driver */
267 268
268/* kernel glue structures */ 269/* kernel glue structures */
269 270
@@ -703,6 +704,11 @@ static int pf_detect(void)
703 printk("%s: %s version %s, major %d, cluster %d, nice %d\n", 704 printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
704 name, name, PF_VERSION, major, cluster, nice); 705 name, name, PF_VERSION, major, cluster, nice);
705 706
707 par_drv = pi_register_driver(name);
708 if (!par_drv) {
709 pr_err("failed to register %s driver\n", name);
710 return -1;
711 }
706 k = 0; 712 k = 0;
707 if (pf_drive_count == 0) { 713 if (pf_drive_count == 0) {
708 if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF, 714 if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
@@ -735,6 +741,7 @@ static int pf_detect(void)
735 printk("%s: No ATAPI disk detected\n", name); 741 printk("%s: No ATAPI disk detected\n", name);
736 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) 742 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
737 put_disk(pf->disk); 743 put_disk(pf->disk);
744 pi_unregister_driver(par_drv);
738 return -1; 745 return -1;
739} 746}
740 747
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 876d0c3eaf58..bfbd4c852dd9 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -227,6 +227,7 @@ static int pg_identify(struct pg *dev, int log);
227static char pg_scratch[512]; /* scratch block buffer */ 227static char pg_scratch[512]; /* scratch block buffer */
228 228
229static struct class *pg_class; 229static struct class *pg_class;
230static void *par_drv; /* reference of parport driver */
230 231
231/* kernel glue structures */ 232/* kernel glue structures */
232 233
@@ -481,6 +482,12 @@ static int pg_detect(void)
481 482
482 printk("%s: %s version %s, major %d\n", name, name, PG_VERSION, major); 483 printk("%s: %s version %s, major %d\n", name, name, PG_VERSION, major);
483 484
485 par_drv = pi_register_driver(name);
486 if (!par_drv) {
487 pr_err("failed to register %s driver\n", name);
488 return -1;
489 }
490
484 k = 0; 491 k = 0;
485 if (pg_drive_count == 0) { 492 if (pg_drive_count == 0) {
486 if (pi_init(dev->pi, 1, -1, -1, -1, -1, -1, pg_scratch, 493 if (pi_init(dev->pi, 1, -1, -1, -1, -1, -1, pg_scratch,
@@ -511,6 +518,7 @@ static int pg_detect(void)
511 if (k) 518 if (k)
512 return 0; 519 return 0;
513 520
521 pi_unregister_driver(par_drv);
514 printk("%s: No ATAPI device detected\n", name); 522 printk("%s: No ATAPI device detected\n", name);
515 return -1; 523 return -1;
516} 524}
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 2596042eb987..1740d75e8a32 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -232,6 +232,7 @@ static int pt_identify(struct pt_unit *tape);
232static struct pt_unit pt[PT_UNITS]; 232static struct pt_unit pt[PT_UNITS];
233 233
234static char pt_scratch[512]; /* scratch block buffer */ 234static char pt_scratch[512]; /* scratch block buffer */
235static void *par_drv; /* reference of parport driver */
235 236
236/* kernel glue structures */ 237/* kernel glue structures */
237 238
@@ -605,6 +606,12 @@ static int pt_detect(void)
605 606
606 printk("%s: %s version %s, major %d\n", name, name, PT_VERSION, major); 607 printk("%s: %s version %s, major %d\n", name, name, PT_VERSION, major);
607 608
609 par_drv = pi_register_driver(name);
610 if (!par_drv) {
611 pr_err("failed to register %s driver\n", name);
612 return -1;
613 }
614
608 specified = 0; 615 specified = 0;
609 for (unit = 0; unit < PT_UNITS; unit++) { 616 for (unit = 0; unit < PT_UNITS; unit++) {
610 struct pt_unit *tape = &pt[unit]; 617 struct pt_unit *tape = &pt[unit];
@@ -644,6 +651,7 @@ static int pt_detect(void)
644 if (found) 651 if (found)
645 return 0; 652 return 0;
646 653
654 pi_unregister_driver(par_drv);
647 printk("%s: No ATAPI tape drive detected\n", name); 655 printk("%s: No ATAPI tape drive detected\n", name);
648 return -1; 656 return -1;
649} 657}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index a4af8221751e..a043107da2af 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -590,14 +590,6 @@ config DEVPORT
590 590
591source "drivers/s390/char/Kconfig" 591source "drivers/s390/char/Kconfig"
592 592
593config MSM_SMD_PKT
594 bool "Enable device interface for some SMD packet ports"
595 default n
596 depends on MSM_SMD
597 help
598 Enables userspace clients to read and write to some packet SMD
599 ports via device interface for MSM chipset.
600
601config TILE_SROM 593config TILE_SROM
602 bool "Character-device access via hypervisor to the Tilera SPI ROM" 594 bool "Character-device access via hypervisor to the Tilera SPI ROM"
603 depends on TILE 595 depends on TILE
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d06cde26031b..d8a7579300d2 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
9obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o 9obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
10obj-$(CONFIG_RAW_DRIVER) += raw.o 10obj-$(CONFIG_RAW_DRIVER) += raw.o
11obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o 11obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
12obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
13obj-$(CONFIG_MSPEC) += mspec.o 12obj-$(CONFIG_MSPEC) += mspec.o
14obj-$(CONFIG_MMTIMER) += mmtimer.o 13obj-$(CONFIG_MMTIMER) += mmtimer.o
15obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o 14obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
@@ -36,7 +35,6 @@ else
36 obj-$(CONFIG_NVRAM) += nvram.o 35 obj-$(CONFIG_NVRAM) += nvram.o
37endif 36endif
38obj-$(CONFIG_TOSHIBA) += toshiba.o 37obj-$(CONFIG_TOSHIBA) += toshiba.o
39obj-$(CONFIG_I8K) += i8k.o
40obj-$(CONFIG_DS1620) += ds1620.o 38obj-$(CONFIG_DS1620) += ds1620.o
41obj-$(CONFIG_HW_RANDOM) += hw_random/ 39obj-$(CONFIG_HW_RANDOM) += hw_random/
42obj-$(CONFIG_PPDEV) += ppdev.o 40obj-$(CONFIG_PPDEV) += ppdev.o
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 9fd5a91e0d81..fdb0f9b3fe45 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -117,14 +117,14 @@ static int misc_open(struct inode * inode, struct file * file)
117 const struct file_operations *new_fops = NULL; 117 const struct file_operations *new_fops = NULL;
118 118
119 mutex_lock(&misc_mtx); 119 mutex_lock(&misc_mtx);
120 120
121 list_for_each_entry(c, &misc_list, list) { 121 list_for_each_entry(c, &misc_list, list) {
122 if (c->minor == minor) { 122 if (c->minor == minor) {
123 new_fops = fops_get(c->fops); 123 new_fops = fops_get(c->fops);
124 break; 124 break;
125 } 125 }
126 } 126 }
127 127
128 if (!new_fops) { 128 if (!new_fops) {
129 mutex_unlock(&misc_mtx); 129 mutex_unlock(&misc_mtx);
130 request_module("char-major-%d-%d", MISC_MAJOR, minor); 130 request_module("char-major-%d-%d", MISC_MAJOR, minor);
@@ -167,7 +167,7 @@ static const struct file_operations misc_fops = {
167/** 167/**
168 * misc_register - register a miscellaneous device 168 * misc_register - register a miscellaneous device
169 * @misc: device structure 169 * @misc: device structure
170 * 170 *
171 * Register a miscellaneous device with the kernel. If the minor 171 * Register a miscellaneous device with the kernel. If the minor
172 * number is set to %MISC_DYNAMIC_MINOR a minor number is assigned 172 * number is set to %MISC_DYNAMIC_MINOR a minor number is assigned
173 * and placed in the minor field of the structure. For other cases 173 * and placed in the minor field of the structure. For other cases
@@ -181,17 +181,18 @@ static const struct file_operations misc_fops = {
181 * A zero is returned on success and a negative errno code for 181 * A zero is returned on success and a negative errno code for
182 * failure. 182 * failure.
183 */ 183 */
184 184
185int misc_register(struct miscdevice * misc) 185int misc_register(struct miscdevice * misc)
186{ 186{
187 dev_t dev; 187 dev_t dev;
188 int err = 0; 188 int err = 0;
189 bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
189 190
190 INIT_LIST_HEAD(&misc->list); 191 INIT_LIST_HEAD(&misc->list);
191 192
192 mutex_lock(&misc_mtx); 193 mutex_lock(&misc_mtx);
193 194
194 if (misc->minor == MISC_DYNAMIC_MINOR) { 195 if (is_dynamic) {
195 int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS); 196 int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
196 if (i >= DYNAMIC_MINORS) { 197 if (i >= DYNAMIC_MINORS) {
197 err = -EBUSY; 198 err = -EBUSY;
@@ -216,9 +217,13 @@ int misc_register(struct miscdevice * misc)
216 device_create_with_groups(misc_class, misc->parent, dev, 217 device_create_with_groups(misc_class, misc->parent, dev,
217 misc, misc->groups, "%s", misc->name); 218 misc, misc->groups, "%s", misc->name);
218 if (IS_ERR(misc->this_device)) { 219 if (IS_ERR(misc->this_device)) {
219 int i = DYNAMIC_MINORS - misc->minor - 1; 220 if (is_dynamic) {
220 if (i < DYNAMIC_MINORS && i >= 0) 221 int i = DYNAMIC_MINORS - misc->minor - 1;
221 clear_bit(i, misc_minors); 222
223 if (i < DYNAMIC_MINORS && i >= 0)
224 clear_bit(i, misc_minors);
225 misc->minor = MISC_DYNAMIC_MINOR;
226 }
222 err = PTR_ERR(misc->this_device); 227 err = PTR_ERR(misc->this_device);
223 goto out; 228 goto out;
224 } 229 }
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
deleted file mode 100644
index ba82a06d9684..000000000000
--- a/drivers/char/msm_smd_pkt.c
+++ /dev/null
@@ -1,465 +0,0 @@
1/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
16 *
17 */
18/*
19 * SMD Packet Driver -- Provides userspace interface to SMD packet ports.
20 */
21
22#include <linux/slab.h>
23#include <linux/cdev.h>
24#include <linux/module.h>
25#include <linux/fs.h>
26#include <linux/device.h>
27#include <linux/sched.h>
28#include <linux/mutex.h>
29#include <linux/delay.h>
30#include <linux/uaccess.h>
31#include <linux/workqueue.h>
32#include <linux/poll.h>
33
34#include <mach/msm_smd.h>
35
36#define NUM_SMD_PKT_PORTS 9
37#define DEVICE_NAME "smdpkt"
38#define MAX_BUF_SIZE 2048
39
40struct smd_pkt_dev {
41 struct cdev cdev;
42 struct device *devicep;
43
44 struct smd_channel *ch;
45 int open_count;
46 struct mutex ch_lock;
47 struct mutex rx_lock;
48 struct mutex tx_lock;
49 wait_queue_head_t ch_read_wait_queue;
50 wait_queue_head_t ch_opened_wait_queue;
51
52 int i;
53
54 unsigned char tx_buf[MAX_BUF_SIZE];
55 unsigned char rx_buf[MAX_BUF_SIZE];
56 int remote_open;
57
58} *smd_pkt_devp[NUM_SMD_PKT_PORTS];
59
60struct class *smd_pkt_classp;
61static dev_t smd_pkt_number;
62
63static int msm_smd_pkt_debug_enable;
64module_param_named(debug_enable, msm_smd_pkt_debug_enable,
65 int, S_IRUGO | S_IWUSR | S_IWGRP);
66
67#ifdef DEBUG
68#define D_DUMP_BUFFER(prestr, cnt, buf) do { \
69 int i; \
70 if (msm_smd_pkt_debug_enable) { \
71 pr_debug("%s", prestr); \
72 for (i = 0; i < cnt; i++) \
73 pr_debug("%.2x", buf[i]); \
74 pr_debug("\n"); \
75 } \
76 } while (0)
77#else
78#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
79#endif
80
81#ifdef DEBUG
82#define DBG(x...) do { \
83 if (msm_smd_pkt_debug_enable) \
84 pr_debug(x); \
85 } while (0)
86#else
87#define DBG(x...) do {} while (0)
88#endif
89
90static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp)
91{
92 int sz;
93
94 if (!smd_pkt_devp || !smd_pkt_devp->ch)
95 return;
96
97 sz = smd_cur_packet_size(smd_pkt_devp->ch);
98 if (sz == 0) {
99 DBG("no packet\n");
100 return;
101 }
102 if (sz > smd_read_avail(smd_pkt_devp->ch)) {
103 DBG("incomplete packet\n");
104 return;
105 }
106
107 DBG("waking up reader\n");
108 wake_up_interruptible(&smd_pkt_devp->ch_read_wait_queue);
109}
110
111static int smd_pkt_read(struct file *file, char __user *buf,
112 size_t count, loff_t *ppos)
113{
114 int r, bytes_read;
115 struct smd_pkt_dev *smd_pkt_devp;
116 struct smd_channel *chl;
117
118 DBG("read %d bytes\n", count);
119 if (count > MAX_BUF_SIZE)
120 return -EINVAL;
121
122 smd_pkt_devp = file->private_data;
123 if (!smd_pkt_devp || !smd_pkt_devp->ch)
124 return -EINVAL;
125
126 chl = smd_pkt_devp->ch;
127wait_for_packet:
128 r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue,
129 (smd_cur_packet_size(chl) > 0 &&
130 smd_read_avail(chl) >=
131 smd_cur_packet_size(chl)));
132
133 if (r < 0) {
134 if (r != -ERESTARTSYS)
135 pr_err("wait returned %d\n", r);
136 return r;
137 }
138
139 mutex_lock(&smd_pkt_devp->rx_lock);
140
141 bytes_read = smd_cur_packet_size(smd_pkt_devp->ch);
142 if (bytes_read == 0 ||
143 bytes_read < smd_read_avail(smd_pkt_devp->ch)) {
144 mutex_unlock(&smd_pkt_devp->rx_lock);
145 DBG("Nothing to read\n");
146 goto wait_for_packet;
147 }
148
149 if (bytes_read > count) {
150 mutex_unlock(&smd_pkt_devp->rx_lock);
151 pr_info("packet size %d > buffer size %d", bytes_read, count);
152 return -EINVAL;
153 }
154
155 r = smd_read(smd_pkt_devp->ch, smd_pkt_devp->rx_buf, bytes_read);
156 if (r != bytes_read) {
157 mutex_unlock(&smd_pkt_devp->rx_lock);
158 pr_err("smd_read failed to read %d bytes: %d\n", bytes_read, r);
159 return -EIO;
160 }
161
162 D_DUMP_BUFFER("read: ", bytes_read, smd_pkt_devp->rx_buf);
163 r = copy_to_user(buf, smd_pkt_devp->rx_buf, bytes_read);
164 mutex_unlock(&smd_pkt_devp->rx_lock);
165 if (r) {
166 pr_err("copy_to_user failed %d\n", r);
167 return -EFAULT;
168 }
169
170 DBG("read complete %d bytes\n", bytes_read);
171 check_and_wakeup_reader(smd_pkt_devp);
172
173 return bytes_read;
174}
175
176static int smd_pkt_write(struct file *file, const char __user *buf,
177 size_t count, loff_t *ppos)
178{
179 int r;
180 struct smd_pkt_dev *smd_pkt_devp;
181
182 if (count > MAX_BUF_SIZE)
183 return -EINVAL;
184
185 DBG("writing %d bytes\n", count);
186
187 smd_pkt_devp = file->private_data;
188 if (!smd_pkt_devp || !smd_pkt_devp->ch)
189 return -EINVAL;
190
191 mutex_lock(&smd_pkt_devp->tx_lock);
192 if (smd_write_avail(smd_pkt_devp->ch) < count) {
193 mutex_unlock(&smd_pkt_devp->tx_lock);
194 DBG("Not enough space to write\n");
195 return -ENOMEM;
196 }
197
198 D_DUMP_BUFFER("write: ", count, buf);
199 r = copy_from_user(smd_pkt_devp->tx_buf, buf, count);
200 if (r) {
201 mutex_unlock(&smd_pkt_devp->tx_lock);
202 pr_err("copy_from_user failed %d\n", r);
203 return -EFAULT;
204 }
205
206 r = smd_write(smd_pkt_devp->ch, smd_pkt_devp->tx_buf, count);
207 if (r != count) {
208 mutex_unlock(&smd_pkt_devp->tx_lock);
209 pr_err("smd_write failed to write %d bytes: %d.\n", count, r);
210 return -EIO;
211 }
212 mutex_unlock(&smd_pkt_devp->tx_lock);
213
214 DBG("wrote %d bytes\n", count);
215 return count;
216}
217
218static unsigned int smd_pkt_poll(struct file *file, poll_table *wait)
219{
220 struct smd_pkt_dev *smd_pkt_devp;
221 unsigned int mask = 0;
222
223 smd_pkt_devp = file->private_data;
224 if (!smd_pkt_devp)
225 return POLLERR;
226
227 DBG("poll waiting\n");
228 poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait);
229 if (smd_read_avail(smd_pkt_devp->ch))
230 mask |= POLLIN | POLLRDNORM;
231
232 DBG("poll return\n");
233 return mask;
234}
235
236static void smd_pkt_ch_notify(void *priv, unsigned event)
237{
238 struct smd_pkt_dev *smd_pkt_devp = priv;
239
240 if (smd_pkt_devp->ch == 0)
241 return;
242
243 switch (event) {
244 case SMD_EVENT_DATA:
245 DBG("data\n");
246 check_and_wakeup_reader(smd_pkt_devp);
247 break;
248
249 case SMD_EVENT_OPEN:
250 DBG("remote open\n");
251 smd_pkt_devp->remote_open = 1;
252 wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
253 break;
254
255 case SMD_EVENT_CLOSE:
256 smd_pkt_devp->remote_open = 0;
257 pr_info("remote closed\n");
258 break;
259
260 default:
261 pr_err("unknown event %d\n", event);
262 break;
263 }
264}
265
266static char *smd_pkt_dev_name[] = {
267 "smdcntl0",
268 "smdcntl1",
269 "smdcntl2",
270 "smdcntl3",
271 "smdcntl4",
272 "smdcntl5",
273 "smdcntl6",
274 "smdcntl7",
275 "smd22",
276};
277
278static char *smd_ch_name[] = {
279 "DATA5_CNTL",
280 "DATA6_CNTL",
281 "DATA7_CNTL",
282 "DATA8_CNTL",
283 "DATA9_CNTL",
284 "DATA12_CNTL",
285 "DATA13_CNTL",
286 "DATA14_CNTL",
287 "DATA22",
288};
289
290static int smd_pkt_open(struct inode *inode, struct file *file)
291{
292 int r = 0;
293 struct smd_pkt_dev *smd_pkt_devp;
294
295 smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev);
296 if (!smd_pkt_devp)
297 return -EINVAL;
298
299 file->private_data = smd_pkt_devp;
300
301 mutex_lock(&smd_pkt_devp->ch_lock);
302 if (smd_pkt_devp->open_count == 0) {
303 r = smd_open(smd_ch_name[smd_pkt_devp->i],
304 &smd_pkt_devp->ch, smd_pkt_devp,
305 smd_pkt_ch_notify);
306 if (r < 0) {
307 pr_err("smd_open failed for %s, %d\n",
308 smd_ch_name[smd_pkt_devp->i], r);
309 goto out;
310 }
311
312 r = wait_event_interruptible_timeout(
313 smd_pkt_devp->ch_opened_wait_queue,
314 smd_pkt_devp->remote_open,
315 msecs_to_jiffies(2 * HZ));
316 if (r == 0)
317 r = -ETIMEDOUT;
318
319 if (r < 0) {
320 pr_err("wait returned %d\n", r);
321 smd_close(smd_pkt_devp->ch);
322 smd_pkt_devp->ch = 0;
323 } else {
324 smd_pkt_devp->open_count++;
325 r = 0;
326 }
327 }
328out:
329 mutex_unlock(&smd_pkt_devp->ch_lock);
330 return r;
331}
332
333static int smd_pkt_release(struct inode *inode, struct file *file)
334{
335 int r = 0;
336 struct smd_pkt_dev *smd_pkt_devp = file->private_data;
337
338 if (!smd_pkt_devp)
339 return -EINVAL;
340
341 mutex_lock(&smd_pkt_devp->ch_lock);
342 if (--smd_pkt_devp->open_count == 0) {
343 r = smd_close(smd_pkt_devp->ch);
344 smd_pkt_devp->ch = 0;
345 }
346 mutex_unlock(&smd_pkt_devp->ch_lock);
347
348 return r;
349}
350
351static const struct file_operations smd_pkt_fops = {
352 .owner = THIS_MODULE,
353 .open = smd_pkt_open,
354 .release = smd_pkt_release,
355 .read = smd_pkt_read,
356 .write = smd_pkt_write,
357 .poll = smd_pkt_poll,
358};
359
360static int __init smd_pkt_init(void)
361{
362 int i;
363 int r;
364
365 r = alloc_chrdev_region(&smd_pkt_number, 0,
366 NUM_SMD_PKT_PORTS, DEVICE_NAME);
367 if (r) {
368 pr_err("alloc_chrdev_region() failed %d\n", r);
369 return r;
370 }
371
372 smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME);
373 if (IS_ERR(smd_pkt_classp)) {
374 r = PTR_ERR(smd_pkt_classp);
375 pr_err("class_create() failed %d\n", r);
376 goto unreg_chardev;
377 }
378
379 for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
380 smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
381 GFP_KERNEL);
382 if (!smd_pkt_devp[i]) {
383 pr_err("kmalloc() failed\n");
384 goto clean_cdevs;
385 }
386
387 smd_pkt_devp[i]->i = i;
388
389 init_waitqueue_head(&smd_pkt_devp[i]->ch_read_wait_queue);
390 smd_pkt_devp[i]->remote_open = 0;
391 init_waitqueue_head(&smd_pkt_devp[i]->ch_opened_wait_queue);
392
393 mutex_init(&smd_pkt_devp[i]->ch_lock);
394 mutex_init(&smd_pkt_devp[i]->rx_lock);
395 mutex_init(&smd_pkt_devp[i]->tx_lock);
396
397 cdev_init(&smd_pkt_devp[i]->cdev, &smd_pkt_fops);
398 smd_pkt_devp[i]->cdev.owner = THIS_MODULE;
399
400 r = cdev_add(&smd_pkt_devp[i]->cdev,
401 (smd_pkt_number + i), 1);
402 if (r) {
403 pr_err("cdev_add() failed %d\n", r);
404 kfree(smd_pkt_devp[i]);
405 goto clean_cdevs;
406 }
407
408 smd_pkt_devp[i]->devicep =
409 device_create(smd_pkt_classp, NULL,
410 (smd_pkt_number + i), NULL,
411 smd_pkt_dev_name[i]);
412 if (IS_ERR(smd_pkt_devp[i]->devicep)) {
413 r = PTR_ERR(smd_pkt_devp[i]->devicep);
414 pr_err("device_create() failed %d\n", r);
415 cdev_del(&smd_pkt_devp[i]->cdev);
416 kfree(smd_pkt_devp[i]);
417 goto clean_cdevs;
418 }
419
420 }
421
422 pr_info("SMD Packet Port Driver Initialized.\n");
423 return 0;
424
425clean_cdevs:
426 if (i > 0) {
427 while (--i >= 0) {
428 mutex_destroy(&smd_pkt_devp[i]->ch_lock);
429 mutex_destroy(&smd_pkt_devp[i]->rx_lock);
430 mutex_destroy(&smd_pkt_devp[i]->tx_lock);
431 cdev_del(&smd_pkt_devp[i]->cdev);
432 kfree(smd_pkt_devp[i]);
433 device_destroy(smd_pkt_classp,
434 MKDEV(MAJOR(smd_pkt_number), i));
435 }
436 }
437
438 class_destroy(smd_pkt_classp);
439unreg_chardev:
440 unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
441 return r;
442}
443module_init(smd_pkt_init);
444
445static void __exit smd_pkt_cleanup(void)
446{
447 int i;
448
449 for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
450 mutex_destroy(&smd_pkt_devp[i]->ch_lock);
451 mutex_destroy(&smd_pkt_devp[i]->rx_lock);
452 mutex_destroy(&smd_pkt_devp[i]->tx_lock);
453 cdev_del(&smd_pkt_devp[i]->cdev);
454 kfree(smd_pkt_devp[i]);
455 device_destroy(smd_pkt_classp,
456 MKDEV(MAJOR(smd_pkt_number), i));
457 }
458
459 class_destroy(smd_pkt_classp);
460 unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
461}
462module_exit(smd_pkt_cleanup);
463
464MODULE_DESCRIPTION("MSM Shared Memory Packet Port");
465MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 0ea9986059af..7680d5213ff8 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -437,7 +437,7 @@ static int mgslpc_device_count = 0;
437 * .text section address and breakpoint on module load. 437 * .text section address and breakpoint on module load.
438 * This is useful for use with gdb and add-symbol-file command. 438 * This is useful for use with gdb and add-symbol-file command.
439 */ 439 */
440static bool break_on_load=0; 440static bool break_on_load;
441 441
442/* 442/*
443 * Driver major number, defaults to zero to get auto 443 * Driver major number, defaults to zero to get auto
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 8bab59292a0d..8a80ead8d316 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -198,7 +198,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
198 add_wait_queue(&sd->sd_rq, &wait); 198 add_wait_queue(&sd->sd_rq, &wait);
199 spin_unlock_irqrestore(&sd->sd_rlock, flags); 199 spin_unlock_irqrestore(&sd->sd_rlock, flags);
200 200
201 schedule_timeout(SCDRV_TIMEOUT); 201 schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
202 202
203 remove_wait_queue(&sd->sd_rq, &wait); 203 remove_wait_queue(&sd->sd_rq, &wait);
204 if (signal_pending(current)) { 204 if (signal_pending(current)) {
@@ -294,7 +294,7 @@ scdrv_write(struct file *file, const char __user *buf,
294 add_wait_queue(&sd->sd_wq, &wait); 294 add_wait_queue(&sd->sd_wq, &wait);
295 spin_unlock_irqrestore(&sd->sd_wlock, flags); 295 spin_unlock_irqrestore(&sd->sd_wlock, flags);
296 296
297 schedule_timeout(SCDRV_TIMEOUT); 297 schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
298 298
299 remove_wait_queue(&sd->sd_wq, &wait); 299 remove_wait_queue(&sd->sd_wq, &wait);
300 if (signal_pending(current)) { 300 if (signal_pending(current)) {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 50754d203310..d2406fe25533 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1492,8 +1492,8 @@ static int add_port(struct ports_device *portdev, u32 id)
1492 * Finally, create the debugfs file that we can use to 1492 * Finally, create the debugfs file that we can use to
1493 * inspect a port's state at any time 1493 * inspect a port's state at any time
1494 */ 1494 */
1495 sprintf(debugfs_name, "vport%up%u", 1495 snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
1496 port->portdev->vdev->index, id); 1496 port->portdev->vdev->index, id);
1497 port->debugfs_file = debugfs_create_file(debugfs_name, 0444, 1497 port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
1498 pdrvdata.debugfs_dir, 1498 pdrvdata.debugfs_dir,
1499 port, 1499 port,
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
index 05d897764f02..53c3882e4981 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.c
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -270,7 +270,7 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
270 int status; 270 int status;
271 s32 buffer_count = 0; 271 s32 buffer_count = 0;
272 s32 num_writes = 0; 272 s32 num_writes = 0;
273 bool dirty = 0; 273 bool dirty = false;
274 u32 i; 274 u32 i;
275 void __iomem *base_address = drvdata->base_address; 275 void __iomem *base_address = drvdata->base_address;
276 276
@@ -279,7 +279,7 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
279 279
280 /* Copy data to bram */ 280 /* Copy data to bram */
281 buffer_icap_set_bram(base_address, buffer_count, data[i]); 281 buffer_icap_set_bram(base_address, buffer_count, data[i]);
282 dirty = 1; 282 dirty = true;
283 283
284 if (buffer_count < XHI_MAX_BUFFER_INTS - 1) { 284 if (buffer_count < XHI_MAX_BUFFER_INTS - 1) {
285 buffer_count++; 285 buffer_count++;
@@ -299,7 +299,7 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
299 299
300 buffer_count = 0; 300 buffer_count = 0;
301 num_writes++; 301 num_writes++;
302 dirty = 0; 302 dirty = false;
303 } 303 }
304 304
305 /* Write unwritten data to ICAP */ 305 /* Write unwritten data to ICAP */
diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig
index b53bdf12da0d..b302684d86c1 100644
--- a/drivers/char/xillybus/Kconfig
+++ b/drivers/char/xillybus/Kconfig
@@ -24,7 +24,7 @@ config XILLYBUS_PCIE
24 24
25config XILLYBUS_OF 25config XILLYBUS_OF
26 tristate "Xillybus over Device Tree" 26 tristate "Xillybus over Device Tree"
27 depends on OF_ADDRESS && OF_IRQ 27 depends on OF_ADDRESS && OF_IRQ && HAS_DMA
28 help 28 help
29 Set to M if you want Xillybus to find its resources from the 29 Set to M if you want Xillybus to find its resources from the
30 Open Firmware Flattened Device Tree. If the target is an embedded 30 Open Firmware Flattened Device Tree. If the target is an embedded
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index fdc0bf0543ce..0cebbf668886 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -28,15 +28,22 @@ config EXTCON_ARIZONA
28 with Wolfson Arizona devices. These are audio CODECs with 28 with Wolfson Arizona devices. These are audio CODECs with
29 advanced audio accessory detection support. 29 advanced audio accessory detection support.
30 30
31config EXTCON_AXP288
32 tristate "X-Power AXP288 EXTCON support"
33 depends on MFD_AXP20X && USB_PHY
34 help
35 Say Y here to enable support for USB peripheral detection
36 and USB MUX switching by X-Power AXP288 PMIC.
37
31config EXTCON_GPIO 38config EXTCON_GPIO
32 tristate "GPIO extcon support" 39 tristate "GPIO extcon support"
33 depends on GPIOLIB 40 depends on GPIOLIB || COMPILE_TEST
34 help 41 help
35 Say Y here to enable GPIO based extcon support. Note that GPIO 42 Say Y here to enable GPIO based extcon support. Note that GPIO
36 extcon supports single state per extcon instance. 43 extcon supports single state per extcon instance.
37 44
38config EXTCON_MAX14577 45config EXTCON_MAX14577
39 tristate "MAX14577/77836 EXTCON Support" 46 tristate "Maxim MAX14577/77836 EXTCON Support"
40 depends on MFD_MAX14577 47 depends on MFD_MAX14577
41 select IRQ_DOMAIN 48 select IRQ_DOMAIN
42 select REGMAP_I2C 49 select REGMAP_I2C
@@ -46,7 +53,7 @@ config EXTCON_MAX14577
46 detector and switch. 53 detector and switch.
47 54
48config EXTCON_MAX77693 55config EXTCON_MAX77693
49 tristate "MAX77693 EXTCON Support" 56 tristate "Maxim MAX77693 EXTCON Support"
50 depends on MFD_MAX77693 && INPUT 57 depends on MFD_MAX77693 && INPUT
51 select IRQ_DOMAIN 58 select IRQ_DOMAIN
52 select REGMAP_I2C 59 select REGMAP_I2C
@@ -56,7 +63,7 @@ config EXTCON_MAX77693
56 detector and switch. 63 detector and switch.
57 64
58config EXTCON_MAX77843 65config EXTCON_MAX77843
59 tristate "MAX77843 EXTCON Support" 66 tristate "Maxim MAX77843 EXTCON Support"
60 depends on MFD_MAX77843 67 depends on MFD_MAX77843
61 select IRQ_DOMAIN 68 select IRQ_DOMAIN
62 select REGMAP_I2C 69 select REGMAP_I2C
@@ -66,7 +73,7 @@ config EXTCON_MAX77843
66 detector add switch. 73 detector add switch.
67 74
68config EXTCON_MAX8997 75config EXTCON_MAX8997
69 tristate "MAX8997 EXTCON Support" 76 tristate "Maxim MAX8997 EXTCON Support"
70 depends on MFD_MAX8997 && IRQ_DOMAIN 77 depends on MFD_MAX8997 && IRQ_DOMAIN
71 help 78 help
72 If you say yes here you get support for the MUIC device of 79 If you say yes here you get support for the MUIC device of
@@ -81,7 +88,7 @@ config EXTCON_PALMAS
81 detection by palmas usb. 88 detection by palmas usb.
82 89
83config EXTCON_RT8973A 90config EXTCON_RT8973A
84 tristate "RT8973A EXTCON support" 91 tristate "Richtek RT8973A EXTCON support"
85 depends on I2C 92 depends on I2C
86 select IRQ_DOMAIN 93 select IRQ_DOMAIN
87 select REGMAP_I2C 94 select REGMAP_I2C
@@ -93,7 +100,7 @@ config EXTCON_RT8973A
93 from abnormal high input voltage (up to 28V). 100 from abnormal high input voltage (up to 28V).
94 101
95config EXTCON_SM5502 102config EXTCON_SM5502
96 tristate "SM5502 EXTCON support" 103 tristate "Silicon Mitus SM5502 EXTCON support"
97 depends on I2C 104 depends on I2C
98 select IRQ_DOMAIN 105 select IRQ_DOMAIN
99 select REGMAP_I2C 106 select REGMAP_I2C
@@ -105,9 +112,9 @@ config EXTCON_SM5502
105 112
106config EXTCON_USB_GPIO 113config EXTCON_USB_GPIO
107 tristate "USB GPIO extcon support" 114 tristate "USB GPIO extcon support"
108 depends on GPIOLIB 115 depends on GPIOLIB || COMPILE_TEST
109 help 116 help
110 Say Y here to enable GPIO based USB cable detection extcon support. 117 Say Y here to enable GPIO based USB cable detection extcon support.
111 Used typically if GPIO is used for USB ID pin detection. 118 Used typically if GPIO is used for USB ID pin detection.
112 119
113endif # MULTISTATE_SWITCH 120endif
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 9204114791a3..ba787d04295b 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -5,6 +5,7 @@
5obj-$(CONFIG_EXTCON) += extcon.o 5obj-$(CONFIG_EXTCON) += extcon.o
6obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o 6obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
7obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o 7obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
8obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
8obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o 9obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
9obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o 10obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
10obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o 11obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 2bb82e55065a..7fc0ae1912f8 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -29,7 +29,6 @@
29 * struct adc_jack_data - internal data for adc_jack device driver 29 * struct adc_jack_data - internal data for adc_jack device driver
30 * @edev: extcon device. 30 * @edev: extcon device.
31 * @cable_names: list of supported cables. 31 * @cable_names: list of supported cables.
32 * @num_cables: size of cable_names.
33 * @adc_conditions: list of adc value conditions. 32 * @adc_conditions: list of adc value conditions.
34 * @num_conditions: size of adc_conditions. 33 * @num_conditions: size of adc_conditions.
35 * @irq: irq number of attach/detach event (0 if not exist). 34 * @irq: irq number of attach/detach event (0 if not exist).
@@ -41,8 +40,7 @@
41struct adc_jack_data { 40struct adc_jack_data {
42 struct extcon_dev *edev; 41 struct extcon_dev *edev;
43 42
44 const char **cable_names; 43 const unsigned int **cable_names;
45 int num_cables;
46 struct adc_jack_cond *adc_conditions; 44 struct adc_jack_cond *adc_conditions;
47 int num_conditions; 45 int num_conditions;
48 46
@@ -112,17 +110,6 @@ static int adc_jack_probe(struct platform_device *pdev)
112 dev_err(&pdev->dev, "failed to allocate extcon device\n"); 110 dev_err(&pdev->dev, "failed to allocate extcon device\n");
113 return -ENOMEM; 111 return -ENOMEM;
114 } 112 }
115 data->edev->name = pdata->name;
116
117 /* Check the length of array and set num_cables */
118 for (i = 0; data->edev->supported_cable[i]; i++)
119 ;
120 if (i == 0 || i > SUPPORTED_CABLE_MAX) {
121 dev_err(&pdev->dev, "error: pdata->cable_names size = %d\n",
122 i - 1);
123 return -EINVAL;
124 }
125 data->num_cables = i;
126 113
127 if (!pdata->adc_conditions || 114 if (!pdata->adc_conditions ||
128 !pdata->adc_conditions[0].state) { 115 !pdata->adc_conditions[0].state) {
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index a0ed35b336e4..ad87f263056f 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -32,13 +32,10 @@
32#include <linux/mfd/arizona/core.h> 32#include <linux/mfd/arizona/core.h>
33#include <linux/mfd/arizona/pdata.h> 33#include <linux/mfd/arizona/pdata.h>
34#include <linux/mfd/arizona/registers.h> 34#include <linux/mfd/arizona/registers.h>
35#include <dt-bindings/mfd/arizona.h>
35 36
36#define ARIZONA_MAX_MICD_RANGE 8 37#define ARIZONA_MAX_MICD_RANGE 8
37 38
38#define ARIZONA_ACCDET_MODE_MIC 0
39#define ARIZONA_ACCDET_MODE_HPL 1
40#define ARIZONA_ACCDET_MODE_HPR 2
41
42#define ARIZONA_MICD_CLAMP_MODE_JDL 0x4 39#define ARIZONA_MICD_CLAMP_MODE_JDL 0x4
43#define ARIZONA_MICD_CLAMP_MODE_JDH 0x5 40#define ARIZONA_MICD_CLAMP_MODE_JDH 0x5
44#define ARIZONA_MICD_CLAMP_MODE_JDL_GP5H 0x9 41#define ARIZONA_MICD_CLAMP_MODE_JDL_GP5H 0x9
@@ -94,7 +91,7 @@ struct arizona_extcon_info {
94 bool detecting; 91 bool detecting;
95 int jack_flips; 92 int jack_flips;
96 93
97 int hpdet_ip; 94 int hpdet_ip_version;
98 95
99 struct extcon_dev *edev; 96 struct extcon_dev *edev;
100}; 97};
@@ -121,17 +118,12 @@ static const int arizona_micd_levels[] = {
121 1257, 118 1257,
122}; 119};
123 120
124#define ARIZONA_CABLE_MECHANICAL 0 121static const unsigned int arizona_cable[] = {
125#define ARIZONA_CABLE_MICROPHONE 1 122 EXTCON_MECHANICAL,
126#define ARIZONA_CABLE_HEADPHONE 2 123 EXTCON_MICROPHONE,
127#define ARIZONA_CABLE_LINEOUT 3 124 EXTCON_HEADPHONE,
128 125 EXTCON_LINE_OUT,
129static const char *arizona_cable[] = { 126 EXTCON_NONE,
130 "Mechanical",
131 "Microphone",
132 "Headphone",
133 "Line-out",
134 NULL,
135}; 127};
136 128
137static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info); 129static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info);
@@ -145,6 +137,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
145 137
146 switch (arizona->type) { 138 switch (arizona->type) {
147 case WM5110: 139 case WM5110:
140 case WM8280:
148 mask = ARIZONA_HP1L_SHRTO | ARIZONA_HP1L_FLWR | 141 mask = ARIZONA_HP1L_SHRTO | ARIZONA_HP1L_FLWR |
149 ARIZONA_HP1L_SHRTI; 142 ARIZONA_HP1L_SHRTI;
150 if (clamp) 143 if (clamp)
@@ -380,7 +373,7 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
380 return ret; 373 return ret;
381 } 374 }
382 375
383 switch (info->hpdet_ip) { 376 switch (info->hpdet_ip_version) {
384 case 0: 377 case 0:
385 if (!(val & ARIZONA_HP_DONE)) { 378 if (!(val & ARIZONA_HP_DONE)) {
386 dev_err(arizona->dev, "HPDET did not complete: %x\n", 379 dev_err(arizona->dev, "HPDET did not complete: %x\n",
@@ -441,7 +434,7 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
441 434
442 default: 435 default:
443 dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n", 436 dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n",
444 info->hpdet_ip); 437 info->hpdet_ip_version);
445 case 2: 438 case 2:
446 if (!(val & ARIZONA_HP_DONE_B)) { 439 if (!(val & ARIZONA_HP_DONE_B)) {
447 dev_err(arizona->dev, "HPDET did not complete: %x\n", 440 dev_err(arizona->dev, "HPDET did not complete: %x\n",
@@ -559,7 +552,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
559 struct arizona_extcon_info *info = data; 552 struct arizona_extcon_info *info = data;
560 struct arizona *arizona = info->arizona; 553 struct arizona *arizona = info->arizona;
561 int id_gpio = arizona->pdata.hpdet_id_gpio; 554 int id_gpio = arizona->pdata.hpdet_id_gpio;
562 int report = ARIZONA_CABLE_HEADPHONE; 555 unsigned int report = EXTCON_HEADPHONE;
563 int ret, reading; 556 int ret, reading;
564 bool mic = false; 557 bool mic = false;
565 558
@@ -573,7 +566,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
573 } 566 }
574 567
575 /* If the cable was removed while measuring ignore the result */ 568 /* If the cable was removed while measuring ignore the result */
576 ret = extcon_get_cable_state_(info->edev, ARIZONA_CABLE_MECHANICAL); 569 ret = extcon_get_cable_state_(info->edev, EXTCON_MECHANICAL);
577 if (ret < 0) { 570 if (ret < 0) {
578 dev_err(arizona->dev, "Failed to check cable state: %d\n", 571 dev_err(arizona->dev, "Failed to check cable state: %d\n",
579 ret); 572 ret);
@@ -604,9 +597,9 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
604 597
 605 /* Report high impedance cables as line outputs */ 598 /* Report high impedance cables as line outputs */
606 if (reading >= 5000) 599 if (reading >= 5000)
607 report = ARIZONA_CABLE_LINEOUT; 600 report = EXTCON_LINE_OUT;
608 else 601 else
609 report = ARIZONA_CABLE_HEADPHONE; 602 report = EXTCON_HEADPHONE;
610 603
611 ret = extcon_set_cable_state_(info->edev, report, true); 604 ret = extcon_set_cable_state_(info->edev, report, true);
612 if (ret != 0) 605 if (ret != 0)
@@ -670,9 +663,9 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
670 ret = regmap_update_bits(arizona->regmap, 663 ret = regmap_update_bits(arizona->regmap,
671 ARIZONA_ACCESSORY_DETECT_MODE_1, 664 ARIZONA_ACCESSORY_DETECT_MODE_1,
672 ARIZONA_ACCDET_MODE_MASK, 665 ARIZONA_ACCDET_MODE_MASK,
673 ARIZONA_ACCDET_MODE_HPL); 666 arizona->pdata.hpdet_channel);
674 if (ret != 0) { 667 if (ret != 0) {
675 dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret); 668 dev_err(arizona->dev, "Failed to set HPDET mode: %d\n", ret);
676 goto err; 669 goto err;
677 } 670 }
678 671
@@ -691,8 +684,7 @@ err:
691 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); 684 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
692 685
693 /* Just report headphone */ 686 /* Just report headphone */
694 ret = extcon_set_cable_state_(info->edev, 687 ret = extcon_set_cable_state_(info->edev, EXTCON_HEADPHONE, true);
695 ARIZONA_CABLE_HEADPHONE, true);
696 if (ret != 0) 688 if (ret != 0)
697 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret); 689 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
698 690
@@ -722,9 +714,9 @@ static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
722 ARIZONA_ACCESSORY_DETECT_MODE_1, 714 ARIZONA_ACCESSORY_DETECT_MODE_1,
723 ARIZONA_ACCDET_SRC | ARIZONA_ACCDET_MODE_MASK, 715 ARIZONA_ACCDET_SRC | ARIZONA_ACCDET_MODE_MASK,
724 info->micd_modes[0].src | 716 info->micd_modes[0].src |
725 ARIZONA_ACCDET_MODE_HPL); 717 arizona->pdata.hpdet_channel);
726 if (ret != 0) { 718 if (ret != 0) {
727 dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret); 719 dev_err(arizona->dev, "Failed to set HPDET mode: %d\n", ret);
728 goto err; 720 goto err;
729 } 721 }
730 722
@@ -749,8 +741,7 @@ err:
749 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); 741 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
750 742
751 /* Just report headphone */ 743 /* Just report headphone */
752 ret = extcon_set_cable_state_(info->edev, 744 ret = extcon_set_cable_state_(info->edev, EXTCON_HEADPHONE, true);
753 ARIZONA_CABLE_HEADPHONE, true);
754 if (ret != 0) 745 if (ret != 0)
755 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret); 746 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
756 747
@@ -789,7 +780,7 @@ static void arizona_micd_detect(struct work_struct *work)
789 mutex_lock(&info->lock); 780 mutex_lock(&info->lock);
790 781
791 /* If the cable was removed while measuring ignore the result */ 782 /* If the cable was removed while measuring ignore the result */
792 ret = extcon_get_cable_state_(info->edev, ARIZONA_CABLE_MECHANICAL); 783 ret = extcon_get_cable_state_(info->edev, EXTCON_MECHANICAL);
793 if (ret < 0) { 784 if (ret < 0) {
794 dev_err(arizona->dev, "Failed to check cable state: %d\n", 785 dev_err(arizona->dev, "Failed to check cable state: %d\n",
795 ret); 786 ret);
@@ -838,8 +829,7 @@ static void arizona_micd_detect(struct work_struct *work)
838 arizona_identify_headphone(info); 829 arizona_identify_headphone(info);
839 830
840 ret = extcon_set_cable_state_(info->edev, 831 ret = extcon_set_cable_state_(info->edev,
841 ARIZONA_CABLE_MICROPHONE, true); 832 EXTCON_MICROPHONE, true);
842
843 if (ret != 0) 833 if (ret != 0)
844 dev_err(arizona->dev, "Headset report failed: %d\n", 834 dev_err(arizona->dev, "Headset report failed: %d\n",
845 ret); 835 ret);
@@ -1030,7 +1020,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
1030 if (info->last_jackdet == present) { 1020 if (info->last_jackdet == present) {
1031 dev_dbg(arizona->dev, "Detected jack\n"); 1021 dev_dbg(arizona->dev, "Detected jack\n");
1032 ret = extcon_set_cable_state_(info->edev, 1022 ret = extcon_set_cable_state_(info->edev,
1033 ARIZONA_CABLE_MECHANICAL, true); 1023 EXTCON_MECHANICAL, true);
1034 1024
1035 if (ret != 0) 1025 if (ret != 0)
1036 dev_err(arizona->dev, "Mechanical report failed: %d\n", 1026 dev_err(arizona->dev, "Mechanical report failed: %d\n",
@@ -1120,6 +1110,26 @@ static void arizona_micd_set_level(struct arizona *arizona, int index,
1120 regmap_update_bits(arizona->regmap, reg, mask, level); 1110 regmap_update_bits(arizona->regmap, reg, mask, level);
1121} 1111}
1122 1112
1113static int arizona_extcon_of_get_pdata(struct arizona *arizona)
1114{
1115 struct arizona_pdata *pdata = &arizona->pdata;
1116 unsigned int val = ARIZONA_ACCDET_MODE_HPL;
1117
1118 of_property_read_u32(arizona->dev->of_node, "wlf,hpdet-channel", &val);
1119 switch (val) {
1120 case ARIZONA_ACCDET_MODE_HPL:
1121 case ARIZONA_ACCDET_MODE_HPR:
1122 pdata->hpdet_channel = val;
1123 break;
1124 default:
1125 dev_err(arizona->dev,
1126 "Wrong wlf,hpdet-channel DT value %d\n", val);
1127 pdata->hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
1128 }
1129
1130 return 0;
1131}
1132
1123static int arizona_extcon_probe(struct platform_device *pdev) 1133static int arizona_extcon_probe(struct platform_device *pdev)
1124{ 1134{
1125 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); 1135 struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
@@ -1137,6 +1147,11 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1137 if (!info) 1147 if (!info)
1138 return -ENOMEM; 1148 return -ENOMEM;
1139 1149
1150 if (IS_ENABLED(CONFIG_OF)) {
1151 if (!dev_get_platdata(arizona->dev))
1152 arizona_extcon_of_get_pdata(arizona);
1153 }
1154
1140 info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD"); 1155 info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD");
1141 if (IS_ERR(info->micvdd)) { 1156 if (IS_ERR(info->micvdd)) {
1142 ret = PTR_ERR(info->micvdd); 1157 ret = PTR_ERR(info->micvdd);
@@ -1161,7 +1176,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1161 break; 1176 break;
1162 default: 1177 default:
1163 info->micd_clamp = true; 1178 info->micd_clamp = true;
1164 info->hpdet_ip = 1; 1179 info->hpdet_ip_version = 1;
1165 break; 1180 break;
1166 } 1181 }
1167 break; 1182 break;
@@ -1172,7 +1187,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1172 break; 1187 break;
1173 default: 1188 default:
1174 info->micd_clamp = true; 1189 info->micd_clamp = true;
1175 info->hpdet_ip = 2; 1190 info->hpdet_ip_version = 2;
1176 break; 1191 break;
1177 } 1192 }
1178 break; 1193 break;
@@ -1185,7 +1200,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1185 dev_err(&pdev->dev, "failed to allocate extcon device\n"); 1200 dev_err(&pdev->dev, "failed to allocate extcon device\n");
1186 return -ENOMEM; 1201 return -ENOMEM;
1187 } 1202 }
1188 info->edev->name = "Headset Jack";
1189 1203
1190 ret = devm_extcon_dev_register(&pdev->dev, info->edev); 1204 ret = devm_extcon_dev_register(&pdev->dev, info->edev);
1191 if (ret < 0) { 1205 if (ret < 0) {
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
new file mode 100644
index 000000000000..ea962bc547b8
--- /dev/null
+++ b/drivers/extcon/extcon-axp288.c
@@ -0,0 +1,381 @@
1/*
2 * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver
3 *
4 * Copyright (C) 2015 Intel Corporation
5 * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/io.h>
20#include <linux/slab.h>
21#include <linux/interrupt.h>
22#include <linux/platform_device.h>
23#include <linux/property.h>
24#include <linux/usb/phy.h>
25#include <linux/notifier.h>
26#include <linux/extcon.h>
27#include <linux/regmap.h>
28#include <linux/gpio.h>
29#include <linux/gpio/consumer.h>
30#include <linux/mfd/axp20x.h>
31
32/* Power source status register */
33#define PS_STAT_VBUS_TRIGGER BIT(0)
34#define PS_STAT_BAT_CHRG_DIR BIT(2)
35#define PS_STAT_VBUS_ABOVE_VHOLD BIT(3)
36#define PS_STAT_VBUS_VALID BIT(4)
37#define PS_STAT_VBUS_PRESENT BIT(5)
38
39/* BC module global register */
40#define BC_GLOBAL_RUN BIT(0)
41#define BC_GLOBAL_DET_STAT BIT(2)
42#define BC_GLOBAL_DBP_TOUT BIT(3)
43#define BC_GLOBAL_VLGC_COM_SEL BIT(4)
44#define BC_GLOBAL_DCD_TOUT_MASK (BIT(6)|BIT(5))
45#define BC_GLOBAL_DCD_TOUT_300MS 0
46#define BC_GLOBAL_DCD_TOUT_100MS 1
47#define BC_GLOBAL_DCD_TOUT_500MS 2
48#define BC_GLOBAL_DCD_TOUT_900MS 3
49#define BC_GLOBAL_DCD_DET_SEL BIT(7)
50
51/* BC module vbus control and status register */
52#define VBUS_CNTL_DPDM_PD_EN BIT(4)
53#define VBUS_CNTL_DPDM_FD_EN BIT(5)
54#define VBUS_CNTL_FIRST_PO_STAT BIT(6)
55
56/* BC USB status register */
57#define USB_STAT_BUS_STAT_MASK (BIT(3)|BIT(2)|BIT(1)|BIT(0))
58#define USB_STAT_BUS_STAT_SHIFT 0
59#define USB_STAT_BUS_STAT_ATHD 0
60#define USB_STAT_BUS_STAT_CONN 1
61#define USB_STAT_BUS_STAT_SUSP 2
62#define USB_STAT_BUS_STAT_CONF 3
63#define USB_STAT_USB_SS_MODE BIT(4)
64#define USB_STAT_DEAD_BAT_DET BIT(6)
65#define USB_STAT_DBP_UNCFG BIT(7)
66
67/* BC detect status register */
68#define DET_STAT_MASK (BIT(7)|BIT(6)|BIT(5))
69#define DET_STAT_SHIFT 5
70#define DET_STAT_SDP 1
71#define DET_STAT_CDP 2
72#define DET_STAT_DCP 3
73
74/* IRQ enable-1 register */
75#define PWRSRC_IRQ_CFG_MASK (BIT(4)|BIT(3)|BIT(2))
76
77/* IRQ enable-6 register */
78#define BC12_IRQ_CFG_MASK BIT(1)
79
/* Offsets of the AXP288 registers this driver accesses via regmap */
enum axp288_extcon_reg {
	AXP288_PS_STAT_REG = 0x00,
	AXP288_PS_BOOT_REASON_REG = 0x02,
	AXP288_BC_GLOBAL_REG = 0x2c,
	AXP288_BC_VBUS_CNTL_REG = 0x2d,
	AXP288_BC_USB_STAT_REG = 0x2e,
	AXP288_BC_DET_STAT_REG = 0x2f,
	AXP288_PWRSRC_IRQ_CFG_REG = 0x40,
	AXP288_BC12_IRQ_CFG_REG = 0x45,
};

/* Values driven on the USB mux gpio to route D+/D- to PMIC or SoC */
enum axp288_mux_select {
	EXTCON_GPIO_MUX_SEL_PMIC = 0,
	EXTCON_GPIO_MUX_SEL_SOC,
};

/*
 * Indices into axp288_extcon_info.irq[]; the order must match the
 * platform irq resources looked up in probe via platform_get_irq().
 */
enum axp288_extcon_irq {
	VBUS_FALLING_IRQ = 0,
	VBUS_RISING_IRQ,
	MV_CHNG_IRQ,
	BC_USB_CHNG_IRQ,
	EXTCON_IRQ_END,
};

/* Cable types reported to extcon consumers; EXTCON_NONE terminates */
static const unsigned int axp288_extcon_cables[] = {
	EXTCON_SLOW_CHARGER,
	EXTCON_CHARGE_DOWNSTREAM,
	EXTCON_FAST_CHARGER,
	EXTCON_NONE,
};

/* Per-device driver state, allocated in probe */
struct axp288_extcon_info {
	struct device *dev;
	struct regmap *regmap;			/* PMIC register access */
	struct regmap_irq_chip_data *regmap_irqc;
	struct axp288_extcon_pdata *pdata;	/* required; probe returns -ENODEV without it */
	int irq[EXTCON_IRQ_END];		/* virqs mapped from the platform irqs */
	struct extcon_dev *edev;
	/* NOTE(review): extcon_nb is never registered in this file - confirm use */
	struct notifier_block extcon_nb;
	struct usb_phy *otg;
};

/* Power up/down reason string array */
static char *axp288_pwr_up_down_info[] = {
	"Last wake caused by user pressing the power button",
	"Last wake caused by a charger insertion",
	"Last wake caused by a battery insertion",
	"Last wake caused by SOC initiated global reset",
	"Last wake caused by cold reset",
	"Last shutdown caused by PMIC UVLO threshold",
	"Last shutdown caused by SOC initiated cold off",
	"Last shutdown caused by user pressing the power button",
	NULL,	/* sentinel for the axp288_extcon_log_rsi() loop */
};
134
135/*
136 * Decode and log the given "reset source indicator" (rsi)
137 * register and then clear it.
138 */
139static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
140{
141 char **rsi;
142 unsigned int val, i, clear_mask = 0;
143 int ret;
144
145 ret = regmap_read(info->regmap, AXP288_PS_BOOT_REASON_REG, &val);
146 for (i = 0, rsi = axp288_pwr_up_down_info; *rsi; rsi++, i++) {
147 if (val & BIT(i)) {
148 dev_dbg(info->dev, "%s\n", *rsi);
149 clear_mask |= BIT(i);
150 }
151 }
152
153 /* Clear the register value for next reboot (write 1 to clear bit) */
154 regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
155}
156
157static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
158{
159 static bool notify_otg, notify_charger;
160 static unsigned int cable;
161 int ret, stat, cfg, pwr_stat;
162 u8 chrg_type;
163 bool vbus_attach = false;
164
165 ret = regmap_read(info->regmap, AXP288_PS_STAT_REG, &pwr_stat);
166 if (ret < 0) {
167 dev_err(info->dev, "failed to read vbus status\n");
168 return ret;
169 }
170
171 vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT);
172 if (!vbus_attach)
173 goto notify_otg;
174
175 /* Check charger detection completion status */
176 ret = regmap_read(info->regmap, AXP288_BC_GLOBAL_REG, &cfg);
177 if (ret < 0)
178 goto dev_det_ret;
179 if (cfg & BC_GLOBAL_DET_STAT) {
180 dev_dbg(info->dev, "can't complete the charger detection\n");
181 goto dev_det_ret;
182 }
183
184 ret = regmap_read(info->regmap, AXP288_BC_DET_STAT_REG, &stat);
185 if (ret < 0)
186 goto dev_det_ret;
187
188 chrg_type = (stat & DET_STAT_MASK) >> DET_STAT_SHIFT;
189
190 switch (chrg_type) {
191 case DET_STAT_SDP:
192 dev_dbg(info->dev, "sdp cable is connecetd\n");
193 notify_otg = true;
194 notify_charger = true;
195 cable = EXTCON_SLOW_CHARGER;
196 break;
197 case DET_STAT_CDP:
198 dev_dbg(info->dev, "cdp cable is connecetd\n");
199 notify_otg = true;
200 notify_charger = true;
201 cable = EXTCON_CHARGE_DOWNSTREAM;
202 break;
203 case DET_STAT_DCP:
204 dev_dbg(info->dev, "dcp cable is connecetd\n");
205 notify_charger = true;
206 cable = EXTCON_FAST_CHARGER;
207 break;
208 default:
209 dev_warn(info->dev,
210 "disconnect or unknown or ID event\n");
211 }
212
213notify_otg:
214 if (notify_otg) {
215 /*
216 * If VBUS is absent Connect D+/D- lines to PMIC for BC
217 * detection. Else connect them to SOC for USB communication.
218 */
219 if (info->pdata->gpio_mux_cntl)
220 gpiod_set_value(info->pdata->gpio_mux_cntl,
221 vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
222 : EXTCON_GPIO_MUX_SEL_PMIC);
223
224 atomic_notifier_call_chain(&info->otg->notifier,
225 vbus_attach ? USB_EVENT_VBUS : USB_EVENT_NONE, NULL);
226 }
227
228 if (notify_charger)
229 extcon_set_cable_state_(info->edev, cable, vbus_attach);
230
231 /* Clear the flags on disconnect event */
232 if (!vbus_attach)
233 notify_otg = notify_charger = false;
234
235 return 0;
236
237dev_det_ret:
238 if (ret < 0)
239 dev_err(info->dev, "failed to detect BC Mod\n");
240
241 return ret;
242}
243
244static irqreturn_t axp288_extcon_isr(int irq, void *data)
245{
246 struct axp288_extcon_info *info = data;
247 int ret;
248
249 ret = axp288_handle_chrg_det_event(info);
250 if (ret < 0)
251 dev_err(info->dev, "failed to handle the interrupt\n");
252
253 return IRQ_HANDLED;
254}
255
/*
 * Unmask the VBUS and BC1.2 interrupts and start the charger-detection
 * logic.  NOTE(review): BC_GLOBAL_RUN is cleared before the BC1.2 IRQ
 * mask is written and only set again afterwards - presumably so that
 * detection cannot fire mid-reconfiguration; keep this ordering unless
 * confirmed otherwise against the AXP288 datasheet.
 */
static void axp288_extcon_enable_irq(struct axp288_extcon_info *info)
{
	/* Unmask VBUS interrupt */
	regmap_write(info->regmap, AXP288_PWRSRC_IRQ_CFG_REG,
		PWRSRC_IRQ_CFG_MASK);
	/* Stop the detection logic while the BC1.2 mask is changed */
	regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
		BC_GLOBAL_RUN, 0);
	/* Unmask the BC1.2 complete interrupts */
	regmap_write(info->regmap, AXP288_BC12_IRQ_CFG_REG, BC12_IRQ_CFG_MASK);
	/* Enable the charger detection logic */
	regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
		BC_GLOBAL_RUN, BC_GLOBAL_RUN);
}
269
270static int axp288_extcon_probe(struct platform_device *pdev)
271{
272 struct axp288_extcon_info *info;
273 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
274 int ret, i, pirq, gpio;
275
276 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
277 if (!info)
278 return -ENOMEM;
279
280 info->dev = &pdev->dev;
281 info->regmap = axp20x->regmap;
282 info->regmap_irqc = axp20x->regmap_irqc;
283 info->pdata = pdev->dev.platform_data;
284
285 if (!info->pdata) {
286 /* Try ACPI provided pdata via device properties */
287 if (!device_property_present(&pdev->dev,
288 "axp288_extcon_data\n"))
289 dev_err(&pdev->dev, "failed to get platform data\n");
290 return -ENODEV;
291 }
292 platform_set_drvdata(pdev, info);
293
294 axp288_extcon_log_rsi(info);
295
296 /* Initialize extcon device */
297 info->edev = devm_extcon_dev_allocate(&pdev->dev,
298 axp288_extcon_cables);
299 if (IS_ERR(info->edev)) {
300 dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
301 return PTR_ERR(info->edev);
302 }
303
304 /* Register extcon device */
305 ret = devm_extcon_dev_register(&pdev->dev, info->edev);
306 if (ret) {
307 dev_err(&pdev->dev, "failed to register extcon device\n");
308 return ret;
309 }
310
311 /* Get otg transceiver phy */
312 info->otg = usb_get_phy(USB_PHY_TYPE_USB2);
313 if (IS_ERR(info->otg)) {
314 dev_err(&pdev->dev, "failed to get otg transceiver\n");
315 return PTR_ERR(info->otg);
316 }
317
318 /* Set up gpio control for USB Mux */
319 if (info->pdata->gpio_mux_cntl) {
320 gpio = desc_to_gpio(info->pdata->gpio_mux_cntl);
321 ret = gpio_request(gpio, "USB_MUX");
322 if (ret < 0) {
323 dev_err(&pdev->dev,
324 "failed to request the gpio=%d\n", gpio);
325 goto gpio_req_failed;
326 }
327 gpiod_direction_output(info->pdata->gpio_mux_cntl,
328 EXTCON_GPIO_MUX_SEL_PMIC);
329 }
330
331 for (i = 0; i < EXTCON_IRQ_END; i++) {
332 pirq = platform_get_irq(pdev, i);
333 info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
334 if (info->irq[i] < 0) {
335 dev_err(&pdev->dev,
336 "failed to get virtual interrupt=%d\n", pirq);
337 ret = info->irq[i];
338 goto gpio_req_failed;
339 }
340
341 ret = devm_request_threaded_irq(&pdev->dev, info->irq[i],
342 NULL, axp288_extcon_isr,
343 IRQF_ONESHOT | IRQF_NO_SUSPEND,
344 pdev->name, info);
345 if (ret) {
346 dev_err(&pdev->dev, "failed to request interrupt=%d\n",
347 info->irq[i]);
348 goto gpio_req_failed;
349 }
350 }
351
352 /* Enable interrupts */
353 axp288_extcon_enable_irq(info);
354
355 return 0;
356
357gpio_req_failed:
358 usb_put_phy(info->otg);
359 return ret;
360}
361
362static int axp288_extcon_remove(struct platform_device *pdev)
363{
364 struct axp288_extcon_info *info = platform_get_drvdata(pdev);
365
366 usb_put_phy(info->otg);
367 return 0;
368}
369
/*
 * No DT/ACPI match table here - NOTE(review): presumably the device is
 * instantiated by name by the parent axp20x MFD driver; confirm.
 */
static struct platform_driver axp288_extcon_driver = {
	.probe = axp288_extcon_probe,
	.remove = axp288_extcon_remove,
	.driver = {
		.name = "axp288_extcon",
	},
};
module_platform_driver(axp288_extcon_driver);

MODULE_AUTHOR("Ramakrishna Pallala <ramakrishna.pallala@intel.com>");
MODULE_DESCRIPTION("X-Powers AXP288 extcon driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 7af33fc433cd..355459a54e8b 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -104,7 +104,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
104 dev_err(&pdev->dev, "failed to allocate extcon device\n"); 104 dev_err(&pdev->dev, "failed to allocate extcon device\n");
105 return -ENOMEM; 105 return -ENOMEM;
106 } 106 }
107 extcon_data->edev->name = pdata->name;
108 107
109 extcon_data->gpio = pdata->gpio; 108 extcon_data->gpio = pdata->gpio;
110 extcon_data->gpio_active_low = pdata->gpio_active_low; 109 extcon_data->gpio_active_low = pdata->gpio_active_low;
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 3823aa4a3a80..df0659d98e5a 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -148,33 +148,14 @@ enum max14577_muic_acc_type {
148 MAX14577_MUIC_ADC_OPEN, 148 MAX14577_MUIC_ADC_OPEN,
149}; 149};
150 150
151/* max14577 MUIC device support below list of accessories(external connector) */ 151static const unsigned int max14577_extcon_cable[] = {
152enum { 152 EXTCON_USB,
153 EXTCON_CABLE_USB = 0, 153 EXTCON_TA,
154 EXTCON_CABLE_TA, 154 EXTCON_FAST_CHARGER,
155 EXTCON_CABLE_FAST_CHARGER, 155 EXTCON_SLOW_CHARGER,
156 EXTCON_CABLE_SLOW_CHARGER, 156 EXTCON_CHARGE_DOWNSTREAM,
157 EXTCON_CABLE_CHARGE_DOWNSTREAM, 157 EXTCON_JIG,
158 EXTCON_CABLE_JIG_USB_ON, 158 EXTCON_NONE,
159 EXTCON_CABLE_JIG_USB_OFF,
160 EXTCON_CABLE_JIG_UART_OFF,
161 EXTCON_CABLE_JIG_UART_ON,
162
163 _EXTCON_CABLE_NUM,
164};
165
166static const char *max14577_extcon_cable[] = {
167 [EXTCON_CABLE_USB] = "USB",
168 [EXTCON_CABLE_TA] = "TA",
169 [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
170 [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
171 [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
172 [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
173 [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
174 [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
175 [EXTCON_CABLE_JIG_UART_ON] = "JIG-UART-ON",
176
177 NULL,
178}; 159};
179 160
180/* 161/*
@@ -348,7 +329,6 @@ static int max14577_muic_get_cable_type(struct max14577_muic_info *info,
348static int max14577_muic_jig_handler(struct max14577_muic_info *info, 329static int max14577_muic_jig_handler(struct max14577_muic_info *info,
349 int cable_type, bool attached) 330 int cable_type, bool attached)
350{ 331{
351 char cable_name[32];
352 int ret = 0; 332 int ret = 0;
353 u8 path = CTRL1_SW_OPEN; 333 u8 path = CTRL1_SW_OPEN;
354 334
@@ -358,18 +338,12 @@ static int max14577_muic_jig_handler(struct max14577_muic_info *info,
358 338
359 switch (cable_type) { 339 switch (cable_type) {
360 case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */ 340 case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
361 /* PATH:AP_USB */
362 strcpy(cable_name, "JIG-USB-OFF");
363 path = CTRL1_SW_USB;
364 break;
365 case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */ 341 case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
366 /* PATH:AP_USB */ 342 /* PATH:AP_USB */
367 strcpy(cable_name, "JIG-USB-ON");
368 path = CTRL1_SW_USB; 343 path = CTRL1_SW_USB;
369 break; 344 break;
370 case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */ 345 case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
371 /* PATH:AP_UART */ 346 /* PATH:AP_UART */
372 strcpy(cable_name, "JIG-UART-OFF");
373 path = CTRL1_SW_UART; 347 path = CTRL1_SW_UART;
374 break; 348 break;
375 default: 349 default:
@@ -382,7 +356,7 @@ static int max14577_muic_jig_handler(struct max14577_muic_info *info,
382 if (ret < 0) 356 if (ret < 0)
383 return ret; 357 return ret;
384 358
385 extcon_set_cable_state(info->edev, cable_name, attached); 359 extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
386 360
387 return 0; 361 return 0;
388} 362}
@@ -479,20 +453,22 @@ static int max14577_muic_chg_handler(struct max14577_muic_info *info)
479 if (ret < 0) 453 if (ret < 0)
480 return ret; 454 return ret;
481 455
482 extcon_set_cable_state(info->edev, "USB", attached); 456 extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
483 break; 457 break;
484 case MAX14577_CHARGER_TYPE_DEDICATED_CHG: 458 case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
485 extcon_set_cable_state(info->edev, "TA", attached); 459 extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
486 break; 460 break;
487 case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT: 461 case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT:
488 extcon_set_cable_state(info->edev, 462 extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM,
489 "Charge-downstream", attached); 463 attached);
490 break; 464 break;
491 case MAX14577_CHARGER_TYPE_SPECIAL_500MA: 465 case MAX14577_CHARGER_TYPE_SPECIAL_500MA:
492 extcon_set_cable_state(info->edev, "Slow-charger", attached); 466 extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER,
467 attached);
493 break; 468 break;
494 case MAX14577_CHARGER_TYPE_SPECIAL_1A: 469 case MAX14577_CHARGER_TYPE_SPECIAL_1A:
495 extcon_set_cable_state(info->edev, "Fast-charger", attached); 470 extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER,
471 attached);
496 break; 472 break;
497 case MAX14577_CHARGER_TYPE_NONE: 473 case MAX14577_CHARGER_TYPE_NONE:
498 case MAX14577_CHARGER_TYPE_DEAD_BATTERY: 474 case MAX14577_CHARGER_TYPE_DEAD_BATTERY:
@@ -742,8 +718,6 @@ static int max14577_muic_probe(struct platform_device *pdev)
742 return -ENOMEM; 718 return -ENOMEM;
743 } 719 }
744 720
745 info->edev->name = dev_name(&pdev->dev);
746
747 ret = devm_extcon_dev_register(&pdev->dev, info->edev); 721 ret = devm_extcon_dev_register(&pdev->dev, info->edev);
748 if (ret) { 722 if (ret) {
749 dev_err(&pdev->dev, "failed to register extcon device\n"); 723 dev_err(&pdev->dev, "failed to register extcon device\n");
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index a66bec8f6252..f4f3b3d53928 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -200,44 +200,17 @@ enum max77693_muic_acc_type {
200/* 200/*
201 * MAX77693 MUIC device support below list of accessories(external connector) 201 * MAX77693 MUIC device support below list of accessories(external connector)
202 */ 202 */
203enum { 203static const unsigned int max77693_extcon_cable[] = {
204 EXTCON_CABLE_USB = 0, 204 EXTCON_USB,
205 EXTCON_CABLE_USB_HOST, 205 EXTCON_USB_HOST,
206 EXTCON_CABLE_TA, 206 EXTCON_TA,
207 EXTCON_CABLE_FAST_CHARGER, 207 EXTCON_FAST_CHARGER,
208 EXTCON_CABLE_SLOW_CHARGER, 208 EXTCON_SLOW_CHARGER,
209 EXTCON_CABLE_CHARGE_DOWNSTREAM, 209 EXTCON_CHARGE_DOWNSTREAM,
210 EXTCON_CABLE_MHL, 210 EXTCON_MHL,
211 EXTCON_CABLE_MHL_TA, 211 EXTCON_JIG,
212 EXTCON_CABLE_JIG_USB_ON, 212 EXTCON_DOCK,
213 EXTCON_CABLE_JIG_USB_OFF, 213 EXTCON_NONE,
214 EXTCON_CABLE_JIG_UART_OFF,
215 EXTCON_CABLE_JIG_UART_ON,
216 EXTCON_CABLE_DOCK_SMART,
217 EXTCON_CABLE_DOCK_DESK,
218 EXTCON_CABLE_DOCK_AUDIO,
219
220 _EXTCON_CABLE_NUM,
221};
222
223static const char *max77693_extcon_cable[] = {
224 [EXTCON_CABLE_USB] = "USB",
225 [EXTCON_CABLE_USB_HOST] = "USB-Host",
226 [EXTCON_CABLE_TA] = "TA",
227 [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
228 [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
229 [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
230 [EXTCON_CABLE_MHL] = "MHL",
231 [EXTCON_CABLE_MHL_TA] = "MHL-TA",
232 [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
233 [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
234 [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
235 [EXTCON_CABLE_JIG_UART_ON] = "JIG-UART-ON",
236 [EXTCON_CABLE_DOCK_SMART] = "Dock-Smart",
237 [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
238 [EXTCON_CABLE_DOCK_AUDIO] = "Dock-Audio",
239
240 NULL,
241}; 214};
242 215
243/* 216/*
@@ -484,7 +457,7 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
484 int ret = 0; 457 int ret = 0;
485 int vbvolt; 458 int vbvolt;
486 bool cable_attached; 459 bool cable_attached;
487 char dock_name[CABLE_NAME_MAX]; 460 unsigned int dock_id;
488 461
489 dev_info(info->dev, 462 dev_info(info->dev,
490 "external connector is %s (adc:0x%02x)\n", 463 "external connector is %s (adc:0x%02x)\n",
@@ -507,15 +480,15 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
507 } 480 }
508 481
509 /* 482 /*
510 * Notify Dock-Smart/MHL state. 483 * Notify Dock/MHL state.
511 * - Dock-Smart device include three type of cable which 484 * - Dock device include three type of cable which
 512 * are HDMI, USB for mouse/keyboard and micro-usb port 485 * for USB/TA cable. Dock device need always external
 513 * for USB/TA cable. Dock-Smart device need always external 486 * power supply(USB/TA cable through micro-usb cable). Dock
514 * power supply(USB/TA cable through micro-usb cable). Dock- 487 * power supply(USB/TA cable through micro-usb cable). Dock
515 * Smart device support screen output of target to separate 488 * device support screen output of target to separate
516 * monitor and mouse/keyboard for desktop mode. 489 * monitor and mouse/keyboard for desktop mode.
517 * 490 *
518 * Features of 'USB/TA cable with Dock-Smart device' 491 * Features of 'USB/TA cable with Dock device'
519 * - Support MHL 492 * - Support MHL
520 * - Support external output feature of audio 493 * - Support external output feature of audio
521 * - Support charging through micro-usb port without data 494 * - Support charging through micro-usb port without data
@@ -529,16 +502,16 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
529 if (ret < 0) 502 if (ret < 0)
530 return ret; 503 return ret;
531 504
532 extcon_set_cable_state(info->edev, "Dock-Smart", attached); 505 extcon_set_cable_state_(info->edev, EXTCON_DOCK, attached);
533 extcon_set_cable_state(info->edev, "MHL", attached); 506 extcon_set_cable_state_(info->edev, EXTCON_MHL, attached);
534 goto out; 507 goto out;
535 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */ 508 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
536 strcpy(dock_name, "Dock-Desk"); 509 dock_id = EXTCON_DOCK;
537 break; 510 break;
538 case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */ 511 case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
539 strcpy(dock_name, "Dock-Audio"); 512 dock_id = EXTCON_DOCK;
540 if (!attached) 513 if (!attached)
541 extcon_set_cable_state(info->edev, "USB", false); 514 extcon_set_cable_state_(info->edev, EXTCON_USB, false);
542 break; 515 break;
543 default: 516 default:
544 dev_err(info->dev, "failed to detect %s dock device\n", 517 dev_err(info->dev, "failed to detect %s dock device\n",
@@ -550,7 +523,7 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
550 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached); 523 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
551 if (ret < 0) 524 if (ret < 0)
552 return ret; 525 return ret;
553 extcon_set_cable_state(info->edev, dock_name, attached); 526 extcon_set_cable_state_(info->edev, dock_id, attached);
554 527
555out: 528out:
556 return 0; 529 return 0;
@@ -615,20 +588,19 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
615 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached); 588 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
616 if (ret < 0) 589 if (ret < 0)
617 return ret; 590 return ret;
618 extcon_set_cable_state(info->edev, "USB-Host", attached); 591 extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
619 break; 592 break;
620 case MAX77693_MUIC_GND_AV_CABLE_LOAD: 593 case MAX77693_MUIC_GND_AV_CABLE_LOAD:
621 /* Audio Video Cable with load, PATH:AUDIO */ 594 /* Audio Video Cable with load, PATH:AUDIO */
622 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached); 595 ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
623 if (ret < 0) 596 if (ret < 0)
624 return ret; 597 return ret;
625 extcon_set_cable_state(info->edev, 598 extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
626 "Audio-video-load", attached);
627 break; 599 break;
628 case MAX77693_MUIC_GND_MHL: 600 case MAX77693_MUIC_GND_MHL:
629 case MAX77693_MUIC_GND_MHL_VB: 601 case MAX77693_MUIC_GND_MHL_VB:
630 /* MHL or MHL with USB/TA cable */ 602 /* MHL or MHL with USB/TA cable */
631 extcon_set_cable_state(info->edev, "MHL", attached); 603 extcon_set_cable_state_(info->edev, EXTCON_MHL, attached);
632 break; 604 break;
633 default: 605 default:
634 dev_err(info->dev, "failed to detect %s cable of gnd type\n", 606 dev_err(info->dev, "failed to detect %s cable of gnd type\n",
@@ -642,7 +614,6 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
642static int max77693_muic_jig_handler(struct max77693_muic_info *info, 614static int max77693_muic_jig_handler(struct max77693_muic_info *info,
643 int cable_type, bool attached) 615 int cable_type, bool attached)
644{ 616{
645 char cable_name[32];
646 int ret = 0; 617 int ret = 0;
647 u8 path = CONTROL1_SW_OPEN; 618 u8 path = CONTROL1_SW_OPEN;
648 619
@@ -652,23 +623,13 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
652 623
653 switch (cable_type) { 624 switch (cable_type) {
654 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */ 625 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
655 /* PATH:AP_USB */
656 strcpy(cable_name, "JIG-USB-OFF");
657 path = CONTROL1_SW_USB;
658 break;
659 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */ 626 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
660 /* PATH:AP_USB */ 627 /* PATH:AP_USB */
661 strcpy(cable_name, "JIG-USB-ON");
662 path = CONTROL1_SW_USB; 628 path = CONTROL1_SW_USB;
663 break; 629 break;
664 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */ 630 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
665 /* PATH:AP_UART */
666 strcpy(cable_name, "JIG-UART-OFF");
667 path = CONTROL1_SW_UART;
668 break;
669 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */ 631 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */
670 /* PATH:AP_UART */ 632 /* PATH:AP_UART */
671 strcpy(cable_name, "JIG-UART-ON");
672 path = CONTROL1_SW_UART; 633 path = CONTROL1_SW_UART;
673 break; 634 break;
674 default: 635 default:
@@ -681,7 +642,7 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
681 if (ret < 0) 642 if (ret < 0)
682 return ret; 643 return ret;
683 644
684 extcon_set_cable_state(info->edev, cable_name, attached); 645 extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
685 646
686 return 0; 647 return 0;
687} 648}
@@ -823,22 +784,22 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
823 case MAX77693_MUIC_GND_MHL: 784 case MAX77693_MUIC_GND_MHL:
824 case MAX77693_MUIC_GND_MHL_VB: 785 case MAX77693_MUIC_GND_MHL_VB:
825 /* 786 /*
826 * MHL cable with MHL-TA(USB/TA) cable 787 * MHL cable with USB/TA cable
827 * - MHL cable include two port(HDMI line and separate 788 * - MHL cable include two port(HDMI line and separate
828 * micro-usb port. When the target connect MHL cable, 789 * micro-usb port. When the target connect MHL cable,
829 * extcon driver check whether MHL-TA(USB/TA) cable is 790 * extcon driver check whether USB/TA cable is
830 * connected. If MHL-TA cable is connected, extcon 791 * connected. If USB/TA cable is connected, extcon
831 * driver notify state to notifiee for charging battery. 792 * driver notify state to notifiee for charging battery.
832 * 793 *
833 * Features of 'MHL-TA(USB/TA) with MHL cable' 794 * Features of 'USB/TA with MHL cable'
834 * - Support MHL 795 * - Support MHL
835 * - Support charging through micro-usb port without 796 * - Support charging through micro-usb port without
836 * data connection 797 * data connection
837 */ 798 */
838 extcon_set_cable_state(info->edev, "MHL-TA", attached); 799 extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
839 if (!cable_attached) 800 if (!cable_attached)
840 extcon_set_cable_state(info->edev, 801 extcon_set_cable_state_(info->edev, EXTCON_MHL,
841 "MHL", cable_attached); 802 cable_attached);
842 break; 803 break;
843 } 804 }
844 805
@@ -861,11 +822,12 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
861 * - Support charging through micro-usb port without 822 * - Support charging through micro-usb port without
862 * data connection. 823 * data connection.
863 */ 824 */
864 extcon_set_cable_state(info->edev, "USB", attached); 825 extcon_set_cable_state_(info->edev, EXTCON_USB,
826 attached);
865 827
866 if (!cable_attached) 828 if (!cable_attached)
867 extcon_set_cable_state(info->edev, "Dock-Audio", 829 extcon_set_cable_state_(info->edev, EXTCON_DOCK,
868 cable_attached); 830 cable_attached);
869 break; 831 break;
870 case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */ 832 case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
871 /* 833 /*
@@ -893,10 +855,10 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
893 if (ret < 0) 855 if (ret < 0)
894 return ret; 856 return ret;
895 857
896 extcon_set_cable_state(info->edev, "Dock-Smart", 858 extcon_set_cable_state_(info->edev, EXTCON_DOCK,
897 attached); 859 attached);
898 extcon_set_cable_state(info->edev, "MHL", attached); 860 extcon_set_cable_state_(info->edev, EXTCON_MHL,
899 861 attached);
900 break; 862 break;
901 } 863 }
902 864
@@ -929,23 +891,26 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
929 if (ret < 0) 891 if (ret < 0)
930 return ret; 892 return ret;
931 893
932 extcon_set_cable_state(info->edev, "USB", attached); 894 extcon_set_cable_state_(info->edev, EXTCON_USB,
895 attached);
933 break; 896 break;
934 case MAX77693_CHARGER_TYPE_DEDICATED_CHG: 897 case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
935 /* Only TA cable */ 898 /* Only TA cable */
936 extcon_set_cable_state(info->edev, "TA", attached); 899 extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
937 break; 900 break;
938 } 901 }
939 break; 902 break;
940 case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT: 903 case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
941 extcon_set_cable_state(info->edev, 904 extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM,
942 "Charge-downstream", attached); 905 attached);
943 break; 906 break;
944 case MAX77693_CHARGER_TYPE_APPLE_500MA: 907 case MAX77693_CHARGER_TYPE_APPLE_500MA:
945 extcon_set_cable_state(info->edev, "Slow-charger", attached); 908 extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER,
909 attached);
946 break; 910 break;
947 case MAX77693_CHARGER_TYPE_APPLE_1A_2A: 911 case MAX77693_CHARGER_TYPE_APPLE_1A_2A:
948 extcon_set_cable_state(info->edev, "Fast-charger", attached); 912 extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER,
913 attached);
949 break; 914 break;
950 case MAX77693_CHARGER_TYPE_DEAD_BATTERY: 915 case MAX77693_CHARGER_TYPE_DEAD_BATTERY:
951 break; 916 break;
@@ -1182,7 +1147,6 @@ static int max77693_muic_probe(struct platform_device *pdev)
1182 dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); 1147 dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
1183 return -ENOMEM; 1148 return -ENOMEM;
1184 } 1149 }
1185 info->edev->name = DEV_NAME;
1186 1150
1187 ret = devm_extcon_dev_register(&pdev->dev, info->edev); 1151 ret = devm_extcon_dev_register(&pdev->dev, info->edev);
1188 if (ret) { 1152 if (ret) {
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 8db6a926ea07..fac2f1417a79 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -118,36 +118,16 @@ enum max77843_muic_charger_type {
118 MAX77843_MUIC_CHG_GND, 118 MAX77843_MUIC_CHG_GND,
119}; 119};
120 120
121enum { 121static const unsigned int max77843_extcon_cable[] = {
122 MAX77843_CABLE_USB = 0, 122 EXTCON_USB,
123 MAX77843_CABLE_USB_HOST, 123 EXTCON_USB_HOST,
124 MAX77843_CABLE_TA, 124 EXTCON_TA,
125 MAX77843_CABLE_CHARGE_DOWNSTREAM, 125 EXTCON_CHARGE_DOWNSTREAM,
126 MAX77843_CABLE_FAST_CHARGER, 126 EXTCON_FAST_CHARGER,
127 MAX77843_CABLE_SLOW_CHARGER, 127 EXTCON_SLOW_CHARGER,
128 MAX77843_CABLE_MHL, 128 EXTCON_MHL,
129 MAX77843_CABLE_MHL_TA, 129 EXTCON_JIG,
130 MAX77843_CABLE_JIG_USB_ON, 130 EXTCON_NONE,
131 MAX77843_CABLE_JIG_USB_OFF,
132 MAX77843_CABLE_JIG_UART_ON,
133 MAX77843_CABLE_JIG_UART_OFF,
134
135 MAX77843_CABLE_NUM,
136};
137
138static const char *max77843_extcon_cable[] = {
139 [MAX77843_CABLE_USB] = "USB",
140 [MAX77843_CABLE_USB_HOST] = "USB-HOST",
141 [MAX77843_CABLE_TA] = "TA",
142 [MAX77843_CABLE_CHARGE_DOWNSTREAM] = "CHARGER-DOWNSTREAM",
143 [MAX77843_CABLE_FAST_CHARGER] = "FAST-CHARGER",
144 [MAX77843_CABLE_SLOW_CHARGER] = "SLOW-CHARGER",
145 [MAX77843_CABLE_MHL] = "MHL",
146 [MAX77843_CABLE_MHL_TA] = "MHL-TA",
147 [MAX77843_CABLE_JIG_USB_ON] = "JIG-USB-ON",
148 [MAX77843_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
149 [MAX77843_CABLE_JIG_UART_ON] = "JIG-UART-ON",
150 [MAX77843_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
151}; 131};
152 132
153struct max77843_muic_irq { 133struct max77843_muic_irq {
@@ -362,7 +342,7 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
362 if (ret < 0) 342 if (ret < 0)
363 return ret; 343 return ret;
364 344
365 extcon_set_cable_state(info->edev, "USB-HOST", attached); 345 extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
366 break; 346 break;
367 case MAX77843_MUIC_GND_MHL_VB: 347 case MAX77843_MUIC_GND_MHL_VB:
368 case MAX77843_MUIC_GND_MHL: 348 case MAX77843_MUIC_GND_MHL:
@@ -370,7 +350,7 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
370 if (ret < 0) 350 if (ret < 0)
371 return ret; 351 return ret;
372 352
373 extcon_set_cable_state(info->edev, "MHL", attached); 353 extcon_set_cable_state_(info->edev, EXTCON_MHL, attached);
374 break; 354 break;
375 default: 355 default:
376 dev_err(info->dev, "failed to detect %s accessory(gnd:0x%x)\n", 356 dev_err(info->dev, "failed to detect %s accessory(gnd:0x%x)\n",
@@ -385,36 +365,29 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
385 int cable_type, bool attached) 365 int cable_type, bool attached)
386{ 366{
387 int ret; 367 int ret;
368 u8 path = CONTROL1_SW_OPEN;
388 369
389 dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n", 370 dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n",
390 attached ? "attached" : "detached", cable_type); 371 attached ? "attached" : "detached", cable_type);
391 372
392 switch (cable_type) { 373 switch (cable_type) {
393 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF: 374 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
394 ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
395 if (ret < 0)
396 return ret;
397 extcon_set_cable_state(info->edev, "JIG-USB-OFF", attached);
398 break;
399 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON: 375 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
400 ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached); 376 path = CONTROL1_SW_USB;
401 if (ret < 0)
402 return ret;
403 extcon_set_cable_state(info->edev, "JIG-USB-ON", attached);
404 break; 377 break;
405 case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF: 378 case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
406 ret = max77843_muic_set_path(info, CONTROL1_SW_UART, attached); 379 path = CONTROL1_SW_UART;
407 if (ret < 0)
408 return ret;
409 extcon_set_cable_state(info->edev, "JIG-UART-OFF", attached);
410 break; 380 break;
411 default: 381 default:
412 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 382 return -EINVAL;
413 if (ret < 0)
414 return ret;
415 break;
416 } 383 }
417 384
385 ret = max77843_muic_set_path(info, path, attached);
386 if (ret < 0)
387 return ret;
388
389 extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
390
418 return 0; 391 return 0;
419} 392}
420 393
@@ -505,36 +478,38 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
505 if (ret < 0) 478 if (ret < 0)
506 return ret; 479 return ret;
507 480
508 extcon_set_cable_state(info->edev, "USB", attached); 481 extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
509 break; 482 break;
510 case MAX77843_MUIC_CHG_DOWNSTREAM: 483 case MAX77843_MUIC_CHG_DOWNSTREAM:
511 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 484 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
512 if (ret < 0) 485 if (ret < 0)
513 return ret; 486 return ret;
514 487
515 extcon_set_cable_state(info->edev, 488 extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM,
516 "CHARGER-DOWNSTREAM", attached); 489 attached);
517 break; 490 break;
518 case MAX77843_MUIC_CHG_DEDICATED: 491 case MAX77843_MUIC_CHG_DEDICATED:
519 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 492 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
520 if (ret < 0) 493 if (ret < 0)
521 return ret; 494 return ret;
522 495
523 extcon_set_cable_state(info->edev, "TA", attached); 496 extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
524 break; 497 break;
525 case MAX77843_MUIC_CHG_SPECIAL_500MA: 498 case MAX77843_MUIC_CHG_SPECIAL_500MA:
526 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 499 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
527 if (ret < 0) 500 if (ret < 0)
528 return ret; 501 return ret;
529 502
530 extcon_set_cable_state(info->edev, "SLOW-CHAREGER", attached); 503 extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER,
504 attached);
531 break; 505 break;
532 case MAX77843_MUIC_CHG_SPECIAL_1A: 506 case MAX77843_MUIC_CHG_SPECIAL_1A:
533 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached); 507 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
534 if (ret < 0) 508 if (ret < 0)
535 return ret; 509 return ret;
536 510
537 extcon_set_cable_state(info->edev, "FAST-CHARGER", attached); 511 extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER,
512 attached);
538 break; 513 break;
539 case MAX77843_MUIC_CHG_GND: 514 case MAX77843_MUIC_CHG_GND:
540 gnd_type = max77843_muic_get_cable_type(info, 515 gnd_type = max77843_muic_get_cable_type(info,
@@ -542,9 +517,9 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
542 517
543 /* Charger cable on MHL accessory is attach or detach */ 518 /* Charger cable on MHL accessory is attach or detach */
544 if (gnd_type == MAX77843_MUIC_GND_MHL_VB) 519 if (gnd_type == MAX77843_MUIC_GND_MHL_VB)
545 extcon_set_cable_state(info->edev, "MHL-TA", true); 520 extcon_set_cable_state_(info->edev, EXTCON_TA, true);
546 else if (gnd_type == MAX77843_MUIC_GND_MHL) 521 else if (gnd_type == MAX77843_MUIC_GND_MHL)
547 extcon_set_cable_state(info->edev, "MHL-TA", false); 522 extcon_set_cable_state_(info->edev, EXTCON_TA, false);
548 break; 523 break;
549 case MAX77843_MUIC_CHG_NONE: 524 case MAX77843_MUIC_CHG_NONE:
550 break; 525 break;
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 5774e56c6422..7b1ef200b121 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -145,34 +145,17 @@ struct max8997_muic_info {
145 int path_uart; 145 int path_uart;
146}; 146};
147 147
148enum { 148static const unsigned int max8997_extcon_cable[] = {
149 EXTCON_CABLE_USB = 0, 149 EXTCON_USB,
150 EXTCON_CABLE_USB_HOST, 150 EXTCON_USB_HOST,
151 EXTCON_CABLE_TA, 151 EXTCON_TA,
152 EXTCON_CABLE_FAST_CHARGER, 152 EXTCON_FAST_CHARGER,
153 EXTCON_CABLE_SLOW_CHARGER, 153 EXTCON_SLOW_CHARGER,
154 EXTCON_CABLE_CHARGE_DOWNSTREAM, 154 EXTCON_CHARGE_DOWNSTREAM,
155 EXTCON_CABLE_MHL, 155 EXTCON_MHL,
156 EXTCON_CABLE_DOCK_DESK, 156 EXTCON_DOCK,
157 EXTCON_CABLE_DOCK_CARD, 157 EXTCON_JIG,
158 EXTCON_CABLE_JIG, 158 EXTCON_NONE,
159
160 _EXTCON_CABLE_NUM,
161};
162
163static const char *max8997_extcon_cable[] = {
164 [EXTCON_CABLE_USB] = "USB",
165 [EXTCON_CABLE_USB_HOST] = "USB-Host",
166 [EXTCON_CABLE_TA] = "TA",
167 [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
168 [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
169 [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
170 [EXTCON_CABLE_MHL] = "MHL",
171 [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
172 [EXTCON_CABLE_DOCK_CARD] = "Dock-Card",
173 [EXTCON_CABLE_JIG] = "JIG",
174
175 NULL,
176}; 159};
177 160
178/* 161/*
@@ -347,10 +330,10 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
347 330
348 switch (usb_type) { 331 switch (usb_type) {
349 case MAX8997_USB_HOST: 332 case MAX8997_USB_HOST:
350 extcon_set_cable_state(info->edev, "USB-Host", attached); 333 extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
351 break; 334 break;
352 case MAX8997_USB_DEVICE: 335 case MAX8997_USB_DEVICE:
353 extcon_set_cable_state(info->edev, "USB", attached); 336 extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
354 break; 337 break;
355 default: 338 default:
356 dev_err(info->dev, "failed to detect %s usb cable\n", 339 dev_err(info->dev, "failed to detect %s usb cable\n",
@@ -374,10 +357,8 @@ static int max8997_muic_handle_dock(struct max8997_muic_info *info,
374 357
375 switch (cable_type) { 358 switch (cable_type) {
376 case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD: 359 case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
377 extcon_set_cable_state(info->edev, "Dock-desk", attached);
378 break;
379 case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON: 360 case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
380 extcon_set_cable_state(info->edev, "Dock-card", attached); 361 extcon_set_cable_state_(info->edev, EXTCON_DOCK, attached);
381 break; 362 break;
382 default: 363 default:
383 dev_err(info->dev, "failed to detect %s dock device\n", 364 dev_err(info->dev, "failed to detect %s dock device\n",
@@ -400,7 +381,7 @@ static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
400 return ret; 381 return ret;
401 } 382 }
402 383
403 extcon_set_cable_state(info->edev, "JIG", attached); 384 extcon_set_cable_state_(info->edev, EXTCON_JIG, attached);
404 385
405 return 0; 386 return 0;
406} 387}
@@ -422,7 +403,7 @@ static int max8997_muic_adc_handler(struct max8997_muic_info *info)
422 return ret; 403 return ret;
423 break; 404 break;
424 case MAX8997_MUIC_ADC_MHL: 405 case MAX8997_MUIC_ADC_MHL:
425 extcon_set_cable_state(info->edev, "MHL", attached); 406 extcon_set_cable_state_(info->edev, EXTCON_MHL, attached);
426 break; 407 break;
427 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF: 408 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
428 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON: 409 case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
@@ -505,17 +486,19 @@ static int max8997_muic_chg_handler(struct max8997_muic_info *info)
505 } 486 }
506 break; 487 break;
507 case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT: 488 case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
508 extcon_set_cable_state(info->edev, 489 extcon_set_cable_state_(info->edev, EXTCON_CHARGE_DOWNSTREAM,
509 "Charge-downstream", attached); 490 attached);
510 break; 491 break;
511 case MAX8997_CHARGER_TYPE_DEDICATED_CHG: 492 case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
512 extcon_set_cable_state(info->edev, "TA", attached); 493 extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
513 break; 494 break;
514 case MAX8997_CHARGER_TYPE_500MA: 495 case MAX8997_CHARGER_TYPE_500MA:
515 extcon_set_cable_state(info->edev, "Slow-charger", attached); 496 extcon_set_cable_state_(info->edev, EXTCON_SLOW_CHARGER,
497 attached);
516 break; 498 break;
517 case MAX8997_CHARGER_TYPE_1A: 499 case MAX8997_CHARGER_TYPE_1A:
518 extcon_set_cable_state(info->edev, "Fast-charger", attached); 500 extcon_set_cable_state_(info->edev, EXTCON_FAST_CHARGER,
501 attached);
519 break; 502 break;
520 default: 503 default:
521 dev_err(info->dev, 504 dev_err(info->dev,
@@ -700,7 +683,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
700 ret = -ENOMEM; 683 ret = -ENOMEM;
701 goto err_irq; 684 goto err_irq;
702 } 685 }
703 info->edev->name = DEV_NAME;
704 686
705 ret = devm_extcon_dev_register(&pdev->dev, info->edev); 687 ret = devm_extcon_dev_register(&pdev->dev, info->edev);
706 if (ret) { 688 if (ret) {
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 11c6757b6c40..080d5cc27055 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -29,10 +29,10 @@
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31 31
32static const char *palmas_extcon_cable[] = { 32static const unsigned int palmas_extcon_cable[] = {
33 [0] = "USB", 33 EXTCON_USB,
34 [1] = "USB-HOST", 34 EXTCON_USB_HOST,
35 NULL, 35 EXTCON_NONE,
36}; 36};
37 37
38static const int mutually_exclusive[] = {0x3, 0x0}; 38static const int mutually_exclusive[] = {0x3, 0x0};
@@ -49,6 +49,7 @@ static void palmas_usb_wakeup(struct palmas *palmas, int enable)
49static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb) 49static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
50{ 50{
51 struct palmas_usb *palmas_usb = _palmas_usb; 51 struct palmas_usb *palmas_usb = _palmas_usb;
52 struct extcon_dev *edev = palmas_usb->edev;
52 unsigned int vbus_line_state; 53 unsigned int vbus_line_state;
53 54
54 palmas_read(palmas_usb->palmas, PALMAS_INTERRUPT_BASE, 55 palmas_read(palmas_usb->palmas, PALMAS_INTERRUPT_BASE,
@@ -57,7 +58,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
57 if (vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS) { 58 if (vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS) {
58 if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) { 59 if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
59 palmas_usb->linkstat = PALMAS_USB_STATE_VBUS; 60 palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
60 extcon_set_cable_state(palmas_usb->edev, "USB", true); 61 extcon_set_cable_state_(edev, EXTCON_USB, true);
61 dev_info(palmas_usb->dev, "USB cable is attached\n"); 62 dev_info(palmas_usb->dev, "USB cable is attached\n");
62 } else { 63 } else {
63 dev_dbg(palmas_usb->dev, 64 dev_dbg(palmas_usb->dev,
@@ -66,7 +67,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
66 } else if (!(vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS)) { 67 } else if (!(vbus_line_state & PALMAS_INT3_LINE_STATE_VBUS)) {
67 if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) { 68 if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
68 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; 69 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
69 extcon_set_cable_state(palmas_usb->edev, "USB", false); 70 extcon_set_cable_state_(edev, EXTCON_USB, false);
70 dev_info(palmas_usb->dev, "USB cable is detached\n"); 71 dev_info(palmas_usb->dev, "USB cable is detached\n");
71 } else { 72 } else {
72 dev_dbg(palmas_usb->dev, 73 dev_dbg(palmas_usb->dev,
@@ -81,6 +82,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
81{ 82{
82 unsigned int set, id_src; 83 unsigned int set, id_src;
83 struct palmas_usb *palmas_usb = _palmas_usb; 84 struct palmas_usb *palmas_usb = _palmas_usb;
85 struct extcon_dev *edev = palmas_usb->edev;
84 86
85 palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE, 87 palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
86 PALMAS_USB_ID_INT_LATCH_SET, &set); 88 PALMAS_USB_ID_INT_LATCH_SET, &set);
@@ -93,7 +95,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
93 PALMAS_USB_ID_INT_LATCH_CLR, 95 PALMAS_USB_ID_INT_LATCH_CLR,
94 PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND); 96 PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
95 palmas_usb->linkstat = PALMAS_USB_STATE_ID; 97 palmas_usb->linkstat = PALMAS_USB_STATE_ID;
96 extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true); 98 extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
97 dev_info(palmas_usb->dev, "USB-HOST cable is attached\n"); 99 dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
98 } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) && 100 } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
99 (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) { 101 (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
@@ -101,17 +103,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
101 PALMAS_USB_ID_INT_LATCH_CLR, 103 PALMAS_USB_ID_INT_LATCH_CLR,
102 PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT); 104 PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
103 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; 105 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
104 extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false); 106 extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
105 dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); 107 dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
106 } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) && 108 } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
107 (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) { 109 (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
108 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; 110 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
109 extcon_set_cable_state(palmas_usb->edev, "USB-HOST", false); 111 extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
110 dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); 112 dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
111 } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) && 113 } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
112 (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) { 114 (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
113 palmas_usb->linkstat = PALMAS_USB_STATE_ID; 115 palmas_usb->linkstat = PALMAS_USB_STATE_ID;
114 extcon_set_cable_state(palmas_usb->edev, "USB-HOST", true); 116 extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
115 dev_info(palmas_usb->dev, " USB-HOST cable is attached\n"); 117 dev_info(palmas_usb->dev, " USB-HOST cable is attached\n");
116 } 118 }
117 119
@@ -193,7 +195,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
193 dev_err(&pdev->dev, "failed to allocate extcon device\n"); 195 dev_err(&pdev->dev, "failed to allocate extcon device\n");
194 return -ENOMEM; 196 return -ENOMEM;
195 } 197 }
196 palmas_usb->edev->name = kstrdup(node->name, GFP_KERNEL);
197 palmas_usb->edev->mutually_exclusive = mutually_exclusive; 198 palmas_usb->edev->mutually_exclusive = mutually_exclusive;
198 199
199 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); 200 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 9ccd5af89d1c..92c939221a41 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -90,27 +90,12 @@ static struct reg_data rt8973a_reg_data[] = {
90}; 90};
91 91
92/* List of detectable cables */ 92/* List of detectable cables */
93enum { 93static const unsigned int rt8973a_extcon_cable[] = {
94 EXTCON_CABLE_USB = 0, 94 EXTCON_USB,
95 EXTCON_CABLE_USB_HOST, 95 EXTCON_USB_HOST,
96 EXTCON_CABLE_TA, 96 EXTCON_TA,
97 EXTCON_CABLE_JIG_OFF_USB, 97 EXTCON_JIG,
98 EXTCON_CABLE_JIG_ON_USB, 98 EXTCON_NONE,
99 EXTCON_CABLE_JIG_OFF_UART,
100 EXTCON_CABLE_JIG_ON_UART,
101
102 EXTCON_CABLE_END,
103};
104
105static const char *rt8973a_extcon_cable[] = {
106 [EXTCON_CABLE_USB] = "USB",
107 [EXTCON_CABLE_USB_HOST] = "USB-Host",
108 [EXTCON_CABLE_TA] = "TA",
109 [EXTCON_CABLE_JIG_OFF_USB] = "JIG-USB-OFF",
110 [EXTCON_CABLE_JIG_ON_USB] = "JIG-USB-ON",
111 [EXTCON_CABLE_JIG_OFF_UART] = "JIG-UART-OFF",
112 [EXTCON_CABLE_JIG_ON_UART] = "JIG-UART-ON",
113 NULL,
114}; 99};
115 100
116/* Define OVP (Over Voltage Protection), OTP (Over Temperature Protection) */ 101/* Define OVP (Over Voltage Protection), OTP (Over Temperature Protection) */
@@ -313,14 +298,11 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
313 enum rt8973a_event_type event) 298 enum rt8973a_event_type event)
314{ 299{
315 static unsigned int prev_cable_type; 300 static unsigned int prev_cable_type;
316 const char **cable_names = info->edev->supported_cable;
317 unsigned int con_sw = DM_DP_SWITCH_UART; 301 unsigned int con_sw = DM_DP_SWITCH_UART;
318 int ret, idx = 0, cable_type; 302 int ret, cable_type;
303 unsigned int id;
319 bool attached = false; 304 bool attached = false;
320 305
321 if (!cable_names)
322 return 0;
323
324 switch (event) { 306 switch (event) {
325 case RT8973A_EVENT_ATTACH: 307 case RT8973A_EVENT_ATTACH:
326 cable_type = rt8973a_muic_get_cable_type(info); 308 cable_type = rt8973a_muic_get_cable_type(info);
@@ -347,31 +329,25 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
347 329
348 switch (cable_type) { 330 switch (cable_type) {
349 case RT8973A_MUIC_ADC_OTG: 331 case RT8973A_MUIC_ADC_OTG:
350 idx = EXTCON_CABLE_USB_HOST; 332 id = EXTCON_USB_HOST;
351 con_sw = DM_DP_SWITCH_USB; 333 con_sw = DM_DP_SWITCH_USB;
352 break; 334 break;
353 case RT8973A_MUIC_ADC_TA: 335 case RT8973A_MUIC_ADC_TA:
354 idx = EXTCON_CABLE_TA; 336 id = EXTCON_TA;
355 con_sw = DM_DP_SWITCH_OPEN; 337 con_sw = DM_DP_SWITCH_OPEN;
356 break; 338 break;
357 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB: 339 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB:
358 idx = EXTCON_CABLE_JIG_OFF_USB;
359 con_sw = DM_DP_SWITCH_UART;
360 break;
361 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB: 340 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB:
362 idx = EXTCON_CABLE_JIG_ON_USB; 341 id = EXTCON_JIG;
363 con_sw = DM_DP_SWITCH_UART; 342 con_sw = DM_DP_SWITCH_USB;
364 break; 343 break;
365 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART: 344 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART:
366 idx = EXTCON_CABLE_JIG_OFF_UART;
367 con_sw = DM_DP_SWITCH_UART;
368 break;
369 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART: 345 case RT8973A_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART:
370 idx = EXTCON_CABLE_JIG_ON_UART; 346 id = EXTCON_JIG;
371 con_sw = DM_DP_SWITCH_UART; 347 con_sw = DM_DP_SWITCH_UART;
372 break; 348 break;
373 case RT8973A_MUIC_ADC_USB: 349 case RT8973A_MUIC_ADC_USB:
374 idx = EXTCON_CABLE_USB; 350 id = EXTCON_USB;
375 con_sw = DM_DP_SWITCH_USB; 351 con_sw = DM_DP_SWITCH_USB;
376 break; 352 break;
377 case RT8973A_MUIC_ADC_OPEN: 353 case RT8973A_MUIC_ADC_OPEN:
@@ -421,7 +397,7 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
421 return ret; 397 return ret;
422 398
423 /* Change the state of external accessory */ 399 /* Change the state of external accessory */
424 extcon_set_cable_state(info->edev, cable_names[idx], attached); 400 extcon_set_cable_state_(info->edev, id, attached);
425 401
426 return 0; 402 return 0;
427} 403}
@@ -643,7 +619,6 @@ static int rt8973a_muic_i2c_probe(struct i2c_client *i2c,
643 dev_err(info->dev, "failed to allocate memory for extcon\n"); 619 dev_err(info->dev, "failed to allocate memory for extcon\n");
644 return -ENOMEM; 620 return -ENOMEM;
645 } 621 }
646 info->edev->name = np->name;
647 622
648 /* Register extcon device */ 623 /* Register extcon device */
649 ret = devm_extcon_dev_register(info->dev, info->edev); 624 ret = devm_extcon_dev_register(info->dev, info->edev);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 2f93cf307852..817dece23b4c 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -92,19 +92,11 @@ static struct reg_data sm5502_reg_data[] = {
92}; 92};
93 93
94/* List of detectable cables */ 94/* List of detectable cables */
95enum { 95static const unsigned int sm5502_extcon_cable[] = {
96 EXTCON_CABLE_USB = 0, 96 EXTCON_USB,
97 EXTCON_CABLE_USB_HOST, 97 EXTCON_USB_HOST,
98 EXTCON_CABLE_TA, 98 EXTCON_TA,
99 99 EXTCON_NONE,
100 EXTCON_CABLE_END,
101};
102
103static const char *sm5502_extcon_cable[] = {
104 [EXTCON_CABLE_USB] = "USB",
105 [EXTCON_CABLE_USB_HOST] = "USB-Host",
106 [EXTCON_CABLE_TA] = "TA",
107 NULL,
108}; 100};
109 101
110/* Define supported accessory type */ 102/* Define supported accessory type */
@@ -377,16 +369,12 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
377 bool attached) 369 bool attached)
378{ 370{
379 static unsigned int prev_cable_type = SM5502_MUIC_ADC_GROUND; 371 static unsigned int prev_cable_type = SM5502_MUIC_ADC_GROUND;
380 const char **cable_names = info->edev->supported_cable;
381 unsigned int cable_type = SM5502_MUIC_ADC_GROUND; 372 unsigned int cable_type = SM5502_MUIC_ADC_GROUND;
382 unsigned int con_sw = DM_DP_SWITCH_OPEN; 373 unsigned int con_sw = DM_DP_SWITCH_OPEN;
383 unsigned int vbus_sw = VBUSIN_SWITCH_OPEN; 374 unsigned int vbus_sw = VBUSIN_SWITCH_OPEN;
384 unsigned int idx = 0; 375 unsigned int id;
385 int ret; 376 int ret;
386 377
387 if (!cable_names)
388 return 0;
389
390 /* Get the type of attached or detached cable */ 378 /* Get the type of attached or detached cable */
391 if (attached) 379 if (attached)
392 cable_type = sm5502_muic_get_cable_type(info); 380 cable_type = sm5502_muic_get_cable_type(info);
@@ -396,17 +384,17 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
396 384
397 switch (cable_type) { 385 switch (cable_type) {
398 case SM5502_MUIC_ADC_OPEN_USB: 386 case SM5502_MUIC_ADC_OPEN_USB:
399 idx = EXTCON_CABLE_USB; 387 id = EXTCON_USB;
400 con_sw = DM_DP_SWITCH_USB; 388 con_sw = DM_DP_SWITCH_USB;
401 vbus_sw = VBUSIN_SWITCH_VBUSOUT_WITH_USB; 389 vbus_sw = VBUSIN_SWITCH_VBUSOUT_WITH_USB;
402 break; 390 break;
403 case SM5502_MUIC_ADC_OPEN_TA: 391 case SM5502_MUIC_ADC_OPEN_TA:
404 idx = EXTCON_CABLE_TA; 392 id = EXTCON_TA;
405 con_sw = DM_DP_SWITCH_OPEN; 393 con_sw = DM_DP_SWITCH_OPEN;
406 vbus_sw = VBUSIN_SWITCH_VBUSOUT; 394 vbus_sw = VBUSIN_SWITCH_VBUSOUT;
407 break; 395 break;
408 case SM5502_MUIC_ADC_OPEN_USB_OTG: 396 case SM5502_MUIC_ADC_OPEN_USB_OTG:
409 idx = EXTCON_CABLE_USB_HOST; 397 id = EXTCON_USB_HOST;
410 con_sw = DM_DP_SWITCH_USB; 398 con_sw = DM_DP_SWITCH_USB;
411 vbus_sw = VBUSIN_SWITCH_OPEN; 399 vbus_sw = VBUSIN_SWITCH_OPEN;
412 break; 400 break;
@@ -422,7 +410,7 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
422 return ret; 410 return ret;
423 411
424 /* Change the state of external accessory */ 412 /* Change the state of external accessory */
425 extcon_set_cable_state(info->edev, cable_names[idx], attached); 413 extcon_set_cable_state_(info->edev, id, attached);
426 414
427 return 0; 415 return 0;
428} 416}
@@ -623,7 +611,6 @@ static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
623 dev_err(info->dev, "failed to allocate memory for extcon\n"); 611 dev_err(info->dev, "failed to allocate memory for extcon\n");
624 return -ENOMEM; 612 return -ENOMEM;
625 } 613 }
626 info->edev->name = np->name;
627 614
628 /* Register extcon device */ 615 /* Register extcon device */
629 ret = devm_extcon_dev_register(info->dev, info->edev); 616 ret = devm_extcon_dev_register(info->dev, info->edev);
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index e45d1f13f445..a2a44536a608 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/extcon.h> 17#include <linux/extcon.h>
18#include <linux/gpio/consumer.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/interrupt.h> 20#include <linux/interrupt.h>
20#include <linux/irq.h> 21#include <linux/irq.h>
@@ -38,18 +39,10 @@ struct usb_extcon_info {
38 struct delayed_work wq_detcable; 39 struct delayed_work wq_detcable;
39}; 40};
40 41
41/* List of detectable cables */ 42static const unsigned int usb_extcon_cable[] = {
42enum { 43 EXTCON_USB,
43 EXTCON_CABLE_USB = 0, 44 EXTCON_USB_HOST,
44 EXTCON_CABLE_USB_HOST, 45 EXTCON_NONE,
45
46 EXTCON_CABLE_END,
47};
48
49static const char *usb_extcon_cable[] = {
50 [EXTCON_CABLE_USB] = "USB",
51 [EXTCON_CABLE_USB_HOST] = "USB-HOST",
52 NULL,
53}; 46};
54 47
55static void usb_extcon_detect_cable(struct work_struct *work) 48static void usb_extcon_detect_cable(struct work_struct *work)
@@ -67,24 +60,16 @@ static void usb_extcon_detect_cable(struct work_struct *work)
67 * As we don't have event for USB peripheral cable attached, 60 * As we don't have event for USB peripheral cable attached,
68 * we simulate USB peripheral attach here. 61 * we simulate USB peripheral attach here.
69 */ 62 */
70 extcon_set_cable_state(info->edev, 63 extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, false);
71 usb_extcon_cable[EXTCON_CABLE_USB_HOST], 64 extcon_set_cable_state_(info->edev, EXTCON_USB, true);
72 false);
73 extcon_set_cable_state(info->edev,
74 usb_extcon_cable[EXTCON_CABLE_USB],
75 true);
76 } else { 65 } else {
77 /* 66 /*
78 * ID = 0 means USB HOST cable attached. 67 * ID = 0 means USB HOST cable attached.
79 * As we don't have event for USB peripheral cable detached, 68 * As we don't have event for USB peripheral cable detached,
80 * we simulate USB peripheral detach here. 69 * we simulate USB peripheral detach here.
81 */ 70 */
82 extcon_set_cable_state(info->edev, 71 extcon_set_cable_state_(info->edev, EXTCON_USB, false);
83 usb_extcon_cable[EXTCON_CABLE_USB], 72 extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, true);
84 false);
85 extcon_set_cable_state(info->edev,
86 usb_extcon_cable[EXTCON_CABLE_USB_HOST],
87 true);
88 } 73 }
89} 74}
90 75
@@ -113,7 +98,7 @@ static int usb_extcon_probe(struct platform_device *pdev)
113 return -ENOMEM; 98 return -ENOMEM;
114 99
115 info->dev = dev; 100 info->dev = dev;
116 info->id_gpiod = devm_gpiod_get(&pdev->dev, "id"); 101 info->id_gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
117 if (IS_ERR(info->id_gpiod)) { 102 if (IS_ERR(info->id_gpiod)) {
118 dev_err(dev, "failed to get ID GPIO\n"); 103 dev_err(dev, "failed to get ID GPIO\n");
119 return PTR_ERR(info->id_gpiod); 104 return PTR_ERR(info->id_gpiod);
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 4c9f165e4a04..76157ab9faf3 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1,8 +1,11 @@
1/* 1/*
2 * drivers/extcon/extcon_class.c 2 * drivers/extcon/extcon.c - External Connector (extcon) framework.
3 * 3 *
4 * External connector (extcon) class driver 4 * External connector (extcon) class driver
5 * 5 *
6 * Copyright (C) 2015 Samsung Electronics
7 * Author: Chanwoo Choi <cw00.choi@samsung.com>
8 *
6 * Copyright (C) 2012 Samsung Electronics 9 * Copyright (C) 2012 Samsung Electronics
7 * Author: Donggeun Kim <dg77.kim@samsung.com> 10 * Author: Donggeun Kim <dg77.kim@samsung.com>
8 * Author: MyungJoo Ham <myungjoo.ham@samsung.com> 11 * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -19,8 +22,7 @@
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details. 24 * GNU General Public License for more details.
22 * 25 */
23*/
24 26
25#include <linux/module.h> 27#include <linux/module.h>
26#include <linux/types.h> 28#include <linux/types.h>
@@ -33,36 +35,43 @@
33#include <linux/slab.h> 35#include <linux/slab.h>
34#include <linux/sysfs.h> 36#include <linux/sysfs.h>
35 37
36/* 38#define SUPPORTED_CABLE_MAX 32
37 * extcon_cable_name suggests the standard cable names for commonly used 39#define CABLE_NAME_MAX 30
38 * cable types. 40
39 * 41static const char *extcon_name[] = {
40 * However, please do not use extcon_cable_name directly for extcon_dev 42 [EXTCON_NONE] = "NONE",
41 * struct's supported_cable pointer unless your device really supports 43
42 * every single port-type of the following cable names. Please choose cable 44 /* USB external connector */
43 * names that are actually used in your extcon device.
44 */
45const char extcon_cable_name[][CABLE_NAME_MAX + 1] = {
46 [EXTCON_USB] = "USB", 45 [EXTCON_USB] = "USB",
47 [EXTCON_USB_HOST] = "USB-Host", 46 [EXTCON_USB_HOST] = "USB-HOST",
47
48 /* Charger external connector */
48 [EXTCON_TA] = "TA", 49 [EXTCON_TA] = "TA",
49 [EXTCON_FAST_CHARGER] = "Fast-charger", 50 [EXTCON_FAST_CHARGER] = "FAST-CHARGER",
50 [EXTCON_SLOW_CHARGER] = "Slow-charger", 51 [EXTCON_SLOW_CHARGER] = "SLOW-CHARGER",
51 [EXTCON_CHARGE_DOWNSTREAM] = "Charge-downstream", 52 [EXTCON_CHARGE_DOWNSTREAM] = "CHARGE-DOWNSTREAM",
53
54 /* Audio/Video external connector */
55 [EXTCON_LINE_IN] = "LINE-IN",
56 [EXTCON_LINE_OUT] = "LINE-OUT",
57 [EXTCON_MICROPHONE] = "MICROPHONE",
58 [EXTCON_HEADPHONE] = "HEADPHONE",
59
52 [EXTCON_HDMI] = "HDMI", 60 [EXTCON_HDMI] = "HDMI",
53 [EXTCON_MHL] = "MHL", 61 [EXTCON_MHL] = "MHL",
54 [EXTCON_DVI] = "DVI", 62 [EXTCON_DVI] = "DVI",
55 [EXTCON_VGA] = "VGA", 63 [EXTCON_VGA] = "VGA",
56 [EXTCON_DOCK] = "Dock", 64 [EXTCON_SPDIF_IN] = "SPDIF-IN",
57 [EXTCON_LINE_IN] = "Line-in", 65 [EXTCON_SPDIF_OUT] = "SPDIF-OUT",
58 [EXTCON_LINE_OUT] = "Line-out", 66 [EXTCON_VIDEO_IN] = "VIDEO-IN",
59 [EXTCON_MIC_IN] = "Microphone", 67 [EXTCON_VIDEO_OUT] = "VIDEO-OUT",
60 [EXTCON_HEADPHONE_OUT] = "Headphone", 68
61 [EXTCON_SPDIF_IN] = "SPDIF-in", 69 /* Etc external connector */
62 [EXTCON_SPDIF_OUT] = "SPDIF-out", 70 [EXTCON_DOCK] = "DOCK",
63 [EXTCON_VIDEO_IN] = "Video-in", 71 [EXTCON_JIG] = "JIG",
64 [EXTCON_VIDEO_OUT] = "Video-out", 72 [EXTCON_MECHANICAL] = "MECHANICAL",
65 [EXTCON_MECHANICAL] = "Mechanical", 73
74 NULL,
66}; 75};
67 76
68static struct class *extcon_class; 77static struct class *extcon_class;
@@ -102,6 +111,51 @@ static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
102 return 0; 111 return 0;
103} 112}
104 113
114static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id)
115{
116 int i;
117
118 /* Find the the index of extcon cable in edev->supported_cable */
119 for (i = 0; i < edev->max_supported; i++) {
120 if (edev->supported_cable[i] == id)
121 return i;
122 }
123
124 return -EINVAL;
125}
126
127static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
128{
129 unsigned int id = EXTCON_NONE;
130 int i = 0;
131
132 if (edev->max_supported == 0)
133 return -EINVAL;
134
135 /* Find the the number of extcon cable */
136 while (extcon_name[i]) {
137 if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
138 id = i;
139 break;
140 }
141 }
142
143 if (id == EXTCON_NONE)
144 return -EINVAL;
145
146 return find_cable_index_by_id(edev, id);
147}
148
149static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
150{
151 if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
152 *attached = new ? true : false;
153 return true;
154 }
155
156 return false;
157}
158
105static ssize_t state_show(struct device *dev, struct device_attribute *attr, 159static ssize_t state_show(struct device *dev, struct device_attribute *attr,
106 char *buf) 160 char *buf)
107{ 161{
@@ -119,11 +173,9 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
119 if (edev->max_supported == 0) 173 if (edev->max_supported == 0)
120 return sprintf(buf, "%u\n", edev->state); 174 return sprintf(buf, "%u\n", edev->state);
121 175
122 for (i = 0; i < SUPPORTED_CABLE_MAX; i++) { 176 for (i = 0; i < edev->max_supported; i++) {
123 if (!edev->supported_cable[i])
124 break;
125 count += sprintf(buf + count, "%s=%d\n", 177 count += sprintf(buf + count, "%s=%d\n",
126 edev->supported_cable[i], 178 extcon_name[edev->supported_cable[i]],
127 !!(edev->state & (1 << i))); 179 !!(edev->state & (1 << i)));
128 } 180 }
129 181
@@ -155,15 +207,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
155{ 207{
156 struct extcon_dev *edev = dev_get_drvdata(dev); 208 struct extcon_dev *edev = dev_get_drvdata(dev);
157 209
158 /* Optional callback given by the user */ 210 return sprintf(buf, "%s\n", edev->name);
159 if (edev->print_name) {
160 int ret = edev->print_name(edev, buf);
161
162 if (ret >= 0)
163 return ret;
164 }
165
166 return sprintf(buf, "%s\n", dev_name(&edev->dev));
167} 211}
168static DEVICE_ATTR_RO(name); 212static DEVICE_ATTR_RO(name);
169 213
@@ -172,9 +216,10 @@ static ssize_t cable_name_show(struct device *dev,
172{ 216{
173 struct extcon_cable *cable = container_of(attr, struct extcon_cable, 217 struct extcon_cable *cable = container_of(attr, struct extcon_cable,
174 attr_name); 218 attr_name);
219 int i = cable->cable_index;
175 220
176 return sprintf(buf, "%s\n", 221 return sprintf(buf, "%s\n",
177 cable->edev->supported_cable[cable->cable_index]); 222 extcon_name[cable->edev->supported_cable[i]]);
178} 223}
179 224
180static ssize_t cable_state_show(struct device *dev, 225static ssize_t cable_state_show(struct device *dev,
@@ -211,23 +256,27 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
211 char *envp[3]; 256 char *envp[3];
212 int env_offset = 0; 257 int env_offset = 0;
213 int length; 258 int length;
259 int index;
214 unsigned long flags; 260 unsigned long flags;
261 bool attached;
215 262
216 spin_lock_irqsave(&edev->lock, flags); 263 spin_lock_irqsave(&edev->lock, flags);
217 264
218 if (edev->state != ((edev->state & ~mask) | (state & mask))) { 265 if (edev->state != ((edev->state & ~mask) | (state & mask))) {
219 u32 old_state = edev->state;
220
221 if (check_mutually_exclusive(edev, (edev->state & ~mask) | 266 if (check_mutually_exclusive(edev, (edev->state & ~mask) |
222 (state & mask))) { 267 (state & mask))) {
223 spin_unlock_irqrestore(&edev->lock, flags); 268 spin_unlock_irqrestore(&edev->lock, flags);
224 return -EPERM; 269 return -EPERM;
225 } 270 }
226 271
272 for (index = 0; index < edev->max_supported; index++) {
273 if (is_extcon_changed(edev->state, state, index, &attached))
274 raw_notifier_call_chain(&edev->nh[index], attached, edev);
275 }
276
227 edev->state &= ~mask; 277 edev->state &= ~mask;
228 edev->state |= state & mask; 278 edev->state |= state & mask;
229 279
230 raw_notifier_call_chain(&edev->nh, old_state, edev);
231 /* This could be in interrupt handler */ 280 /* This could be in interrupt handler */
232 prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); 281 prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
233 if (prop_buf) { 282 if (prop_buf) {
@@ -284,39 +333,19 @@ int extcon_set_state(struct extcon_dev *edev, u32 state)
284EXPORT_SYMBOL_GPL(extcon_set_state); 333EXPORT_SYMBOL_GPL(extcon_set_state);
285 334
286/** 335/**
287 * extcon_find_cable_index() - Get the cable index based on the cable name. 336 * extcon_get_cable_state_() - Get the status of a specific cable.
288 * @edev: the extcon device that has the cable. 337 * @edev: the extcon device that has the cable.
289 * @cable_name: cable name to be searched. 338 * @id: the unique id of each external connector in extcon enumeration.
290 *
291 * Note that accessing a cable state based on cable_index is faster than
292 * cable_name because using cable_name induces a loop with strncmp().
293 * Thus, when get/set_cable_state is repeatedly used, using cable_index
294 * is recommended.
295 */ 339 */
296int extcon_find_cable_index(struct extcon_dev *edev, const char *cable_name) 340int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
297{ 341{
298 int i; 342 int index;
299 343
300 if (edev->supported_cable) { 344 index = find_cable_index_by_id(edev, id);
301 for (i = 0; edev->supported_cable[i]; i++) { 345 if (index < 0)
302 if (!strncmp(edev->supported_cable[i], 346 return index;
303 cable_name, CABLE_NAME_MAX))
304 return i;
305 }
306 }
307 347
308 return -EINVAL; 348 if (edev->max_supported && edev->max_supported <= index)
309}
310EXPORT_SYMBOL_GPL(extcon_find_cable_index);
311
312/**
313 * extcon_get_cable_state_() - Get the status of a specific cable.
314 * @edev: the extcon device that has the cable.
315 * @index: cable index that can be retrieved by extcon_find_cable_index().
316 */
317int extcon_get_cable_state_(struct extcon_dev *edev, int index)
318{
319 if (index < 0 || (edev->max_supported && edev->max_supported <= index))
320 return -EINVAL; 349 return -EINVAL;
321 350
322 return !!(edev->state & (1 << index)); 351 return !!(edev->state & (1 << index));
@@ -332,7 +361,7 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
332 */ 361 */
333int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name) 362int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
334{ 363{
335 return extcon_get_cable_state_(edev, extcon_find_cable_index 364 return extcon_get_cable_state_(edev, find_cable_index_by_name
336 (edev, cable_name)); 365 (edev, cable_name));
337} 366}
338EXPORT_SYMBOL_GPL(extcon_get_cable_state); 367EXPORT_SYMBOL_GPL(extcon_get_cable_state);
@@ -340,17 +369,22 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state);
340/** 369/**
341 * extcon_set_cable_state_() - Set the status of a specific cable. 370 * extcon_set_cable_state_() - Set the status of a specific cable.
342 * @edev: the extcon device that has the cable. 371 * @edev: the extcon device that has the cable.
343 * @index: cable index that can be retrieved by 372 * @id: the unique id of each external connector
344 * extcon_find_cable_index(). 373 * in extcon enumeration.
345 * @cable_state: the new cable status. The default semantics is 374 * @state: the new cable status. The default semantics is
346 * true: attached / false: detached. 375 * true: attached / false: detached.
347 */ 376 */
348int extcon_set_cable_state_(struct extcon_dev *edev, 377int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
349 int index, bool cable_state) 378 bool cable_state)
350{ 379{
351 u32 state; 380 u32 state;
381 int index;
382
383 index = find_cable_index_by_id(edev, id);
384 if (index < 0)
385 return index;
352 386
353 if (index < 0 || (edev->max_supported && edev->max_supported <= index)) 387 if (edev->max_supported && edev->max_supported <= index)
354 return -EINVAL; 388 return -EINVAL;
355 389
356 state = cable_state ? (1 << index) : 0; 390 state = cable_state ? (1 << index) : 0;
@@ -370,7 +404,7 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
370int extcon_set_cable_state(struct extcon_dev *edev, 404int extcon_set_cable_state(struct extcon_dev *edev,
371 const char *cable_name, bool cable_state) 405 const char *cable_name, bool cable_state)
372{ 406{
373 return extcon_set_cable_state_(edev, extcon_find_cable_index 407 return extcon_set_cable_state_(edev, find_cable_index_by_name
374 (edev, cable_name), cable_state); 408 (edev, cable_name), cable_state);
375} 409}
376EXPORT_SYMBOL_GPL(extcon_set_cable_state); 410EXPORT_SYMBOL_GPL(extcon_set_cable_state);
@@ -395,29 +429,6 @@ out:
395} 429}
396EXPORT_SYMBOL_GPL(extcon_get_extcon_dev); 430EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
397 431
398static int _call_per_cable(struct notifier_block *nb, unsigned long val,
399 void *ptr)
400{
401 struct extcon_specific_cable_nb *obj = container_of(nb,
402 struct extcon_specific_cable_nb, internal_nb);
403 struct extcon_dev *edev = ptr;
404
405 if ((val & (1 << obj->cable_index)) !=
406 (edev->state & (1 << obj->cable_index))) {
407 bool cable_state = true;
408
409 obj->previous_value = val;
410
411 if (val & (1 << obj->cable_index))
412 cable_state = false;
413
414 return obj->user_nb->notifier_call(obj->user_nb,
415 cable_state, ptr);
416 }
417
418 return NOTIFY_OK;
419}
420
421/** 432/**
422 * extcon_register_interest() - Register a notifier for a state change of a 433 * extcon_register_interest() - Register a notifier for a state change of a
423 * specific cable, not an entier set of cables of a 434 * specific cable, not an entier set of cables of a
@@ -456,20 +467,18 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
456 if (!obj->edev) 467 if (!obj->edev)
457 return -ENODEV; 468 return -ENODEV;
458 469
459 obj->cable_index = extcon_find_cable_index(obj->edev, 470 obj->cable_index = find_cable_index_by_name(obj->edev,
460 cable_name); 471 cable_name);
461 if (obj->cable_index < 0) 472 if (obj->cable_index < 0)
462 return obj->cable_index; 473 return obj->cable_index;
463 474
464 obj->user_nb = nb; 475 obj->user_nb = nb;
465 476
466 obj->internal_nb.notifier_call = _call_per_cable;
467
468 spin_lock_irqsave(&obj->edev->lock, flags); 477 spin_lock_irqsave(&obj->edev->lock, flags);
469 ret = raw_notifier_chain_register(&obj->edev->nh, 478 ret = raw_notifier_chain_register(
470 &obj->internal_nb); 479 &obj->edev->nh[obj->cable_index],
480 obj->user_nb);
471 spin_unlock_irqrestore(&obj->edev->lock, flags); 481 spin_unlock_irqrestore(&obj->edev->lock, flags);
472 return ret;
473 } else { 482 } else {
474 struct class_dev_iter iter; 483 struct class_dev_iter iter;
475 struct extcon_dev *extd; 484 struct extcon_dev *extd;
@@ -481,7 +490,7 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
481 while ((dev = class_dev_iter_next(&iter))) { 490 while ((dev = class_dev_iter_next(&iter))) {
482 extd = dev_get_drvdata(dev); 491 extd = dev_get_drvdata(dev);
483 492
484 if (extcon_find_cable_index(extd, cable_name) < 0) 493 if (find_cable_index_by_name(extd, cable_name) < 0)
485 continue; 494 continue;
486 495
487 class_dev_iter_exit(&iter); 496 class_dev_iter_exit(&iter);
@@ -489,8 +498,10 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
489 cable_name, nb); 498 cable_name, nb);
490 } 499 }
491 500
492 return -ENODEV; 501 ret = -ENODEV;
493 } 502 }
503
504 return ret;
494} 505}
495EXPORT_SYMBOL_GPL(extcon_register_interest); 506EXPORT_SYMBOL_GPL(extcon_register_interest);
496 507
@@ -509,7 +520,8 @@ int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
509 return -EINVAL; 520 return -EINVAL;
510 521
511 spin_lock_irqsave(&obj->edev->lock, flags); 522 spin_lock_irqsave(&obj->edev->lock, flags);
512 ret = raw_notifier_chain_unregister(&obj->edev->nh, &obj->internal_nb); 523 ret = raw_notifier_chain_unregister(
524 &obj->edev->nh[obj->cable_index], obj->user_nb);
513 spin_unlock_irqrestore(&obj->edev->lock, flags); 525 spin_unlock_irqrestore(&obj->edev->lock, flags);
514 526
515 return ret; 527 return ret;
@@ -519,21 +531,24 @@ EXPORT_SYMBOL_GPL(extcon_unregister_interest);
519/** 531/**
520 * extcon_register_notifier() - Register a notifiee to get notified by 532 * extcon_register_notifier() - Register a notifiee to get notified by
521 * any attach status changes from the extcon. 533 * any attach status changes from the extcon.
522 * @edev: the extcon device. 534 * @edev: the extcon device that has the external connecotr.
535 * @id: the unique id of each external connector in extcon enumeration.
523 * @nb: a notifier block to be registered. 536 * @nb: a notifier block to be registered.
524 * 537 *
525 * Note that the second parameter given to the callback of nb (val) is 538 * Note that the second parameter given to the callback of nb (val) is
526 * "old_state", not the current state. The current state can be retrieved 539 * "old_state", not the current state. The current state can be retrieved
527 * by looking at the third pameter (edev pointer)'s state value. 540 * by looking at the third pameter (edev pointer)'s state value.
528 */ 541 */
529int extcon_register_notifier(struct extcon_dev *edev, 542int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
530 struct notifier_block *nb) 543 struct notifier_block *nb)
531{ 544{
532 unsigned long flags; 545 unsigned long flags;
533 int ret; 546 int ret, idx;
547
548 idx = find_cable_index_by_id(edev, id);
534 549
535 spin_lock_irqsave(&edev->lock, flags); 550 spin_lock_irqsave(&edev->lock, flags);
536 ret = raw_notifier_chain_register(&edev->nh, nb); 551 ret = raw_notifier_chain_register(&edev->nh[idx], nb);
537 spin_unlock_irqrestore(&edev->lock, flags); 552 spin_unlock_irqrestore(&edev->lock, flags);
538 553
539 return ret; 554 return ret;
@@ -542,17 +557,20 @@ EXPORT_SYMBOL_GPL(extcon_register_notifier);
542 557
543/** 558/**
544 * extcon_unregister_notifier() - Unregister a notifiee from the extcon device. 559 * extcon_unregister_notifier() - Unregister a notifiee from the extcon device.
545 * @edev: the extcon device. 560 * @edev: the extcon device that has the external connecotr.
546 * @nb: a registered notifier block to be unregistered. 561 * @id: the unique id of each external connector in extcon enumeration.
562 * @nb: a notifier block to be registered.
547 */ 563 */
548int extcon_unregister_notifier(struct extcon_dev *edev, 564int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
549 struct notifier_block *nb) 565 struct notifier_block *nb)
550{ 566{
551 unsigned long flags; 567 unsigned long flags;
552 int ret; 568 int ret, idx;
569
570 idx = find_cable_index_by_id(edev, id);
553 571
554 spin_lock_irqsave(&edev->lock, flags); 572 spin_lock_irqsave(&edev->lock, flags);
555 ret = raw_notifier_chain_unregister(&edev->nh, nb); 573 ret = raw_notifier_chain_unregister(&edev->nh[idx], nb);
556 spin_unlock_irqrestore(&edev->lock, flags); 574 spin_unlock_irqrestore(&edev->lock, flags);
557 575
558 return ret; 576 return ret;
@@ -595,7 +613,7 @@ static void dummy_sysfs_dev_release(struct device *dev)
595 613
596/* 614/*
597 * extcon_dev_allocate() - Allocate the memory of extcon device. 615 * extcon_dev_allocate() - Allocate the memory of extcon device.
598 * @supported_cable: Array of supported cable names ending with NULL. 616 * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
599 * If supported_cable is NULL, cable name related APIs 617 * If supported_cable is NULL, cable name related APIs
600 * are disabled. 618 * are disabled.
601 * 619 *
@@ -605,7 +623,7 @@ static void dummy_sysfs_dev_release(struct device *dev)
605 * 623 *
606 * Return the pointer of extcon device if success or ERR_PTR(err) if fail 624 * Return the pointer of extcon device if success or ERR_PTR(err) if fail
607 */ 625 */
608struct extcon_dev *extcon_dev_allocate(const char **supported_cable) 626struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
609{ 627{
610 struct extcon_dev *edev; 628 struct extcon_dev *edev;
611 629
@@ -647,7 +665,7 @@ static void devm_extcon_dev_release(struct device *dev, void *res)
647/** 665/**
648 * devm_extcon_dev_allocate - Allocate managed extcon device 666 * devm_extcon_dev_allocate - Allocate managed extcon device
649 * @dev: device owning the extcon device being created 667 * @dev: device owning the extcon device being created
650 * @supported_cable: Array of supported cable names ending with NULL. 668 * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
651 * If supported_cable is NULL, cable name related APIs 669 * If supported_cable is NULL, cable name related APIs
652 * are disabled. 670 * are disabled.
653 * 671 *
@@ -659,7 +677,7 @@ static void devm_extcon_dev_release(struct device *dev, void *res)
659 * or ERR_PTR(err) if fail 677 * or ERR_PTR(err) if fail
660 */ 678 */
661struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, 679struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
662 const char **supported_cable) 680 const unsigned int *supported_cable)
663{ 681{
664 struct extcon_dev **ptr, *edev; 682 struct extcon_dev **ptr, *edev;
665 683
@@ -701,6 +719,7 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
701int extcon_dev_register(struct extcon_dev *edev) 719int extcon_dev_register(struct extcon_dev *edev)
702{ 720{
703 int ret, index = 0; 721 int ret, index = 0;
722 static atomic_t edev_no = ATOMIC_INIT(-1);
704 723
705 if (!extcon_class) { 724 if (!extcon_class) {
706 ret = create_extcon_class(); 725 ret = create_extcon_class();
@@ -708,30 +727,29 @@ int extcon_dev_register(struct extcon_dev *edev)
708 return ret; 727 return ret;
709 } 728 }
710 729
711 if (edev->supported_cable) { 730 if (!edev->supported_cable)
712 /* Get size of array */ 731 return -EINVAL;
713 for (index = 0; edev->supported_cable[index]; index++) 732
714 ; 733 for (; edev->supported_cable[index] != EXTCON_NONE; index++);
715 edev->max_supported = index;
716 } else {
717 edev->max_supported = 0;
718 }
719 734
735 edev->max_supported = index;
720 if (index > SUPPORTED_CABLE_MAX) { 736 if (index > SUPPORTED_CABLE_MAX) {
721 dev_err(&edev->dev, "extcon: maximum number of supported cables exceeded.\n"); 737 dev_err(&edev->dev,
738 "exceed the maximum number of supported cables\n");
722 return -EINVAL; 739 return -EINVAL;
723 } 740 }
724 741
725 edev->dev.class = extcon_class; 742 edev->dev.class = extcon_class;
726 edev->dev.release = extcon_dev_release; 743 edev->dev.release = extcon_dev_release;
727 744
728 edev->name = edev->name ? edev->name : dev_name(edev->dev.parent); 745 edev->name = dev_name(edev->dev.parent);
729 if (IS_ERR_OR_NULL(edev->name)) { 746 if (IS_ERR_OR_NULL(edev->name)) {
730 dev_err(&edev->dev, 747 dev_err(&edev->dev,
731 "extcon device name is null\n"); 748 "extcon device name is null\n");
732 return -EINVAL; 749 return -EINVAL;
733 } 750 }
734 dev_set_name(&edev->dev, "%s", edev->name); 751 dev_set_name(&edev->dev, "extcon%lu",
752 (unsigned long)atomic_inc_return(&edev_no));
735 753
736 if (edev->max_supported) { 754 if (edev->max_supported) {
737 char buf[10]; 755 char buf[10];
@@ -864,7 +882,15 @@ int extcon_dev_register(struct extcon_dev *edev)
864 882
865 spin_lock_init(&edev->lock); 883 spin_lock_init(&edev->lock);
866 884
867 RAW_INIT_NOTIFIER_HEAD(&edev->nh); 885 edev->nh = devm_kzalloc(&edev->dev,
886 sizeof(*edev->nh) * edev->max_supported, GFP_KERNEL);
887 if (!edev->nh) {
888 ret = -ENOMEM;
889 goto err_dev;
890 }
891
892 for (index = 0; index < edev->max_supported; index++)
893 RAW_INIT_NOTIFIER_HEAD(&edev->nh[index]);
868 894
869 dev_set_drvdata(&edev->dev, edev); 895 dev_set_drvdata(&edev->dev, edev);
870 edev->state = 0; 896 edev->state = 0;
@@ -1044,6 +1070,15 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
1044#endif /* CONFIG_OF */ 1070#endif /* CONFIG_OF */
1045EXPORT_SYMBOL_GPL(extcon_get_edev_by_phandle); 1071EXPORT_SYMBOL_GPL(extcon_get_edev_by_phandle);
1046 1072
1073/**
1074 * extcon_get_edev_name() - Get the name of the extcon device.
1075 * @edev: the extcon device
1076 */
1077const char *extcon_get_edev_name(struct extcon_dev *edev)
1078{
1079 return !edev ? NULL : edev->name;
1080}
1081
1047static int __init extcon_class_init(void) 1082static int __init extcon_class_init(void)
1048{ 1083{
1049 return create_extcon_class(); 1084 return create_extcon_class();
@@ -1059,6 +1094,7 @@ static void __exit extcon_class_exit(void)
1059} 1094}
1060module_exit(extcon_class_exit); 1095module_exit(extcon_class_exit);
1061 1096
1097MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
1062MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); 1098MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
1063MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>"); 1099MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
1064MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 1100MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index 5e4dfa4cfe22..39c9b2c08d33 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
5hv_vmbus-y := vmbus_drv.o \ 5hv_vmbus-y := vmbus_drv.o \
6 hv.o connection.o channel.o \ 6 hv.o connection.o channel.o \
7 channel_mgmt.o ring_buffer.o 7 channel_mgmt.o ring_buffer.o
8hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o 8hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 54da66dc7d16..603ce97e9027 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -73,6 +73,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
73 unsigned long flags; 73 unsigned long flags;
74 int ret, err = 0; 74 int ret, err = 0;
75 unsigned long t; 75 unsigned long t;
76 struct page *page;
76 77
77 spin_lock_irqsave(&newchannel->lock, flags); 78 spin_lock_irqsave(&newchannel->lock, flags);
78 if (newchannel->state == CHANNEL_OPEN_STATE) { 79 if (newchannel->state == CHANNEL_OPEN_STATE) {
@@ -87,8 +88,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
87 newchannel->channel_callback_context = context; 88 newchannel->channel_callback_context = context;
88 89
89 /* Allocate the ring buffer */ 90 /* Allocate the ring buffer */
90 out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 91 page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
91 get_order(send_ringbuffer_size + recv_ringbuffer_size)); 92 GFP_KERNEL|__GFP_ZERO,
93 get_order(send_ringbuffer_size +
94 recv_ringbuffer_size));
95
96 if (!page)
97 out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
98 get_order(send_ringbuffer_size +
99 recv_ringbuffer_size));
100 else
101 out = (void *)page_address(page);
92 102
93 if (!out) { 103 if (!out) {
94 err = -ENOMEM; 104 err = -ENOMEM;
@@ -178,19 +188,18 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
178 goto error1; 188 goto error1;
179 } 189 }
180 190
181
182 if (open_info->response.open_result.status)
183 err = open_info->response.open_result.status;
184
185 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 191 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
186 list_del(&open_info->msglistentry); 192 list_del(&open_info->msglistentry);
187 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 193 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
188 194
189 if (err == 0) 195 if (open_info->response.open_result.status) {
190 newchannel->state = CHANNEL_OPENED_STATE; 196 err = -EAGAIN;
197 goto error_gpadl;
198 }
191 199
200 newchannel->state = CHANNEL_OPENED_STATE;
192 kfree(open_info); 201 kfree(open_info);
193 return err; 202 return 0;
194 203
195error1: 204error1:
196 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 205 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0eeb1b3bc048..4506a6623618 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -32,6 +32,9 @@
32 32
33#include "hyperv_vmbus.h" 33#include "hyperv_vmbus.h"
34 34
35static void init_vp_index(struct vmbus_channel *channel,
36 const uuid_le *type_guid);
37
35/** 38/**
36 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message 39 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
37 * @icmsghdrp: Pointer to msg header structure 40 * @icmsghdrp: Pointer to msg header structure
@@ -205,6 +208,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
205 primary_channel = channel->primary_channel; 208 primary_channel = channel->primary_channel;
206 spin_lock_irqsave(&primary_channel->lock, flags); 209 spin_lock_irqsave(&primary_channel->lock, flags);
207 list_del(&channel->sc_list); 210 list_del(&channel->sc_list);
211 primary_channel->num_sc--;
208 spin_unlock_irqrestore(&primary_channel->lock, flags); 212 spin_unlock_irqrestore(&primary_channel->lock, flags);
209 } 213 }
210 free_channel(channel); 214 free_channel(channel);
@@ -212,11 +216,16 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
212 216
213void vmbus_free_channels(void) 217void vmbus_free_channels(void)
214{ 218{
215 struct vmbus_channel *channel; 219 struct vmbus_channel *channel, *tmp;
220
221 list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
222 listentry) {
223 /* if we don't set rescind to true, vmbus_close_internal()
224 * won't invoke hv_process_channel_removal().
225 */
226 channel->rescind = true;
216 227
217 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
218 vmbus_device_unregister(channel->device_obj); 228 vmbus_device_unregister(channel->device_obj);
219 free_channel(channel);
220 } 229 }
221} 230}
222 231
@@ -228,7 +237,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
228{ 237{
229 struct vmbus_channel *channel; 238 struct vmbus_channel *channel;
230 bool fnew = true; 239 bool fnew = true;
231 bool enq = false;
232 unsigned long flags; 240 unsigned long flags;
233 241
234 /* Make sure this is a new offer */ 242 /* Make sure this is a new offer */
@@ -244,25 +252,12 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
244 } 252 }
245 } 253 }
246 254
247 if (fnew) { 255 if (fnew)
248 list_add_tail(&newchannel->listentry, 256 list_add_tail(&newchannel->listentry,
249 &vmbus_connection.chn_list); 257 &vmbus_connection.chn_list);
250 enq = true;
251 }
252 258
253 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 259 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
254 260
255 if (enq) {
256 if (newchannel->target_cpu != get_cpu()) {
257 put_cpu();
258 smp_call_function_single(newchannel->target_cpu,
259 percpu_channel_enq,
260 newchannel, true);
261 } else {
262 percpu_channel_enq(newchannel);
263 put_cpu();
264 }
265 }
266 if (!fnew) { 261 if (!fnew) {
267 /* 262 /*
268 * Check to see if this is a sub-channel. 263 * Check to see if this is a sub-channel.
@@ -274,27 +269,22 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
274 newchannel->primary_channel = channel; 269 newchannel->primary_channel = channel;
275 spin_lock_irqsave(&channel->lock, flags); 270 spin_lock_irqsave(&channel->lock, flags);
276 list_add_tail(&newchannel->sc_list, &channel->sc_list); 271 list_add_tail(&newchannel->sc_list, &channel->sc_list);
277 spin_unlock_irqrestore(&channel->lock, flags);
278
279 if (newchannel->target_cpu != get_cpu()) {
280 put_cpu();
281 smp_call_function_single(newchannel->target_cpu,
282 percpu_channel_enq,
283 newchannel, true);
284 } else {
285 percpu_channel_enq(newchannel);
286 put_cpu();
287 }
288
289 newchannel->state = CHANNEL_OPEN_STATE;
290 channel->num_sc++; 272 channel->num_sc++;
291 if (channel->sc_creation_callback != NULL) 273 spin_unlock_irqrestore(&channel->lock, flags);
292 channel->sc_creation_callback(newchannel); 274 } else
275 goto err_free_chan;
276 }
293 277
294 return; 278 init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);
295 }
296 279
297 goto err_free_chan; 280 if (newchannel->target_cpu != get_cpu()) {
281 put_cpu();
282 smp_call_function_single(newchannel->target_cpu,
283 percpu_channel_enq,
284 newchannel, true);
285 } else {
286 percpu_channel_enq(newchannel);
287 put_cpu();
298 } 288 }
299 289
300 /* 290 /*
@@ -304,6 +294,12 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
304 */ 294 */
305 newchannel->state = CHANNEL_OPEN_STATE; 295 newchannel->state = CHANNEL_OPEN_STATE;
306 296
297 if (!fnew) {
298 if (channel->sc_creation_callback != NULL)
299 channel->sc_creation_callback(newchannel);
300 return;
301 }
302
307 /* 303 /*
308 * Start the process of binding this offer to the driver 304 * Start the process of binding this offer to the driver
309 * We need to set the DeviceObject field before calling 305 * We need to set the DeviceObject field before calling
@@ -374,23 +370,27 @@ static const struct hv_vmbus_device_id hp_devs[] = {
374/* 370/*
375 * We use this state to statically distribute the channel interrupt load. 371 * We use this state to statically distribute the channel interrupt load.
376 */ 372 */
377static u32 next_vp; 373static int next_numa_node_id;
378 374
379/* 375/*
380 * Starting with Win8, we can statically distribute the incoming 376 * Starting with Win8, we can statically distribute the incoming
381 * channel interrupt load by binding a channel to VCPU. We 377 * channel interrupt load by binding a channel to VCPU.
382 * implement here a simple round robin scheme for distributing 378 * We do this in a hierarchical fashion:
383 * the interrupt load. 379 * First distribute the primary channels across available NUMA nodes
384 * We will bind channels that are not performance critical to cpu 0 and 380 * and then distribute the subchannels amongst the CPUs in the NUMA
385 * performance critical channels (IDE, SCSI and Network) will be uniformly 381 * node assigned to the primary channel.
386 * distributed across all available CPUs. 382 *
383 * For pre-win8 hosts or non-performance critical channels we assign the
384 * first CPU in the first NUMA node.
387 */ 385 */
388static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid) 386static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
389{ 387{
390 u32 cur_cpu; 388 u32 cur_cpu;
391 int i; 389 int i;
392 bool perf_chn = false; 390 bool perf_chn = false;
393 u32 max_cpus = num_online_cpus(); 391 struct vmbus_channel *primary = channel->primary_channel;
392 int next_node;
393 struct cpumask available_mask;
394 394
395 for (i = IDE; i < MAX_PERF_CHN; i++) { 395 for (i = IDE; i < MAX_PERF_CHN; i++) {
396 if (!memcmp(type_guid->b, hp_devs[i].guid, 396 if (!memcmp(type_guid->b, hp_devs[i].guid,
@@ -407,16 +407,77 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
407 * Also if the channel is not a performance critical 407 * Also if the channel is not a performance critical
408 * channel, bind it to cpu 0. 408 * channel, bind it to cpu 0.
409 */ 409 */
410 channel->numa_node = 0;
411 cpumask_set_cpu(0, &channel->alloced_cpus_in_node);
410 channel->target_cpu = 0; 412 channel->target_cpu = 0;
411 channel->target_vp = 0; 413 channel->target_vp = hv_context.vp_index[0];
412 return; 414 return;
413 } 415 }
414 cur_cpu = (++next_vp % max_cpus); 416
417 /*
418 * We distribute primary channels evenly across all the available
419 * NUMA nodes and within the assigned NUMA node we will assign the
420 * first available CPU to the primary channel.
421 * The sub-channels will be assigned to the CPUs available in the
422 * NUMA node evenly.
423 */
424 if (!primary) {
425 while (true) {
426 next_node = next_numa_node_id++;
427 if (next_node == nr_node_ids)
428 next_node = next_numa_node_id = 0;
429 if (cpumask_empty(cpumask_of_node(next_node)))
430 continue;
431 break;
432 }
433 channel->numa_node = next_node;
434 primary = channel;
435 }
436
437 if (cpumask_weight(&primary->alloced_cpus_in_node) ==
438 cpumask_weight(cpumask_of_node(primary->numa_node))) {
439 /*
440 * We have cycled through all the CPUs in the node;
441 * reset the alloced map.
442 */
443 cpumask_clear(&primary->alloced_cpus_in_node);
444 }
445
446 cpumask_xor(&available_mask, &primary->alloced_cpus_in_node,
447 cpumask_of_node(primary->numa_node));
448
449 cur_cpu = cpumask_next(-1, &available_mask);
450 cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
451
415 channel->target_cpu = cur_cpu; 452 channel->target_cpu = cur_cpu;
416 channel->target_vp = hv_context.vp_index[cur_cpu]; 453 channel->target_vp = hv_context.vp_index[cur_cpu];
417} 454}
418 455
419/* 456/*
457 * vmbus_unload_response - Handler for the unload response.
458 */
459static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
460{
461 /*
462 * This is a global event; just wakeup the waiting thread.
463 * Once we successfully unload, we can cleanup the monitor state.
464 */
465 complete(&vmbus_connection.unload_event);
466}
467
468void vmbus_initiate_unload(void)
469{
470 struct vmbus_channel_message_header hdr;
471
472 init_completion(&vmbus_connection.unload_event);
473 memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
474 hdr.msgtype = CHANNELMSG_UNLOAD;
475 vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
476
477 wait_for_completion(&vmbus_connection.unload_event);
478}
479
480/*
420 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition. 481 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
421 * 482 *
422 */ 483 */
@@ -461,8 +522,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
461 offer->connection_id; 522 offer->connection_id;
462 } 523 }
463 524
464 init_vp_index(newchannel, &offer->offer.if_type);
465
466 memcpy(&newchannel->offermsg, offer, 525 memcpy(&newchannel->offermsg, offer,
467 sizeof(struct vmbus_channel_offer_channel)); 526 sizeof(struct vmbus_channel_offer_channel));
468 newchannel->monitor_grp = (u8)offer->monitorid / 32; 527 newchannel->monitor_grp = (u8)offer->monitorid / 32;
@@ -712,6 +771,7 @@ struct vmbus_channel_message_table_entry
712 {CHANNELMSG_INITIATE_CONTACT, 0, NULL}, 771 {CHANNELMSG_INITIATE_CONTACT, 0, NULL},
713 {CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response}, 772 {CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response},
714 {CHANNELMSG_UNLOAD, 0, NULL}, 773 {CHANNELMSG_UNLOAD, 0, NULL},
774 {CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response},
715}; 775};
716 776
717/* 777/*
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index b27220a425f4..4fc2e8836e60 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -58,6 +58,9 @@ static __u32 vmbus_get_next_version(__u32 current_version)
58 case (VERSION_WIN8_1): 58 case (VERSION_WIN8_1):
59 return VERSION_WIN8; 59 return VERSION_WIN8;
60 60
61 case (VERSION_WIN10):
62 return VERSION_WIN8_1;
63
61 case (VERSION_WS2008): 64 case (VERSION_WS2008):
62 default: 65 default:
63 return VERSION_INVAL; 66 return VERSION_INVAL;
@@ -80,7 +83,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
80 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); 83 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
81 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); 84 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
82 msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); 85 msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
83 if (version == VERSION_WIN8_1) { 86 if (version >= VERSION_WIN8_1) {
84 msg->target_vcpu = hv_context.vp_index[get_cpu()]; 87 msg->target_vcpu = hv_context.vp_index[get_cpu()];
85 put_cpu(); 88 put_cpu();
86 } 89 }
@@ -227,6 +230,11 @@ cleanup:
227 230
228void vmbus_disconnect(void) 231void vmbus_disconnect(void)
229{ 232{
233 /*
234 * First send the unload request to the host.
235 */
236 vmbus_initiate_unload();
237
230 if (vmbus_connection.work_queue) { 238 if (vmbus_connection.work_queue) {
231 drain_workqueue(vmbus_connection.work_queue); 239 drain_workqueue(vmbus_connection.work_queue);
232 destroy_workqueue(vmbus_connection.work_queue); 240 destroy_workqueue(vmbus_connection.work_queue);
@@ -371,8 +379,7 @@ void vmbus_on_event(unsigned long data)
371 int cpu = smp_processor_id(); 379 int cpu = smp_processor_id();
372 union hv_synic_event_flags *event; 380 union hv_synic_event_flags *event;
373 381
374 if ((vmbus_proto_version == VERSION_WS2008) || 382 if (vmbus_proto_version < VERSION_WIN8) {
375 (vmbus_proto_version == VERSION_WIN7)) {
376 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5; 383 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
377 recv_int_page = vmbus_connection.recv_int_page; 384 recv_int_page = vmbus_connection.recv_int_page;
378 } else { 385 } else {
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index cb5b7dc9797f..8a725cd69ad7 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -567,7 +567,9 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
567 case MEM_ONLINE: 567 case MEM_ONLINE:
568 dm_device.num_pages_onlined += mem->nr_pages; 568 dm_device.num_pages_onlined += mem->nr_pages;
569 case MEM_CANCEL_ONLINE: 569 case MEM_CANCEL_ONLINE:
570 mutex_unlock(&dm_device.ha_region_mutex); 570 if (val == MEM_ONLINE ||
571 mutex_is_locked(&dm_device.ha_region_mutex))
572 mutex_unlock(&dm_device.ha_region_mutex);
571 if (dm_device.ha_waiting) { 573 if (dm_device.ha_waiting) {
572 dm_device.ha_waiting = false; 574 dm_device.ha_waiting = false;
573 complete(&dm_device.ol_waitevent); 575 complete(&dm_device.ol_waitevent);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index cd453e4b2a07..b50dd330cf31 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -19,17 +19,13 @@
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 21
22#include <linux/semaphore.h>
23#include <linux/fs.h>
24#include <linux/nls.h> 22#include <linux/nls.h>
25#include <linux/workqueue.h> 23#include <linux/workqueue.h>
26#include <linux/cdev.h>
27#include <linux/hyperv.h> 24#include <linux/hyperv.h>
28#include <linux/sched.h> 25#include <linux/sched.h>
29#include <linux/uaccess.h>
30#include <linux/miscdevice.h>
31 26
32#include "hyperv_vmbus.h" 27#include "hyperv_vmbus.h"
28#include "hv_utils_transport.h"
33 29
34#define WIN8_SRV_MAJOR 1 30#define WIN8_SRV_MAJOR 1
35#define WIN8_SRV_MINOR 1 31#define WIN8_SRV_MINOR 1
@@ -47,39 +43,31 @@
47 * ensure this by serializing packet processing in this driver - we do not 43 * ensure this by serializing packet processing in this driver - we do not
48 * read additional packets from the VMBUs until the current packet is fully 44 * read additional packets from the VMBUs until the current packet is fully
49 * handled. 45 * handled.
50 *
51 * The transaction "active" state is set when we receive a request from the
52 * host and we cleanup this state when the transaction is completed - when we
53 * respond to the host with our response. When the transaction active state is
54 * set, we defer handling incoming packets.
55 */ 46 */
56 47
57static struct { 48static struct {
58 bool active; /* transaction status - active or not */ 49 int state; /* hvutil_device_state */
59 int recv_len; /* number of bytes received. */ 50 int recv_len; /* number of bytes received. */
60 struct hv_fcopy_hdr *fcopy_msg; /* current message */ 51 struct hv_fcopy_hdr *fcopy_msg; /* current message */
61 struct hv_start_fcopy message; /* sent to daemon */
62 struct vmbus_channel *recv_channel; /* chn we got the request */ 52 struct vmbus_channel *recv_channel; /* chn we got the request */
63 u64 recv_req_id; /* request ID. */ 53 u64 recv_req_id; /* request ID. */
64 void *fcopy_context; /* for the channel callback */ 54 void *fcopy_context; /* for the channel callback */
65 struct semaphore read_sema;
66} fcopy_transaction; 55} fcopy_transaction;
67 56
68static bool opened; /* currently device opened */
69
70/*
71 * Before we can accept copy messages from the host, we need
72 * to handshake with the user level daemon. This state tracks
73 * if we are in the handshake phase.
74 */
75static bool in_hand_shake = true;
76static void fcopy_send_data(void);
77static void fcopy_respond_to_host(int error); 57static void fcopy_respond_to_host(int error);
78static void fcopy_work_func(struct work_struct *dummy); 58static void fcopy_send_data(struct work_struct *dummy);
79static DECLARE_DELAYED_WORK(fcopy_work, fcopy_work_func); 59static void fcopy_timeout_func(struct work_struct *dummy);
60static DECLARE_DELAYED_WORK(fcopy_timeout_work, fcopy_timeout_func);
61static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
62static const char fcopy_devname[] = "vmbus/hv_fcopy";
80static u8 *recv_buffer; 63static u8 *recv_buffer;
64static struct hvutil_transport *hvt;
65/*
66 * This state maintains the version number registered by the daemon.
67 */
68static int dm_reg_value;
81 69
82static void fcopy_work_func(struct work_struct *dummy) 70static void fcopy_timeout_func(struct work_struct *dummy)
83{ 71{
84 /* 72 /*
85 * If the timer fires, the user-mode component has not responded; 73 * If the timer fires, the user-mode component has not responded;
@@ -87,23 +75,28 @@ static void fcopy_work_func(struct work_struct *dummy)
87 */ 75 */
88 fcopy_respond_to_host(HV_E_FAIL); 76 fcopy_respond_to_host(HV_E_FAIL);
89 77
90 /* In the case the user-space daemon crashes, hangs or is killed, we 78 /* Transaction is finished, reset the state. */
91 * need to down the semaphore, otherwise, after the daemon starts next 79 if (fcopy_transaction.state > HVUTIL_READY)
92 * time, the obsolete data in fcopy_transaction.message or 80 fcopy_transaction.state = HVUTIL_READY;
93 * fcopy_transaction.fcopy_msg will be used immediately.
94 *
95 * NOTE: fcopy_read() happens to get the semaphore (very rare)? We're
96 * still OK, because we've reported the failure to the host.
97 */
98 if (down_trylock(&fcopy_transaction.read_sema))
99 ;
100 81
82 hv_poll_channel(fcopy_transaction.fcopy_context,
83 hv_fcopy_onchannelcallback);
101} 84}
102 85
103static int fcopy_handle_handshake(u32 version) 86static int fcopy_handle_handshake(u32 version)
104{ 87{
88 u32 our_ver = FCOPY_CURRENT_VERSION;
89
105 switch (version) { 90 switch (version) {
106 case FCOPY_CURRENT_VERSION: 91 case FCOPY_VERSION_0:
92 /* Daemon doesn't expect us to reply */
93 dm_reg_value = version;
94 break;
95 case FCOPY_VERSION_1:
96 /* Daemon expects us to reply with our own version */
97 if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver)))
98 return -EFAULT;
99 dm_reg_value = version;
107 break; 100 break;
108 default: 101 default:
109 /* 102 /*
@@ -114,20 +107,20 @@ static int fcopy_handle_handshake(u32 version)
114 */ 107 */
115 return -EINVAL; 108 return -EINVAL;
116 } 109 }
117 pr_info("FCP: user-mode registering done. Daemon version: %d\n", 110 pr_debug("FCP: userspace daemon ver. %d registered\n", version);
118 version); 111 fcopy_transaction.state = HVUTIL_READY;
119 fcopy_transaction.active = false; 112 hv_poll_channel(fcopy_transaction.fcopy_context,
120 if (fcopy_transaction.fcopy_context) 113 hv_fcopy_onchannelcallback);
121 hv_fcopy_onchannelcallback(fcopy_transaction.fcopy_context);
122 in_hand_shake = false;
123 return 0; 114 return 0;
124} 115}
125 116
126static void fcopy_send_data(void) 117static void fcopy_send_data(struct work_struct *dummy)
127{ 118{
128 struct hv_start_fcopy *smsg_out = &fcopy_transaction.message; 119 struct hv_start_fcopy smsg_out;
129 int operation = fcopy_transaction.fcopy_msg->operation; 120 int operation = fcopy_transaction.fcopy_msg->operation;
130 struct hv_start_fcopy *smsg_in; 121 struct hv_start_fcopy *smsg_in;
122 void *out_src;
123 int rc, out_len;
131 124
132 /* 125 /*
133 * The strings sent from the host are encoded in 126 * The strings sent from the host are encoded in
@@ -142,26 +135,39 @@ static void fcopy_send_data(void)
142 135
143 switch (operation) { 136 switch (operation) {
144 case START_FILE_COPY: 137 case START_FILE_COPY:
145 memset(smsg_out, 0, sizeof(struct hv_start_fcopy)); 138 out_len = sizeof(struct hv_start_fcopy);
146 smsg_out->hdr.operation = operation; 139 memset(&smsg_out, 0, out_len);
140 smsg_out.hdr.operation = operation;
147 smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg; 141 smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
148 142
149 utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH, 143 utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
150 UTF16_LITTLE_ENDIAN, 144 UTF16_LITTLE_ENDIAN,
151 (__u8 *)smsg_out->file_name, W_MAX_PATH - 1); 145 (__u8 *)&smsg_out.file_name, W_MAX_PATH - 1);
152 146
153 utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH, 147 utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
154 UTF16_LITTLE_ENDIAN, 148 UTF16_LITTLE_ENDIAN,
155 (__u8 *)smsg_out->path_name, W_MAX_PATH - 1); 149 (__u8 *)&smsg_out.path_name, W_MAX_PATH - 1);
156 150
157 smsg_out->copy_flags = smsg_in->copy_flags; 151 smsg_out.copy_flags = smsg_in->copy_flags;
158 smsg_out->file_size = smsg_in->file_size; 152 smsg_out.file_size = smsg_in->file_size;
153 out_src = &smsg_out;
159 break; 154 break;
160 155
161 default: 156 default:
157 out_src = fcopy_transaction.fcopy_msg;
158 out_len = fcopy_transaction.recv_len;
162 break; 159 break;
163 } 160 }
164 up(&fcopy_transaction.read_sema); 161
162 fcopy_transaction.state = HVUTIL_USERSPACE_REQ;
163 rc = hvutil_transport_send(hvt, out_src, out_len);
164 if (rc) {
165 pr_debug("FCP: failed to communicate to the daemon: %d\n", rc);
166 if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
167 fcopy_respond_to_host(HV_E_FAIL);
168 fcopy_transaction.state = HVUTIL_READY;
169 }
170 }
165 return; 171 return;
166} 172}
167 173
@@ -189,8 +195,6 @@ fcopy_respond_to_host(int error)
189 channel = fcopy_transaction.recv_channel; 195 channel = fcopy_transaction.recv_channel;
190 req_id = fcopy_transaction.recv_req_id; 196 req_id = fcopy_transaction.recv_req_id;
191 197
192 fcopy_transaction.active = false;
193
194 icmsghdr = (struct icmsg_hdr *) 198 icmsghdr = (struct icmsg_hdr *)
195 &recv_buffer[sizeof(struct vmbuspipe_hdr)]; 199 &recv_buffer[sizeof(struct vmbuspipe_hdr)];
196 200
@@ -218,7 +222,7 @@ void hv_fcopy_onchannelcallback(void *context)
218 int util_fw_version; 222 int util_fw_version;
219 int fcopy_srv_version; 223 int fcopy_srv_version;
220 224
221 if (fcopy_transaction.active) { 225 if (fcopy_transaction.state > HVUTIL_READY) {
222 /* 226 /*
223 * We will defer processing this callback once 227 * We will defer processing this callback once
224 * the current transaction is complete. 228 * the current transaction is complete.
@@ -226,6 +230,7 @@ void hv_fcopy_onchannelcallback(void *context)
226 fcopy_transaction.fcopy_context = context; 230 fcopy_transaction.fcopy_context = context;
227 return; 231 return;
228 } 232 }
233 fcopy_transaction.fcopy_context = NULL;
229 234
230 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, 235 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
231 &requestid); 236 &requestid);
@@ -249,17 +254,23 @@ void hv_fcopy_onchannelcallback(void *context)
249 * transaction; note transactions are serialized. 254 * transaction; note transactions are serialized.
250 */ 255 */
251 256
252 fcopy_transaction.active = true;
253 fcopy_transaction.recv_len = recvlen; 257 fcopy_transaction.recv_len = recvlen;
254 fcopy_transaction.recv_channel = channel; 258 fcopy_transaction.recv_channel = channel;
255 fcopy_transaction.recv_req_id = requestid; 259 fcopy_transaction.recv_req_id = requestid;
256 fcopy_transaction.fcopy_msg = fcopy_msg; 260 fcopy_transaction.fcopy_msg = fcopy_msg;
257 261
262 if (fcopy_transaction.state < HVUTIL_READY) {
263 /* Userspace is not registered yet */
264 fcopy_respond_to_host(HV_E_FAIL);
265 return;
266 }
267 fcopy_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
268
258 /* 269 /*
259 * Send the information to the user-level daemon. 270 * Send the information to the user-level daemon.
260 */ 271 */
261 schedule_delayed_work(&fcopy_work, 5*HZ); 272 schedule_work(&fcopy_send_work);
262 fcopy_send_data(); 273 schedule_delayed_work(&fcopy_timeout_work, 5*HZ);
263 return; 274 return;
264 } 275 }
265 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; 276 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
@@ -267,155 +278,44 @@ void hv_fcopy_onchannelcallback(void *context)
267 VM_PKT_DATA_INBAND, 0); 278 VM_PKT_DATA_INBAND, 0);
268} 279}
269 280
270/* 281/* Callback when data is received from userspace */
271 * Create a char device that can support read/write for passing 282static int fcopy_on_msg(void *msg, int len)
272 * the payload.
273 */
274
275static ssize_t fcopy_read(struct file *file, char __user *buf,
276 size_t count, loff_t *ppos)
277{
278 void *src;
279 size_t copy_size;
280 int operation;
281
282 /*
283 * Wait until there is something to be read.
284 */
285 if (down_interruptible(&fcopy_transaction.read_sema))
286 return -EINTR;
287
288 /*
289 * The channel may be rescinded and in this case, we will wakeup the
290 * the thread blocked on the semaphore and we will use the opened
291 * state to correctly handle this case.
292 */
293 if (!opened)
294 return -ENODEV;
295
296 operation = fcopy_transaction.fcopy_msg->operation;
297
298 if (operation == START_FILE_COPY) {
299 src = &fcopy_transaction.message;
300 copy_size = sizeof(struct hv_start_fcopy);
301 if (count < copy_size)
302 return 0;
303 } else {
304 src = fcopy_transaction.fcopy_msg;
305 copy_size = sizeof(struct hv_do_fcopy);
306 if (count < copy_size)
307 return 0;
308 }
309 if (copy_to_user(buf, src, copy_size))
310 return -EFAULT;
311
312 return copy_size;
313}
314
315static ssize_t fcopy_write(struct file *file, const char __user *buf,
316 size_t count, loff_t *ppos)
317{ 283{
318 int response = 0; 284 int *val = (int *)msg;
319 285
320 if (count != sizeof(int)) 286 if (len != sizeof(int))
321 return -EINVAL; 287 return -EINVAL;
322 288
323 if (copy_from_user(&response, buf, sizeof(int))) 289 if (fcopy_transaction.state == HVUTIL_DEVICE_INIT)
324 return -EFAULT; 290 return fcopy_handle_handshake(*val);
325 291
326 if (in_hand_shake) { 292 if (fcopy_transaction.state != HVUTIL_USERSPACE_REQ)
327 if (fcopy_handle_handshake(response)) 293 return -EINVAL;
328 return -EINVAL;
329 return sizeof(int);
330 }
331 294
332 /* 295 /*
333 * Complete the transaction by forwarding the result 296 * Complete the transaction by forwarding the result
334 * to the host. But first, cancel the timeout. 297 * to the host. But first, cancel the timeout.
335 */ 298 */
336 if (cancel_delayed_work_sync(&fcopy_work)) 299 if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
337 fcopy_respond_to_host(response); 300 fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
338 301 fcopy_respond_to_host(*val);
339 return sizeof(int); 302 fcopy_transaction.state = HVUTIL_READY;
340} 303 hv_poll_channel(fcopy_transaction.fcopy_context,
341 304 hv_fcopy_onchannelcallback);
342static int fcopy_open(struct inode *inode, struct file *f) 305 }
343{
344 /*
345 * The user level daemon that will open this device is
346 * really an extension of this driver. We can have only
347 * active open at a time.
348 */
349 if (opened)
350 return -EBUSY;
351 306
352 /*
353 * The daemon is alive; setup the state.
354 */
355 opened = true;
356 return 0; 307 return 0;
357} 308}
358 309
359/* XXX: there are still some tricky corner cases, e.g., 310static void fcopy_on_reset(void)
360 * 1) In a SMP guest, when fcopy_release() runs between
361 * schedule_delayed_work() and fcopy_send_data(), there is
362 * still a chance an obsolete message will be queued.
363 *
364 * 2) When the fcopy daemon is running, if we unload the driver,
365 * we'll notice a kernel oops when we kill the daemon later.
366 */
367static int fcopy_release(struct inode *inode, struct file *f)
368{ 311{
369 /* 312 /*
370 * The daemon has exited; reset the state. 313 * The daemon has exited; reset the state.
371 */ 314 */
372 in_hand_shake = true; 315 fcopy_transaction.state = HVUTIL_DEVICE_INIT;
373 opened = false;
374 316
375 if (cancel_delayed_work_sync(&fcopy_work)) { 317 if (cancel_delayed_work_sync(&fcopy_timeout_work))
376 /* We haven't up()-ed the semaphore(very rare)? */
377 if (down_trylock(&fcopy_transaction.read_sema))
378 ;
379 fcopy_respond_to_host(HV_E_FAIL); 318 fcopy_respond_to_host(HV_E_FAIL);
380 }
381 return 0;
382}
383
384
385static const struct file_operations fcopy_fops = {
386 .read = fcopy_read,
387 .write = fcopy_write,
388 .release = fcopy_release,
389 .open = fcopy_open,
390};
391
392static struct miscdevice fcopy_misc = {
393 .minor = MISC_DYNAMIC_MINOR,
394 .name = "vmbus/hv_fcopy",
395 .fops = &fcopy_fops,
396};
397
398static int fcopy_dev_init(void)
399{
400 return misc_register(&fcopy_misc);
401}
402
403static void fcopy_dev_deinit(void)
404{
405
406 /*
407 * The device is going away - perhaps because the
408 * host has rescinded the channel. Setup state so that
409 * user level daemon can gracefully exit if it is blocked
410 * on the read semaphore.
411 */
412 opened = false;
413 /*
414 * Signal the semaphore as the device is
415 * going away.
416 */
417 up(&fcopy_transaction.read_sema);
418 misc_deregister(&fcopy_misc);
419} 319}
420 320
421int hv_fcopy_init(struct hv_util_service *srv) 321int hv_fcopy_init(struct hv_util_service *srv)
@@ -428,14 +328,19 @@ int hv_fcopy_init(struct hv_util_service *srv)
428 * Defer processing channel callbacks until the daemon 328 * Defer processing channel callbacks until the daemon
429 * has registered. 329 * has registered.
430 */ 330 */
431 fcopy_transaction.active = true; 331 fcopy_transaction.state = HVUTIL_DEVICE_INIT;
432 sema_init(&fcopy_transaction.read_sema, 0); 332
333 hvt = hvutil_transport_init(fcopy_devname, 0, 0,
334 fcopy_on_msg, fcopy_on_reset);
335 if (!hvt)
336 return -EFAULT;
433 337
434 return fcopy_dev_init(); 338 return 0;
435} 339}
436 340
437void hv_fcopy_deinit(void) 341void hv_fcopy_deinit(void)
438{ 342{
439 cancel_delayed_work_sync(&fcopy_work); 343 fcopy_transaction.state = HVUTIL_DEVICE_DYING;
440 fcopy_dev_deinit(); 344 cancel_delayed_work_sync(&fcopy_timeout_work);
345 hvutil_transport_destroy(hvt);
441} 346}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index beb8105c0e7b..d85798d5992c 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -28,6 +28,8 @@
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/hyperv.h> 29#include <linux/hyperv.h>
30 30
31#include "hyperv_vmbus.h"
32#include "hv_utils_transport.h"
31 33
32/* 34/*
33 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7) 35 * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
@@ -45,16 +47,21 @@
45#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) 47#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
46 48
47/* 49/*
48 * Global state maintained for transaction that is being processed. 50 * Global state maintained for transaction that is being processed. For a class
49 * Note that only one transaction can be active at any point in time. 51 * of integration services, including the "KVP service", the specified protocol
52 * is a "request/response" protocol which means that there can only be single
53 * outstanding transaction from the host at any given point in time. We use
54 * this to simplify memory management in this driver - we cache and process
55 * only one message at a time.
50 * 56 *
51 * This state is set when we receive a request from the host; we 57 * While the request/response protocol is guaranteed by the host, we further
52 * cleanup this state when the transaction is completed - when we respond 58 * ensure this by serializing packet processing in this driver - we do not
53 * to the host with the key value. 59 * read additional packets from the VMBUs until the current packet is fully
60 * handled.
54 */ 61 */
55 62
56static struct { 63static struct {
57 bool active; /* transaction status - active or not */ 64 int state; /* hvutil_device_state */
58 int recv_len; /* number of bytes received. */ 65 int recv_len; /* number of bytes received. */
59 struct hv_kvp_msg *kvp_msg; /* current message */ 66 struct hv_kvp_msg *kvp_msg; /* current message */
60 struct vmbus_channel *recv_channel; /* chn we got the request */ 67 struct vmbus_channel *recv_channel; /* chn we got the request */
@@ -63,13 +70,6 @@ static struct {
63} kvp_transaction; 70} kvp_transaction;
64 71
65/* 72/*
66 * Before we can accept KVP messages from the host, we need
67 * to handshake with the user level daemon. This state tracks
68 * if we are in the handshake phase.
69 */
70static bool in_hand_shake = true;
71
72/*
73 * This state maintains the version number registered by the daemon. 73 * This state maintains the version number registered by the daemon.
74 */ 74 */
75static int dm_reg_value; 75static int dm_reg_value;
@@ -78,15 +78,15 @@ static void kvp_send_key(struct work_struct *dummy);
78 78
79 79
80static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error); 80static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
81static void kvp_work_func(struct work_struct *dummy); 81static void kvp_timeout_func(struct work_struct *dummy);
82static void kvp_register(int); 82static void kvp_register(int);
83 83
84static DECLARE_DELAYED_WORK(kvp_work, kvp_work_func); 84static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
85static DECLARE_WORK(kvp_sendkey_work, kvp_send_key); 85static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
86 86
87static struct cb_id kvp_id = { CN_KVP_IDX, CN_KVP_VAL }; 87static const char kvp_devname[] = "vmbus/hv_kvp";
88static const char kvp_name[] = "kvp_kernel_module";
89static u8 *recv_buffer; 88static u8 *recv_buffer;
89static struct hvutil_transport *hvt;
90/* 90/*
91 * Register the kernel component with the user-level daemon. 91 * Register the kernel component with the user-level daemon.
92 * As part of this registration, pass the LIC version number. 92 * As part of this registration, pass the LIC version number.
@@ -98,50 +98,39 @@ static void
98kvp_register(int reg_value) 98kvp_register(int reg_value)
99{ 99{
100 100
101 struct cn_msg *msg;
102 struct hv_kvp_msg *kvp_msg; 101 struct hv_kvp_msg *kvp_msg;
103 char *version; 102 char *version;
104 103
105 msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg), GFP_ATOMIC); 104 kvp_msg = kzalloc(sizeof(*kvp_msg), GFP_KERNEL);
106 105
107 if (msg) { 106 if (kvp_msg) {
108 kvp_msg = (struct hv_kvp_msg *)msg->data;
109 version = kvp_msg->body.kvp_register.version; 107 version = kvp_msg->body.kvp_register.version;
110 msg->id.idx = CN_KVP_IDX;
111 msg->id.val = CN_KVP_VAL;
112
113 kvp_msg->kvp_hdr.operation = reg_value; 108 kvp_msg->kvp_hdr.operation = reg_value;
114 strcpy(version, HV_DRV_VERSION); 109 strcpy(version, HV_DRV_VERSION);
115 msg->len = sizeof(struct hv_kvp_msg); 110
116 cn_netlink_send(msg, 0, 0, GFP_ATOMIC); 111 hvutil_transport_send(hvt, kvp_msg, sizeof(*kvp_msg));
117 kfree(msg); 112 kfree(kvp_msg);
118 } 113 }
119} 114}
120static void 115
121kvp_work_func(struct work_struct *dummy) 116static void kvp_timeout_func(struct work_struct *dummy)
122{ 117{
123 /* 118 /*
124 * If the timer fires, the user-mode component has not responded; 119 * If the timer fires, the user-mode component has not responded;
125 * process the pending transaction. 120 * process the pending transaction.
126 */ 121 */
127 kvp_respond_to_host(NULL, HV_E_FAIL); 122 kvp_respond_to_host(NULL, HV_E_FAIL);
128}
129 123
130static void poll_channel(struct vmbus_channel *channel) 124 /* Transaction is finished, reset the state. */
131{ 125 if (kvp_transaction.state > HVUTIL_READY)
132 if (channel->target_cpu != smp_processor_id()) 126 kvp_transaction.state = HVUTIL_READY;
133 smp_call_function_single(channel->target_cpu,
134 hv_kvp_onchannelcallback,
135 channel, true);
136 else
137 hv_kvp_onchannelcallback(channel);
138}
139 127
128 hv_poll_channel(kvp_transaction.kvp_context,
129 hv_kvp_onchannelcallback);
130}
140 131
141static int kvp_handle_handshake(struct hv_kvp_msg *msg) 132static int kvp_handle_handshake(struct hv_kvp_msg *msg)
142{ 133{
143 int ret = 1;
144
145 switch (msg->kvp_hdr.operation) { 134 switch (msg->kvp_hdr.operation) {
146 case KVP_OP_REGISTER: 135 case KVP_OP_REGISTER:
147 dm_reg_value = KVP_OP_REGISTER; 136 dm_reg_value = KVP_OP_REGISTER;
@@ -155,20 +144,18 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
155 pr_info("KVP: incompatible daemon\n"); 144 pr_info("KVP: incompatible daemon\n");
156 pr_info("KVP: KVP version: %d, Daemon version: %d\n", 145 pr_info("KVP: KVP version: %d, Daemon version: %d\n",
157 KVP_OP_REGISTER1, msg->kvp_hdr.operation); 146 KVP_OP_REGISTER1, msg->kvp_hdr.operation);
158 ret = 0; 147 return -EINVAL;
159 } 148 }
160 149
161 if (ret) { 150 /*
162 /* 151 * We have a compatible daemon; complete the handshake.
163 * We have a compatible daemon; complete the handshake. 152 */
164 */ 153 pr_debug("KVP: userspace daemon ver. %d registered\n",
165 pr_info("KVP: user-mode registering done.\n"); 154 KVP_OP_REGISTER);
166 kvp_register(dm_reg_value); 155 kvp_register(dm_reg_value);
167 kvp_transaction.active = false; 156 kvp_transaction.state = HVUTIL_READY;
168 if (kvp_transaction.kvp_context) 157
169 poll_channel(kvp_transaction.kvp_context); 158 return 0;
170 }
171 return ret;
172} 159}
173 160
174 161
@@ -176,26 +163,30 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
176 * Callback when data is received from user mode. 163 * Callback when data is received from user mode.
177 */ 164 */
178 165
179static void 166static int kvp_on_msg(void *msg, int len)
180kvp_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
181{ 167{
182 struct hv_kvp_msg *message; 168 struct hv_kvp_msg *message = (struct hv_kvp_msg *)msg;
183 struct hv_kvp_msg_enumerate *data; 169 struct hv_kvp_msg_enumerate *data;
184 int error = 0; 170 int error = 0;
185 171
186 message = (struct hv_kvp_msg *)msg->data; 172 if (len < sizeof(*message))
173 return -EINVAL;
187 174
188 /* 175 /*
189 * If we are negotiating the version information 176 * If we are negotiating the version information
190 * with the daemon; handle that first. 177 * with the daemon; handle that first.
191 */ 178 */
192 179
193 if (in_hand_shake) { 180 if (kvp_transaction.state < HVUTIL_READY) {
194 if (kvp_handle_handshake(message)) 181 return kvp_handle_handshake(message);
195 in_hand_shake = false;
196 return;
197 } 182 }
198 183
184 /* We didn't send anything to userspace so the reply is spurious */
185 if (kvp_transaction.state < HVUTIL_USERSPACE_REQ)
186 return -EINVAL;
187
188 kvp_transaction.state = HVUTIL_USERSPACE_RECV;
189
199 /* 190 /*
200 * Based on the version of the daemon, we propagate errors from the 191 * Based on the version of the daemon, we propagate errors from the
201 * daemon differently. 192 * daemon differently.
@@ -225,8 +216,14 @@ kvp_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
225 * Complete the transaction by forwarding the key value 216 * Complete the transaction by forwarding the key value
226 * to the host. But first, cancel the timeout. 217 * to the host. But first, cancel the timeout.
227 */ 218 */
228 if (cancel_delayed_work_sync(&kvp_work)) 219 if (cancel_delayed_work_sync(&kvp_timeout_work)) {
229 kvp_respond_to_host(message, error); 220 kvp_respond_to_host(message, error);
221 kvp_transaction.state = HVUTIL_READY;
222 hv_poll_channel(kvp_transaction.kvp_context,
223 hv_kvp_onchannelcallback);
224 }
225
226 return 0;
230} 227}
231 228
232 229
@@ -343,7 +340,6 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
343static void 340static void
344kvp_send_key(struct work_struct *dummy) 341kvp_send_key(struct work_struct *dummy)
345{ 342{
346 struct cn_msg *msg;
347 struct hv_kvp_msg *message; 343 struct hv_kvp_msg *message;
348 struct hv_kvp_msg *in_msg; 344 struct hv_kvp_msg *in_msg;
349 __u8 operation = kvp_transaction.kvp_msg->kvp_hdr.operation; 345 __u8 operation = kvp_transaction.kvp_msg->kvp_hdr.operation;
@@ -352,14 +348,11 @@ kvp_send_key(struct work_struct *dummy)
352 __u64 val64; 348 __u64 val64;
353 int rc; 349 int rc;
354 350
355 msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC); 351 /* The transaction state is wrong. */
356 if (!msg) 352 if (kvp_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
357 return; 353 return;
358 354
359 msg->id.idx = CN_KVP_IDX; 355 message = kzalloc(sizeof(*message), GFP_KERNEL);
360 msg->id.val = CN_KVP_VAL;
361
362 message = (struct hv_kvp_msg *)msg->data;
363 message->kvp_hdr.operation = operation; 356 message->kvp_hdr.operation = operation;
364 message->kvp_hdr.pool = pool; 357 message->kvp_hdr.pool = pool;
365 in_msg = kvp_transaction.kvp_msg; 358 in_msg = kvp_transaction.kvp_msg;
@@ -446,15 +439,17 @@ kvp_send_key(struct work_struct *dummy)
446 break; 439 break;
447 } 440 }
448 441
449 msg->len = sizeof(struct hv_kvp_msg); 442 kvp_transaction.state = HVUTIL_USERSPACE_REQ;
450 rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC); 443 rc = hvutil_transport_send(hvt, message, sizeof(*message));
451 if (rc) { 444 if (rc) {
452 pr_debug("KVP: failed to communicate to the daemon: %d\n", rc); 445 pr_debug("KVP: failed to communicate to the daemon: %d\n", rc);
453 if (cancel_delayed_work_sync(&kvp_work)) 446 if (cancel_delayed_work_sync(&kvp_timeout_work)) {
454 kvp_respond_to_host(message, HV_E_FAIL); 447 kvp_respond_to_host(message, HV_E_FAIL);
448 kvp_transaction.state = HVUTIL_READY;
449 }
455 } 450 }
456 451
457 kfree(msg); 452 kfree(message);
458 453
459 return; 454 return;
460} 455}
@@ -479,17 +474,6 @@ kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
479 int ret; 474 int ret;
480 475
481 /* 476 /*
482 * If a transaction is not active; log and return.
483 */
484
485 if (!kvp_transaction.active) {
486 /*
487 * This is a spurious call!
488 */
489 pr_warn("KVP: Transaction not active\n");
490 return;
491 }
492 /*
493 * Copy the global state for completing the transaction. Note that 477 * Copy the global state for completing the transaction. Note that
494 * only one transaction can be active at a time. 478 * only one transaction can be active at a time.
495 */ 479 */
@@ -498,8 +482,6 @@ kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
498 channel = kvp_transaction.recv_channel; 482 channel = kvp_transaction.recv_channel;
499 req_id = kvp_transaction.recv_req_id; 483 req_id = kvp_transaction.recv_req_id;
500 484
501 kvp_transaction.active = false;
502
503 icmsghdrp = (struct icmsg_hdr *) 485 icmsghdrp = (struct icmsg_hdr *)
504 &recv_buffer[sizeof(struct vmbuspipe_hdr)]; 486 &recv_buffer[sizeof(struct vmbuspipe_hdr)];
505 487
@@ -586,7 +568,6 @@ response_done:
586 568
587 vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, 569 vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
588 VM_PKT_DATA_INBAND, 0); 570 VM_PKT_DATA_INBAND, 0);
589 poll_channel(channel);
590} 571}
591 572
592/* 573/*
@@ -612,7 +593,7 @@ void hv_kvp_onchannelcallback(void *context)
612 int util_fw_version; 593 int util_fw_version;
613 int kvp_srv_version; 594 int kvp_srv_version;
614 595
615 if (kvp_transaction.active) { 596 if (kvp_transaction.state > HVUTIL_READY) {
616 /* 597 /*
617 * We will defer processing this callback once 598 * We will defer processing this callback once
618 * the current transaction is complete. 599 * the current transaction is complete.
@@ -620,6 +601,7 @@ void hv_kvp_onchannelcallback(void *context)
620 kvp_transaction.kvp_context = context; 601 kvp_transaction.kvp_context = context;
621 return; 602 return;
622 } 603 }
604 kvp_transaction.kvp_context = NULL;
623 605
624 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen, 606 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
625 &requestid); 607 &requestid);
@@ -664,9 +646,15 @@ void hv_kvp_onchannelcallback(void *context)
664 kvp_transaction.recv_len = recvlen; 646 kvp_transaction.recv_len = recvlen;
665 kvp_transaction.recv_channel = channel; 647 kvp_transaction.recv_channel = channel;
666 kvp_transaction.recv_req_id = requestid; 648 kvp_transaction.recv_req_id = requestid;
667 kvp_transaction.active = true;
668 kvp_transaction.kvp_msg = kvp_msg; 649 kvp_transaction.kvp_msg = kvp_msg;
669 650
651 if (kvp_transaction.state < HVUTIL_READY) {
652 /* Userspace is not registered yet */
653 kvp_respond_to_host(NULL, HV_E_FAIL);
654 return;
655 }
656 kvp_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
657
670 /* 658 /*
671 * Get the information from the 659 * Get the information from the
672 * user-mode component. 660 * user-mode component.
@@ -677,7 +665,7 @@ void hv_kvp_onchannelcallback(void *context)
677 * user-mode not responding. 665 * user-mode not responding.
678 */ 666 */
679 schedule_work(&kvp_sendkey_work); 667 schedule_work(&kvp_sendkey_work);
680 schedule_delayed_work(&kvp_work, 5*HZ); 668 schedule_delayed_work(&kvp_timeout_work, 5*HZ);
681 669
682 return; 670 return;
683 671
@@ -693,14 +681,16 @@ void hv_kvp_onchannelcallback(void *context)
693 681
694} 682}
695 683
684static void kvp_on_reset(void)
685{
686 if (cancel_delayed_work_sync(&kvp_timeout_work))
687 kvp_respond_to_host(NULL, HV_E_FAIL);
688 kvp_transaction.state = HVUTIL_DEVICE_INIT;
689}
690
696int 691int
697hv_kvp_init(struct hv_util_service *srv) 692hv_kvp_init(struct hv_util_service *srv)
698{ 693{
699 int err;
700
701 err = cn_add_callback(&kvp_id, kvp_name, kvp_cn_callback);
702 if (err)
703 return err;
704 recv_buffer = srv->recv_buffer; 694 recv_buffer = srv->recv_buffer;
705 695
706 /* 696 /*
@@ -709,14 +699,20 @@ hv_kvp_init(struct hv_util_service *srv)
709 * Defer processing channel callbacks until the daemon 699 * Defer processing channel callbacks until the daemon
710 * has registered. 700 * has registered.
711 */ 701 */
712 kvp_transaction.active = true; 702 kvp_transaction.state = HVUTIL_DEVICE_INIT;
703
704 hvt = hvutil_transport_init(kvp_devname, CN_KVP_IDX, CN_KVP_VAL,
705 kvp_on_msg, kvp_on_reset);
706 if (!hvt)
707 return -EFAULT;
713 708
714 return 0; 709 return 0;
715} 710}
716 711
717void hv_kvp_deinit(void) 712void hv_kvp_deinit(void)
718{ 713{
719 cn_del_callback(&kvp_id); 714 kvp_transaction.state = HVUTIL_DEVICE_DYING;
720 cancel_delayed_work_sync(&kvp_work); 715 cancel_delayed_work_sync(&kvp_timeout_work);
721 cancel_work_sync(&kvp_sendkey_work); 716 cancel_work_sync(&kvp_sendkey_work);
717 hvutil_transport_destroy(hvt);
722} 718}
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 9d5e0d1efdb5..815405f2e777 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -24,6 +24,9 @@
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25#include <linux/hyperv.h> 25#include <linux/hyperv.h>
26 26
27#include "hyperv_vmbus.h"
28#include "hv_utils_transport.h"
29
27#define VSS_MAJOR 5 30#define VSS_MAJOR 5
28#define VSS_MINOR 0 31#define VSS_MINOR 0
29#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR) 32#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
@@ -31,28 +34,39 @@
31#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000)) 34#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
32 35
33/* 36/*
34 * Global state maintained for transaction that is being processed. 37 * Global state maintained for transaction that is being processed. For a class
35 * Note that only one transaction can be active at any point in time. 38 * of integration services, including the "VSS service", the specified protocol
39 * is a "request/response" protocol which means that there can only be single
40 * outstanding transaction from the host at any given point in time. We use
41 * this to simplify memory management in this driver - we cache and process
42 * only one message at a time.
36 * 43 *
37 * This state is set when we receive a request from the host; we 44 * While the request/response protocol is guaranteed by the host, we further
38 * cleanup this state when the transaction is completed - when we respond 45 * ensure this by serializing packet processing in this driver - we do not
 39 * to the host with the key value. 46 * read additional packets from the VMBUS until the current packet is fully
47 * handled.
40 */ 48 */
41 49
42static struct { 50static struct {
43 bool active; /* transaction status - active or not */ 51 int state; /* hvutil_device_state */
44 int recv_len; /* number of bytes received. */ 52 int recv_len; /* number of bytes received. */
45 struct vmbus_channel *recv_channel; /* chn we got the request */ 53 struct vmbus_channel *recv_channel; /* chn we got the request */
46 u64 recv_req_id; /* request ID. */ 54 u64 recv_req_id; /* request ID. */
47 struct hv_vss_msg *msg; /* current message */ 55 struct hv_vss_msg *msg; /* current message */
56 void *vss_context; /* for the channel callback */
48} vss_transaction; 57} vss_transaction;
49 58
50 59
51static void vss_respond_to_host(int error); 60static void vss_respond_to_host(int error);
52 61
53static struct cb_id vss_id = { CN_VSS_IDX, CN_VSS_VAL }; 62/*
54static const char vss_name[] = "vss_kernel_module"; 63 * This state maintains the version number registered by the daemon.
64 */
65static int dm_reg_value;
66
67static const char vss_devname[] = "vmbus/hv_vss";
55static __u8 *recv_buffer; 68static __u8 *recv_buffer;
69static struct hvutil_transport *hvt;
56 70
57static void vss_send_op(struct work_struct *dummy); 71static void vss_send_op(struct work_struct *dummy);
58static void vss_timeout_func(struct work_struct *dummy); 72static void vss_timeout_func(struct work_struct *dummy);
@@ -71,25 +85,69 @@ static void vss_timeout_func(struct work_struct *dummy)
71 */ 85 */
72 pr_warn("VSS: timeout waiting for daemon to reply\n"); 86 pr_warn("VSS: timeout waiting for daemon to reply\n");
73 vss_respond_to_host(HV_E_FAIL); 87 vss_respond_to_host(HV_E_FAIL);
88
89 /* Transaction is finished, reset the state. */
90 if (vss_transaction.state > HVUTIL_READY)
91 vss_transaction.state = HVUTIL_READY;
92
93 hv_poll_channel(vss_transaction.vss_context,
94 hv_vss_onchannelcallback);
74} 95}
75 96
76static void 97static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
77vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
78{ 98{
79 struct hv_vss_msg *vss_msg; 99 u32 our_ver = VSS_OP_REGISTER1;
100
101 switch (vss_msg->vss_hdr.operation) {
102 case VSS_OP_REGISTER:
103 /* Daemon doesn't expect us to reply */
104 dm_reg_value = VSS_OP_REGISTER;
105 break;
106 case VSS_OP_REGISTER1:
107 /* Daemon expects us to reply with our own version*/
108 if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver)))
109 return -EFAULT;
110 dm_reg_value = VSS_OP_REGISTER1;
111 break;
112 default:
113 return -EINVAL;
114 }
115 vss_transaction.state = HVUTIL_READY;
116 pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value);
117 return 0;
118}
80 119
81 vss_msg = (struct hv_vss_msg *)msg->data; 120static int vss_on_msg(void *msg, int len)
121{
122 struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
82 123
83 if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER) { 124 if (len != sizeof(*vss_msg))
84 pr_info("VSS daemon registered\n"); 125 return -EINVAL;
85 vss_transaction.active = false;
86 if (vss_transaction.recv_channel != NULL)
87 hv_vss_onchannelcallback(vss_transaction.recv_channel);
88 return;
89 126
127 if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
128 vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
129 /*
130 * Don't process registration messages if we're in the middle
131 * of a transaction processing.
132 */
133 if (vss_transaction.state > HVUTIL_READY)
134 return -EINVAL;
135 return vss_handle_handshake(vss_msg);
136 } else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
137 vss_transaction.state = HVUTIL_USERSPACE_RECV;
138 if (cancel_delayed_work_sync(&vss_timeout_work)) {
139 vss_respond_to_host(vss_msg->error);
140 /* Transaction is finished, reset the state. */
141 vss_transaction.state = HVUTIL_READY;
142 hv_poll_channel(vss_transaction.vss_context,
143 hv_vss_onchannelcallback);
144 }
145 } else {
146 /* This is a spurious call! */
147 pr_warn("VSS: Transaction not active\n");
148 return -EINVAL;
90 } 149 }
91 if (cancel_delayed_work_sync(&vss_timeout_work)) 150 return 0;
92 vss_respond_to_host(vss_msg->error);
93} 151}
94 152
95 153
@@ -97,28 +155,29 @@ static void vss_send_op(struct work_struct *dummy)
97{ 155{
98 int op = vss_transaction.msg->vss_hdr.operation; 156 int op = vss_transaction.msg->vss_hdr.operation;
99 int rc; 157 int rc;
100 struct cn_msg *msg;
101 struct hv_vss_msg *vss_msg; 158 struct hv_vss_msg *vss_msg;
102 159
103 msg = kzalloc(sizeof(*msg) + sizeof(*vss_msg), GFP_ATOMIC); 160 /* The transaction state is wrong. */
104 if (!msg) 161 if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
105 return; 162 return;
106 163
107 vss_msg = (struct hv_vss_msg *)msg->data; 164 vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
108 165 if (!vss_msg)
109 msg->id.idx = CN_VSS_IDX; 166 return;
110 msg->id.val = CN_VSS_VAL;
111 167
112 vss_msg->vss_hdr.operation = op; 168 vss_msg->vss_hdr.operation = op;
113 msg->len = sizeof(struct hv_vss_msg);
114 169
115 rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC); 170 vss_transaction.state = HVUTIL_USERSPACE_REQ;
171 rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg));
116 if (rc) { 172 if (rc) {
117 pr_warn("VSS: failed to communicate to the daemon: %d\n", rc); 173 pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
118 if (cancel_delayed_work_sync(&vss_timeout_work)) 174 if (cancel_delayed_work_sync(&vss_timeout_work)) {
119 vss_respond_to_host(HV_E_FAIL); 175 vss_respond_to_host(HV_E_FAIL);
176 vss_transaction.state = HVUTIL_READY;
177 }
120 } 178 }
121 kfree(msg); 179
180 kfree(vss_msg);
122 181
123 return; 182 return;
124} 183}
@@ -136,17 +195,6 @@ vss_respond_to_host(int error)
136 u64 req_id; 195 u64 req_id;
137 196
138 /* 197 /*
139 * If a transaction is not active; log and return.
140 */
141
142 if (!vss_transaction.active) {
143 /*
144 * This is a spurious call!
145 */
146 pr_warn("VSS: Transaction not active\n");
147 return;
148 }
149 /*
150 * Copy the global state for completing the transaction. Note that 198 * Copy the global state for completing the transaction. Note that
151 * only one transaction can be active at a time. 199 * only one transaction can be active at a time.
152 */ 200 */
@@ -154,7 +202,6 @@ vss_respond_to_host(int error)
154 buf_len = vss_transaction.recv_len; 202 buf_len = vss_transaction.recv_len;
155 channel = vss_transaction.recv_channel; 203 channel = vss_transaction.recv_channel;
156 req_id = vss_transaction.recv_req_id; 204 req_id = vss_transaction.recv_req_id;
157 vss_transaction.active = false;
158 205
159 icmsghdrp = (struct icmsg_hdr *) 206 icmsghdrp = (struct icmsg_hdr *)
160 &recv_buffer[sizeof(struct vmbuspipe_hdr)]; 207 &recv_buffer[sizeof(struct vmbuspipe_hdr)];
@@ -191,14 +238,15 @@ void hv_vss_onchannelcallback(void *context)
191 struct icmsg_hdr *icmsghdrp; 238 struct icmsg_hdr *icmsghdrp;
192 struct icmsg_negotiate *negop = NULL; 239 struct icmsg_negotiate *negop = NULL;
193 240
194 if (vss_transaction.active) { 241 if (vss_transaction.state > HVUTIL_READY) {
195 /* 242 /*
196 * We will defer processing this callback once 243 * We will defer processing this callback once
197 * the current transaction is complete. 244 * the current transaction is complete.
198 */ 245 */
199 vss_transaction.recv_channel = channel; 246 vss_transaction.vss_context = context;
200 return; 247 return;
201 } 248 }
249 vss_transaction.vss_context = NULL;
202 250
203 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, 251 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
204 &requestid); 252 &requestid);
@@ -224,7 +272,6 @@ void hv_vss_onchannelcallback(void *context)
224 vss_transaction.recv_len = recvlen; 272 vss_transaction.recv_len = recvlen;
225 vss_transaction.recv_channel = channel; 273 vss_transaction.recv_channel = channel;
226 vss_transaction.recv_req_id = requestid; 274 vss_transaction.recv_req_id = requestid;
227 vss_transaction.active = true;
228 vss_transaction.msg = (struct hv_vss_msg *)vss_msg; 275 vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
229 276
230 switch (vss_msg->vss_hdr.operation) { 277 switch (vss_msg->vss_hdr.operation) {
@@ -241,6 +288,12 @@ void hv_vss_onchannelcallback(void *context)
241 */ 288 */
242 case VSS_OP_FREEZE: 289 case VSS_OP_FREEZE:
243 case VSS_OP_THAW: 290 case VSS_OP_THAW:
291 if (vss_transaction.state < HVUTIL_READY) {
292 /* Userspace is not registered yet */
293 vss_respond_to_host(HV_E_FAIL);
294 return;
295 }
296 vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
244 schedule_work(&vss_send_op_work); 297 schedule_work(&vss_send_op_work);
245 schedule_delayed_work(&vss_timeout_work, 298 schedule_delayed_work(&vss_timeout_work,
246 VSS_USERSPACE_TIMEOUT); 299 VSS_USERSPACE_TIMEOUT);
@@ -275,14 +328,16 @@ void hv_vss_onchannelcallback(void *context)
275 328
276} 329}
277 330
331static void vss_on_reset(void)
332{
333 if (cancel_delayed_work_sync(&vss_timeout_work))
334 vss_respond_to_host(HV_E_FAIL);
335 vss_transaction.state = HVUTIL_DEVICE_INIT;
336}
337
278int 338int
279hv_vss_init(struct hv_util_service *srv) 339hv_vss_init(struct hv_util_service *srv)
280{ 340{
281 int err;
282
283 err = cn_add_callback(&vss_id, vss_name, vss_cn_callback);
284 if (err)
285 return err;
286 recv_buffer = srv->recv_buffer; 341 recv_buffer = srv->recv_buffer;
287 342
288 /* 343 /*
@@ -291,13 +346,20 @@ hv_vss_init(struct hv_util_service *srv)
291 * Defer processing channel callbacks until the daemon 346 * Defer processing channel callbacks until the daemon
292 * has registered. 347 * has registered.
293 */ 348 */
294 vss_transaction.active = true; 349 vss_transaction.state = HVUTIL_DEVICE_INIT;
350
351 hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
352 vss_on_msg, vss_on_reset);
353 if (!hvt)
354 return -EFAULT;
355
295 return 0; 356 return 0;
296} 357}
297 358
298void hv_vss_deinit(void) 359void hv_vss_deinit(void)
299{ 360{
300 cn_del_callback(&vss_id); 361 vss_transaction.state = HVUTIL_DEVICE_DYING;
301 cancel_delayed_work_sync(&vss_timeout_work); 362 cancel_delayed_work_sync(&vss_timeout_work);
302 cancel_work_sync(&vss_send_op_work); 363 cancel_work_sync(&vss_send_op_work);
364 hvutil_transport_destroy(hvt);
303} 365}
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
new file mode 100644
index 000000000000..ea7ba5ef16a9
--- /dev/null
+++ b/drivers/hv/hv_utils_transport.c
@@ -0,0 +1,276 @@
1/*
2 * Kernel/userspace transport abstraction for Hyper-V util driver.
3 *
4 * Copyright (C) 2015, Vitaly Kuznetsov <vkuznets@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 */
17
18#include <linux/slab.h>
19#include <linux/fs.h>
20#include <linux/poll.h>
21
22#include "hyperv_vmbus.h"
23#include "hv_utils_transport.h"
24
25static DEFINE_SPINLOCK(hvt_list_lock);
26static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
27
28static void hvt_reset(struct hvutil_transport *hvt)
29{
30 mutex_lock(&hvt->outmsg_lock);
31 kfree(hvt->outmsg);
32 hvt->outmsg = NULL;
33 hvt->outmsg_len = 0;
34 mutex_unlock(&hvt->outmsg_lock);
35 if (hvt->on_reset)
36 hvt->on_reset();
37}
38
39static ssize_t hvt_op_read(struct file *file, char __user *buf,
40 size_t count, loff_t *ppos)
41{
42 struct hvutil_transport *hvt;
43 int ret;
44
45 hvt = container_of(file->f_op, struct hvutil_transport, fops);
46
47 if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0))
48 return -EINTR;
49
50 mutex_lock(&hvt->outmsg_lock);
51 if (!hvt->outmsg) {
52 ret = -EAGAIN;
53 goto out_unlock;
54 }
55
56 if (count < hvt->outmsg_len) {
57 ret = -EINVAL;
58 goto out_unlock;
59 }
60
61 if (!copy_to_user(buf, hvt->outmsg, hvt->outmsg_len))
62 ret = hvt->outmsg_len;
63 else
64 ret = -EFAULT;
65
66 kfree(hvt->outmsg);
67 hvt->outmsg = NULL;
68 hvt->outmsg_len = 0;
69
70out_unlock:
71 mutex_unlock(&hvt->outmsg_lock);
72 return ret;
73}
74
75static ssize_t hvt_op_write(struct file *file, const char __user *buf,
76 size_t count, loff_t *ppos)
77{
78 struct hvutil_transport *hvt;
79 u8 *inmsg;
80
81 hvt = container_of(file->f_op, struct hvutil_transport, fops);
82
83 inmsg = kzalloc(count, GFP_KERNEL);
84 if (copy_from_user(inmsg, buf, count)) {
85 kfree(inmsg);
86 return -EFAULT;
87 }
88 if (hvt->on_msg(inmsg, count))
89 return -EFAULT;
90 kfree(inmsg);
91
92 return count;
93}
94
95static unsigned int hvt_op_poll(struct file *file, poll_table *wait)
96{
97 struct hvutil_transport *hvt;
98
99 hvt = container_of(file->f_op, struct hvutil_transport, fops);
100
101 poll_wait(file, &hvt->outmsg_q, wait);
102 if (hvt->outmsg_len > 0)
103 return POLLIN | POLLRDNORM;
104
105 return 0;
106}
107
108static int hvt_op_open(struct inode *inode, struct file *file)
109{
110 struct hvutil_transport *hvt;
111
112 hvt = container_of(file->f_op, struct hvutil_transport, fops);
113
114 /*
115 * Switching to CHARDEV mode. We switch bach to INIT when device
116 * gets released.
117 */
118 if (hvt->mode == HVUTIL_TRANSPORT_INIT)
119 hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
120 else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
121 /*
122 * We're switching from netlink communication to using char
123 * device. Issue the reset first.
124 */
125 hvt_reset(hvt);
126 hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
127 } else
128 return -EBUSY;
129
130 return 0;
131}
132
133static int hvt_op_release(struct inode *inode, struct file *file)
134{
135 struct hvutil_transport *hvt;
136
137 hvt = container_of(file->f_op, struct hvutil_transport, fops);
138
139 hvt->mode = HVUTIL_TRANSPORT_INIT;
140 /*
141 * Cleanup message buffers to avoid spurious messages when the daemon
142 * connects back.
143 */
144 hvt_reset(hvt);
145
146 return 0;
147}
148
149static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
150{
151 struct hvutil_transport *hvt, *hvt_found = NULL;
152
153 spin_lock(&hvt_list_lock);
154 list_for_each_entry(hvt, &hvt_list, list) {
155 if (hvt->cn_id.idx == msg->id.idx &&
156 hvt->cn_id.val == msg->id.val) {
157 hvt_found = hvt;
158 break;
159 }
160 }
161 spin_unlock(&hvt_list_lock);
162 if (!hvt_found) {
163 pr_warn("hvt_cn_callback: spurious message received!\n");
164 return;
165 }
166
167 /*
168 * Switching to NETLINK mode. Switching to CHARDEV happens when someone
169 * opens the device.
170 */
171 if (hvt->mode == HVUTIL_TRANSPORT_INIT)
172 hvt->mode = HVUTIL_TRANSPORT_NETLINK;
173
174 if (hvt->mode == HVUTIL_TRANSPORT_NETLINK)
175 hvt_found->on_msg(msg->data, msg->len);
176 else
177 pr_warn("hvt_cn_callback: unexpected netlink message!\n");
178}
179
180int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
181{
182 struct cn_msg *cn_msg;
183 int ret = 0;
184
185 if (hvt->mode == HVUTIL_TRANSPORT_INIT) {
186 return -EINVAL;
187 } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
188 cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
189 if (!msg)
190 return -ENOMEM;
191 cn_msg->id.idx = hvt->cn_id.idx;
192 cn_msg->id.val = hvt->cn_id.val;
193 cn_msg->len = len;
194 memcpy(cn_msg->data, msg, len);
195 ret = cn_netlink_send(cn_msg, 0, 0, GFP_ATOMIC);
196 kfree(cn_msg);
197 return ret;
198 }
199 /* HVUTIL_TRANSPORT_CHARDEV */
200 mutex_lock(&hvt->outmsg_lock);
201 if (hvt->outmsg) {
202 /* Previous message wasn't received */
203 ret = -EFAULT;
204 goto out_unlock;
205 }
206 hvt->outmsg = kzalloc(len, GFP_KERNEL);
207 memcpy(hvt->outmsg, msg, len);
208 hvt->outmsg_len = len;
209 wake_up_interruptible(&hvt->outmsg_q);
210out_unlock:
211 mutex_unlock(&hvt->outmsg_lock);
212 return ret;
213}
214
215struct hvutil_transport *hvutil_transport_init(const char *name,
216 u32 cn_idx, u32 cn_val,
217 int (*on_msg)(void *, int),
218 void (*on_reset)(void))
219{
220 struct hvutil_transport *hvt;
221
222 hvt = kzalloc(sizeof(*hvt), GFP_KERNEL);
223 if (!hvt)
224 return NULL;
225
226 hvt->cn_id.idx = cn_idx;
227 hvt->cn_id.val = cn_val;
228
229 hvt->mdev.minor = MISC_DYNAMIC_MINOR;
230 hvt->mdev.name = name;
231
232 hvt->fops.owner = THIS_MODULE;
233 hvt->fops.read = hvt_op_read;
234 hvt->fops.write = hvt_op_write;
235 hvt->fops.poll = hvt_op_poll;
236 hvt->fops.open = hvt_op_open;
237 hvt->fops.release = hvt_op_release;
238
239 hvt->mdev.fops = &hvt->fops;
240
241 init_waitqueue_head(&hvt->outmsg_q);
242 mutex_init(&hvt->outmsg_lock);
243
244 spin_lock(&hvt_list_lock);
245 list_add(&hvt->list, &hvt_list);
246 spin_unlock(&hvt_list_lock);
247
248 hvt->on_msg = on_msg;
249 hvt->on_reset = on_reset;
250
251 if (misc_register(&hvt->mdev))
252 goto err_free_hvt;
253
254 /* Use cn_id.idx/cn_id.val to determine if we need to setup netlink */
255 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0 &&
256 cn_add_callback(&hvt->cn_id, name, hvt_cn_callback))
257 goto err_free_hvt;
258
259 return hvt;
260
261err_free_hvt:
262 kfree(hvt);
263 return NULL;
264}
265
266void hvutil_transport_destroy(struct hvutil_transport *hvt)
267{
268 spin_lock(&hvt_list_lock);
269 list_del(&hvt->list);
270 spin_unlock(&hvt_list_lock);
271 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
272 cn_del_callback(&hvt->cn_id);
273 misc_deregister(&hvt->mdev);
274 kfree(hvt->outmsg);
275 kfree(hvt);
276}
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
new file mode 100644
index 000000000000..314c76ce1b07
--- /dev/null
+++ b/drivers/hv/hv_utils_transport.h
@@ -0,0 +1,51 @@
1/*
2 * Kernel/userspace transport abstraction for Hyper-V util driver.
3 *
4 * Copyright (C) 2015, Vitaly Kuznetsov <vkuznets@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 */
17
18#ifndef _HV_UTILS_TRANSPORT_H
19#define _HV_UTILS_TRANSPORT_H
20
21#include <linux/connector.h>
22#include <linux/miscdevice.h>
23
24enum hvutil_transport_mode {
25 HVUTIL_TRANSPORT_INIT = 0,
26 HVUTIL_TRANSPORT_NETLINK,
27 HVUTIL_TRANSPORT_CHARDEV,
28};
29
30struct hvutil_transport {
31 int mode; /* hvutil_transport_mode */
32 struct file_operations fops; /* file operations */
33 struct miscdevice mdev; /* misc device */
34 struct cb_id cn_id; /* CN_*_IDX/CN_*_VAL */
35 struct list_head list; /* hvt_list */
36 int (*on_msg)(void *, int); /* callback on new user message */
37 void (*on_reset)(void); /* callback when userspace drops */
38 u8 *outmsg; /* message to the userspace */
39 int outmsg_len; /* its length */
40 wait_queue_head_t outmsg_q; /* poll/read wait queue */
41 struct mutex outmsg_lock; /* protects outmsg */
42};
43
44struct hvutil_transport *hvutil_transport_init(const char *name,
45 u32 cn_idx, u32 cn_val,
46 int (*on_msg)(void *, int),
47 void (*on_reset)(void));
48int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len);
49void hvutil_transport_destroy(struct hvutil_transport *hvt);
50
51#endif /* _HV_UTILS_TRANSPORT_H */
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 887287ad411f..cddc0c9f6bf9 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -647,6 +647,7 @@ struct vmbus_connection {
647 647
648 atomic_t next_gpadl_handle; 648 atomic_t next_gpadl_handle;
649 649
650 struct completion unload_event;
650 /* 651 /*
651 * Represents channel interrupts. Each bit position represents a 652 * Represents channel interrupts. Each bit position represents a
652 * channel. When a channel sends an interrupt via VMBUS, it finds its 653 * channel. When a channel sends an interrupt via VMBUS, it finds its
@@ -730,9 +731,39 @@ int vmbus_set_event(struct vmbus_channel *channel);
730 731
731void vmbus_on_event(unsigned long data); 732void vmbus_on_event(unsigned long data);
732 733
734int hv_kvp_init(struct hv_util_service *);
735void hv_kvp_deinit(void);
736void hv_kvp_onchannelcallback(void *);
737
738int hv_vss_init(struct hv_util_service *);
739void hv_vss_deinit(void);
740void hv_vss_onchannelcallback(void *);
741
733int hv_fcopy_init(struct hv_util_service *); 742int hv_fcopy_init(struct hv_util_service *);
734void hv_fcopy_deinit(void); 743void hv_fcopy_deinit(void);
735void hv_fcopy_onchannelcallback(void *); 744void hv_fcopy_onchannelcallback(void *);
745void vmbus_initiate_unload(void);
746
747static inline void hv_poll_channel(struct vmbus_channel *channel,
748 void (*cb)(void *))
749{
750 if (!channel)
751 return;
752
753 if (channel->target_cpu != smp_processor_id())
754 smp_call_function_single(channel->target_cpu,
755 cb, channel, true);
756 else
757 cb(channel);
758}
736 759
760enum hvutil_device_state {
761 HVUTIL_DEVICE_INIT = 0, /* driver is loaded, waiting for userspace */
762 HVUTIL_READY, /* userspace is registered */
763 HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
764 HVUTIL_USERSPACE_REQ, /* request to userspace was sent */
765 HVUTIL_USERSPACE_RECV, /* reply from userspace was received */
766 HVUTIL_DEVICE_DYING, /* driver unload is in progress */
767};
737 768
738#endif /* _HYPERV_VMBUS_H */ 769#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c85235e9f245..cf204005ee78 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1035,6 +1035,15 @@ acpi_walk_err:
1035 return ret_val; 1035 return ret_val;
1036} 1036}
1037 1037
1038static int vmbus_acpi_remove(struct acpi_device *device)
1039{
1040 int ret = 0;
1041
1042 if (hyperv_mmio.start && hyperv_mmio.end)
1043 ret = release_resource(&hyperv_mmio);
1044 return ret;
1045}
1046
1038static const struct acpi_device_id vmbus_acpi_device_ids[] = { 1047static const struct acpi_device_id vmbus_acpi_device_ids[] = {
1039 {"VMBUS", 0}, 1048 {"VMBUS", 0},
1040 {"VMBus", 0}, 1049 {"VMBus", 0},
@@ -1047,6 +1056,7 @@ static struct acpi_driver vmbus_acpi_driver = {
1047 .ids = vmbus_acpi_device_ids, 1056 .ids = vmbus_acpi_device_ids,
1048 .ops = { 1057 .ops = {
1049 .add = vmbus_acpi_add, 1058 .add = vmbus_acpi_add,
1059 .remove = vmbus_acpi_remove,
1050 }, 1060 },
1051}; 1061};
1052 1062
@@ -1096,15 +1106,22 @@ static void __exit vmbus_exit(void)
1096 1106
1097 vmbus_connection.conn_state = DISCONNECTED; 1107 vmbus_connection.conn_state = DISCONNECTED;
1098 hv_synic_clockevents_cleanup(); 1108 hv_synic_clockevents_cleanup();
1109 vmbus_disconnect();
1099 hv_remove_vmbus_irq(); 1110 hv_remove_vmbus_irq();
1111 tasklet_kill(&msg_dpc);
1100 vmbus_free_channels(); 1112 vmbus_free_channels();
1113 if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
1114 atomic_notifier_chain_unregister(&panic_notifier_list,
1115 &hyperv_panic_block);
1116 }
1101 bus_unregister(&hv_bus); 1117 bus_unregister(&hv_bus);
1102 hv_cleanup(); 1118 hv_cleanup();
1103 for_each_online_cpu(cpu) 1119 for_each_online_cpu(cpu) {
1120 tasklet_kill(hv_context.event_dpc[cpu]);
1104 smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); 1121 smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
1122 }
1105 acpi_bus_unregister_driver(&vmbus_acpi_driver); 1123 acpi_bus_unregister_driver(&vmbus_acpi_driver);
1106 hv_cpu_hotplug_quirk(false); 1124 hv_cpu_hotplug_quirk(false);
1107 vmbus_disconnect();
1108} 1125}
1109 1126
1110 1127
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 54075a07d2a1..7c65b7334738 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -371,6 +371,17 @@ config SENSORS_DS1621
371 This driver can also be built as a module. If so, the module 371 This driver can also be built as a module. If so, the module
372 will be called ds1621. 372 will be called ds1621.
373 373
374config SENSORS_DELL_SMM
375 tristate "Dell laptop SMM BIOS hwmon driver"
376 depends on X86
377 help
378 This hwmon driver adds support for reporting temperature of different
379 sensors and controls the fans on Dell laptops via System Management
380 Mode provided by Dell BIOS.
381
382 When option I8K is also enabled this driver provides legacy /proc/i8k
383 userspace interface for i8kutils package.
384
374config SENSORS_DA9052_ADC 385config SENSORS_DA9052_ADC
375 tristate "Dialog DA9052/DA9053 ADC" 386 tristate "Dialog DA9052/DA9053 ADC"
376 depends on PMIC_DA9052 387 depends on PMIC_DA9052
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index ab904027f074..9e0f3dd2841d 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
49obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o 49obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
50obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o 50obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
51obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o 51obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o
52obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o
52obj-$(CONFIG_SENSORS_DME1737) += dme1737.o 53obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
53obj-$(CONFIG_SENSORS_DS620) += ds620.o 54obj-$(CONFIG_SENSORS_DS620) += ds620.o
54obj-$(CONFIG_SENSORS_DS1621) += ds1621.o 55obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
diff --git a/drivers/char/i8k.c b/drivers/hwmon/dell-smm-hwmon.c
index a43048b5b05f..2a808822af21 100644
--- a/drivers/char/i8k.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * i8k.c -- Linux driver for accessing the SMM BIOS on Dell laptops. 2 * dell-smm-hwmon.c -- Linux driver for accessing the SMM BIOS on Dell laptops.
3 * 3 *
4 * Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org> 4 * Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org>
5 * 5 *
6 * Hwmon integration: 6 * Hwmon integration:
7 * Copyright (C) 2011 Jean Delvare <jdelvare@suse.de> 7 * Copyright (C) 2011 Jean Delvare <jdelvare@suse.de>
8 * Copyright (C) 2013, 2014 Guenter Roeck <linux@roeck-us.net> 8 * Copyright (C) 2013, 2014 Guenter Roeck <linux@roeck-us.net>
9 * Copyright (C) 2014 Pali Rohár <pali.rohar@gmail.com> 9 * Copyright (C) 2014, 2015 Pali Rohár <pali.rohar@gmail.com>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 12 * under the terms of the GNU General Public License as published by the
@@ -80,8 +80,10 @@ static uint i8k_fan_max = I8K_FAN_HIGH;
80#define I8K_HWMON_HAVE_FAN2 (1 << 5) 80#define I8K_HWMON_HAVE_FAN2 (1 << 5)
81 81
82MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)"); 82MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
83MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops"); 83MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
84MODULE_DESCRIPTION("Dell laptop SMM BIOS hwmon driver");
84MODULE_LICENSE("GPL"); 85MODULE_LICENSE("GPL");
86MODULE_ALIAS("i8k");
85 87
86static bool force; 88static bool force;
87module_param(force, bool, 0); 89module_param(force, bool, 0);
@@ -91,6 +93,7 @@ static bool ignore_dmi;
91module_param(ignore_dmi, bool, 0); 93module_param(ignore_dmi, bool, 0);
92MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match"); 94MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
93 95
96#if IS_ENABLED(CONFIG_I8K)
94static bool restricted; 97static bool restricted;
95module_param(restricted, bool, 0); 98module_param(restricted, bool, 0);
96MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set"); 99MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
@@ -98,6 +101,7 @@ MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
98static bool power_status; 101static bool power_status;
99module_param(power_status, bool, 0600); 102module_param(power_status, bool, 0600);
100MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k"); 103MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
104#endif
101 105
102static uint fan_mult; 106static uint fan_mult;
103module_param(fan_mult, uint, 0); 107module_param(fan_mult, uint, 0);
@@ -107,18 +111,6 @@ static uint fan_max;
107module_param(fan_max, uint, 0); 111module_param(fan_max, uint, 0);
108MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed (default: autodetect)"); 112MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed (default: autodetect)");
109 113
110static int i8k_open_fs(struct inode *inode, struct file *file);
111static long i8k_ioctl(struct file *, unsigned int, unsigned long);
112
113static const struct file_operations i8k_fops = {
114 .owner = THIS_MODULE,
115 .open = i8k_open_fs,
116 .read = seq_read,
117 .llseek = seq_lseek,
118 .release = single_release,
119 .unlocked_ioctl = i8k_ioctl,
120};
121
122struct smm_regs { 114struct smm_regs {
123 unsigned int eax; 115 unsigned int eax;
124 unsigned int ebx __packed; 116 unsigned int ebx __packed;
@@ -219,45 +211,6 @@ out:
219} 211}
220 212
221/* 213/*
222 * Read the Fn key status.
223 */
224static int i8k_get_fn_status(void)
225{
226 struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
227 int rc;
228
229 rc = i8k_smm(&regs);
230 if (rc < 0)
231 return rc;
232
233 switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) {
234 case I8K_FN_UP:
235 return I8K_VOL_UP;
236 case I8K_FN_DOWN:
237 return I8K_VOL_DOWN;
238 case I8K_FN_MUTE:
239 return I8K_VOL_MUTE;
240 default:
241 return 0;
242 }
243}
244
245/*
246 * Read the power status.
247 */
248static int i8k_get_power_status(void)
249{
250 struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
251 int rc;
252
253 rc = i8k_smm(&regs);
254 if (rc < 0)
255 return rc;
256
257 return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY;
258}
259
260/*
261 * Read the fan status. 214 * Read the fan status.
262 */ 215 */
263static int i8k_get_fan_status(int fan) 216static int i8k_get_fan_status(int fan)
@@ -376,6 +329,51 @@ static int i8k_get_dell_signature(int req_fn)
376 return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1; 329 return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
377} 330}
378 331
332#if IS_ENABLED(CONFIG_I8K)
333
334/*
335 * Read the Fn key status.
336 */
337static int i8k_get_fn_status(void)
338{
339 struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
340 int rc;
341
342 rc = i8k_smm(&regs);
343 if (rc < 0)
344 return rc;
345
346 switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) {
347 case I8K_FN_UP:
348 return I8K_VOL_UP;
349 case I8K_FN_DOWN:
350 return I8K_VOL_DOWN;
351 case I8K_FN_MUTE:
352 return I8K_VOL_MUTE;
353 default:
354 return 0;
355 }
356}
357
358/*
359 * Read the power status.
360 */
361static int i8k_get_power_status(void)
362{
363 struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
364 int rc;
365
366 rc = i8k_smm(&regs);
367 if (rc < 0)
368 return rc;
369
370 return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY;
371}
372
373/*
374 * Procfs interface
375 */
376
379static int 377static int
380i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) 378i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
381{ 379{
@@ -526,6 +524,37 @@ static int i8k_open_fs(struct inode *inode, struct file *file)
526 return single_open(file, i8k_proc_show, NULL); 524 return single_open(file, i8k_proc_show, NULL);
527} 525}
528 526
527static const struct file_operations i8k_fops = {
528 .owner = THIS_MODULE,
529 .open = i8k_open_fs,
530 .read = seq_read,
531 .llseek = seq_lseek,
532 .release = single_release,
533 .unlocked_ioctl = i8k_ioctl,
534};
535
536static void __init i8k_init_procfs(void)
537{
538 /* Register the proc entry */
539 proc_create("i8k", 0, NULL, &i8k_fops);
540}
541
542static void __exit i8k_exit_procfs(void)
543{
544 remove_proc_entry("i8k", NULL);
545}
546
547#else
548
549static inline void __init i8k_init_procfs(void)
550{
551}
552
553static inline void __exit i8k_exit_procfs(void)
554{
555}
556
557#endif
529 558
530/* 559/*
531 * Hwmon interface 560 * Hwmon interface
@@ -748,8 +777,8 @@ static int __init i8k_init_hwmon(void)
748 if (err >= 0) 777 if (err >= 0)
749 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; 778 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
750 779
751 i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "i8k", NULL, 780 i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell-smm",
752 i8k_groups); 781 NULL, i8k_groups);
753 if (IS_ERR(i8k_hwmon_dev)) { 782 if (IS_ERR(i8k_hwmon_dev)) {
754 err = PTR_ERR(i8k_hwmon_dev); 783 err = PTR_ERR(i8k_hwmon_dev);
755 i8k_hwmon_dev = NULL; 784 i8k_hwmon_dev = NULL;
@@ -974,33 +1003,24 @@ static int __init i8k_probe(void)
974 1003
975static int __init i8k_init(void) 1004static int __init i8k_init(void)
976{ 1005{
977 struct proc_dir_entry *proc_i8k;
978 int err; 1006 int err;
979 1007
980 /* Are we running on an supported laptop? */ 1008 /* Are we running on an supported laptop? */
981 if (i8k_probe()) 1009 if (i8k_probe())
982 return -ENODEV; 1010 return -ENODEV;
983 1011
984 /* Register the proc entry */
985 proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops);
986 if (!proc_i8k)
987 return -ENOENT;
988
989 err = i8k_init_hwmon(); 1012 err = i8k_init_hwmon();
990 if (err) 1013 if (err)
991 goto exit_remove_proc; 1014 return err;
992 1015
1016 i8k_init_procfs();
993 return 0; 1017 return 0;
994
995 exit_remove_proc:
996 remove_proc_entry("i8k", NULL);
997 return err;
998} 1018}
999 1019
1000static void __exit i8k_exit(void) 1020static void __exit i8k_exit(void)
1001{ 1021{
1002 hwmon_device_unregister(i8k_hwmon_dev); 1022 hwmon_device_unregister(i8k_hwmon_dev);
1003 remove_proc_entry("i8k", NULL); 1023 i8k_exit_procfs();
1004} 1024}
1005 1025
1006module_init(i8k_init); 1026module_init(i8k_init);
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index fc1f1ae7a49d..6c8921140f02 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -58,4 +58,23 @@ config CORESIGHT_SOURCE_ETM3X
58 which allows tracing the instructions that a processor is executing 58 which allows tracing the instructions that a processor is executing
59 This is primarily useful for instruction level tracing. Depending 59 This is primarily useful for instruction level tracing. Depending
60 the ETM version data tracing may also be available. 60 the ETM version data tracing may also be available.
61
62config CORESIGHT_SOURCE_ETM4X
63 bool "CoreSight Embedded Trace Macrocell 4.x driver"
64 depends on ARM64
65 select CORESIGHT_LINKS_AND_SINKS
66 help
67 This driver provides support for the ETM4.x tracer module, tracing the
68 instructions that a processor is executing. This is primarily useful
69 for instruction level tracing. Depending on the implemented version
70 data tracing may also be available.
71
72config CORESIGHT_QCOM_REPLICATOR
73 bool "Qualcomm CoreSight Replicator driver"
74 depends on CORESIGHT_LINKS_AND_SINKS
75 help
76 This enables support for Qualcomm CoreSight link driver. The
77 programmable ATB replicator sends the ATB trace stream from the
78 ETB/ETF to the TPIUi and ETR.
79
61endif 80endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 4b4bec890ef5..99f8e5f6256e 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -9,3 +9,5 @@ obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
9obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \ 9obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
10 coresight-replicator.o 10 coresight-replicator.o
11obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o 11obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
12obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
13obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 40049869aecd..77d0f9c1118d 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -22,10 +22,11 @@
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/clk.h> 25#include <linux/pm_runtime.h>
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include <linux/coresight.h> 27#include <linux/coresight.h>
28#include <linux/amba/bus.h> 28#include <linux/amba/bus.h>
29#include <linux/clk.h>
29 30
30#include "coresight-priv.h" 31#include "coresight-priv.h"
31 32
@@ -66,9 +67,9 @@
66 * struct etb_drvdata - specifics associated to an ETB component 67 * struct etb_drvdata - specifics associated to an ETB component
67 * @base: memory mapped base address for this component. 68 * @base: memory mapped base address for this component.
68 * @dev: the device entity associated to this component. 69 * @dev: the device entity associated to this component.
70 * @atclk: optional clock for the core parts of the ETB.
69 * @csdev: component vitals needed by the framework. 71 * @csdev: component vitals needed by the framework.
70 * @miscdev: specifics to handle "/dev/xyz.etb" entry. 72 * @miscdev: specifics to handle "/dev/xyz.etb" entry.
71 * @clk: the clock this component is associated to.
72 * @spinlock: only one at a time pls. 73 * @spinlock: only one at a time pls.
73 * @in_use: synchronise user space access to etb buffer. 74 * @in_use: synchronise user space access to etb buffer.
74 * @buf: area of memory where ETB buffer content gets sent. 75 * @buf: area of memory where ETB buffer content gets sent.
@@ -79,9 +80,9 @@
79struct etb_drvdata { 80struct etb_drvdata {
80 void __iomem *base; 81 void __iomem *base;
81 struct device *dev; 82 struct device *dev;
83 struct clk *atclk;
82 struct coresight_device *csdev; 84 struct coresight_device *csdev;
83 struct miscdevice miscdev; 85 struct miscdevice miscdev;
84 struct clk *clk;
85 spinlock_t spinlock; 86 spinlock_t spinlock;
86 atomic_t in_use; 87 atomic_t in_use;
87 u8 *buf; 88 u8 *buf;
@@ -92,17 +93,14 @@ struct etb_drvdata {
92 93
93static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata) 94static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
94{ 95{
95 int ret;
96 u32 depth = 0; 96 u32 depth = 0;
97 97
98 ret = clk_prepare_enable(drvdata->clk); 98 pm_runtime_get_sync(drvdata->dev);
99 if (ret)
100 return ret;
101 99
102 /* RO registers don't need locking */ 100 /* RO registers don't need locking */
103 depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG); 101 depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
104 102
105 clk_disable_unprepare(drvdata->clk); 103 pm_runtime_put(drvdata->dev);
106 return depth; 104 return depth;
107} 105}
108 106
@@ -137,12 +135,9 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
137static int etb_enable(struct coresight_device *csdev) 135static int etb_enable(struct coresight_device *csdev)
138{ 136{
139 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 137 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
140 int ret;
141 unsigned long flags; 138 unsigned long flags;
142 139
143 ret = clk_prepare_enable(drvdata->clk); 140 pm_runtime_get_sync(drvdata->dev);
144 if (ret)
145 return ret;
146 141
147 spin_lock_irqsave(&drvdata->spinlock, flags); 142 spin_lock_irqsave(&drvdata->spinlock, flags);
148 etb_enable_hw(drvdata); 143 etb_enable_hw(drvdata);
@@ -252,7 +247,7 @@ static void etb_disable(struct coresight_device *csdev)
252 drvdata->enable = false; 247 drvdata->enable = false;
253 spin_unlock_irqrestore(&drvdata->spinlock, flags); 248 spin_unlock_irqrestore(&drvdata->spinlock, flags);
254 249
255 clk_disable_unprepare(drvdata->clk); 250 pm_runtime_put(drvdata->dev);
256 251
257 dev_info(drvdata->dev, "ETB disabled\n"); 252 dev_info(drvdata->dev, "ETB disabled\n");
258} 253}
@@ -339,16 +334,12 @@ static const struct file_operations etb_fops = {
339static ssize_t status_show(struct device *dev, 334static ssize_t status_show(struct device *dev,
340 struct device_attribute *attr, char *buf) 335 struct device_attribute *attr, char *buf)
341{ 336{
342 int ret;
343 unsigned long flags; 337 unsigned long flags;
344 u32 etb_rdr, etb_sr, etb_rrp, etb_rwp; 338 u32 etb_rdr, etb_sr, etb_rrp, etb_rwp;
345 u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr; 339 u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr;
346 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); 340 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
347 341
348 ret = clk_prepare_enable(drvdata->clk); 342 pm_runtime_get_sync(drvdata->dev);
349 if (ret)
350 goto out;
351
352 spin_lock_irqsave(&drvdata->spinlock, flags); 343 spin_lock_irqsave(&drvdata->spinlock, flags);
353 CS_UNLOCK(drvdata->base); 344 CS_UNLOCK(drvdata->base);
354 345
@@ -364,7 +355,7 @@ static ssize_t status_show(struct device *dev,
364 CS_LOCK(drvdata->base); 355 CS_LOCK(drvdata->base);
365 spin_unlock_irqrestore(&drvdata->spinlock, flags); 356 spin_unlock_irqrestore(&drvdata->spinlock, flags);
366 357
367 clk_disable_unprepare(drvdata->clk); 358 pm_runtime_put(drvdata->dev);
368 359
369 return sprintf(buf, 360 return sprintf(buf,
370 "Depth:\t\t0x%x\n" 361 "Depth:\t\t0x%x\n"
@@ -377,7 +368,7 @@ static ssize_t status_show(struct device *dev,
377 "Flush ctrl:\t0x%x\n", 368 "Flush ctrl:\t0x%x\n",
378 etb_rdr, etb_sr, etb_rrp, etb_rwp, 369 etb_rdr, etb_sr, etb_rrp, etb_rwp,
379 etb_trg, etb_cr, etb_ffsr, etb_ffcr); 370 etb_trg, etb_cr, etb_ffsr, etb_ffcr);
380out: 371
381 return -EINVAL; 372 return -EINVAL;
382} 373}
383static DEVICE_ATTR_RO(status); 374static DEVICE_ATTR_RO(status);
@@ -438,6 +429,12 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
438 return -ENOMEM; 429 return -ENOMEM;
439 430
440 drvdata->dev = &adev->dev; 431 drvdata->dev = &adev->dev;
432 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
433 if (!IS_ERR(drvdata->atclk)) {
434 ret = clk_prepare_enable(drvdata->atclk);
435 if (ret)
436 return ret;
437 }
441 dev_set_drvdata(dev, drvdata); 438 dev_set_drvdata(dev, drvdata);
442 439
443 /* validity for the resource is already checked by the AMBA core */ 440 /* validity for the resource is already checked by the AMBA core */
@@ -449,21 +446,19 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
449 446
450 spin_lock_init(&drvdata->spinlock); 447 spin_lock_init(&drvdata->spinlock);
451 448
452 drvdata->clk = adev->pclk;
453 ret = clk_prepare_enable(drvdata->clk);
454 if (ret)
455 return ret;
456
457 drvdata->buffer_depth = etb_get_buffer_depth(drvdata); 449 drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
458 clk_disable_unprepare(drvdata->clk); 450 pm_runtime_put(&adev->dev);
459 451
460 if (drvdata->buffer_depth < 0) 452 if (drvdata->buffer_depth & 0x80000000)
461 return -EINVAL; 453 return -EINVAL;
462 454
463 drvdata->buf = devm_kzalloc(dev, 455 drvdata->buf = devm_kzalloc(dev,
464 drvdata->buffer_depth * 4, GFP_KERNEL); 456 drvdata->buffer_depth * 4, GFP_KERNEL);
465 if (!drvdata->buf) 457 if (!drvdata->buf) {
458 dev_err(dev, "Failed to allocate %u bytes for buffer data\n",
459 drvdata->buffer_depth * 4);
466 return -ENOMEM; 460 return -ENOMEM;
461 }
467 462
468 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); 463 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
469 if (!desc) 464 if (!desc)
@@ -503,6 +498,32 @@ static int etb_remove(struct amba_device *adev)
503 return 0; 498 return 0;
504} 499}
505 500
501#ifdef CONFIG_PM
502static int etb_runtime_suspend(struct device *dev)
503{
504 struct etb_drvdata *drvdata = dev_get_drvdata(dev);
505
506 if (drvdata && !IS_ERR(drvdata->atclk))
507 clk_disable_unprepare(drvdata->atclk);
508
509 return 0;
510}
511
512static int etb_runtime_resume(struct device *dev)
513{
514 struct etb_drvdata *drvdata = dev_get_drvdata(dev);
515
516 if (drvdata && !IS_ERR(drvdata->atclk))
517 clk_prepare_enable(drvdata->atclk);
518
519 return 0;
520}
521#endif
522
523static const struct dev_pm_ops etb_dev_pm_ops = {
524 SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
525};
526
506static struct amba_id etb_ids[] = { 527static struct amba_id etb_ids[] = {
507 { 528 {
508 .id = 0x0003b907, 529 .id = 0x0003b907,
@@ -515,6 +536,8 @@ static struct amba_driver etb_driver = {
515 .drv = { 536 .drv = {
516 .name = "coresight-etb10", 537 .name = "coresight-etb10",
517 .owner = THIS_MODULE, 538 .owner = THIS_MODULE,
539 .pm = &etb_dev_pm_ops,
540
518 }, 541 },
519 .probe = etb_probe, 542 .probe = etb_probe,
520 .remove = etb_remove, 543 .remove = etb_remove,
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 501c5fac8a45..098ffbec0a44 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -140,8 +140,8 @@
140 * struct etm_drvdata - specifics associated to an ETM component 140 * struct etm_drvdata - specifics associated to an ETM component
141 * @base: memory mapped base address for this component. 141 * @base: memory mapped base address for this component.
142 * @dev: the device entity associated to this component. 142 * @dev: the device entity associated to this component.
143 * @atclk: optional clock for the core parts of the ETM.
143 * @csdev: component vitals needed by the framework. 144 * @csdev: component vitals needed by the framework.
144 * @clk: the clock this component is associated to.
145 * @spinlock: only one at a time pls. 145 * @spinlock: only one at a time pls.
146 * @cpu: the cpu this component is affined to. 146 * @cpu: the cpu this component is affined to.
147 * @port_size: port size as reported by ETMCR bit 4-6 and 21. 147 * @port_size: port size as reported by ETMCR bit 4-6 and 21.
@@ -192,8 +192,8 @@
192struct etm_drvdata { 192struct etm_drvdata {
193 void __iomem *base; 193 void __iomem *base;
194 struct device *dev; 194 struct device *dev;
195 struct clk *atclk;
195 struct coresight_device *csdev; 196 struct coresight_device *csdev;
196 struct clk *clk;
197 spinlock_t spinlock; 197 spinlock_t spinlock;
198 int cpu; 198 int cpu;
199 int port_size; 199 int port_size;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index c965f5724abd..018a00fda611 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -23,13 +23,14 @@
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/sysfs.h> 24#include <linux/sysfs.h>
25#include <linux/stat.h> 25#include <linux/stat.h>
26#include <linux/clk.h> 26#include <linux/pm_runtime.h>
27#include <linux/cpu.h> 27#include <linux/cpu.h>
28#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/coresight.h> 29#include <linux/coresight.h>
30#include <linux/amba/bus.h> 30#include <linux/amba/bus.h>
31#include <linux/seq_file.h> 31#include <linux/seq_file.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33#include <linux/clk.h>
33#include <asm/sections.h> 34#include <asm/sections.h>
34 35
35#include "coresight-etm.h" 36#include "coresight-etm.h"
@@ -325,9 +326,7 @@ static int etm_trace_id(struct coresight_device *csdev)
325 326
326 if (!drvdata->enable) 327 if (!drvdata->enable)
327 return drvdata->traceid; 328 return drvdata->traceid;
328 329 pm_runtime_get_sync(csdev->dev.parent);
329 if (clk_prepare_enable(drvdata->clk))
330 goto out;
331 330
332 spin_lock_irqsave(&drvdata->spinlock, flags); 331 spin_lock_irqsave(&drvdata->spinlock, flags);
333 332
@@ -336,8 +335,8 @@ static int etm_trace_id(struct coresight_device *csdev)
336 CS_LOCK(drvdata->base); 335 CS_LOCK(drvdata->base);
337 336
338 spin_unlock_irqrestore(&drvdata->spinlock, flags); 337 spin_unlock_irqrestore(&drvdata->spinlock, flags);
339 clk_disable_unprepare(drvdata->clk); 338 pm_runtime_put(csdev->dev.parent);
340out: 339
341 return trace_id; 340 return trace_id;
342} 341}
343 342
@@ -346,10 +345,7 @@ static int etm_enable(struct coresight_device *csdev)
346 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 345 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
347 int ret; 346 int ret;
348 347
349 ret = clk_prepare_enable(drvdata->clk); 348 pm_runtime_get_sync(csdev->dev.parent);
350 if (ret)
351 goto err_clk;
352
353 spin_lock(&drvdata->spinlock); 349 spin_lock(&drvdata->spinlock);
354 350
355 /* 351 /*
@@ -373,8 +369,7 @@ static int etm_enable(struct coresight_device *csdev)
373 return 0; 369 return 0;
374err: 370err:
375 spin_unlock(&drvdata->spinlock); 371 spin_unlock(&drvdata->spinlock);
376 clk_disable_unprepare(drvdata->clk); 372 pm_runtime_put(csdev->dev.parent);
377err_clk:
378 return ret; 373 return ret;
379} 374}
380 375
@@ -423,8 +418,7 @@ static void etm_disable(struct coresight_device *csdev)
423 418
424 spin_unlock(&drvdata->spinlock); 419 spin_unlock(&drvdata->spinlock);
425 put_online_cpus(); 420 put_online_cpus();
426 421 pm_runtime_put(csdev->dev.parent);
427 clk_disable_unprepare(drvdata->clk);
428 422
429 dev_info(drvdata->dev, "ETM tracing disabled\n"); 423 dev_info(drvdata->dev, "ETM tracing disabled\n");
430} 424}
@@ -474,14 +468,10 @@ static DEVICE_ATTR_RO(nr_ctxid_cmp);
474static ssize_t etmsr_show(struct device *dev, 468static ssize_t etmsr_show(struct device *dev,
475 struct device_attribute *attr, char *buf) 469 struct device_attribute *attr, char *buf)
476{ 470{
477 int ret;
478 unsigned long flags, val; 471 unsigned long flags, val;
479 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); 472 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
480 473
481 ret = clk_prepare_enable(drvdata->clk); 474 pm_runtime_get_sync(drvdata->dev);
482 if (ret)
483 return ret;
484
485 spin_lock_irqsave(&drvdata->spinlock, flags); 475 spin_lock_irqsave(&drvdata->spinlock, flags);
486 CS_UNLOCK(drvdata->base); 476 CS_UNLOCK(drvdata->base);
487 477
@@ -489,7 +479,7 @@ static ssize_t etmsr_show(struct device *dev,
489 479
490 CS_LOCK(drvdata->base); 480 CS_LOCK(drvdata->base);
491 spin_unlock_irqrestore(&drvdata->spinlock, flags); 481 spin_unlock_irqrestore(&drvdata->spinlock, flags);
492 clk_disable_unprepare(drvdata->clk); 482 pm_runtime_put(drvdata->dev);
493 483
494 return sprintf(buf, "%#lx\n", val); 484 return sprintf(buf, "%#lx\n", val);
495} 485}
@@ -1317,7 +1307,6 @@ static DEVICE_ATTR_RW(seq_13_event);
1317static ssize_t seq_curr_state_show(struct device *dev, 1307static ssize_t seq_curr_state_show(struct device *dev,
1318 struct device_attribute *attr, char *buf) 1308 struct device_attribute *attr, char *buf)
1319{ 1309{
1320 int ret;
1321 unsigned long val, flags; 1310 unsigned long val, flags;
1322 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); 1311 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1323 1312
@@ -1326,10 +1315,7 @@ static ssize_t seq_curr_state_show(struct device *dev,
1326 goto out; 1315 goto out;
1327 } 1316 }
1328 1317
1329 ret = clk_prepare_enable(drvdata->clk); 1318 pm_runtime_get_sync(drvdata->dev);
1330 if (ret)
1331 return ret;
1332
1333 spin_lock_irqsave(&drvdata->spinlock, flags); 1319 spin_lock_irqsave(&drvdata->spinlock, flags);
1334 1320
1335 CS_UNLOCK(drvdata->base); 1321 CS_UNLOCK(drvdata->base);
@@ -1337,7 +1323,7 @@ static ssize_t seq_curr_state_show(struct device *dev,
1337 CS_LOCK(drvdata->base); 1323 CS_LOCK(drvdata->base);
1338 1324
1339 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1325 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1340 clk_disable_unprepare(drvdata->clk); 1326 pm_runtime_put(drvdata->dev);
1341out: 1327out:
1342 return sprintf(buf, "%#lx\n", val); 1328 return sprintf(buf, "%#lx\n", val);
1343} 1329}
@@ -1521,10 +1507,7 @@ static ssize_t status_show(struct device *dev,
1521 unsigned long flags; 1507 unsigned long flags;
1522 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); 1508 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1523 1509
1524 ret = clk_prepare_enable(drvdata->clk); 1510 pm_runtime_get_sync(drvdata->dev);
1525 if (ret)
1526 return ret;
1527
1528 spin_lock_irqsave(&drvdata->spinlock, flags); 1511 spin_lock_irqsave(&drvdata->spinlock, flags);
1529 1512
1530 CS_UNLOCK(drvdata->base); 1513 CS_UNLOCK(drvdata->base);
@@ -1550,7 +1533,7 @@ static ssize_t status_show(struct device *dev,
1550 CS_LOCK(drvdata->base); 1533 CS_LOCK(drvdata->base);
1551 1534
1552 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1535 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1553 clk_disable_unprepare(drvdata->clk); 1536 pm_runtime_put(drvdata->dev);
1554 1537
1555 return ret; 1538 return ret;
1556} 1539}
@@ -1559,7 +1542,6 @@ static DEVICE_ATTR_RO(status);
1559static ssize_t traceid_show(struct device *dev, 1542static ssize_t traceid_show(struct device *dev,
1560 struct device_attribute *attr, char *buf) 1543 struct device_attribute *attr, char *buf)
1561{ 1544{
1562 int ret;
1563 unsigned long val, flags; 1545 unsigned long val, flags;
1564 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); 1546 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1565 1547
@@ -1568,10 +1550,7 @@ static ssize_t traceid_show(struct device *dev,
1568 goto out; 1550 goto out;
1569 } 1551 }
1570 1552
1571 ret = clk_prepare_enable(drvdata->clk); 1553 pm_runtime_get_sync(drvdata->dev);
1572 if (ret)
1573 return ret;
1574
1575 spin_lock_irqsave(&drvdata->spinlock, flags); 1554 spin_lock_irqsave(&drvdata->spinlock, flags);
1576 CS_UNLOCK(drvdata->base); 1555 CS_UNLOCK(drvdata->base);
1577 1556
@@ -1579,7 +1558,7 @@ static ssize_t traceid_show(struct device *dev,
1579 1558
1580 CS_LOCK(drvdata->base); 1559 CS_LOCK(drvdata->base);
1581 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1560 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1582 clk_disable_unprepare(drvdata->clk); 1561 pm_runtime_put(drvdata->dev);
1583out: 1562out:
1584 return sprintf(buf, "%#lx\n", val); 1563 return sprintf(buf, "%#lx\n", val);
1585} 1564}
@@ -1817,10 +1796,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1817 1796
1818 spin_lock_init(&drvdata->spinlock); 1797 spin_lock_init(&drvdata->spinlock);
1819 1798
1820 drvdata->clk = adev->pclk; 1799 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
1821 ret = clk_prepare_enable(drvdata->clk); 1800 if (!IS_ERR(drvdata->atclk)) {
1822 if (ret) 1801 ret = clk_prepare_enable(drvdata->atclk);
1823 return ret; 1802 if (ret)
1803 return ret;
1804 }
1824 1805
1825 drvdata->cpu = pdata ? pdata->cpu : 0; 1806 drvdata->cpu = pdata ? pdata->cpu : 0;
1826 1807
@@ -1845,8 +1826,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1845 } 1826 }
1846 etm_init_default_data(drvdata); 1827 etm_init_default_data(drvdata);
1847 1828
1848 clk_disable_unprepare(drvdata->clk);
1849
1850 desc->type = CORESIGHT_DEV_TYPE_SOURCE; 1829 desc->type = CORESIGHT_DEV_TYPE_SOURCE;
1851 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; 1830 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
1852 desc->ops = &etm_cs_ops; 1831 desc->ops = &etm_cs_ops;
@@ -1859,7 +1838,8 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1859 goto err_arch_supported; 1838 goto err_arch_supported;
1860 } 1839 }
1861 1840
1862 dev_info(dev, "ETM initialized\n"); 1841 pm_runtime_put(&adev->dev);
1842 dev_info(dev, "%s initialized\n", (char *)id->data);
1863 1843
1864 if (boot_enable) { 1844 if (boot_enable) {
1865 coresight_enable(drvdata->csdev); 1845 coresight_enable(drvdata->csdev);
@@ -1869,7 +1849,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1869 return 0; 1849 return 0;
1870 1850
1871err_arch_supported: 1851err_arch_supported:
1872 clk_disable_unprepare(drvdata->clk);
1873 if (--etm_count == 0) 1852 if (--etm_count == 0)
1874 unregister_hotcpu_notifier(&etm_cpu_notifier); 1853 unregister_hotcpu_notifier(&etm_cpu_notifier);
1875 return ret; 1854 return ret;
@@ -1886,22 +1865,52 @@ static int etm_remove(struct amba_device *adev)
1886 return 0; 1865 return 0;
1887} 1866}
1888 1867
1868#ifdef CONFIG_PM
1869static int etm_runtime_suspend(struct device *dev)
1870{
1871 struct etm_drvdata *drvdata = dev_get_drvdata(dev);
1872
1873 if (drvdata && !IS_ERR(drvdata->atclk))
1874 clk_disable_unprepare(drvdata->atclk);
1875
1876 return 0;
1877}
1878
1879static int etm_runtime_resume(struct device *dev)
1880{
1881 struct etm_drvdata *drvdata = dev_get_drvdata(dev);
1882
1883 if (drvdata && !IS_ERR(drvdata->atclk))
1884 clk_prepare_enable(drvdata->atclk);
1885
1886 return 0;
1887}
1888#endif
1889
1890static const struct dev_pm_ops etm_dev_pm_ops = {
1891 SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
1892};
1893
1889static struct amba_id etm_ids[] = { 1894static struct amba_id etm_ids[] = {
1890 { /* ETM 3.3 */ 1895 { /* ETM 3.3 */
1891 .id = 0x0003b921, 1896 .id = 0x0003b921,
1892 .mask = 0x0003ffff, 1897 .mask = 0x0003ffff,
1898 .data = "ETM 3.3",
1893 }, 1899 },
1894 { /* ETM 3.5 */ 1900 { /* ETM 3.5 */
1895 .id = 0x0003b956, 1901 .id = 0x0003b956,
1896 .mask = 0x0003ffff, 1902 .mask = 0x0003ffff,
1903 .data = "ETM 3.5",
1897 }, 1904 },
1898 { /* PTM 1.0 */ 1905 { /* PTM 1.0 */
1899 .id = 0x0003b950, 1906 .id = 0x0003b950,
1900 .mask = 0x0003ffff, 1907 .mask = 0x0003ffff,
1908 .data = "PTM 1.0",
1901 }, 1909 },
1902 { /* PTM 1.1 */ 1910 { /* PTM 1.1 */
1903 .id = 0x0003b95f, 1911 .id = 0x0003b95f,
1904 .mask = 0x0003ffff, 1912 .mask = 0x0003ffff,
1913 .data = "PTM 1.1",
1905 }, 1914 },
1906 { 0, 0}, 1915 { 0, 0},
1907}; 1916};
@@ -1910,23 +1919,14 @@ static struct amba_driver etm_driver = {
1910 .drv = { 1919 .drv = {
1911 .name = "coresight-etm3x", 1920 .name = "coresight-etm3x",
1912 .owner = THIS_MODULE, 1921 .owner = THIS_MODULE,
1922 .pm = &etm_dev_pm_ops,
1913 }, 1923 },
1914 .probe = etm_probe, 1924 .probe = etm_probe,
1915 .remove = etm_remove, 1925 .remove = etm_remove,
1916 .id_table = etm_ids, 1926 .id_table = etm_ids,
1917}; 1927};
1918 1928
1919int __init etm_init(void) 1929module_amba_driver(etm_driver);
1920{
1921 return amba_driver_register(&etm_driver);
1922}
1923module_init(etm_init);
1924
1925void __exit etm_exit(void)
1926{
1927 amba_driver_unregister(&etm_driver);
1928}
1929module_exit(etm_exit);
1930 1930
1931MODULE_LICENSE("GPL v2"); 1931MODULE_LICENSE("GPL v2");
1932MODULE_DESCRIPTION("CoreSight Program Flow Trace driver"); 1932MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
new file mode 100644
index 000000000000..1312e993c501
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -0,0 +1,2702 @@
1/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/module.h>
19#include <linux/io.h>
20#include <linux/err.h>
21#include <linux/fs.h>
22#include <linux/slab.h>
23#include <linux/delay.h>
24#include <linux/smp.h>
25#include <linux/sysfs.h>
26#include <linux/stat.h>
27#include <linux/clk.h>
28#include <linux/cpu.h>
29#include <linux/coresight.h>
30#include <linux/pm_wakeup.h>
31#include <linux/amba/bus.h>
32#include <linux/seq_file.h>
33#include <linux/uaccess.h>
34#include <linux/pm_runtime.h>
35#include <asm/sections.h>
36
37#include "coresight-etm4x.h"
38
/* Module/boot parameter: when non-zero, start tracing as soon as probe ends */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

/* The number of ETMv4 currently registered */
static int etm4_count;
/* Per-CPU table of probed devices, indexed by CPU number */
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
45
/*
 * etm4_os_unlock - clear the OS lock so the trace registers become
 * accessible.  Takes void* so it can be used as an smp call callback.
 */
static void etm4_os_unlock(void *info)
{
	struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;

	/* Writing any value to ETMOSLAR unlocks the trace registers */
	writel_relaxed(0x0, drvdata->base + TRCOSLAR);
	isb();
}
54
55static bool etm4_arch_supported(u8 arch)
56{
57 switch (arch) {
58 case ETM_ARCH_V4:
59 break;
60 default:
61 return false;
62 }
63 return true;
64}
65
/*
 * etm4_trace_id - return the trace ID this source stamps on its packets.
 * If the unit is not enabled the cached value is returned; otherwise the
 * live TRCTRACEIDR register is read under the spinlock with the device
 * runtime-resumed.
 */
static int etm4_trace_id(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;
	int trace_id = -1;

	/* Fast path: no need to touch hardware when tracing is off */
	if (!drvdata->enable)
		return drvdata->trcid;

	/* Keep the device powered for the duration of the register access */
	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
	trace_id &= ETM_TRACEID_MASK;
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return trace_id;
}
88
/*
 * etm4_enable_hw - program every configuration register from the cached
 * drvdata state and turn the trace unit on.  Must run on the CPU that owns
 * this ETM (called via smp_call_function_single) so the writes land while
 * that CPU is powered.
 */
static void etm4_enable_hw(void *info)
{
	int i;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	etm4_os_unlock(drvdata);

	/* Disable the trace unit before programming trace registers */
	writel_relaxed(0, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
	writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
	/* nothing specific implemented */
	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
	writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
	writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
	writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
	writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
	writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
	writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
	writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
	writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
	writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
	writel_relaxed(drvdata->vissctlr,
		       drvdata->base + TRCVISSCTLR);
	writel_relaxed(drvdata->vipcssctlr,
		       drvdata->base + TRCVIPCSSCTLR);
	/* Sequencer: per-state events, reset event and current state */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		writel_relaxed(drvdata->seq_ctrl[i],
			       drvdata->base + TRCSEQEVRn(i));
	writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
	writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
	writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
	/* Counters: reload value, control and current value per counter */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		writel_relaxed(drvdata->cntrldvr[i],
			       drvdata->base + TRCCNTRLDVRn(i));
		writel_relaxed(drvdata->cntr_ctrl[i],
			       drvdata->base + TRCCNTCTLRn(i));
		writel_relaxed(drvdata->cntr_val[i],
			       drvdata->base + TRCCNTVRn(i));
	}
	for (i = 0; i < drvdata->nr_resource; i++)
		writel_relaxed(drvdata->res_ctrl[i],
			       drvdata->base + TRCRSCTLRn(i));

	/* Single-shot comparator control, status and PE comparator inputs */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		writel_relaxed(drvdata->ss_ctrl[i],
			       drvdata->base + TRCSSCCRn(i));
		writel_relaxed(drvdata->ss_status[i],
			       drvdata->base + TRCSSCSRn(i));
		writel_relaxed(drvdata->ss_pe_cmp[i],
			       drvdata->base + TRCSSPCICRn(i));
	}
	/* Address comparator value/access-type pairs (64-bit registers) */
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		writeq_relaxed(drvdata->addr_val[i],
			       drvdata->base + TRCACVRn(i));
		writeq_relaxed(drvdata->addr_acc[i],
			       drvdata->base + TRCACATRn(i));
	}
	for (i = 0; i < drvdata->numcidc; i++)
		writeq_relaxed(drvdata->ctxid_val[i],
			       drvdata->base + TRCCIDCVRn(i));
	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);

	for (i = 0; i < drvdata->numvmidc; i++)
		writeq_relaxed(drvdata->vmid_val[i],
			       drvdata->base + TRCVMIDCVRn(i));
	writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
	writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

	/* Enable the trace unit */
	writel_relaxed(1, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
182
/*
 * etm4_enable - coresight source_ops .enable hook.  Runtime-resumes the
 * device, programs and starts the hardware on the owning CPU, and marks
 * the drvdata as enabled.  On failure the PM reference is dropped.
 */
static int etm4_enable(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret;

	pm_runtime_get_sync(drvdata->dev);
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
	 * ensures that register writes occur when cpu is powered.
	 */
	ret = smp_call_function_single(drvdata->cpu,
				       etm4_enable_hw, drvdata, 1);
	if (ret)
		goto err;
	drvdata->enable = true;
	/* sticky_enable records that the unit has been enabled at least once */
	drvdata->sticky_enable = true;

	spin_unlock(&drvdata->spinlock);

	dev_info(drvdata->dev, "ETM tracing enabled\n");
	return 0;
err:
	spin_unlock(&drvdata->spinlock);
	pm_runtime_put(drvdata->dev);
	return ret;
}
211
/*
 * etm4_disable_hw - clear the trace-enable bit in TRCPRGCTLR.  Must run on
 * the CPU that owns this ETM (called via smp_call_function_single).
 */
static void etm4_disable_hw(void *info)
{
	u32 control;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	control = readl_relaxed(drvdata->base + TRCPRGCTLR);

	/* EN, bit[0] Trace unit enable bit */
	control &= ~0x1;

	/* make sure everything completes before disabling */
	mb();
	isb();
	writel_relaxed(control, drvdata->base + TRCPRGCTLR);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
233
/*
 * etm4_disable - coresight source_ops .disable hook.  Stops the hardware on
 * the owning CPU, clears the enable flag and drops the runtime-PM reference
 * taken in etm4_enable().
 */
static void etm4_disable(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	get_online_cpus();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
	drvdata->enable = false;

	spin_unlock(&drvdata->spinlock);
	put_online_cpus();

	/* Balances the pm_runtime_get_sync() done in etm4_enable() */
	pm_runtime_put(drvdata->dev);

	dev_info(drvdata->dev, "ETM tracing disabled\n");
}
261
/* Callbacks the coresight core uses to drive this trace source */
static const struct coresight_ops_source etm4_source_ops = {
	.trace_id = etm4_trace_id,
	.enable = etm4_enable,
	.disable = etm4_disable,
};

static const struct coresight_ops etm4_cs_ops = {
	.source_ops = &etm4_source_ops,
};
271
272static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
273{
274 u8 idx = drvdata->addr_idx;
275
276 /*
277 * TRCACATRn.TYPE bit[1:0]: type of comparison
278 * the trace unit performs
279 */
280 if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
281 if (idx % 2 != 0)
282 return -EINVAL;
283
284 /*
285 * We are performing instruction address comparison. Set the
286 * relevant bit of ViewInst Include/Exclude Control register
287 * for corresponding address comparator pair.
288 */
289 if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
290 drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
291 return -EINVAL;
292
293 if (exclude == true) {
294 /*
295 * Set exclude bit and unset the include bit
296 * corresponding to comparator pair
297 */
298 drvdata->viiectlr |= BIT(idx / 2 + 16);
299 drvdata->viiectlr &= ~BIT(idx / 2);
300 } else {
301 /*
302 * Set include bit and unset exclude bit
303 * corresponding to comparator pair
304 */
305 drvdata->viiectlr |= BIT(idx / 2);
306 drvdata->viiectlr &= ~BIT(idx / 2 + 16);
307 }
308 }
309 return 0;
310}
311
312static ssize_t nr_pe_cmp_show(struct device *dev,
313 struct device_attribute *attr,
314 char *buf)
315{
316 unsigned long val;
317 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
318
319 val = drvdata->nr_pe_cmp;
320 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
321}
322static DEVICE_ATTR_RO(nr_pe_cmp);
323
324static ssize_t nr_addr_cmp_show(struct device *dev,
325 struct device_attribute *attr,
326 char *buf)
327{
328 unsigned long val;
329 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
330
331 val = drvdata->nr_addr_cmp;
332 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
333}
334static DEVICE_ATTR_RO(nr_addr_cmp);
335
336static ssize_t nr_cntr_show(struct device *dev,
337 struct device_attribute *attr,
338 char *buf)
339{
340 unsigned long val;
341 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
342
343 val = drvdata->nr_cntr;
344 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
345}
346static DEVICE_ATTR_RO(nr_cntr);
347
348static ssize_t nr_ext_inp_show(struct device *dev,
349 struct device_attribute *attr,
350 char *buf)
351{
352 unsigned long val;
353 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
354
355 val = drvdata->nr_ext_inp;
356 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
357}
358static DEVICE_ATTR_RO(nr_ext_inp);
359
360static ssize_t numcidc_show(struct device *dev,
361 struct device_attribute *attr,
362 char *buf)
363{
364 unsigned long val;
365 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
366
367 val = drvdata->numcidc;
368 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
369}
370static DEVICE_ATTR_RO(numcidc);
371
372static ssize_t numvmidc_show(struct device *dev,
373 struct device_attribute *attr,
374 char *buf)
375{
376 unsigned long val;
377 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
378
379 val = drvdata->numvmidc;
380 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
381}
382static DEVICE_ATTR_RO(numvmidc);
383
384static ssize_t nrseqstate_show(struct device *dev,
385 struct device_attribute *attr,
386 char *buf)
387{
388 unsigned long val;
389 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
390
391 val = drvdata->nrseqstate;
392 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
393}
394static DEVICE_ATTR_RO(nrseqstate);
395
396static ssize_t nr_resource_show(struct device *dev,
397 struct device_attribute *attr,
398 char *buf)
399{
400 unsigned long val;
401 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
402
403 val = drvdata->nr_resource;
404 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
405}
406static DEVICE_ATTR_RO(nr_resource);
407
408static ssize_t nr_ss_cmp_show(struct device *dev,
409 struct device_attribute *attr,
410 char *buf)
411{
412 unsigned long val;
413 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
414
415 val = drvdata->nr_ss_cmp;
416 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
417}
418static DEVICE_ATTR_RO(nr_ss_cmp);
419
/*
 * reset_store - sysfs write hook that returns the cached configuration to
 * its default state (writing a non-zero value also clears the mode flags).
 * Only the shadow state in drvdata is touched; the hardware is reprogrammed
 * from it on the next enable.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		drvdata->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	drvdata->cfg &= ~(BIT(1) | BIT(2));

	/* Disable data value and data address tracing */
	drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			   ETM_MODE_DATA_TRACE_VAL);
	drvdata->cfg &= ~(BIT(16) | BIT(17));

	/* Disable all events tracing */
	drvdata->eventctrl0 = 0x0;
	drvdata->eventctrl1 = 0x0;

	/* Disable timestamp event */
	drvdata->ts_ctrl = 0x0;

	/* Disable stalling */
	drvdata->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes*/
	if (drvdata->syncpr == false)
		drvdata->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	drvdata->vinst_ctrl |= BIT(0);
	if (drvdata->nr_addr_cmp == true) {
		drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		drvdata->vinst_ctrl |= BIT(9);
	}

	/* No address range filtering for ViewInst */
	drvdata->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	drvdata->vissctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		drvdata->seq_ctrl[i] = 0x0;
	drvdata->seq_rst = 0x0;
	drvdata->seq_state = 0x0;

	/* Disable external input events */
	drvdata->ext_inp = 0x0;

	/* Clear every counter's reload value, control and current value */
	drvdata->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntrldvr[i] = 0x0;
		drvdata->cntr_ctrl[i] = 0x0;
		drvdata->cntr_val[i] = 0x0;
	}

	drvdata->res_idx = 0x0;
	for (i = 0; i < drvdata->nr_resource; i++)
		drvdata->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->ss_ctrl[i] = 0x0;
		drvdata->ss_pe_cmp[i] = 0x0;
	}

	/* Comparators come in pairs, hence nr_addr_cmp * 2 entries */
	drvdata->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		drvdata->addr_val[i] = 0x0;
		drvdata->addr_acc[i] = 0x0;
		drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	drvdata->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++)
		drvdata->ctxid_val[i] = 0x0;
	drvdata->ctxid_mask0 = 0x0;
	drvdata->ctxid_mask1 = 0x0;

	drvdata->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		drvdata->vmid_val[i] = 0x0;
	drvdata->vmid_mask0 = 0x0;
	drvdata->vmid_mask1 = 0x0;

	/* Default trace ID: CPU number + 1 (0 is reserved) */
	drvdata->trcid = drvdata->cpu + 1;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_WO(reset);
525
526static ssize_t mode_show(struct device *dev,
527 struct device_attribute *attr,
528 char *buf)
529{
530 unsigned long val;
531 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
532
533 val = drvdata->mode;
534 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
535}
536
/*
 * mode_store - sysfs write hook that translates the user-supplied mode mask
 * into the cached TRCCONFIGR/TRCEVENTCTL1R/TRCSTALLCTLR/TRCVICTLR shadow
 * values.  Each feature bit is only set when the hardware advertises the
 * corresponding capability in drvdata.
 */
static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->mode = val & ETMv4_MODE_ALL;

	if (drvdata->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		drvdata->cfg &= ~(BIT(1) | BIT(2));
		if (drvdata->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			drvdata->cfg |= BIT(1);
		if (drvdata->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			drvdata->cfg |= BIT(2);
		if (drvdata->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			drvdata->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		drvdata->cfg |= BIT(3);
	else
		drvdata->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
	    (drvdata->trccci == true))
		drvdata->cfg |= BIT(4);
	else
		drvdata->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		drvdata->cfg |= BIT(6);
	else
		drvdata->cfg &= ~BIT(6);

	/* bit[7], VMID tracing bit */
	if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		drvdata->cfg |= BIT(7);
	else
		drvdata->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(drvdata->mode);
	if (drvdata->trccond == true) {
		drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		drvdata->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		drvdata->cfg |= BIT(11);
	else
		drvdata->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		drvdata->cfg |= BIT(12);
	else
		drvdata->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(drvdata->mode);
	/* start by clearing QE bits */
	drvdata->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		drvdata->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		drvdata->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		drvdata->eventctrl1 |= BIT(11);
	else
		drvdata->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		drvdata->eventctrl1 |= BIT(12);
	else
		drvdata->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (drvdata->mode & ETM_MODE_ISTALL_EN)
		drvdata->stall_ctrl |= BIT(8);
	else
		drvdata->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (drvdata->mode & ETM_MODE_INSTPRIO)
		drvdata->stall_ctrl |= BIT(10);
	else
		drvdata->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		drvdata->stall_ctrl |= BIT(13);
	else
		drvdata->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
		drvdata->vinst_ctrl |= BIT(9);
	else
		drvdata->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (drvdata->mode & ETM_MODE_TRACE_RESET)
		drvdata->vinst_ctrl |= BIT(10);
	else
		drvdata->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		drvdata->vinst_ctrl |= BIT(11);
	else
		drvdata->vinst_ctrl &= ~BIT(11);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(mode);
686
687static ssize_t pe_show(struct device *dev,
688 struct device_attribute *attr,
689 char *buf)
690{
691 unsigned long val;
692 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
693
694 val = drvdata->pe_sel;
695 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
696}
697
698static ssize_t pe_store(struct device *dev,
699 struct device_attribute *attr,
700 const char *buf, size_t size)
701{
702 unsigned long val;
703 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
704
705 if (kstrtoul(buf, 16, &val))
706 return -EINVAL;
707
708 spin_lock(&drvdata->spinlock);
709 if (val > drvdata->nr_pe) {
710 spin_unlock(&drvdata->spinlock);
711 return -EINVAL;
712 }
713
714 drvdata->pe_sel = val;
715 spin_unlock(&drvdata->spinlock);
716 return size;
717}
718static DEVICE_ATTR_RW(pe);
719
720static ssize_t event_show(struct device *dev,
721 struct device_attribute *attr,
722 char *buf)
723{
724 unsigned long val;
725 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
726
727 val = drvdata->eventctrl0;
728 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
729}
730
/*
 * event_store - set the event selectors kept for TRCEVENTCTL0R.  The number
 * of EVENT fields accepted depends on how many events the hardware
 * implements (nr_event); unimplemented high bytes are masked off.
 */
static ssize_t event_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	switch (drvdata->nr_event) {
	case 0x0:
		/* EVENT0, bits[7:0] */
		drvdata->eventctrl0 = val & 0xFF;
		break;
	case 0x1:
		/* EVENT1, bits[15:8] */
		drvdata->eventctrl0 = val & 0xFFFF;
		break;
	case 0x2:
		/* EVENT2, bits[23:16] */
		drvdata->eventctrl0 = val & 0xFFFFFF;
		break;
	case 0x3:
		/* EVENT3, bits[31:24] */
		drvdata->eventctrl0 = val;
		break;
	default:
		/* nr_event out of range: leave the selectors untouched */
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event);
766
/* event_instren: instruction event enables, TRCEVENTCTL1R bits[3:0] */
static ssize_t event_instren_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = BMVAL(drvdata->eventctrl1, 0, 3);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
	switch (drvdata->nr_event) {
	case 0x0:
		/*
		 * generate Event element for event 1
		 * NOTE(review): BIT(1) looks inconsistent with the other
		 * cases, where the single-event mask would be BIT(0) —
		 * confirm against the TRCEVENTCTL1R.INSTEN field layout.
		 */
		drvdata->eventctrl1 |= val & BIT(1);
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		drvdata->eventctrl1 |= val & 0xF;
		break;
	default:
		/* unexpected nr_event: leave all enables cleared */
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);
815
816static ssize_t event_ts_show(struct device *dev,
817 struct device_attribute *attr,
818 char *buf)
819{
820 unsigned long val;
821 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
822
823 val = drvdata->ts_ctrl;
824 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
825}
826
827static ssize_t event_ts_store(struct device *dev,
828 struct device_attribute *attr,
829 const char *buf, size_t size)
830{
831 unsigned long val;
832 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
833
834 if (kstrtoul(buf, 16, &val))
835 return -EINVAL;
836 if (!drvdata->ts_size)
837 return -EINVAL;
838
839 drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
840 return size;
841}
842static DEVICE_ATTR_RW(event_ts);
843
844static ssize_t syncfreq_show(struct device *dev,
845 struct device_attribute *attr,
846 char *buf)
847{
848 unsigned long val;
849 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
850
851 val = drvdata->syncfreq;
852 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
853}
854
855static ssize_t syncfreq_store(struct device *dev,
856 struct device_attribute *attr,
857 const char *buf, size_t size)
858{
859 unsigned long val;
860 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
861
862 if (kstrtoul(buf, 16, &val))
863 return -EINVAL;
864 if (drvdata->syncpr == true)
865 return -EINVAL;
866
867 drvdata->syncfreq = val & ETMv4_SYNC_MASK;
868 return size;
869}
870static DEVICE_ATTR_RW(syncfreq);
871
872static ssize_t cyc_threshold_show(struct device *dev,
873 struct device_attribute *attr,
874 char *buf)
875{
876 unsigned long val;
877 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
878
879 val = drvdata->ccctlr;
880 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
881}
882
883static ssize_t cyc_threshold_store(struct device *dev,
884 struct device_attribute *attr,
885 const char *buf, size_t size)
886{
887 unsigned long val;
888 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
889
890 if (kstrtoul(buf, 16, &val))
891 return -EINVAL;
892 if (val < drvdata->ccitmin)
893 return -EINVAL;
894
895 drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
896 return size;
897}
898static DEVICE_ATTR_RW(cyc_threshold);
899
900static ssize_t bb_ctrl_show(struct device *dev,
901 struct device_attribute *attr,
902 char *buf)
903{
904 unsigned long val;
905 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
906
907 val = drvdata->bb_ctrl;
908 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
909}
910
911static ssize_t bb_ctrl_store(struct device *dev,
912 struct device_attribute *attr,
913 const char *buf, size_t size)
914{
915 unsigned long val;
916 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
917
918 if (kstrtoul(buf, 16, &val))
919 return -EINVAL;
920 if (drvdata->trcbb == false)
921 return -EINVAL;
922 if (!drvdata->nr_addr_cmp)
923 return -EINVAL;
924 /*
925 * Bit[7:0] selects which address range comparator is used for
926 * branch broadcast control.
927 */
928 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
929 return -EINVAL;
930
931 drvdata->bb_ctrl = val;
932 return size;
933}
934static DEVICE_ATTR_RW(bb_ctrl);
935
936static ssize_t event_vinst_show(struct device *dev,
937 struct device_attribute *attr,
938 char *buf)
939{
940 unsigned long val;
941 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
942
943 val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
944 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
945}
946
947static ssize_t event_vinst_store(struct device *dev,
948 struct device_attribute *attr,
949 const char *buf, size_t size)
950{
951 unsigned long val;
952 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
953
954 if (kstrtoul(buf, 16, &val))
955 return -EINVAL;
956
957 spin_lock(&drvdata->spinlock);
958 val &= ETMv4_EVENT_MASK;
959 drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
960 drvdata->vinst_ctrl |= val;
961 spin_unlock(&drvdata->spinlock);
962 return size;
963}
964static DEVICE_ATTR_RW(event_vinst);
965
966static ssize_t s_exlevel_vinst_show(struct device *dev,
967 struct device_attribute *attr,
968 char *buf)
969{
970 unsigned long val;
971 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
972
973 val = BMVAL(drvdata->vinst_ctrl, 16, 19);
974 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
975}
976
977static ssize_t s_exlevel_vinst_store(struct device *dev,
978 struct device_attribute *attr,
979 const char *buf, size_t size)
980{
981 unsigned long val;
982 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
983
984 if (kstrtoul(buf, 16, &val))
985 return -EINVAL;
986
987 spin_lock(&drvdata->spinlock);
988 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
989 drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
990 /* enable instruction tracing for corresponding exception level */
991 val &= drvdata->s_ex_level;
992 drvdata->vinst_ctrl |= (val << 16);
993 spin_unlock(&drvdata->spinlock);
994 return size;
995}
996static DEVICE_ATTR_RW(s_exlevel_vinst);
997
998static ssize_t ns_exlevel_vinst_show(struct device *dev,
999 struct device_attribute *attr,
1000 char *buf)
1001{
1002 unsigned long val;
1003 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1004
1005 /* EXLEVEL_NS, bits[23:20] */
1006 val = BMVAL(drvdata->vinst_ctrl, 20, 23);
1007 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1008}
1009
1010static ssize_t ns_exlevel_vinst_store(struct device *dev,
1011 struct device_attribute *attr,
1012 const char *buf, size_t size)
1013{
1014 unsigned long val;
1015 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1016
1017 if (kstrtoul(buf, 16, &val))
1018 return -EINVAL;
1019
1020 spin_lock(&drvdata->spinlock);
1021 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
1022 drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
1023 /* enable instruction tracing for corresponding exception level */
1024 val &= drvdata->ns_ex_level;
1025 drvdata->vinst_ctrl |= (val << 20);
1026 spin_unlock(&drvdata->spinlock);
1027 return size;
1028}
1029static DEVICE_ATTR_RW(ns_exlevel_vinst);
1030
1031static ssize_t addr_idx_show(struct device *dev,
1032 struct device_attribute *attr,
1033 char *buf)
1034{
1035 unsigned long val;
1036 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037
1038 val = drvdata->addr_idx;
1039 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1040}
1041
1042static ssize_t addr_idx_store(struct device *dev,
1043 struct device_attribute *attr,
1044 const char *buf, size_t size)
1045{
1046 unsigned long val;
1047 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1048
1049 if (kstrtoul(buf, 16, &val))
1050 return -EINVAL;
1051 if (val >= drvdata->nr_addr_cmp * 2)
1052 return -EINVAL;
1053
1054 /*
1055 * Use spinlock to ensure index doesn't change while it gets
1056 * dereferenced multiple times within a spinlock block elsewhere.
1057 */
1058 spin_lock(&drvdata->spinlock);
1059 drvdata->addr_idx = val;
1060 spin_unlock(&drvdata->spinlock);
1061 return size;
1062}
1063static DEVICE_ATTR_RW(addr_idx);
1064
/* addr_instdatatype: access type (TYPE field) of the selected comparator */
static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* TYPE, bits[1:0] of the access-control register for this index */
	val = BMVAL(drvdata->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* bound the token length to the local buffer */
	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/*
	 * Only "instr" is acted upon here; any other token is accepted
	 * but leaves the TYPE field untouched and still returns success.
	 * NOTE(review): presumably data_load/data_store handling is
	 * intentionally absent (instruction-only tracing) — confirm.
	 */
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);
1108
1109static ssize_t addr_single_show(struct device *dev,
1110 struct device_attribute *attr,
1111 char *buf)
1112{
1113 u8 idx;
1114 unsigned long val;
1115 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1116
1117 idx = drvdata->addr_idx;
1118 spin_lock(&drvdata->spinlock);
1119 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1120 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1121 spin_unlock(&drvdata->spinlock);
1122 return -EPERM;
1123 }
1124 val = (unsigned long)drvdata->addr_val[idx];
1125 spin_unlock(&drvdata->spinlock);
1126 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1127}
1128
1129static ssize_t addr_single_store(struct device *dev,
1130 struct device_attribute *attr,
1131 const char *buf, size_t size)
1132{
1133 u8 idx;
1134 unsigned long val;
1135 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1136
1137 if (kstrtoul(buf, 16, &val))
1138 return -EINVAL;
1139
1140 spin_lock(&drvdata->spinlock);
1141 idx = drvdata->addr_idx;
1142 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1143 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1144 spin_unlock(&drvdata->spinlock);
1145 return -EPERM;
1146 }
1147
1148 drvdata->addr_val[idx] = (u64)val;
1149 drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
1150 spin_unlock(&drvdata->spinlock);
1151 return size;
1152}
1153static DEVICE_ATTR_RW(addr_single);
1154
/*
 * addr_range: low/high values of an address-range comparator pair.
 * Range comparators always occupy an even/odd index pair, so the
 * currently selected index must be even.
 */
static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	/* both halves of the pair must be unused, or both already a range */
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)drvdata->addr_val[idx];
	val2 = (unsigned long)drvdata->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* expects two hex values: "<start> <end>" */
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	/* both halves of the pair must be unused, or both already a range */
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val1;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	drvdata->addr_val[idx + 1] = (u64)val2;
	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (drvdata->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
1229
/*
 * addr_start: configure the selected comparator as a trace start
 * address for the ViewInst start/stop logic.
 */
static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;

	/* comparator must be unused or already configured as a start */
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* at least one address comparator must be implemented */
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
	/* select this comparator as a start resource (low half of vissctlr) */
	drvdata->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	drvdata->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);
1284
/*
 * addr_stop: configure the selected comparator as a trace stop
 * address for the ViewInst start/stop logic.
 */
static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;

	/* comparator must be unused or already configured as a stop */
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* at least one address comparator must be implemented */
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	/* stop selectors live in the upper half of vissctlr */
	drvdata->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	drvdata->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);
1339
/*
 * addr_ctxtype: context comparison type (none/ctxid/vmid/all) of the
 * selected address comparator, CONTEXTTYPE bits[3:2].
 */
static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(drvdata->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* bound the token length to the local buffer */
	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/*
	 * Requests for a comparison type are honoured only when the
	 * hardware implements the corresponding comparators (numcidc /
	 * numvmidc); otherwise the write is silently ignored.
	 */
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			drvdata->addr_acc[idx] |= BIT(2);
			drvdata->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			drvdata->addr_acc[idx] &= ~BIT(2);
			drvdata->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			drvdata->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			drvdata->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
1403
1404static ssize_t addr_context_show(struct device *dev,
1405 struct device_attribute *attr,
1406 char *buf)
1407{
1408 u8 idx;
1409 unsigned long val;
1410 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1411
1412 spin_lock(&drvdata->spinlock);
1413 idx = drvdata->addr_idx;
1414 /* context ID comparator bits[6:4] */
1415 val = BMVAL(drvdata->addr_acc[idx], 4, 6);
1416 spin_unlock(&drvdata->spinlock);
1417 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1418}
1419
1420static ssize_t addr_context_store(struct device *dev,
1421 struct device_attribute *attr,
1422 const char *buf, size_t size)
1423{
1424 u8 idx;
1425 unsigned long val;
1426 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1427
1428 if (kstrtoul(buf, 16, &val))
1429 return -EINVAL;
1430 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1431 return -EINVAL;
1432 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1433 drvdata->numcidc : drvdata->numvmidc))
1434 return -EINVAL;
1435
1436 spin_lock(&drvdata->spinlock);
1437 idx = drvdata->addr_idx;
1438 /* clear context ID comparator bits[6:4] */
1439 drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1440 drvdata->addr_acc[idx] |= (val << 4);
1441 spin_unlock(&drvdata->spinlock);
1442 return size;
1443}
1444static DEVICE_ATTR_RW(addr_context);
1445
1446static ssize_t seq_idx_show(struct device *dev,
1447 struct device_attribute *attr,
1448 char *buf)
1449{
1450 unsigned long val;
1451 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1452
1453 val = drvdata->seq_idx;
1454 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1455}
1456
1457static ssize_t seq_idx_store(struct device *dev,
1458 struct device_attribute *attr,
1459 const char *buf, size_t size)
1460{
1461 unsigned long val;
1462 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1463
1464 if (kstrtoul(buf, 16, &val))
1465 return -EINVAL;
1466 if (val >= drvdata->nrseqstate - 1)
1467 return -EINVAL;
1468
1469 /*
1470 * Use spinlock to ensure index doesn't change while it gets
1471 * dereferenced multiple times within a spinlock block elsewhere.
1472 */
1473 spin_lock(&drvdata->spinlock);
1474 drvdata->seq_idx = val;
1475 spin_unlock(&drvdata->spinlock);
1476 return size;
1477}
1478static DEVICE_ATTR_RW(seq_idx);
1479
1480static ssize_t seq_state_show(struct device *dev,
1481 struct device_attribute *attr,
1482 char *buf)
1483{
1484 unsigned long val;
1485 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1486
1487 val = drvdata->seq_state;
1488 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1489}
1490
1491static ssize_t seq_state_store(struct device *dev,
1492 struct device_attribute *attr,
1493 const char *buf, size_t size)
1494{
1495 unsigned long val;
1496 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1497
1498 if (kstrtoul(buf, 16, &val))
1499 return -EINVAL;
1500 if (val >= drvdata->nrseqstate)
1501 return -EINVAL;
1502
1503 drvdata->seq_state = val;
1504 return size;
1505}
1506static DEVICE_ATTR_RW(seq_state);
1507
1508static ssize_t seq_event_show(struct device *dev,
1509 struct device_attribute *attr,
1510 char *buf)
1511{
1512 u8 idx;
1513 unsigned long val;
1514 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1515
1516 spin_lock(&drvdata->spinlock);
1517 idx = drvdata->seq_idx;
1518 val = drvdata->seq_ctrl[idx];
1519 spin_unlock(&drvdata->spinlock);
1520 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1521}
1522
1523static ssize_t seq_event_store(struct device *dev,
1524 struct device_attribute *attr,
1525 const char *buf, size_t size)
1526{
1527 u8 idx;
1528 unsigned long val;
1529 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1530
1531 if (kstrtoul(buf, 16, &val))
1532 return -EINVAL;
1533
1534 spin_lock(&drvdata->spinlock);
1535 idx = drvdata->seq_idx;
1536 /* RST, bits[7:0] */
1537 drvdata->seq_ctrl[idx] = val & 0xFF;
1538 spin_unlock(&drvdata->spinlock);
1539 return size;
1540}
1541static DEVICE_ATTR_RW(seq_event);
1542
1543static ssize_t seq_reset_event_show(struct device *dev,
1544 struct device_attribute *attr,
1545 char *buf)
1546{
1547 unsigned long val;
1548 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1549
1550 val = drvdata->seq_rst;
1551 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1552}
1553
1554static ssize_t seq_reset_event_store(struct device *dev,
1555 struct device_attribute *attr,
1556 const char *buf, size_t size)
1557{
1558 unsigned long val;
1559 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1560
1561 if (kstrtoul(buf, 16, &val))
1562 return -EINVAL;
1563 if (!(drvdata->nrseqstate))
1564 return -EINVAL;
1565
1566 drvdata->seq_rst = val & ETMv4_EVENT_MASK;
1567 return size;
1568}
1569static DEVICE_ATTR_RW(seq_reset_event);
1570
1571static ssize_t cntr_idx_show(struct device *dev,
1572 struct device_attribute *attr,
1573 char *buf)
1574{
1575 unsigned long val;
1576 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1577
1578 val = drvdata->cntr_idx;
1579 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1580}
1581
1582static ssize_t cntr_idx_store(struct device *dev,
1583 struct device_attribute *attr,
1584 const char *buf, size_t size)
1585{
1586 unsigned long val;
1587 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1588
1589 if (kstrtoul(buf, 16, &val))
1590 return -EINVAL;
1591 if (val >= drvdata->nr_cntr)
1592 return -EINVAL;
1593
1594 /*
1595 * Use spinlock to ensure index doesn't change while it gets
1596 * dereferenced multiple times within a spinlock block elsewhere.
1597 */
1598 spin_lock(&drvdata->spinlock);
1599 drvdata->cntr_idx = val;
1600 spin_unlock(&drvdata->spinlock);
1601 return size;
1602}
1603static DEVICE_ATTR_RW(cntr_idx);
1604
1605static ssize_t cntrldvr_show(struct device *dev,
1606 struct device_attribute *attr,
1607 char *buf)
1608{
1609 u8 idx;
1610 unsigned long val;
1611 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1612
1613 spin_lock(&drvdata->spinlock);
1614 idx = drvdata->cntr_idx;
1615 val = drvdata->cntrldvr[idx];
1616 spin_unlock(&drvdata->spinlock);
1617 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1618}
1619
1620static ssize_t cntrldvr_store(struct device *dev,
1621 struct device_attribute *attr,
1622 const char *buf, size_t size)
1623{
1624 u8 idx;
1625 unsigned long val;
1626 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1627
1628 if (kstrtoul(buf, 16, &val))
1629 return -EINVAL;
1630 if (val > ETM_CNTR_MAX_VAL)
1631 return -EINVAL;
1632
1633 spin_lock(&drvdata->spinlock);
1634 idx = drvdata->cntr_idx;
1635 drvdata->cntrldvr[idx] = val;
1636 spin_unlock(&drvdata->spinlock);
1637 return size;
1638}
1639static DEVICE_ATTR_RW(cntrldvr);
1640
1641static ssize_t cntr_val_show(struct device *dev,
1642 struct device_attribute *attr,
1643 char *buf)
1644{
1645 u8 idx;
1646 unsigned long val;
1647 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1648
1649 spin_lock(&drvdata->spinlock);
1650 idx = drvdata->cntr_idx;
1651 val = drvdata->cntr_val[idx];
1652 spin_unlock(&drvdata->spinlock);
1653 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1654}
1655
1656static ssize_t cntr_val_store(struct device *dev,
1657 struct device_attribute *attr,
1658 const char *buf, size_t size)
1659{
1660 u8 idx;
1661 unsigned long val;
1662 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1663
1664 if (kstrtoul(buf, 16, &val))
1665 return -EINVAL;
1666 if (val > ETM_CNTR_MAX_VAL)
1667 return -EINVAL;
1668
1669 spin_lock(&drvdata->spinlock);
1670 idx = drvdata->cntr_idx;
1671 drvdata->cntr_val[idx] = val;
1672 spin_unlock(&drvdata->spinlock);
1673 return size;
1674}
1675static DEVICE_ATTR_RW(cntr_val);
1676
1677static ssize_t cntr_ctrl_show(struct device *dev,
1678 struct device_attribute *attr,
1679 char *buf)
1680{
1681 u8 idx;
1682 unsigned long val;
1683 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1684
1685 spin_lock(&drvdata->spinlock);
1686 idx = drvdata->cntr_idx;
1687 val = drvdata->cntr_ctrl[idx];
1688 spin_unlock(&drvdata->spinlock);
1689 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1690}
1691
1692static ssize_t cntr_ctrl_store(struct device *dev,
1693 struct device_attribute *attr,
1694 const char *buf, size_t size)
1695{
1696 u8 idx;
1697 unsigned long val;
1698 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1699
1700 if (kstrtoul(buf, 16, &val))
1701 return -EINVAL;
1702
1703 spin_lock(&drvdata->spinlock);
1704 idx = drvdata->cntr_idx;
1705 drvdata->cntr_ctrl[idx] = val;
1706 spin_unlock(&drvdata->spinlock);
1707 return size;
1708}
1709static DEVICE_ATTR_RW(cntr_ctrl);
1710
1711static ssize_t res_idx_show(struct device *dev,
1712 struct device_attribute *attr,
1713 char *buf)
1714{
1715 unsigned long val;
1716 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1717
1718 val = drvdata->res_idx;
1719 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1720}
1721
1722static ssize_t res_idx_store(struct device *dev,
1723 struct device_attribute *attr,
1724 const char *buf, size_t size)
1725{
1726 unsigned long val;
1727 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1728
1729 if (kstrtoul(buf, 16, &val))
1730 return -EINVAL;
1731 /* Resource selector pair 0 is always implemented and reserved */
1732 if ((val == 0) || (val >= drvdata->nr_resource))
1733 return -EINVAL;
1734
1735 /*
1736 * Use spinlock to ensure index doesn't change while it gets
1737 * dereferenced multiple times within a spinlock block elsewhere.
1738 */
1739 spin_lock(&drvdata->spinlock);
1740 drvdata->res_idx = val;
1741 spin_unlock(&drvdata->spinlock);
1742 return size;
1743}
1744static DEVICE_ATTR_RW(res_idx);
1745
/* res_ctrl: control register of the currently selected resource selector */
static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->res_idx;
	val = drvdata->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->res_idx;
	/*
	 * For odd idx pair inversal bit is RES0: the pair-inversion
	 * control only exists on the even member of a selector pair,
	 * so silently strip it here rather than rejecting the write.
	 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	drvdata->res_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
1783
1784static ssize_t ctxid_idx_show(struct device *dev,
1785 struct device_attribute *attr,
1786 char *buf)
1787{
1788 unsigned long val;
1789 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1790
1791 val = drvdata->ctxid_idx;
1792 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1793}
1794
1795static ssize_t ctxid_idx_store(struct device *dev,
1796 struct device_attribute *attr,
1797 const char *buf, size_t size)
1798{
1799 unsigned long val;
1800 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1801
1802 if (kstrtoul(buf, 16, &val))
1803 return -EINVAL;
1804 if (val >= drvdata->numcidc)
1805 return -EINVAL;
1806
1807 /*
1808 * Use spinlock to ensure index doesn't change while it gets
1809 * dereferenced multiple times within a spinlock block elsewhere.
1810 */
1811 spin_lock(&drvdata->spinlock);
1812 drvdata->ctxid_idx = val;
1813 spin_unlock(&drvdata->spinlock);
1814 return size;
1815}
1816static DEVICE_ATTR_RW(ctxid_idx);
1817
1818static ssize_t ctxid_val_show(struct device *dev,
1819 struct device_attribute *attr,
1820 char *buf)
1821{
1822 u8 idx;
1823 unsigned long val;
1824 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1825
1826 spin_lock(&drvdata->spinlock);
1827 idx = drvdata->ctxid_idx;
1828 val = (unsigned long)drvdata->ctxid_val[idx];
1829 spin_unlock(&drvdata->spinlock);
1830 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1831}
1832
1833static ssize_t ctxid_val_store(struct device *dev,
1834 struct device_attribute *attr,
1835 const char *buf, size_t size)
1836{
1837 u8 idx;
1838 unsigned long val;
1839 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1840
1841 /*
1842 * only implemented when ctxid tracing is enabled, i.e. at least one
1843 * ctxid comparator is implemented and ctxid is greater than 0 bits
1844 * in length
1845 */
1846 if (!drvdata->ctxid_size || !drvdata->numcidc)
1847 return -EINVAL;
1848 if (kstrtoul(buf, 16, &val))
1849 return -EINVAL;
1850
1851 spin_lock(&drvdata->spinlock);
1852 idx = drvdata->ctxid_idx;
1853 drvdata->ctxid_val[idx] = (u64)val;
1854 spin_unlock(&drvdata->spinlock);
1855 return size;
1856}
1857static DEVICE_ATTR_RW(ctxid_val);
1858
1859static ssize_t ctxid_masks_show(struct device *dev,
1860 struct device_attribute *attr,
1861 char *buf)
1862{
1863 unsigned long val1, val2;
1864 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1865
1866 spin_lock(&drvdata->spinlock);
1867 val1 = drvdata->ctxid_mask0;
1868 val2 = drvdata->ctxid_mask1;
1869 spin_unlock(&drvdata->spinlock);
1870 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1871}
1872
1873static ssize_t ctxid_masks_store(struct device *dev,
1874 struct device_attribute *attr,
1875 const char *buf, size_t size)
1876{
1877 u8 i, j, maskbyte;
1878 unsigned long val1, val2, mask;
1879 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1880
1881 /*
1882 * only implemented when ctxid tracing is enabled, i.e. at least one
1883 * ctxid comparator is implemented and ctxid is greater than 0 bits
1884 * in length
1885 */
1886 if (!drvdata->ctxid_size || !drvdata->numcidc)
1887 return -EINVAL;
1888 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1889 return -EINVAL;
1890
1891 spin_lock(&drvdata->spinlock);
1892 /*
1893 * each byte[0..3] controls mask value applied to ctxid
1894 * comparator[0..3]
1895 */
1896 switch (drvdata->numcidc) {
1897 case 0x1:
1898 /* COMP0, bits[7:0] */
1899 drvdata->ctxid_mask0 = val1 & 0xFF;
1900 break;
1901 case 0x2:
1902 /* COMP1, bits[15:8] */
1903 drvdata->ctxid_mask0 = val1 & 0xFFFF;
1904 break;
1905 case 0x3:
1906 /* COMP2, bits[23:16] */
1907 drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
1908 break;
1909 case 0x4:
1910 /* COMP3, bits[31:24] */
1911 drvdata->ctxid_mask0 = val1;
1912 break;
1913 case 0x5:
1914 /* COMP4, bits[7:0] */
1915 drvdata->ctxid_mask0 = val1;
1916 drvdata->ctxid_mask1 = val2 & 0xFF;
1917 break;
1918 case 0x6:
1919 /* COMP5, bits[15:8] */
1920 drvdata->ctxid_mask0 = val1;
1921 drvdata->ctxid_mask1 = val2 & 0xFFFF;
1922 break;
1923 case 0x7:
1924 /* COMP6, bits[23:16] */
1925 drvdata->ctxid_mask0 = val1;
1926 drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
1927 break;
1928 case 0x8:
1929 /* COMP7, bits[31:24] */
1930 drvdata->ctxid_mask0 = val1;
1931 drvdata->ctxid_mask1 = val2;
1932 break;
1933 default:
1934 break;
1935 }
1936 /*
1937 * If software sets a mask bit to 1, it must program relevant byte
1938 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1939 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1940 * of ctxid comparator0 value (corresponding to byte 0) register.
1941 */
1942 mask = drvdata->ctxid_mask0;
1943 for (i = 0; i < drvdata->numcidc; i++) {
1944 /* mask value of corresponding ctxid comparator */
1945 maskbyte = mask & ETMv4_EVENT_MASK;
1946 /*
1947 * each bit corresponds to a byte of respective ctxid comparator
1948 * value register
1949 */
1950 for (j = 0; j < 8; j++) {
1951 if (maskbyte & 1)
1952 drvdata->ctxid_val[i] &= ~(0xFF << (j * 8));
1953 maskbyte >>= 1;
1954 }
1955 /* Select the next ctxid comparator mask value */
1956 if (i == 3)
1957 /* ctxid comparators[4-7] */
1958 mask = drvdata->ctxid_mask1;
1959 else
1960 mask >>= 0x8;
1961 }
1962
1963 spin_unlock(&drvdata->spinlock);
1964 return size;
1965}
1966static DEVICE_ATTR_RW(ctxid_masks);
1967
1968static ssize_t vmid_idx_show(struct device *dev,
1969 struct device_attribute *attr,
1970 char *buf)
1971{
1972 unsigned long val;
1973 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1974
1975 val = drvdata->vmid_idx;
1976 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1977}
1978
1979static ssize_t vmid_idx_store(struct device *dev,
1980 struct device_attribute *attr,
1981 const char *buf, size_t size)
1982{
1983 unsigned long val;
1984 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1985
1986 if (kstrtoul(buf, 16, &val))
1987 return -EINVAL;
1988 if (val >= drvdata->numvmidc)
1989 return -EINVAL;
1990
1991 /*
1992 * Use spinlock to ensure index doesn't change while it gets
1993 * dereferenced multiple times within a spinlock block elsewhere.
1994 */
1995 spin_lock(&drvdata->spinlock);
1996 drvdata->vmid_idx = val;
1997 spin_unlock(&drvdata->spinlock);
1998 return size;
1999}
2000static DEVICE_ATTR_RW(vmid_idx);
2001
2002static ssize_t vmid_val_show(struct device *dev,
2003 struct device_attribute *attr,
2004 char *buf)
2005{
2006 unsigned long val;
2007 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2008
2009 val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
2010 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2011}
2012
2013static ssize_t vmid_val_store(struct device *dev,
2014 struct device_attribute *attr,
2015 const char *buf, size_t size)
2016{
2017 unsigned long val;
2018 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2019
2020 /*
2021 * only implemented when vmid tracing is enabled, i.e. at least one
2022 * vmid comparator is implemented and at least 8 bit vmid size
2023 */
2024 if (!drvdata->vmid_size || !drvdata->numvmidc)
2025 return -EINVAL;
2026 if (kstrtoul(buf, 16, &val))
2027 return -EINVAL;
2028
2029 spin_lock(&drvdata->spinlock);
2030 drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
2031 spin_unlock(&drvdata->spinlock);
2032 return size;
2033}
2034static DEVICE_ATTR_RW(vmid_val);
2035
2036static ssize_t vmid_masks_show(struct device *dev,
2037 struct device_attribute *attr, char *buf)
2038{
2039 unsigned long val1, val2;
2040 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2041
2042 spin_lock(&drvdata->spinlock);
2043 val1 = drvdata->vmid_mask0;
2044 val2 = drvdata->vmid_mask1;
2045 spin_unlock(&drvdata->spinlock);
2046 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2047}
2048
2049static ssize_t vmid_masks_store(struct device *dev,
2050 struct device_attribute *attr,
2051 const char *buf, size_t size)
2052{
2053 u8 i, j, maskbyte;
2054 unsigned long val1, val2, mask;
2055 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2056 /*
2057 * only implemented when vmid tracing is enabled, i.e. at least one
2058 * vmid comparator is implemented and at least 8 bit vmid size
2059 */
2060 if (!drvdata->vmid_size || !drvdata->numvmidc)
2061 return -EINVAL;
2062 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
2063 return -EINVAL;
2064
2065 spin_lock(&drvdata->spinlock);
2066
2067 /*
2068 * each byte[0..3] controls mask value applied to vmid
2069 * comparator[0..3]
2070 */
2071 switch (drvdata->numvmidc) {
2072 case 0x1:
2073 /* COMP0, bits[7:0] */
2074 drvdata->vmid_mask0 = val1 & 0xFF;
2075 break;
2076 case 0x2:
2077 /* COMP1, bits[15:8] */
2078 drvdata->vmid_mask0 = val1 & 0xFFFF;
2079 break;
2080 case 0x3:
2081 /* COMP2, bits[23:16] */
2082 drvdata->vmid_mask0 = val1 & 0xFFFFFF;
2083 break;
2084 case 0x4:
2085 /* COMP3, bits[31:24] */
2086 drvdata->vmid_mask0 = val1;
2087 break;
2088 case 0x5:
2089 /* COMP4, bits[7:0] */
2090 drvdata->vmid_mask0 = val1;
2091 drvdata->vmid_mask1 = val2 & 0xFF;
2092 break;
2093 case 0x6:
2094 /* COMP5, bits[15:8] */
2095 drvdata->vmid_mask0 = val1;
2096 drvdata->vmid_mask1 = val2 & 0xFFFF;
2097 break;
2098 case 0x7:
2099 /* COMP6, bits[23:16] */
2100 drvdata->vmid_mask0 = val1;
2101 drvdata->vmid_mask1 = val2 & 0xFFFFFF;
2102 break;
2103 case 0x8:
2104 /* COMP7, bits[31:24] */
2105 drvdata->vmid_mask0 = val1;
2106 drvdata->vmid_mask1 = val2;
2107 break;
2108 default:
2109 break;
2110 }
2111
2112 /*
2113 * If software sets a mask bit to 1, it must program relevant byte
2114 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2115 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2116 * of vmid comparator0 value (corresponding to byte 0) register.
2117 */
2118 mask = drvdata->vmid_mask0;
2119 for (i = 0; i < drvdata->numvmidc; i++) {
2120 /* mask value of corresponding vmid comparator */
2121 maskbyte = mask & ETMv4_EVENT_MASK;
2122 /*
2123 * each bit corresponds to a byte of respective vmid comparator
2124 * value register
2125 */
2126 for (j = 0; j < 8; j++) {
2127 if (maskbyte & 1)
2128 drvdata->vmid_val[i] &= ~(0xFF << (j * 8));
2129 maskbyte >>= 1;
2130 }
2131 /* Select the next vmid comparator mask value */
2132 if (i == 3)
2133 /* vmid comparators[4-7] */
2134 mask = drvdata->vmid_mask1;
2135 else
2136 mask >>= 0x8;
2137 }
2138 spin_unlock(&drvdata->spinlock);
2139 return size;
2140}
2141static DEVICE_ATTR_RW(vmid_masks);
2142
2143static ssize_t cpu_show(struct device *dev,
2144 struct device_attribute *attr, char *buf)
2145{
2146 int val;
2147 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2148
2149 val = drvdata->cpu;
2150 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2151
2152}
2153static DEVICE_ATTR_RO(cpu);
2154
/*
 * Main sysfs attribute set for one ETMv4 tracer: implementation capability
 * counters (nr_*, num*) and the configuration knobs defined above.
 * NULL-terminated, as required by the attribute_group API.
 */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_val.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
2204
/*
 * coresight_simple_func - generate a read-only sysfs show function that
 * dumps the raw 32-bit value of the device register at @offset.
 * NOTE(review): the register is read live with no power/clock handling here —
 * presumably the component is kept accessible by the core; confirm.
 */
#define coresight_simple_func(name, offset) \
static ssize_t name##_show(struct device *_dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
			readl_relaxed(drvdata->base + offset)); \
} \
DEVICE_ATTR_RO(name)
2214
/* Management/ID registers exposed read-only (grouped under "mgmt" below). */
coresight_simple_func(trcoslsr, TRCOSLSR);
coresight_simple_func(trcpdcr, TRCPDCR);
coresight_simple_func(trcpdsr, TRCPDSR);
coresight_simple_func(trclsr, TRCLSR);
coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
coresight_simple_func(trcdevid, TRCDEVID);
coresight_simple_func(trcdevtype, TRCDEVTYPE);
coresight_simple_func(trcpidr0, TRCPIDR0);
coresight_simple_func(trcpidr1, TRCPIDR1);
coresight_simple_func(trcpidr2, TRCPIDR2);
coresight_simple_func(trcpidr3, TRCPIDR3);
2226
/* Raw management register dumps, published under the "mgmt" subdirectory. */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
2241
/* ID registers exposed read-only (grouped under "trcidr" below). */
coresight_simple_func(trcidr0, TRCIDR0);
coresight_simple_func(trcidr1, TRCIDR1);
coresight_simple_func(trcidr2, TRCIDR2);
coresight_simple_func(trcidr3, TRCIDR3);
coresight_simple_func(trcidr4, TRCIDR4);
coresight_simple_func(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_simple_func(trcidr8, TRCIDR8);
coresight_simple_func(trcidr9, TRCIDR9);
coresight_simple_func(trcidr10, TRCIDR10);
coresight_simple_func(trcidr11, TRCIDR11);
coresight_simple_func(trcidr12, TRCIDR12);
coresight_simple_func(trcidr13, TRCIDR13);
2255
/* Raw TRCIDRn register dumps, published under the "trcidr" subdirectory. */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
2272
/* Unnamed group: attributes appear directly in the device directory. */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

/* Management registers live in a "mgmt" subdirectory. */
static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

/* ID registers live in a "trcidr" subdirectory. */
static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

/* NULL-terminated list handed to coresight_register() via desc->groups. */
static const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
2293
2294static void etm4_init_arch_data(void *info)
2295{
2296 u32 etmidr0;
2297 u32 etmidr1;
2298 u32 etmidr2;
2299 u32 etmidr3;
2300 u32 etmidr4;
2301 u32 etmidr5;
2302 struct etmv4_drvdata *drvdata = info;
2303
2304 CS_UNLOCK(drvdata->base);
2305
2306 /* find all capabilities of the tracing unit */
2307 etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
2308
2309 /* INSTP0, bits[2:1] P0 tracing support field */
2310 if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
2311 drvdata->instrp0 = true;
2312 else
2313 drvdata->instrp0 = false;
2314
2315 /* TRCBB, bit[5] Branch broadcast tracing support bit */
2316 if (BMVAL(etmidr0, 5, 5))
2317 drvdata->trcbb = true;
2318 else
2319 drvdata->trcbb = false;
2320
2321 /* TRCCOND, bit[6] Conditional instruction tracing support bit */
2322 if (BMVAL(etmidr0, 6, 6))
2323 drvdata->trccond = true;
2324 else
2325 drvdata->trccond = false;
2326
2327 /* TRCCCI, bit[7] Cycle counting instruction bit */
2328 if (BMVAL(etmidr0, 7, 7))
2329 drvdata->trccci = true;
2330 else
2331 drvdata->trccci = false;
2332
2333 /* RETSTACK, bit[9] Return stack bit */
2334 if (BMVAL(etmidr0, 9, 9))
2335 drvdata->retstack = true;
2336 else
2337 drvdata->retstack = false;
2338
2339 /* NUMEVENT, bits[11:10] Number of events field */
2340 drvdata->nr_event = BMVAL(etmidr0, 10, 11);
2341 /* QSUPP, bits[16:15] Q element support field */
2342 drvdata->q_support = BMVAL(etmidr0, 15, 16);
2343 /* TSSIZE, bits[28:24] Global timestamp size field */
2344 drvdata->ts_size = BMVAL(etmidr0, 24, 28);
2345
2346 /* base architecture of trace unit */
2347 etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
2348 /*
2349 * TRCARCHMIN, bits[7:4] architecture the minor version number
2350 * TRCARCHMAJ, bits[11:8] architecture major versin number
2351 */
2352 drvdata->arch = BMVAL(etmidr1, 4, 11);
2353
2354 /* maximum size of resources */
2355 etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
2356 /* CIDSIZE, bits[9:5] Indicates the Context ID size */
2357 drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
2358 /* VMIDSIZE, bits[14:10] Indicates the VMID size */
2359 drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
2360 /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
2361 drvdata->ccsize = BMVAL(etmidr2, 25, 28);
2362
2363 etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
2364 /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
2365 drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
2366 /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
2367 drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
2368 /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
2369 drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
2370
2371 /*
2372 * TRCERR, bit[24] whether a trace unit can trace a
2373 * system error exception.
2374 */
2375 if (BMVAL(etmidr3, 24, 24))
2376 drvdata->trc_error = true;
2377 else
2378 drvdata->trc_error = false;
2379
2380 /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
2381 if (BMVAL(etmidr3, 25, 25))
2382 drvdata->syncpr = true;
2383 else
2384 drvdata->syncpr = false;
2385
2386 /* STALLCTL, bit[26] is stall control implemented? */
2387 if (BMVAL(etmidr3, 26, 26))
2388 drvdata->stallctl = true;
2389 else
2390 drvdata->stallctl = false;
2391
2392 /* SYSSTALL, bit[27] implementation can support stall control? */
2393 if (BMVAL(etmidr3, 27, 27))
2394 drvdata->sysstall = true;
2395 else
2396 drvdata->sysstall = false;
2397
2398 /* NUMPROC, bits[30:28] the number of PEs available for tracing */
2399 drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
2400
2401 /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
2402 if (BMVAL(etmidr3, 31, 31))
2403 drvdata->nooverflow = true;
2404 else
2405 drvdata->nooverflow = false;
2406
2407 /* number of resources trace unit supports */
2408 etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
2409 /* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
2410 drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
2411 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
2412 drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
2413 /* NUMRSPAIR, bits[19:16] the number of resource pairs for tracing */
2414 drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
2415 /*
2416 * NUMSSCC, bits[23:20] the number of single-shot
2417 * comparator control for tracing
2418 */
2419 drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
2420 /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
2421 drvdata->numcidc = BMVAL(etmidr4, 24, 27);
2422 /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
2423 drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
2424
2425 etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
2426 /* NUMEXTIN, bits[8:0] number of external inputs implemented */
2427 drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
2428 /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
2429 drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
2430 /* ATBTRIG, bit[22] implementation can support ATB triggers? */
2431 if (BMVAL(etmidr5, 22, 22))
2432 drvdata->atbtrig = true;
2433 else
2434 drvdata->atbtrig = false;
2435 /*
2436 * LPOVERRIDE, bit[23] implementation supports
2437 * low-power state override
2438 */
2439 if (BMVAL(etmidr5, 23, 23))
2440 drvdata->lpoverride = true;
2441 else
2442 drvdata->lpoverride = false;
2443 /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
2444 drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
2445 /* NUMCNTR, bits[30:28] number of counters available for tracing */
2446 drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
2447 CS_LOCK(drvdata->base);
2448}
2449
/*
 * etm4_init_default_data - set a conservative default software configuration:
 * trace everything (ViewInst enabled, no filtering), tag it with context
 * ID/VMID/timestamps, and leave all optional resources (events, counters,
 * sequencer, single-shot comparators) cleared.  Only shadow state in
 * @drvdata is written here; nothing touches the hardware.
 */
static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
{
	int i;

	drvdata->pe_sel = 0x0;
	drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
			ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);

	/* disable all events tracing */
	drvdata->eventctrl0 = 0x0;
	drvdata->eventctrl1 = 0x0;

	/* disable stalling */
	drvdata->stall_ctrl = 0x0;

	/* disable timestamp event */
	drvdata->ts_ctrl = 0x0;

	/* enable trace synchronization every 4096 bytes for trace */
	if (drvdata->syncpr == false)
		drvdata->syncfreq = 0xC;

	/*
	 * enable viewInst to trace everything with start-stop logic in
	 * started state
	 */
	drvdata->vinst_ctrl |= BIT(0);
	/* set initial state of start-stop logic */
	if (drvdata->nr_addr_cmp)
		drvdata->vinst_ctrl |= BIT(9);

	/* no address range filtering for ViewInst */
	drvdata->viiectlr = 0x0;
	/* no start-stop filtering for ViewInst */
	drvdata->vissctlr = 0x0;

	/* disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		drvdata->seq_ctrl[i] = 0x0;
	drvdata->seq_rst = 0x0;
	drvdata->seq_state = 0x0;

	/* disable external input events */
	drvdata->ext_inp = 0x0;

	/* counters: no reload value, no control event, start at zero */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntrldvr[i] = 0x0;
		drvdata->cntr_ctrl[i] = 0x0;
		drvdata->cntr_val[i] = 0x0;
	}

	/*
	 * Selectors 0 and 1 are skipped — presumably fixed/reserved per the
	 * ETMv4 architecture; confirm against the TRCRSCTLRn description.
	 */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		drvdata->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->ss_ctrl[i] = 0x0;
		drvdata->ss_pe_cmp[i] = 0x0;
	}

	/*
	 * Default the first address comparator pair to the kernel text
	 * section (_stext.._etext) as an address range.
	 */
	if (drvdata->nr_addr_cmp >= 1) {
		drvdata->addr_val[0] = (unsigned long)_stext;
		drvdata->addr_val[1] = (unsigned long)_etext;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	/* clear all context ID / VMID comparator values and byte masks */
	for (i = 0; i < drvdata->numcidc; i++)
		drvdata->ctxid_val[i] = 0x0;
	drvdata->ctxid_mask0 = 0x0;
	drvdata->ctxid_mask1 = 0x0;

	for (i = 0; i < drvdata->numvmidc; i++)
		drvdata->vmid_val[i] = 0x0;
	drvdata->vmid_mask0 = 0x0;
	drvdata->vmid_mask1 = 0x0;

	/*
	 * A trace ID value of 0 is invalid, so let's start at some
	 * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
	 * start at 0x20.
	 */
	drvdata->trcid = 0x20 + drvdata->cpu;
}
2533
2534static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
2535 void *hcpu)
2536{
2537 unsigned int cpu = (unsigned long)hcpu;
2538
2539 if (!etmdrvdata[cpu])
2540 goto out;
2541
2542 switch (action & (~CPU_TASKS_FROZEN)) {
2543 case CPU_STARTING:
2544 spin_lock(&etmdrvdata[cpu]->spinlock);
2545 if (!etmdrvdata[cpu]->os_unlock) {
2546 etm4_os_unlock(etmdrvdata[cpu]);
2547 etmdrvdata[cpu]->os_unlock = true;
2548 }
2549
2550 if (etmdrvdata[cpu]->enable)
2551 etm4_enable_hw(etmdrvdata[cpu]);
2552 spin_unlock(&etmdrvdata[cpu]->spinlock);
2553 break;
2554
2555 case CPU_ONLINE:
2556 if (etmdrvdata[cpu]->boot_enable &&
2557 !etmdrvdata[cpu]->sticky_enable)
2558 coresight_enable(etmdrvdata[cpu]->csdev);
2559 break;
2560
2561 case CPU_DYING:
2562 spin_lock(&etmdrvdata[cpu]->spinlock);
2563 if (etmdrvdata[cpu]->enable)
2564 etm4_disable_hw(etmdrvdata[cpu]);
2565 spin_unlock(&etmdrvdata[cpu]->spinlock);
2566 break;
2567 }
2568out:
2569 return NOTIFY_OK;
2570}
2571
2572static struct notifier_block etm4_cpu_notifier = {
2573 .notifier_call = etm4_cpu_callback,
2574};
2575
/*
 * etm4_probe - AMBA bind: map the component, discover its capabilities on
 * the CPU it is affine to, and register it with the coresight framework.
 */
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etmv4_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	/* Affine CPU and trace-path connections come from the device tree. */
	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->cpu = pdata ? pdata->cpu : 0;

	/*
	 * Hold off CPU hotplug so the affine CPU cannot go away between
	 * publishing the drvdata and running the per-CPU init calls below.
	 */
	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	/* The OS lock must be cleared on the owning CPU itself. */
	if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	/* ID registers are read on the affine CPU too. */
	if (smp_call_function_single(drvdata->cpu,
				etm4_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	/* First probed ETMv4 registers the shared hotplug notifier. */
	if (!etm4_count++)
		register_hotcpu_notifier(&etm4_cpu_notifier);

	put_online_cpus();

	if (etm4_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm4_init_default_data(drvdata);

	pm_runtime_put(&adev->dev);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm4_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_coresight_register;
	}

	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

	/*
	 * err_arch_supported falls through on purpose: only that path still
	 * holds the runtime PM reference (the success-path pm_runtime_put()
	 * above has not run yet), while both paths must undo the notifier
	 * refcount.  NOTE(review): etmdrvdata[drvdata->cpu] keeps pointing
	 * at the devm-freed drvdata on failure — verify against the hotplug
	 * callback.
	 */
err_arch_supported:
	pm_runtime_put(&adev->dev);
err_coresight_register:
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);
	return ret;
}
2667
/*
 * etm4_remove - AMBA unbind: unregister from coresight and, when this is
 * the last ETMv4 instance, drop the shared CPU hotplug notifier.
 * NOTE(review): etmdrvdata[drvdata->cpu] is not cleared here, so a hotplug
 * event between unbind and notifier removal could see a stale pointer —
 * confirm whether that window is reachable.
 */
static int etm4_remove(struct amba_device *adev)
{
	struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);

	coresight_unregister(drvdata->csdev);
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);

	return 0;
}
2678
/* AMBA peripheral ID match table; .data is the name printed at probe time. */
static struct amba_id etm4_ids[] = {
	{       /* ETM 4.0 - Qualcomm */
		.id	= 0x0003b95d,
		.mask	= 0x0003ffff,
		.data	= "ETM 4.0",
	},
	{       /* ETM 4.0 - Juno board */
		.id	= 0x000bb95e,
		.mask	= 0x000fffff,
		.data	= "ETM 4.0",
	},
	{ 0, 0},
};
2692
/* AMBA (PrimeCell) driver glue; registered via module_amba_driver(). */
static struct amba_driver etm4x_driver = {
	.drv = {
		.name   = "coresight-etm4x",
	},
	.probe		= etm4_probe,
	.remove		= etm4_remove,
	.id_table	= etm4_ids,
};

module_amba_driver(etm4x_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
new file mode 100644
index 000000000000..e08e983dd2d9
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -0,0 +1,391 @@
1/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#ifndef _CORESIGHT_CORESIGHT_ETM_H
14#define _CORESIGHT_CORESIGHT_ETM_H
15
16#include <linux/spinlock.h>
17#include "coresight-priv.h"
18
19/*
20 * Device registers:
21 * 0x000 - 0x2FC: Trace registers
22 * 0x300 - 0x314: Management registers
23 * 0x318 - 0xEFC: Trace registers
24 * 0xF00: Management registers
25 * 0xFA0 - 0xFA4: Trace registers
26 * 0xFA8 - 0xFFC: Management registers
27 */
28/* Trace registers (0x000-0x2FC) */
29/* Main control and configuration registers */
30#define TRCPRGCTLR 0x004
31#define TRCPROCSELR 0x008
32#define TRCSTATR 0x00C
33#define TRCCONFIGR 0x010
34#define TRCAUXCTLR 0x018
35#define TRCEVENTCTL0R 0x020
36#define TRCEVENTCTL1R 0x024
37#define TRCSTALLCTLR 0x02C
38#define TRCTSCTLR 0x030
39#define TRCSYNCPR 0x034
40#define TRCCCCTLR 0x038
41#define TRCBBCTLR 0x03C
42#define TRCTRACEIDR 0x040
43#define TRCQCTLR 0x044
44/* Filtering control registers */
45#define TRCVICTLR 0x080
46#define TRCVIIECTLR 0x084
47#define TRCVISSCTLR 0x088
48#define TRCVIPCSSCTLR 0x08C
49#define TRCVDCTLR 0x0A0
50#define TRCVDSACCTLR 0x0A4
51#define TRCVDARCCTLR 0x0A8
52/* Derived resources registers */
53#define TRCSEQEVRn(n) (0x100 + (n * 4))
54#define TRCSEQRSTEVR 0x118
55#define TRCSEQSTR 0x11C
56#define TRCEXTINSELR 0x120
57#define TRCCNTRLDVRn(n) (0x140 + (n * 4))
58#define TRCCNTCTLRn(n) (0x150 + (n * 4))
59#define TRCCNTVRn(n) (0x160 + (n * 4))
60/* ID registers */
61#define TRCIDR8 0x180
62#define TRCIDR9 0x184
63#define TRCIDR10 0x188
64#define TRCIDR11 0x18C
65#define TRCIDR12 0x190
66#define TRCIDR13 0x194
67#define TRCIMSPEC0 0x1C0
68#define TRCIMSPECn(n) (0x1C0 + (n * 4))
69#define TRCIDR0 0x1E0
70#define TRCIDR1 0x1E4
71#define TRCIDR2 0x1E8
72#define TRCIDR3 0x1EC
73#define TRCIDR4 0x1F0
74#define TRCIDR5 0x1F4
75#define TRCIDR6 0x1F8
76#define TRCIDR7 0x1FC
77/* Resource selection registers */
78#define TRCRSCTLRn(n) (0x200 + (n * 4))
79/* Single-shot comparator registers */
80#define TRCSSCCRn(n) (0x280 + (n * 4))
81#define TRCSSCSRn(n) (0x2A0 + (n * 4))
82#define TRCSSPCICRn(n) (0x2C0 + (n * 4))
83/* Management registers (0x300-0x314) */
84#define TRCOSLAR 0x300
85#define TRCOSLSR 0x304
86#define TRCPDCR 0x310
87#define TRCPDSR 0x314
88/* Trace registers (0x318-0xEFC) */
89/* Comparator registers */
90#define TRCACVRn(n) (0x400 + (n * 8))
91#define TRCACATRn(n) (0x480 + (n * 8))
92#define TRCDVCVRn(n) (0x500 + (n * 16))
93#define TRCDVCMRn(n) (0x580 + (n * 16))
94#define TRCCIDCVRn(n) (0x600 + (n * 8))
95#define TRCVMIDCVRn(n) (0x640 + (n * 8))
96#define TRCCIDCCTLR0 0x680
97#define TRCCIDCCTLR1 0x684
98#define TRCVMIDCCTLR0 0x688
99#define TRCVMIDCCTLR1 0x68C
100/* Management register (0xF00) */
101/* Integration control registers */
102#define TRCITCTRL 0xF00
103/* Trace registers (0xFA0-0xFA4) */
104/* Claim tag registers */
105#define TRCCLAIMSET 0xFA0
106#define TRCCLAIMCLR 0xFA4
107/* Management registers (0xFA8-0xFFC) */
108#define TRCDEVAFF0 0xFA8
109#define TRCDEVAFF1 0xFAC
110#define TRCLAR 0xFB0
111#define TRCLSR 0xFB4
112#define TRCAUTHSTATUS 0xFB8
113#define TRCDEVARCH 0xFBC
114#define TRCDEVID 0xFC8
115#define TRCDEVTYPE 0xFCC
116#define TRCPIDR4 0xFD0
117#define TRCPIDR5 0xFD4
118#define TRCPIDR6 0xFD8
119#define TRCPIDR7 0xFDC
120#define TRCPIDR0 0xFE0
121#define TRCPIDR1 0xFE4
122#define TRCPIDR2 0xFE8
123#define TRCPIDR3 0xFEC
124#define TRCCIDR0 0xFF0
125#define TRCCIDR1 0xFF4
126#define TRCCIDR2 0xFF8
127#define TRCCIDR3 0xFFC
128
/* ETMv4 resources */
/*
 * Upper bounds for each resource type — presumably the architectural
 * maxima used to size the per-instance arrays; confirm against the
 * ETMv4 specification.
 */
#define ETM_MAX_NR_PE			8
#define ETMv4_MAX_CNTR			4
#define ETM_MAX_SEQ_STATES		4
#define ETM_MAX_EXT_INP_SEL		4
#define ETM_MAX_EXT_INP			256
#define ETM_MAX_EXT_OUT			4
#define ETM_MAX_SINGLE_ADDR_CMP		16
#define ETM_MAX_ADDR_RANGE_CMP		(ETM_MAX_SINGLE_ADDR_CMP / 2)
#define ETM_MAX_DATA_VAL_CMP		8
#define ETMv4_MAX_CTXID_CMP		8
#define ETM_MAX_VMID_CMP		8
#define ETM_MAX_PE_CMP			8
#define ETM_MAX_RES_SEL			16
#define ETM_MAX_SS_CMP			8

#define ETM_ARCH_V4			0x40
#define ETMv4_SYNC_MASK			0x1F
#define ETM_CYC_THRESHOLD_MASK		0xFFF
#define ETMv4_EVENT_MASK		0xFF
#define ETM_CNTR_MAX_VAL		0xFFFF
#define ETM_TRACEID_MASK		0x3f
151
/* ETMv4 programming modes */
/* Bit positions of the driver's "mode" word (see etm4_init_default_data). */
#define ETM_MODE_EXCLUDE		BIT(0)
#define ETM_MODE_LOAD			BIT(1)
#define ETM_MODE_STORE			BIT(2)
#define ETM_MODE_LOAD_STORE		BIT(3)
#define ETM_MODE_BB			BIT(4)
#define ETMv4_MODE_CYCACC		BIT(5)
#define ETMv4_MODE_CTXID		BIT(6)
#define ETM_MODE_VMID			BIT(7)
#define ETM_MODE_COND(val)		BMVAL(val, 8, 10)
#define ETMv4_MODE_TIMESTAMP		BIT(11)
#define ETM_MODE_RETURNSTACK		BIT(12)
#define ETM_MODE_QELEM(val)		BMVAL(val, 13, 14)
#define ETM_MODE_DATA_TRACE_ADDR	BIT(15)
#define ETM_MODE_DATA_TRACE_VAL		BIT(16)
#define ETM_MODE_ISTALL			BIT(17)
#define ETM_MODE_DSTALL			BIT(18)
#define ETM_MODE_ATB_TRIGGER		BIT(19)
#define ETM_MODE_LPOVERRIDE		BIT(20)
#define ETM_MODE_ISTALL_EN		BIT(21)
#define ETM_MODE_DSTALL_EN		BIT(22)
#define ETM_MODE_INSTPRIO		BIT(23)
#define ETM_MODE_NOOVERFLOW		BIT(24)
#define ETM_MODE_TRACE_RESET		BIT(25)
#define ETM_MODE_TRACE_ERR		BIT(26)
#define ETM_MODE_VIEWINST_STARTSTOP	BIT(27)
/* All mode bits, bits[27:0], set. */
#define ETMv4_MODE_ALL			0xFFFFFFF

/* Bit position of the IDLE flag in TRCSTATR. */
#define TRCSTATR_IDLE_BIT		0
181
/**
 * struct etmv4_drvdata - specifics associated to an ETM component
 * @base:       Memory mapped base address for this component.
 * @dev:        The device entity associated to this component.
 * @csdev:      Component vitals needed by the framework.
 * @spinlock:   Only one at a time pls.
 * @cpu:        The cpu this component is affined to.
 * @arch:       ETM version number.
 * @enable:     Is this ETM currently tracing.
 * @sticky_enable: true if ETM base configuration has been done.
 * @boot_enable:True if we should start tracing at boot time.
 * @os_unlock:  True if access to management registers is allowed.
 * @nr_pe:      The number of processing entity available for tracing.
 * @nr_pe_cmp:  The number of processing entity comparator inputs that are
 *              available for tracing.
 * @nr_addr_cmp:Number of pairs of address comparators available
 *              as found in ETMIDR4 0-3.
 * @nr_cntr:    Number of counters as found in ETMIDR5 bit 28-30.
 * @nr_ext_inp: Number of external input.
 * @numcidc:    Number of contextID comparators.
 * @numvmidc:   Number of VMID comparators.
 * @nrseqstate: The number of sequencer states that are implemented.
 * @nr_event:   Indicates how many events the trace unit support.
 * @nr_resource:The number of resource selection pairs available for tracing.
 * @nr_ss_cmp:  Number of single-shot comparator controls that are available.
 * @mode:       Controls various modes supported by this ETM.
 * @trcid:      value of the current ID for this component.
 * @trcid_size: Indicates the trace ID width.
 * @instrp0:    Tracing of load and store instructions
 *              as P0 elements is supported.
 * @trccond:    If the trace unit supports conditional
 *              instruction tracing.
 * @retstack:   Indicates if the implementation supports a return stack.
 * @trc_error:  Whether a trace unit can trace a system
 *              error exception.
 * @atbtrig:    If the implementation can support ATB triggers
 * @lpoverride: If the implementation can support low-power state override.
 * @pe_sel:     Controls which PE to trace.
 * @cfg:        Controls the tracing options.
 * @eventctrl0: Controls the tracing of arbitrary events.
 * @eventctrl1: Controls the behavior of the events that @eventctrl0 selects.
 * @stallctl:   If functionality that prevents trace unit buffer overflows
 *              is available.
 * @sysstall:   Does the system support stall control of the PE?
 * @nooverflow: Indicate if overflow prevention is supported.
 * @stall_ctrl: Enables trace unit functionality that prevents trace
 *              unit buffer overflows.
 * @ts_size:    Global timestamp size field.
 * @ts_ctrl:    Controls the insertion of global timestamps in the
 *              trace streams.
 * @syncpr:     Indicates if an implementation has a fixed
 *              synchronization period.
 * @syncfreq:   Controls how often trace synchronization requests occur.
 * @trccci:     Indicates if the trace unit supports cycle counting
 *              for instruction.
 * @ccsize:     Indicates the size of the cycle counter in bits.
 * @ccitmin:    minimum value that can be programmed in
 *              the TRCCCCTLR register.
 * @ccctlr:     Sets the threshold value for cycle counting.
 * @trcbb:      Indicates if the trace unit supports branch broadcast tracing.
 * @bb_ctrl:    Branch broadcast control: selects which address-range
 *              comparators branch broadcasting applies to.
 * @q_support:  Q element support characteristics.
 * @vinst_ctrl: Controls instruction trace filtering.
 * @viiectlr:   Set or read, the address range comparators.
 * @vissctlr:   Set, or read, the single address comparators that control the
 *              ViewInst start-stop logic.
 * @vipcssctlr: Set, or read, which PE comparator inputs can control the
 *              ViewInst start-stop logic.
 * @seq_idx:    Sequencer index selector.
 * @seq_ctrl:   Control for the sequencer state transition control register.
 * @seq_rst:    Moves the sequencer to state 0 when a programmed event occurs.
 * @seq_state:  Set, or read the sequencer state.
 * @cntr_idx:   Counter index selector.
 * @cntrldvr:   Sets or returns the reload count value for a counter.
 * @cntr_ctrl:  Controls the operation of a counter.
 * @cntr_val:   Sets or returns the value for a counter.
 * @res_idx:    Resource index selector.
 * @res_ctrl:   Controls the selection of the resources in the trace unit.
 * @ss_ctrl:    Controls the corresponding single-shot comparator resource.
 * @ss_status:  The status of the corresponding single-shot comparator.
 * @ss_pe_cmp:  Selects the PE comparator inputs for Single-shot control.
 * @addr_idx:   Address comparator index selector.
 * @addr_val:   Value for address comparator.
 * @addr_acc:   Address comparator access type.
 * @addr_type:  Current status of the comparator register.
 * @ctxid_idx:  Context ID index selector.
 * @ctxid_size: Size of the context ID field to consider.
 * @ctxid_val:  Value of the context ID comparator.
 * @ctxid_mask0:Context ID comparator mask for comparator 0-3.
 * @ctxid_mask1:Context ID comparator mask for comparator 4-7.
 * @vmid_idx:   VM ID index selector.
 * @vmid_size:  Size of the VM ID comparator to consider.
 * @vmid_val:   Value of the VM ID comparator.
 * @vmid_mask0: VM ID comparator mask for comparator 0-3.
 * @vmid_mask1: VM ID comparator mask for comparator 4-7.
 * @s_ex_level: In secure state, indicates whether instruction tracing is
 *              supported for the corresponding Exception level.
 * @ns_ex_level:In non-secure state, indicates whether instruction tracing is
 *              supported for the corresponding Exception level.
 * @ext_inp:    External input selection.
 */
struct etmv4_drvdata {
	void __iomem			*base;
	struct device			*dev;
	struct coresight_device		*csdev;
	spinlock_t			spinlock;
	int				cpu;
	u8				arch;
	bool				enable;
	bool				sticky_enable;
	bool				boot_enable;
	bool				os_unlock;
	u8				nr_pe;
	u8				nr_pe_cmp;
	u8				nr_addr_cmp;
	u8				nr_cntr;
	u8				nr_ext_inp;
	u8				numcidc;
	u8				numvmidc;
	u8				nrseqstate;
	u8				nr_event;
	u8				nr_resource;
	u8				nr_ss_cmp;
	u32				mode;
	u8				trcid;
	u8				trcid_size;
	bool				instrp0;
	bool				trccond;
	bool				retstack;
	bool				trc_error;
	bool				atbtrig;
	bool				lpoverride;
	u32				pe_sel;
	u32				cfg;
	u32				eventctrl0;
	u32				eventctrl1;
	bool				stallctl;
	bool				sysstall;
	bool				nooverflow;
	u32				stall_ctrl;
	u8				ts_size;
	u32				ts_ctrl;
	bool				syncpr;
	u32				syncfreq;
	bool				trccci;
	u8				ccsize;
	u8				ccitmin;
	u32				ccctlr;
	bool				trcbb;
	u32				bb_ctrl;
	bool				q_support;
	u32				vinst_ctrl;
	u32				viiectlr;
	u32				vissctlr;
	u32				vipcssctlr;
	u8				seq_idx;
	u32				seq_ctrl[ETM_MAX_SEQ_STATES];
	u32				seq_rst;
	u32				seq_state;
	u8				cntr_idx;
	u32				cntrldvr[ETMv4_MAX_CNTR];
	u32				cntr_ctrl[ETMv4_MAX_CNTR];
	u32				cntr_val[ETMv4_MAX_CNTR];
	u8				res_idx;
	u32				res_ctrl[ETM_MAX_RES_SEL];
	u32				ss_ctrl[ETM_MAX_SS_CMP];
	u32				ss_status[ETM_MAX_SS_CMP];
	u32				ss_pe_cmp[ETM_MAX_SS_CMP];
	u8				addr_idx;
	u64				addr_val[ETM_MAX_SINGLE_ADDR_CMP];
	u64				addr_acc[ETM_MAX_SINGLE_ADDR_CMP];
	u8				addr_type[ETM_MAX_SINGLE_ADDR_CMP];
	u8				ctxid_idx;
	u8				ctxid_size;
	u64				ctxid_val[ETMv4_MAX_CTXID_CMP];
	u32				ctxid_mask0;
	u32				ctxid_mask1;
	u8				vmid_idx;
	u8				vmid_size;
	u64				vmid_val[ETM_MAX_VMID_CMP];
	u32				vmid_mask0;
	u32				vmid_mask1;
	u8				s_ex_level;
	u8				ns_ex_level;
	u32				ext_inp;
};
367
/*
 * Address comparator access types: the kind of access an address
 * comparator matches on.
 */
enum etm_addr_acctype {
	ETM_INSTR_ADDR		= 0,	/* instruction address */
	ETM_DATA_LOAD_ADDR	= 1,	/* data load address */
	ETM_DATA_STORE_ADDR	= 2,	/* data store address */
	ETM_DATA_LOAD_STORE_ADDR = 3,	/* data load or store address */
};
375
/*
 * Address comparator context types: which context comparison, if any,
 * is combined with the address match.
 */
enum etm_addr_ctxtype {
	ETM_CTX_NONE		= 0,	/* no context comparison */
	ETM_CTX_CTXID		= 1,	/* compare context ID */
	ETM_CTX_VMID		= 2,	/* compare VMID */
	ETM_CTX_CTXID_VMID	= 3,	/* compare both context ID and VMID */
};
383
/* Role currently assigned to an address comparator slot. */
enum etm_addr_type {
	ETM_ADDR_TYPE_NONE	= 0,	/* slot unused */
	ETM_ADDR_TYPE_SINGLE	= 1,	/* single address comparator */
	ETM_ADDR_TYPE_RANGE	= 2,	/* half of an address range pair */
	ETM_ADDR_TYPE_START	= 3,	/* start address for start-stop logic */
	ETM_ADDR_TYPE_STOP	= 4,	/* stop address for start-stop logic */
};
391#endif
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 3db36f70b666..2e36bde7fcb4 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -18,9 +18,10 @@
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/fs.h> 19#include <linux/fs.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/clk.h> 21#include <linux/pm_runtime.h>
22#include <linux/coresight.h> 22#include <linux/coresight.h>
23#include <linux/amba/bus.h> 23#include <linux/amba/bus.h>
24#include <linux/clk.h>
24 25
25#include "coresight-priv.h" 26#include "coresight-priv.h"
26 27
@@ -35,15 +36,15 @@
35 * struct funnel_drvdata - specifics associated to a funnel component 36 * struct funnel_drvdata - specifics associated to a funnel component
36 * @base: memory mapped base address for this component. 37 * @base: memory mapped base address for this component.
37 * @dev: the device entity associated to this component. 38 * @dev: the device entity associated to this component.
39 * @atclk: optional clock for the core parts of the funnel.
38 * @csdev: component vitals needed by the framework. 40 * @csdev: component vitals needed by the framework.
39 * @clk: the clock this component is associated to.
40 * @priority: port selection order. 41 * @priority: port selection order.
41 */ 42 */
42struct funnel_drvdata { 43struct funnel_drvdata {
43 void __iomem *base; 44 void __iomem *base;
44 struct device *dev; 45 struct device *dev;
46 struct clk *atclk;
45 struct coresight_device *csdev; 47 struct coresight_device *csdev;
46 struct clk *clk;
47 unsigned long priority; 48 unsigned long priority;
48}; 49};
49 50
@@ -67,12 +68,8 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
67 int outport) 68 int outport)
68{ 69{
69 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 70 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
70 int ret;
71
72 ret = clk_prepare_enable(drvdata->clk);
73 if (ret)
74 return ret;
75 71
72 pm_runtime_get_sync(drvdata->dev);
76 funnel_enable_hw(drvdata, inport); 73 funnel_enable_hw(drvdata, inport);
77 74
78 dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); 75 dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
@@ -98,8 +95,7 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
98 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 95 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
99 96
100 funnel_disable_hw(drvdata, inport); 97 funnel_disable_hw(drvdata, inport);
101 98 pm_runtime_put(drvdata->dev);
102 clk_disable_unprepare(drvdata->clk);
103 99
104 dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); 100 dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
105} 101}
@@ -153,16 +149,14 @@ static u32 get_funnel_ctrl_hw(struct funnel_drvdata *drvdata)
153static ssize_t funnel_ctrl_show(struct device *dev, 149static ssize_t funnel_ctrl_show(struct device *dev,
154 struct device_attribute *attr, char *buf) 150 struct device_attribute *attr, char *buf)
155{ 151{
156 int ret;
157 u32 val; 152 u32 val;
158 struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent); 153 struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
159 154
160 ret = clk_prepare_enable(drvdata->clk); 155 pm_runtime_get_sync(drvdata->dev);
161 if (ret)
162 return ret;
163 156
164 val = get_funnel_ctrl_hw(drvdata); 157 val = get_funnel_ctrl_hw(drvdata);
165 clk_disable_unprepare(drvdata->clk); 158
159 pm_runtime_put(drvdata->dev);
166 160
167 return sprintf(buf, "%#x\n", val); 161 return sprintf(buf, "%#x\n", val);
168} 162}
@@ -177,6 +171,7 @@ ATTRIBUTE_GROUPS(coresight_funnel);
177 171
178static int funnel_probe(struct amba_device *adev, const struct amba_id *id) 172static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
179{ 173{
174 int ret;
180 void __iomem *base; 175 void __iomem *base;
181 struct device *dev = &adev->dev; 176 struct device *dev = &adev->dev;
182 struct coresight_platform_data *pdata = NULL; 177 struct coresight_platform_data *pdata = NULL;
@@ -197,6 +192,12 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
197 return -ENOMEM; 192 return -ENOMEM;
198 193
199 drvdata->dev = &adev->dev; 194 drvdata->dev = &adev->dev;
195 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
196 if (!IS_ERR(drvdata->atclk)) {
197 ret = clk_prepare_enable(drvdata->atclk);
198 if (ret)
199 return ret;
200 }
200 dev_set_drvdata(dev, drvdata); 201 dev_set_drvdata(dev, drvdata);
201 202
202 /* Validity for the resource is already checked by the AMBA core */ 203 /* Validity for the resource is already checked by the AMBA core */
@@ -205,8 +206,7 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
205 return PTR_ERR(base); 206 return PTR_ERR(base);
206 207
207 drvdata->base = base; 208 drvdata->base = base;
208 209 pm_runtime_put(&adev->dev);
209 drvdata->clk = adev->pclk;
210 210
211 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); 211 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
212 if (!desc) 212 if (!desc)
@@ -234,6 +234,32 @@ static int funnel_remove(struct amba_device *adev)
234 return 0; 234 return 0;
235} 235}
236 236
237#ifdef CONFIG_PM
238static int funnel_runtime_suspend(struct device *dev)
239{
240 struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
241
242 if (drvdata && !IS_ERR(drvdata->atclk))
243 clk_disable_unprepare(drvdata->atclk);
244
245 return 0;
246}
247
248static int funnel_runtime_resume(struct device *dev)
249{
250 struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
251
252 if (drvdata && !IS_ERR(drvdata->atclk))
253 clk_prepare_enable(drvdata->atclk);
254
255 return 0;
256}
257#endif
258
259static const struct dev_pm_ops funnel_dev_pm_ops = {
260 SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL)
261};
262
237static struct amba_id funnel_ids[] = { 263static struct amba_id funnel_ids[] = {
238 { 264 {
239 .id = 0x0003b908, 265 .id = 0x0003b908,
@@ -246,6 +272,7 @@ static struct amba_driver funnel_driver = {
246 .drv = { 272 .drv = {
247 .name = "coresight-funnel", 273 .name = "coresight-funnel",
248 .owner = THIS_MODULE, 274 .owner = THIS_MODULE,
275 .pm = &funnel_dev_pm_ops,
249 }, 276 },
250 .probe = funnel_probe, 277 .probe = funnel_probe,
251 .remove = funnel_remove, 278 .remove = funnel_remove,
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
new file mode 100644
index 000000000000..584059e9e866
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/amba/bus.h>
15#include <linux/clk.h>
16#include <linux/coresight.h>
17#include <linux/device.h>
18#include <linux/module.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/io.h>
22#include <linux/kernel.h>
23#include <linux/of.h>
24#include <linux/pm_runtime.h>
25#include <linux/slab.h>
26
27#include "coresight-priv.h"
28
29#define REPLICATOR_IDFILTER0 0x000
30#define REPLICATOR_IDFILTER1 0x004
31
32/**
33 * struct replicator_state - specifics associated to a replicator component
34 * @base: memory mapped base address for this component.
35 * @dev: the device entity associated with this component
36 * @atclk: optional clock for the core parts of the replicator.
37 * @csdev: component vitals needed by the framework
38 */
39struct replicator_state {
40 void __iomem *base;
41 struct device *dev;
42 struct clk *atclk;
43 struct coresight_device *csdev;
44};
45
46static int replicator_enable(struct coresight_device *csdev, int inport,
47 int outport)
48{
49 struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
50
51 pm_runtime_get_sync(drvdata->dev);
52
53 CS_UNLOCK(drvdata->base);
54
55 /*
56 * Ensure that the other port is disabled
57 * 0x00 - passing through the replicator unimpeded
58 * 0xff - disable (or impede) the flow of ATB data
59 */
60 if (outport == 0) {
61 writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER0);
62 writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
63 } else {
64 writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER1);
65 writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
66 }
67
68 CS_LOCK(drvdata->base);
69
70 dev_info(drvdata->dev, "REPLICATOR enabled\n");
71 return 0;
72}
73
74static void replicator_disable(struct coresight_device *csdev, int inport,
75 int outport)
76{
77 struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
78
79 CS_UNLOCK(drvdata->base);
80
81 /* disable the flow of ATB data through port */
82 if (outport == 0)
83 writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
84 else
85 writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
86
87 CS_LOCK(drvdata->base);
88
89 pm_runtime_put(drvdata->dev);
90
91 dev_info(drvdata->dev, "REPLICATOR disabled\n");
92}
93
94static const struct coresight_ops_link replicator_link_ops = {
95 .enable = replicator_enable,
96 .disable = replicator_disable,
97};
98
99static const struct coresight_ops replicator_cs_ops = {
100 .link_ops = &replicator_link_ops,
101};
102
103static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
104{
105 int ret;
106 struct device *dev = &adev->dev;
107 struct resource *res = &adev->res;
108 struct coresight_platform_data *pdata = NULL;
109 struct replicator_state *drvdata;
110 struct coresight_desc *desc;
111 struct device_node *np = adev->dev.of_node;
112 void __iomem *base;
113
114 if (np) {
115 pdata = of_get_coresight_platform_data(dev, np);
116 if (IS_ERR(pdata))
117 return PTR_ERR(pdata);
118 adev->dev.platform_data = pdata;
119 }
120
121 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
122 if (!drvdata)
123 return -ENOMEM;
124
125 drvdata->dev = &adev->dev;
126 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
127 if (!IS_ERR(drvdata->atclk)) {
128 ret = clk_prepare_enable(drvdata->atclk);
129 if (ret)
130 return ret;
131 }
132
133 /* Validity for the resource is already checked by the AMBA core */
134 base = devm_ioremap_resource(dev, res);
135 if (IS_ERR(base))
136 return PTR_ERR(base);
137
138 drvdata->base = base;
139 dev_set_drvdata(dev, drvdata);
140 pm_runtime_put(&adev->dev);
141
142 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
143 if (!desc)
144 return -ENOMEM;
145
146 desc->type = CORESIGHT_DEV_TYPE_LINK;
147 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
148 desc->ops = &replicator_cs_ops;
149 desc->pdata = adev->dev.platform_data;
150 desc->dev = &adev->dev;
151 drvdata->csdev = coresight_register(desc);
152 if (IS_ERR(drvdata->csdev))
153 return PTR_ERR(drvdata->csdev);
154
155 dev_info(dev, "%s initialized\n", (char *)id->data);
156 return 0;
157}
158
159static int replicator_remove(struct amba_device *adev)
160{
161 struct replicator_state *drvdata = amba_get_drvdata(adev);
162
163 pm_runtime_disable(&adev->dev);
164 coresight_unregister(drvdata->csdev);
165 return 0;
166}
167
168#ifdef CONFIG_PM
169static int replicator_runtime_suspend(struct device *dev)
170{
171 struct replicator_state *drvdata = dev_get_drvdata(dev);
172
173 if (drvdata && !IS_ERR(drvdata->atclk))
174 clk_disable_unprepare(drvdata->atclk);
175
176 return 0;
177}
178
179static int replicator_runtime_resume(struct device *dev)
180{
181 struct replicator_state *drvdata = dev_get_drvdata(dev);
182
183 if (drvdata && !IS_ERR(drvdata->atclk))
184 clk_prepare_enable(drvdata->atclk);
185
186 return 0;
187}
188#endif
189
190static const struct dev_pm_ops replicator_dev_pm_ops = {
191 SET_RUNTIME_PM_OPS(replicator_runtime_suspend,
192 replicator_runtime_resume,
193 NULL)
194};
195
196static struct amba_id replicator_ids[] = {
197 {
198 .id = 0x0003b909,
199 .mask = 0x0003ffff,
200 .data = "REPLICATOR 1.0",
201 },
202 { 0, 0 },
203};
204
205static struct amba_driver replicator_driver = {
206 .drv = {
207 .name = "coresight-replicator-qcom",
208 .pm = &replicator_dev_pm_ops,
209 },
210 .probe = replicator_probe,
211 .remove = replicator_remove,
212 .id_table = replicator_ids,
213};
214
215module_amba_driver(replicator_driver);
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 75b9abd804e6..7974b7c3da6b 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -18,6 +18,7 @@
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/pm_runtime.h>
21#include <linux/clk.h> 22#include <linux/clk.h>
22#include <linux/of.h> 23#include <linux/of.h>
23#include <linux/coresight.h> 24#include <linux/coresight.h>
@@ -27,10 +28,12 @@
27/** 28/**
28 * struct replicator_drvdata - specifics associated to a replicator component 29 * struct replicator_drvdata - specifics associated to a replicator component
29 * @dev: the device entity associated with this component 30 * @dev: the device entity associated with this component
31 * @atclk: optional clock for the core parts of the replicator.
30 * @csdev: component vitals needed by the framework 32 * @csdev: component vitals needed by the framework
31 */ 33 */
32struct replicator_drvdata { 34struct replicator_drvdata {
33 struct device *dev; 35 struct device *dev;
36 struct clk *atclk;
34 struct coresight_device *csdev; 37 struct coresight_device *csdev;
35}; 38};
36 39
@@ -39,6 +42,7 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
39{ 42{
40 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 43 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
41 44
45 pm_runtime_get_sync(drvdata->dev);
42 dev_info(drvdata->dev, "REPLICATOR enabled\n"); 46 dev_info(drvdata->dev, "REPLICATOR enabled\n");
43 return 0; 47 return 0;
44} 48}
@@ -48,6 +52,7 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
48{ 52{
49 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 53 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
50 54
55 pm_runtime_put(drvdata->dev);
51 dev_info(drvdata->dev, "REPLICATOR disabled\n"); 56 dev_info(drvdata->dev, "REPLICATOR disabled\n");
52} 57}
53 58
@@ -62,6 +67,7 @@ static const struct coresight_ops replicator_cs_ops = {
62 67
63static int replicator_probe(struct platform_device *pdev) 68static int replicator_probe(struct platform_device *pdev)
64{ 69{
70 int ret;
65 struct device *dev = &pdev->dev; 71 struct device *dev = &pdev->dev;
66 struct coresight_platform_data *pdata = NULL; 72 struct coresight_platform_data *pdata = NULL;
67 struct replicator_drvdata *drvdata; 73 struct replicator_drvdata *drvdata;
@@ -80,11 +86,22 @@ static int replicator_probe(struct platform_device *pdev)
80 return -ENOMEM; 86 return -ENOMEM;
81 87
82 drvdata->dev = &pdev->dev; 88 drvdata->dev = &pdev->dev;
89 drvdata->atclk = devm_clk_get(&pdev->dev, "atclk"); /* optional */
90 if (!IS_ERR(drvdata->atclk)) {
91 ret = clk_prepare_enable(drvdata->atclk);
92 if (ret)
93 return ret;
94 }
95 pm_runtime_get_noresume(&pdev->dev);
96 pm_runtime_set_active(&pdev->dev);
97 pm_runtime_enable(&pdev->dev);
83 platform_set_drvdata(pdev, drvdata); 98 platform_set_drvdata(pdev, drvdata);
84 99
85 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); 100 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
86 if (!desc) 101 if (!desc) {
87 return -ENOMEM; 102 ret = -ENOMEM;
103 goto out_disable_pm;
104 }
88 105
89 desc->type = CORESIGHT_DEV_TYPE_LINK; 106 desc->type = CORESIGHT_DEV_TYPE_LINK;
90 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT; 107 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
@@ -92,11 +109,23 @@ static int replicator_probe(struct platform_device *pdev)
92 desc->pdata = pdev->dev.platform_data; 109 desc->pdata = pdev->dev.platform_data;
93 desc->dev = &pdev->dev; 110 desc->dev = &pdev->dev;
94 drvdata->csdev = coresight_register(desc); 111 drvdata->csdev = coresight_register(desc);
95 if (IS_ERR(drvdata->csdev)) 112 if (IS_ERR(drvdata->csdev)) {
96 return PTR_ERR(drvdata->csdev); 113 ret = PTR_ERR(drvdata->csdev);
114 goto out_disable_pm;
115 }
116
117 pm_runtime_put(&pdev->dev);
97 118
98 dev_info(dev, "REPLICATOR initialized\n"); 119 dev_info(dev, "REPLICATOR initialized\n");
99 return 0; 120 return 0;
121
122out_disable_pm:
123 if (!IS_ERR(drvdata->atclk))
124 clk_disable_unprepare(drvdata->atclk);
125 pm_runtime_put_noidle(&pdev->dev);
126 pm_runtime_disable(&pdev->dev);
127
128 return ret;
100} 129}
101 130
102static int replicator_remove(struct platform_device *pdev) 131static int replicator_remove(struct platform_device *pdev)
@@ -104,9 +133,42 @@ static int replicator_remove(struct platform_device *pdev)
104 struct replicator_drvdata *drvdata = platform_get_drvdata(pdev); 133 struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
105 134
106 coresight_unregister(drvdata->csdev); 135 coresight_unregister(drvdata->csdev);
136 pm_runtime_get_sync(&pdev->dev);
137 if (!IS_ERR(drvdata->atclk))
138 clk_disable_unprepare(drvdata->atclk);
139 pm_runtime_put_noidle(&pdev->dev);
140 pm_runtime_disable(&pdev->dev);
141
142 return 0;
143}
144
145#ifdef CONFIG_PM
146static int replicator_runtime_suspend(struct device *dev)
147{
148 struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
149
150 if (drvdata && !IS_ERR(drvdata->atclk))
151 clk_disable_unprepare(drvdata->atclk);
152
107 return 0; 153 return 0;
108} 154}
109 155
156static int replicator_runtime_resume(struct device *dev)
157{
158 struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
159
160 if (drvdata && !IS_ERR(drvdata->atclk))
161 clk_prepare_enable(drvdata->atclk);
162
163 return 0;
164}
165#endif
166
167static const struct dev_pm_ops replicator_dev_pm_ops = {
168 SET_RUNTIME_PM_OPS(replicator_runtime_suspend,
169 replicator_runtime_resume, NULL)
170};
171
110static const struct of_device_id replicator_match[] = { 172static const struct of_device_id replicator_match[] = {
111 {.compatible = "arm,coresight-replicator"}, 173 {.compatible = "arm,coresight-replicator"},
112 {} 174 {}
@@ -118,6 +180,7 @@ static struct platform_driver replicator_driver = {
118 .driver = { 180 .driver = {
119 .name = "coresight-replicator", 181 .name = "coresight-replicator",
120 .of_match_table = replicator_match, 182 .of_match_table = replicator_match,
183 .pm = &replicator_dev_pm_ops,
121 }, 184 },
122}; 185};
123 186
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 7147f3dd363c..a57c7ec1661f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -23,7 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <linux/clk.h> 26#include <linux/pm_runtime.h>
27#include <linux/of.h> 27#include <linux/of.h>
28#include <linux/coresight.h> 28#include <linux/coresight.h>
29#include <linux/amba/bus.h> 29#include <linux/amba/bus.h>
@@ -104,7 +104,6 @@ enum tmc_mem_intf_width {
104 * @dev: the device entity associated to this component. 104 * @dev: the device entity associated to this component.
105 * @csdev: component vitals needed by the framework. 105 * @csdev: component vitals needed by the framework.
106 * @miscdev: specifics to handle "/dev/xyz.tmc" entry. 106 * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
107 * @clk: the clock this component is associated to.
108 * @spinlock: only one at a time pls. 107 * @spinlock: only one at a time pls.
109 * @read_count: manages preparation of buffer for reading. 108 * @read_count: manages preparation of buffer for reading.
110 * @buf: area of memory where trace data get sent. 109 * @buf: area of memory where trace data get sent.
@@ -120,7 +119,6 @@ struct tmc_drvdata {
120 struct device *dev; 119 struct device *dev;
121 struct coresight_device *csdev; 120 struct coresight_device *csdev;
122 struct miscdevice miscdev; 121 struct miscdevice miscdev;
123 struct clk *clk;
124 spinlock_t spinlock; 122 spinlock_t spinlock;
125 int read_count; 123 int read_count;
126 bool reading; 124 bool reading;
@@ -242,17 +240,14 @@ static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
242 240
243static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) 241static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
244{ 242{
245 int ret;
246 unsigned long flags; 243 unsigned long flags;
247 244
248 ret = clk_prepare_enable(drvdata->clk); 245 pm_runtime_get_sync(drvdata->dev);
249 if (ret)
250 return ret;
251 246
252 spin_lock_irqsave(&drvdata->spinlock, flags); 247 spin_lock_irqsave(&drvdata->spinlock, flags);
253 if (drvdata->reading) { 248 if (drvdata->reading) {
254 spin_unlock_irqrestore(&drvdata->spinlock, flags); 249 spin_unlock_irqrestore(&drvdata->spinlock, flags);
255 clk_disable_unprepare(drvdata->clk); 250 pm_runtime_put(drvdata->dev);
256 return -EBUSY; 251 return -EBUSY;
257 } 252 }
258 253
@@ -386,7 +381,7 @@ out:
386 drvdata->enable = false; 381 drvdata->enable = false;
387 spin_unlock_irqrestore(&drvdata->spinlock, flags); 382 spin_unlock_irqrestore(&drvdata->spinlock, flags);
388 383
389 clk_disable_unprepare(drvdata->clk); 384 pm_runtime_put(drvdata->dev);
390 385
391 dev_info(drvdata->dev, "TMC disabled\n"); 386 dev_info(drvdata->dev, "TMC disabled\n");
392} 387}
@@ -568,17 +563,13 @@ static const struct file_operations tmc_fops = {
568static ssize_t status_show(struct device *dev, 563static ssize_t status_show(struct device *dev,
569 struct device_attribute *attr, char *buf) 564 struct device_attribute *attr, char *buf)
570{ 565{
571 int ret;
572 unsigned long flags; 566 unsigned long flags;
573 u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg; 567 u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
574 u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr; 568 u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
575 u32 devid; 569 u32 devid;
576 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); 570 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
577 571
578 ret = clk_prepare_enable(drvdata->clk); 572 pm_runtime_get_sync(drvdata->dev);
579 if (ret)
580 goto out;
581
582 spin_lock_irqsave(&drvdata->spinlock, flags); 573 spin_lock_irqsave(&drvdata->spinlock, flags);
583 CS_UNLOCK(drvdata->base); 574 CS_UNLOCK(drvdata->base);
584 575
@@ -596,8 +587,7 @@ static ssize_t status_show(struct device *dev,
596 587
597 CS_LOCK(drvdata->base); 588 CS_LOCK(drvdata->base);
598 spin_unlock_irqrestore(&drvdata->spinlock, flags); 589 spin_unlock_irqrestore(&drvdata->spinlock, flags);
599 590 pm_runtime_put(drvdata->dev);
600 clk_disable_unprepare(drvdata->clk);
601 591
602 return sprintf(buf, 592 return sprintf(buf,
603 "Depth:\t\t0x%x\n" 593 "Depth:\t\t0x%x\n"
@@ -613,7 +603,7 @@ static ssize_t status_show(struct device *dev,
613 "DEVID:\t\t0x%x\n", 603 "DEVID:\t\t0x%x\n",
614 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg, 604 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
615 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid); 605 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
616out: 606
617 return -EINVAL; 607 return -EINVAL;
618} 608}
619static DEVICE_ATTR_RO(status); 609static DEVICE_ATTR_RO(status);
@@ -700,11 +690,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
700 690
701 spin_lock_init(&drvdata->spinlock); 691 spin_lock_init(&drvdata->spinlock);
702 692
703 drvdata->clk = adev->pclk;
704 ret = clk_prepare_enable(drvdata->clk);
705 if (ret)
706 return ret;
707
708 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); 693 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
709 drvdata->config_type = BMVAL(devid, 6, 7); 694 drvdata->config_type = BMVAL(devid, 6, 7);
710 695
@@ -719,7 +704,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
719 drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4; 704 drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
720 } 705 }
721 706
722 clk_disable_unprepare(drvdata->clk); 707 pm_runtime_put(&adev->dev);
723 708
724 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { 709 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
725 drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size, 710 drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 3b33af2416bb..7214efd10db5 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -17,9 +17,10 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/clk.h> 20#include <linux/pm_runtime.h>
21#include <linux/coresight.h> 21#include <linux/coresight.h>
22#include <linux/amba/bus.h> 22#include <linux/amba/bus.h>
23#include <linux/clk.h>
23 24
24#include "coresight-priv.h" 25#include "coresight-priv.h"
25 26
@@ -50,14 +51,14 @@
50/** 51/**
51 * @base: memory mapped base address for this component. 52 * @base: memory mapped base address for this component.
52 * @dev: the device entity associated to this component. 53 * @dev: the device entity associated to this component.
54 * @atclk: optional clock for the core parts of the TPIU.
53 * @csdev: component vitals needed by the framework. 55 * @csdev: component vitals needed by the framework.
54 * @clk: the clock this component is associated to.
55 */ 56 */
56struct tpiu_drvdata { 57struct tpiu_drvdata {
57 void __iomem *base; 58 void __iomem *base;
58 struct device *dev; 59 struct device *dev;
60 struct clk *atclk;
59 struct coresight_device *csdev; 61 struct coresight_device *csdev;
60 struct clk *clk;
61}; 62};
62 63
63static void tpiu_enable_hw(struct tpiu_drvdata *drvdata) 64static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
@@ -72,12 +73,8 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
72static int tpiu_enable(struct coresight_device *csdev) 73static int tpiu_enable(struct coresight_device *csdev)
73{ 74{
74 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 75 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
75 int ret;
76
77 ret = clk_prepare_enable(drvdata->clk);
78 if (ret)
79 return ret;
80 76
77 pm_runtime_get_sync(csdev->dev.parent);
81 tpiu_enable_hw(drvdata); 78 tpiu_enable_hw(drvdata);
82 79
83 dev_info(drvdata->dev, "TPIU enabled\n"); 80 dev_info(drvdata->dev, "TPIU enabled\n");
@@ -101,8 +98,7 @@ static void tpiu_disable(struct coresight_device *csdev)
101 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 98 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
102 99
103 tpiu_disable_hw(drvdata); 100 tpiu_disable_hw(drvdata);
104 101 pm_runtime_put(csdev->dev.parent);
105 clk_disable_unprepare(drvdata->clk);
106 102
107 dev_info(drvdata->dev, "TPIU disabled\n"); 103 dev_info(drvdata->dev, "TPIU disabled\n");
108} 104}
@@ -139,6 +135,12 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
139 return -ENOMEM; 135 return -ENOMEM;
140 136
141 drvdata->dev = &adev->dev; 137 drvdata->dev = &adev->dev;
138 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
139 if (!IS_ERR(drvdata->atclk)) {
140 ret = clk_prepare_enable(drvdata->atclk);
141 if (ret)
142 return ret;
143 }
142 dev_set_drvdata(dev, drvdata); 144 dev_set_drvdata(dev, drvdata);
143 145
144 /* Validity for the resource is already checked by the AMBA core */ 146 /* Validity for the resource is already checked by the AMBA core */
@@ -148,15 +150,10 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
148 150
149 drvdata->base = base; 151 drvdata->base = base;
150 152
151 drvdata->clk = adev->pclk;
152 ret = clk_prepare_enable(drvdata->clk);
153 if (ret)
154 return ret;
155
156 /* Disable tpiu to support older devices */ 153 /* Disable tpiu to support older devices */
157 tpiu_disable_hw(drvdata); 154 tpiu_disable_hw(drvdata);
158 155
159 clk_disable_unprepare(drvdata->clk); 156 pm_runtime_put(&adev->dev);
160 157
161 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); 158 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
162 if (!desc) 159 if (!desc)
@@ -183,11 +180,41 @@ static int tpiu_remove(struct amba_device *adev)
183 return 0; 180 return 0;
184} 181}
185 182
183#ifdef CONFIG_PM
184static int tpiu_runtime_suspend(struct device *dev)
185{
186 struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
187
188 if (drvdata && !IS_ERR(drvdata->atclk))
189 clk_disable_unprepare(drvdata->atclk);
190
191 return 0;
192}
193
194static int tpiu_runtime_resume(struct device *dev)
195{
196 struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
197
198 if (drvdata && !IS_ERR(drvdata->atclk))
199 clk_prepare_enable(drvdata->atclk);
200
201 return 0;
202}
203#endif
204
205static const struct dev_pm_ops tpiu_dev_pm_ops = {
206 SET_RUNTIME_PM_OPS(tpiu_runtime_suspend, tpiu_runtime_resume, NULL)
207};
208
186static struct amba_id tpiu_ids[] = { 209static struct amba_id tpiu_ids[] = {
187 { 210 {
188 .id = 0x0003b912, 211 .id = 0x0003b912,
189 .mask = 0x0003ffff, 212 .mask = 0x0003ffff,
190 }, 213 },
214 {
215 .id = 0x0004b912,
216 .mask = 0x0007ffff,
217 },
191 { 0, 0}, 218 { 0, 0},
192}; 219};
193 220
@@ -195,6 +222,7 @@ static struct amba_driver tpiu_driver = {
195 .drv = { 222 .drv = {
196 .name = "coresight-tpiu", 223 .name = "coresight-tpiu",
197 .owner = THIS_MODULE, 224 .owner = THIS_MODULE,
225 .pm = &tpiu_dev_pm_ops,
198 }, 226 },
199 .probe = tpiu_probe, 227 .probe = tpiu_probe,
200 .remove = tpiu_remove, 228 .remove = tpiu_remove,
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 35e51ce93a5c..b0973617826f 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -37,7 +37,7 @@ of_coresight_get_endpoint_device(struct device_node *endpoint)
37 struct device *dev = NULL; 37 struct device *dev = NULL;
38 38
39 /* 39 /*
40 * If we have a non-configuable replicator, it will be found on the 40 * If we have a non-configurable replicator, it will be found on the
41 * platform bus. 41 * platform bus.
42 */ 42 */
43 dev = bus_find_device(&platform_bus_type, NULL, 43 dev = bus_find_device(&platform_bus_type, NULL,
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index a1fac5aa9bae..9b94c3db80ab 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -46,6 +46,9 @@ struct i2c_par {
46 46
47static LIST_HEAD(adapter_list); 47static LIST_HEAD(adapter_list);
48static DEFINE_MUTEX(adapter_list_lock); 48static DEFINE_MUTEX(adapter_list_lock);
49#define MAX_DEVICE 4
50static int parport[MAX_DEVICE] = {0, -1, -1, -1};
51
49 52
50/* ----- Low-level parallel port access ----------------------------------- */ 53/* ----- Low-level parallel port access ----------------------------------- */
51 54
@@ -163,17 +166,34 @@ static void i2c_parport_irq(void *data)
163static void i2c_parport_attach(struct parport *port) 166static void i2c_parport_attach(struct parport *port)
164{ 167{
165 struct i2c_par *adapter; 168 struct i2c_par *adapter;
169 int i;
170 struct pardev_cb i2c_parport_cb;
171
172 for (i = 0; i < MAX_DEVICE; i++) {
173 if (parport[i] == -1)
174 continue;
175 if (port->number == parport[i])
176 break;
177 }
178 if (i == MAX_DEVICE) {
179 pr_debug("i2c-parport: Not using parport%d.\n", port->number);
180 return;
181 }
166 182
167 adapter = kzalloc(sizeof(struct i2c_par), GFP_KERNEL); 183 adapter = kzalloc(sizeof(struct i2c_par), GFP_KERNEL);
168 if (adapter == NULL) { 184 if (adapter == NULL) {
169 printk(KERN_ERR "i2c-parport: Failed to kzalloc\n"); 185 printk(KERN_ERR "i2c-parport: Failed to kzalloc\n");
170 return; 186 return;
171 } 187 }
188 memset(&i2c_parport_cb, 0, sizeof(i2c_parport_cb));
189 i2c_parport_cb.flags = PARPORT_FLAG_EXCL;
190 i2c_parport_cb.irq_func = i2c_parport_irq;
191 i2c_parport_cb.private = adapter;
172 192
173 pr_debug("i2c-parport: attaching to %s\n", port->name); 193 pr_debug("i2c-parport: attaching to %s\n", port->name);
174 parport_disable_irq(port); 194 parport_disable_irq(port);
175 adapter->pdev = parport_register_device(port, "i2c-parport", 195 adapter->pdev = parport_register_dev_model(port, "i2c-parport",
176 NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter); 196 &i2c_parport_cb, i);
177 if (!adapter->pdev) { 197 if (!adapter->pdev) {
178 printk(KERN_ERR "i2c-parport: Unable to register with parport\n"); 198 printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
179 goto err_free; 199 goto err_free;
@@ -267,9 +287,10 @@ static void i2c_parport_detach(struct parport *port)
267} 287}
268 288
269static struct parport_driver i2c_parport_driver = { 289static struct parport_driver i2c_parport_driver = {
270 .name = "i2c-parport", 290 .name = "i2c-parport",
271 .attach = i2c_parport_attach, 291 .match_port = i2c_parport_attach,
272 .detach = i2c_parport_detach, 292 .detach = i2c_parport_detach,
293 .devmodel = true,
273}; 294};
274 295
275/* ----- Module loading, unloading and information ------------------------ */ 296/* ----- Module loading, unloading and information ------------------------ */
@@ -298,5 +319,12 @@ MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
298MODULE_DESCRIPTION("I2C bus over parallel port"); 319MODULE_DESCRIPTION("I2C bus over parallel port");
299MODULE_LICENSE("GPL"); 320MODULE_LICENSE("GPL");
300 321
322module_param_array(parport, int, NULL, 0);
323MODULE_PARM_DESC(parport,
324 "List of parallel ports to bind to, by index.\n"
325 " Atmost " __stringify(MAX_DEVICE) " devices are supported.\n"
326 " Default is one device connected to parport0.\n"
327);
328
301module_init(i2c_parport_init); 329module_init(i2c_parport_init);
302module_exit(i2c_parport_exit); 330module_exit(i2c_parport_exit);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 006242c8bca0..42c38525904b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -520,7 +520,6 @@ source "drivers/misc/eeprom/Kconfig"
520source "drivers/misc/cb710/Kconfig" 520source "drivers/misc/cb710/Kconfig"
521source "drivers/misc/ti-st/Kconfig" 521source "drivers/misc/ti-st/Kconfig"
522source "drivers/misc/lis3lv02d/Kconfig" 522source "drivers/misc/lis3lv02d/Kconfig"
523source "drivers/misc/carma/Kconfig"
524source "drivers/misc/altera-stapl/Kconfig" 523source "drivers/misc/altera-stapl/Kconfig"
525source "drivers/misc/mei/Kconfig" 524source "drivers/misc/mei/Kconfig"
526source "drivers/misc/vmw_vmci/Kconfig" 525source "drivers/misc/vmw_vmci/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7d5c4cd118c4..d056fb7186fe 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
44obj-$(CONFIG_PCH_PHUB) += pch_phub.o 44obj-$(CONFIG_PCH_PHUB) += pch_phub.o
45obj-y += ti-st/ 45obj-y += ti-st/
46obj-y += lis3lv02d/ 46obj-y += lis3lv02d/
47obj-y += carma/
48obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o 47obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
49obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ 48obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
50obj-$(CONFIG_INTEL_MEI) += mei/ 49obj-$(CONFIG_INTEL_MEI) += mei/
diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig
deleted file mode 100644
index 295882bfb14e..000000000000
--- a/drivers/misc/carma/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
1config CARMA_FPGA
2 tristate "CARMA DATA-FPGA Access Driver"
3 depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
4 default n
5 help
6 Say Y here to include support for communicating with the data
7 processing FPGAs on the OVRO CARMA board.
8
9config CARMA_FPGA_PROGRAM
10 tristate "CARMA DATA-FPGA Programmer"
11 depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
12 default n
13 help
14 Say Y here to include support for programming the data processing
15 FPGAs on the OVRO CARMA board.
diff --git a/drivers/misc/carma/Makefile b/drivers/misc/carma/Makefile
deleted file mode 100644
index ff36ac2ce534..000000000000
--- a/drivers/misc/carma/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1obj-$(CONFIG_CARMA_FPGA) += carma-fpga.o
2obj-$(CONFIG_CARMA_FPGA_PROGRAM) += carma-fpga-program.o
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
deleted file mode 100644
index 0b1bd85e4ae6..000000000000
--- a/drivers/misc/carma/carma-fpga-program.c
+++ /dev/null
@@ -1,1182 +0,0 @@
1/*
2 * CARMA Board DATA-FPGA Programmer
3 *
4 * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/dma-mapping.h>
13#include <linux/of_address.h>
14#include <linux/of_irq.h>
15#include <linux/of_platform.h>
16#include <linux/completion.h>
17#include <linux/miscdevice.h>
18#include <linux/dmaengine.h>
19#include <linux/fsldma.h>
20#include <linux/interrupt.h>
21#include <linux/highmem.h>
22#include <linux/vmalloc.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/mutex.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/leds.h>
29#include <linux/slab.h>
30#include <linux/kref.h>
31#include <linux/fs.h>
32#include <linux/io.h>
33
34/* MPC8349EMDS specific get_immrbase() */
35#include <sysdev/fsl_soc.h>
36
37static const char drv_name[] = "carma-fpga-program";
38
39/*
40 * Firmware images are always this exact size
41 *
42 * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs)
43 * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs)
44 */
45#define FW_SIZE_EP2S90 12849552
46#define FW_SIZE_EP2S130 18662880
47
48struct fpga_dev {
49 struct miscdevice miscdev;
50
51 /* Reference count */
52 struct kref ref;
53
54 /* Device Registers */
55 struct device *dev;
56 void __iomem *regs;
57 void __iomem *immr;
58
59 /* Freescale DMA Device */
60 struct dma_chan *chan;
61
62 /* Interrupts */
63 int irq, status;
64 struct completion completion;
65
66 /* FPGA Bitfile */
67 struct mutex lock;
68
69 void *vaddr;
70 struct scatterlist *sglist;
71 int sglen;
72 int nr_pages;
73 bool buf_allocated;
74
75 /* max size and written bytes */
76 size_t fw_size;
77 size_t bytes;
78};
79
80static int fpga_dma_init(struct fpga_dev *priv, int nr_pages)
81{
82 struct page *pg;
83 int i;
84
85 priv->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
86 if (NULL == priv->vaddr) {
87 pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
88 return -ENOMEM;
89 }
90
91 pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
92 (unsigned long)priv->vaddr,
93 nr_pages << PAGE_SHIFT);
94
95 memset(priv->vaddr, 0, nr_pages << PAGE_SHIFT);
96 priv->nr_pages = nr_pages;
97
98 priv->sglist = vzalloc(priv->nr_pages * sizeof(*priv->sglist));
99 if (NULL == priv->sglist)
100 goto vzalloc_err;
101
102 sg_init_table(priv->sglist, priv->nr_pages);
103 for (i = 0; i < priv->nr_pages; i++) {
104 pg = vmalloc_to_page(priv->vaddr + i * PAGE_SIZE);
105 if (NULL == pg)
106 goto vmalloc_to_page_err;
107 sg_set_page(&priv->sglist[i], pg, PAGE_SIZE, 0);
108 }
109 return 0;
110
111vmalloc_to_page_err:
112 vfree(priv->sglist);
113 priv->sglist = NULL;
114vzalloc_err:
115 vfree(priv->vaddr);
116 priv->vaddr = NULL;
117 return -ENOMEM;
118}
119
120static int fpga_dma_map(struct fpga_dev *priv)
121{
122 priv->sglen = dma_map_sg(priv->dev, priv->sglist,
123 priv->nr_pages, DMA_TO_DEVICE);
124
125 if (0 == priv->sglen) {
126 pr_warn("%s: dma_map_sg failed\n", __func__);
127 return -ENOMEM;
128 }
129 return 0;
130}
131
132static int fpga_dma_unmap(struct fpga_dev *priv)
133{
134 if (!priv->sglen)
135 return 0;
136
137 dma_unmap_sg(priv->dev, priv->sglist, priv->sglen, DMA_TO_DEVICE);
138 priv->sglen = 0;
139 return 0;
140}
141
142/*
143 * FPGA Bitfile Helpers
144 */
145
146/**
147 * fpga_drop_firmware_data() - drop the bitfile image from memory
148 * @priv: the driver's private data structure
149 *
150 * LOCKING: must hold priv->lock
151 */
152static void fpga_drop_firmware_data(struct fpga_dev *priv)
153{
154 vfree(priv->sglist);
155 vfree(priv->vaddr);
156 priv->buf_allocated = false;
157 priv->bytes = 0;
158}
159
160/*
161 * Private Data Reference Count
162 */
163
164static void fpga_dev_remove(struct kref *ref)
165{
166 struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref);
167
168 /* free any firmware image that was not programmed */
169 fpga_drop_firmware_data(priv);
170
171 mutex_destroy(&priv->lock);
172 kfree(priv);
173}
174
175/*
176 * LED Trigger (could be a seperate module)
177 */
178
179/*
180 * NOTE: this whole thing does have the problem that whenever the led's are
181 * NOTE: first set to use the fpga trigger, they could be in the wrong state
182 */
183
184DEFINE_LED_TRIGGER(ledtrig_fpga);
185
186static void ledtrig_fpga_programmed(bool enabled)
187{
188 if (enabled)
189 led_trigger_event(ledtrig_fpga, LED_FULL);
190 else
191 led_trigger_event(ledtrig_fpga, LED_OFF);
192}
193
194/*
195 * FPGA Register Helpers
196 */
197
198/* Register Definitions */
199#define FPGA_CONFIG_CONTROL 0x40
200#define FPGA_CONFIG_STATUS 0x44
201#define FPGA_CONFIG_FIFO_SIZE 0x48
202#define FPGA_CONFIG_FIFO_USED 0x4C
203#define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50
204#define FPGA_CONFIG_CUR_BYTE_COUNT 0x54
205
206#define FPGA_FIFO_ADDRESS 0x3000
207
208static int fpga_fifo_size(void __iomem *regs)
209{
210 return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE);
211}
212
213#define CFG_STATUS_ERR_MASK 0xfffe
214
215static int fpga_config_error(void __iomem *regs)
216{
217 return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK;
218}
219
220static int fpga_fifo_empty(void __iomem *regs)
221{
222 return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0;
223}
224
225static void fpga_fifo_write(void __iomem *regs, u32 val)
226{
227 iowrite32be(val, regs + FPGA_FIFO_ADDRESS);
228}
229
230static void fpga_set_byte_count(void __iomem *regs, u32 count)
231{
232 iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
233}
234
235#define CFG_CTL_ENABLE (1 << 0)
236#define CFG_CTL_RESET (1 << 1)
237#define CFG_CTL_DMA (1 << 2)
238
239static void fpga_programmer_enable(struct fpga_dev *priv, bool dma)
240{
241 u32 val;
242
243 val = (dma) ? (CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE;
244 iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
245}
246
247static void fpga_programmer_disable(struct fpga_dev *priv)
248{
249 iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
250}
251
252static void fpga_dump_registers(struct fpga_dev *priv)
253{
254 u32 control, status, size, used, total, curr;
255
256 /* good status: do nothing */
257 if (priv->status == 0)
258 return;
259
260 /* Dump all status registers */
261 control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL);
262 status = ioread32be(priv->regs + FPGA_CONFIG_STATUS);
263 size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE);
264 used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED);
265 total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
266 curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT);
267
268 dev_err(priv->dev, "Configuration failed, dumping status registers\n");
269 dev_err(priv->dev, "Control: 0x%.8x\n", control);
270 dev_err(priv->dev, "Status: 0x%.8x\n", status);
271 dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size);
272 dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used);
273 dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total);
274 dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr);
275}
276
277/*
278 * FPGA Power Supply Code
279 */
280
281#define CTL_PWR_CONTROL 0x2006
282#define CTL_PWR_STATUS 0x200A
283#define CTL_PWR_FAIL 0x200B
284
285#define PWR_CONTROL_ENABLE 0x01
286
287#define PWR_STATUS_ERROR_MASK 0x10
288#define PWR_STATUS_GOOD 0x0f
289
290/*
291 * Determine if the FPGA power is good for all supplies
292 */
293static bool fpga_power_good(struct fpga_dev *priv)
294{
295 u8 val;
296
297 val = ioread8(priv->regs + CTL_PWR_STATUS);
298 if (val & PWR_STATUS_ERROR_MASK)
299 return false;
300
301 return val == PWR_STATUS_GOOD;
302}
303
304/*
305 * Disable the FPGA power supplies
306 */
307static void fpga_disable_power_supplies(struct fpga_dev *priv)
308{
309 unsigned long start;
310 u8 val;
311
312 iowrite8(0x0, priv->regs + CTL_PWR_CONTROL);
313
314 /*
315 * Wait 500ms for the power rails to discharge
316 *
317 * Without this delay, the CTL-CPLD state machine can get into a
318 * state where it is waiting for the power-goods to assert, but they
319 * never do. This only happens when enabling and disabling the
320 * power sequencer very rapidly.
321 *
322 * The loop below will also wait for the power goods to de-assert,
323 * but testing has shown that they are always disabled by the time
324 * the sleep completes. However, omitting the sleep and only waiting
325 * for the power-goods to de-assert was not sufficient to ensure
326 * that the power sequencer would not wedge itself.
327 */
328 msleep(500);
329
330 start = jiffies;
331 while (time_before(jiffies, start + HZ)) {
332 val = ioread8(priv->regs + CTL_PWR_STATUS);
333 if (!(val & PWR_STATUS_GOOD))
334 break;
335
336 usleep_range(5000, 10000);
337 }
338
339 val = ioread8(priv->regs + CTL_PWR_STATUS);
340 if (val & PWR_STATUS_GOOD) {
341 dev_err(priv->dev, "power disable failed: "
342 "power goods: status 0x%.2x\n", val);
343 }
344
345 if (val & PWR_STATUS_ERROR_MASK) {
346 dev_err(priv->dev, "power disable failed: "
347 "alarm bit set: status 0x%.2x\n", val);
348 }
349}
350
351/**
352 * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies
353 * @priv: the driver's private data structure
354 *
355 * Enable the DATA-FPGA power supplies, waiting up to 1 second for
356 * them to enable successfully.
357 *
358 * Returns 0 on success, -ERRNO otherwise
359 */
360static int fpga_enable_power_supplies(struct fpga_dev *priv)
361{
362 unsigned long start = jiffies;
363
364 if (fpga_power_good(priv)) {
365 dev_dbg(priv->dev, "power was already good\n");
366 return 0;
367 }
368
369 iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL);
370 while (time_before(jiffies, start + HZ)) {
371 if (fpga_power_good(priv))
372 return 0;
373
374 usleep_range(5000, 10000);
375 }
376
377 return fpga_power_good(priv) ? 0 : -ETIMEDOUT;
378}
379
380/*
381 * Determine if the FPGA power supplies are all enabled
382 */
383static bool fpga_power_enabled(struct fpga_dev *priv)
384{
385 u8 val;
386
387 val = ioread8(priv->regs + CTL_PWR_CONTROL);
388 if (val & PWR_CONTROL_ENABLE)
389 return true;
390
391 return false;
392}
393
394/*
395 * Determine if the FPGA's are programmed and running correctly
396 */
397static bool fpga_running(struct fpga_dev *priv)
398{
399 if (!fpga_power_good(priv))
400 return false;
401
402 /* Check the config done bit */
403 return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18);
404}
405
406/*
407 * FPGA Programming Code
408 */
409
410/**
411 * fpga_program_block() - put a block of data into the programmer's FIFO
412 * @priv: the driver's private data structure
413 * @buf: the data to program
414 * @count: the length of data to program (must be a multiple of 4 bytes)
415 *
416 * Returns 0 on success, -ERRNO otherwise
417 */
418static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
419{
420 u32 *data = buf;
421 int size = fpga_fifo_size(priv->regs);
422 int i, len;
423 unsigned long timeout;
424
425 /* enforce correct data length for the FIFO */
426 BUG_ON(count % 4 != 0);
427
428 while (count > 0) {
429
430 /* Get the size of the block to write (maximum is FIFO_SIZE) */
431 len = min_t(size_t, count, size);
432 timeout = jiffies + HZ / 4;
433
434 /* Write the block */
435 for (i = 0; i < len / 4; i++)
436 fpga_fifo_write(priv->regs, data[i]);
437
438 /* Update the amounts left */
439 count -= len;
440 data += len / 4;
441
442 /* Wait for the fifo to empty */
443 while (true) {
444
445 if (fpga_fifo_empty(priv->regs)) {
446 break;
447 } else {
448 dev_dbg(priv->dev, "Fifo not empty\n");
449 cpu_relax();
450 }
451
452 if (fpga_config_error(priv->regs)) {
453 dev_err(priv->dev, "Error detected\n");
454 return -EIO;
455 }
456
457 if (time_after(jiffies, timeout)) {
458 dev_err(priv->dev, "Fifo drain timeout\n");
459 return -ETIMEDOUT;
460 }
461
462 usleep_range(5000, 10000);
463 }
464 }
465
466 return 0;
467}
468
469/**
470 * fpga_program_cpu() - program the DATA-FPGA's using the CPU
471 * @priv: the driver's private data structure
472 *
473 * This is useful when the DMA programming method fails. It is possible to
474 * wedge the Freescale DMA controller such that the DMA programming method
475 * always fails. This method has always succeeded.
476 *
477 * Returns 0 on success, -ERRNO otherwise
478 */
479static noinline int fpga_program_cpu(struct fpga_dev *priv)
480{
481 int ret;
482 unsigned long timeout;
483
484 /* Disable the programmer */
485 fpga_programmer_disable(priv);
486
487 /* Set the total byte count */
488 fpga_set_byte_count(priv->regs, priv->bytes);
489 dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
490
491 /* Enable the controller for programming */
492 fpga_programmer_enable(priv, false);
493 dev_dbg(priv->dev, "enabled the controller\n");
494
495 /* Write each chunk of the FPGA bitfile to FPGA programmer */
496 ret = fpga_program_block(priv, priv->vaddr, priv->bytes);
497 if (ret)
498 goto out_disable_controller;
499
500 /* Wait for the interrupt handler to signal that programming finished */
501 timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
502 if (!timeout) {
503 dev_err(priv->dev, "Timed out waiting for completion\n");
504 ret = -ETIMEDOUT;
505 goto out_disable_controller;
506 }
507
508 /* Retrieve the status from the interrupt handler */
509 ret = priv->status;
510
511out_disable_controller:
512 fpga_programmer_disable(priv);
513 return ret;
514}
515
516#define FIFO_DMA_ADDRESS 0xf0003000
517#define FIFO_MAX_LEN 4096
518
519/**
520 * fpga_program_dma() - program the DATA-FPGA's using the DMA engine
521 * @priv: the driver's private data structure
522 *
523 * Program the DATA-FPGA's using the Freescale DMA engine. This requires that
524 * the engine is programmed such that the hardware DMA request lines can
525 * control the entire DMA transaction. The system controller FPGA then
526 * completely offloads the programming from the CPU.
527 *
528 * Returns 0 on success, -ERRNO otherwise
529 */
530static noinline int fpga_program_dma(struct fpga_dev *priv)
531{
532 struct dma_chan *chan = priv->chan;
533 struct dma_async_tx_descriptor *tx;
534 size_t num_pages, len, avail = 0;
535 struct dma_slave_config config;
536 struct scatterlist *sg;
537 struct sg_table table;
538 dma_cookie_t cookie;
539 int ret, i;
540 unsigned long timeout;
541
542 /* Disable the programmer */
543 fpga_programmer_disable(priv);
544
545 /* Allocate a scatterlist for the DMA destination */
546 num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN);
547 ret = sg_alloc_table(&table, num_pages, GFP_KERNEL);
548 if (ret) {
549 dev_err(priv->dev, "Unable to allocate dst scatterlist\n");
550 ret = -ENOMEM;
551 goto out_return;
552 }
553
554 /*
555 * This is an ugly hack
556 *
557 * We fill in a scatterlist as if it were mapped for DMA. This is
558 * necessary because there exists no better structure for this
559 * inside the kernel code.
560 *
561 * As an added bonus, we can use the DMAEngine API for all of this,
562 * rather than inventing another extremely similar API.
563 */
564 avail = priv->bytes;
565 for_each_sg(table.sgl, sg, num_pages, i) {
566 len = min_t(size_t, avail, FIFO_MAX_LEN);
567 sg_dma_address(sg) = FIFO_DMA_ADDRESS;
568 sg_dma_len(sg) = len;
569
570 avail -= len;
571 }
572
573 /* Map the buffer for DMA */
574 ret = fpga_dma_map(priv);
575 if (ret) {
576 dev_err(priv->dev, "Unable to map buffer for DMA\n");
577 goto out_free_table;
578 }
579
580 /*
581 * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per
582 * transaction, and then put it under external control
583 */
584 memset(&config, 0, sizeof(config));
585 config.direction = DMA_MEM_TO_DEV;
586 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
587 config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
588 ret = dmaengine_slave_config(chan, &config);
589 if (ret) {
590 dev_err(priv->dev, "DMA slave configuration failed\n");
591 goto out_dma_unmap;
592 }
593
594 ret = fsl_dma_external_start(chan, 1);
595 if (ret) {
596 dev_err(priv->dev, "DMA external control setup failed\n");
597 goto out_dma_unmap;
598 }
599
600 /* setup and submit the DMA transaction */
601
602 tx = dmaengine_prep_dma_sg(chan, table.sgl, num_pages,
603 priv->sglist, priv->sglen, 0);
604 if (!tx) {
605 dev_err(priv->dev, "Unable to prep DMA transaction\n");
606 ret = -ENOMEM;
607 goto out_dma_unmap;
608 }
609
610 cookie = tx->tx_submit(tx);
611 if (dma_submit_error(cookie)) {
612 dev_err(priv->dev, "Unable to submit DMA transaction\n");
613 ret = -ENOMEM;
614 goto out_dma_unmap;
615 }
616
617 dma_async_issue_pending(chan);
618
619 /* Set the total byte count */
620 fpga_set_byte_count(priv->regs, priv->bytes);
621 dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
622
623 /* Enable the controller for DMA programming */
624 fpga_programmer_enable(priv, true);
625 dev_dbg(priv->dev, "enabled the controller\n");
626
627 /* Wait for the interrupt handler to signal that programming finished */
628 timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
629 if (!timeout) {
630 dev_err(priv->dev, "Timed out waiting for completion\n");
631 ret = -ETIMEDOUT;
632 goto out_disable_controller;
633 }
634
635 /* Retrieve the status from the interrupt handler */
636 ret = priv->status;
637
638out_disable_controller:
639 fpga_programmer_disable(priv);
640out_dma_unmap:
641 fpga_dma_unmap(priv);
642out_free_table:
643 sg_free_table(&table);
644out_return:
645 return ret;
646}
647
648/*
649 * Interrupt Handling
650 */
651
652static irqreturn_t fpga_irq(int irq, void *dev_id)
653{
654 struct fpga_dev *priv = dev_id;
655
656 /* Save the status */
657 priv->status = fpga_config_error(priv->regs) ? -EIO : 0;
658 dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status);
659 fpga_dump_registers(priv);
660
661 /* Disabling the programmer clears the interrupt */
662 fpga_programmer_disable(priv);
663
664 /* Notify any waiters */
665 complete(&priv->completion);
666
667 return IRQ_HANDLED;
668}
669
670/*
671 * SYSFS Helpers
672 */
673
674/**
675 * fpga_do_stop() - deconfigure (reset) the DATA-FPGA's
676 * @priv: the driver's private data structure
677 *
678 * LOCKING: must hold priv->lock
679 */
680static int fpga_do_stop(struct fpga_dev *priv)
681{
682 u32 val;
683
684 /* Set the led to unprogrammed */
685 ledtrig_fpga_programmed(false);
686
687 /* Pulse the config line to reset the FPGA's */
688 val = CFG_CTL_ENABLE | CFG_CTL_RESET;
689 iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
690 iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
691
692 return 0;
693}
694
695static noinline int fpga_do_program(struct fpga_dev *priv)
696{
697 int ret;
698
699 if (priv->bytes != priv->fw_size) {
700 dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, "
701 "should be %zu bytes\n",
702 priv->bytes, priv->fw_size);
703 return -EINVAL;
704 }
705
706 if (!fpga_power_enabled(priv)) {
707 dev_err(priv->dev, "Power not enabled\n");
708 return -EINVAL;
709 }
710
711 if (!fpga_power_good(priv)) {
712 dev_err(priv->dev, "Power not good\n");
713 return -EINVAL;
714 }
715
716 /* Set the LED to unprogrammed */
717 ledtrig_fpga_programmed(false);
718
719 /* Try to program the FPGA's using DMA */
720 ret = fpga_program_dma(priv);
721
722 /* If DMA failed or doesn't exist, try with CPU */
723 if (ret) {
724 dev_warn(priv->dev, "Falling back to CPU programming\n");
725 ret = fpga_program_cpu(priv);
726 }
727
728 if (ret) {
729 dev_err(priv->dev, "Unable to program FPGA's\n");
730 return ret;
731 }
732
733 /* Drop the firmware bitfile from memory */
734 fpga_drop_firmware_data(priv);
735
736 dev_dbg(priv->dev, "FPGA programming successful\n");
737 ledtrig_fpga_programmed(true);
738
739 return 0;
740}
741
742/*
743 * File Operations
744 */
745
/**
 * fpga_open() - open the FPGA programmer character device
 * @inode: the inode being opened
 * @filp: the file structure for this open
 *
 * Takes priv->lock and intentionally keeps it held for the entire
 * open..release window, so only one process may use the device at a
 * time (fpga_release() drops it). The bitfile staging buffer is
 * allocated on first open and kept across subsequent opens.
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int fpga_open(struct inode *inode, struct file *filp)
{
	/*
	 * The miscdevice layer puts our struct miscdevice into the
	 * filp->private_data field. We use this to find our private
	 * data and then overwrite it with our own private structure.
	 */
	struct fpga_dev *priv = container_of(filp->private_data,
					     struct fpga_dev, miscdev);
	unsigned int nr_pages;
	int ret;

	/* We only allow one process at a time */
	ret = mutex_lock_interruptible(&priv->lock);
	if (ret)
		return ret;

	filp->private_data = priv;
	kref_get(&priv->ref);

	/* Truncation: drop any existing data */
	if (filp->f_flags & O_TRUNC)
		priv->bytes = 0;

	/* Check if we have already allocated a buffer */
	if (priv->buf_allocated)
		return 0;	/* lock intentionally stays held until release */

	/* Allocate a buffer to hold enough data for the bitfile */
	nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
	ret = fpga_dma_init(priv, nr_pages);
	if (ret) {
		dev_err(priv->dev, "unable to allocate data buffer\n");
		mutex_unlock(&priv->lock);
		kref_put(&priv->ref, fpga_dev_remove);
		return ret;
	}

	priv->buf_allocated = true;
	return 0;
}
787
/**
 * fpga_release() - release the FPGA programmer character device
 * @inode: the inode being released
 * @filp: the file structure being released
 *
 * Drops the mutex taken in fpga_open() and puts the reference acquired
 * there (which may free the private data via fpga_dev_remove).
 */
static int fpga_release(struct inode *inode, struct file *filp)
{
	struct fpga_dev *priv = filp->private_data;

	mutex_unlock(&priv->lock);
	kref_put(&priv->ref, fpga_dev_remove);
	return 0;
}
796
797static ssize_t fpga_write(struct file *filp, const char __user *buf,
798 size_t count, loff_t *f_pos)
799{
800 struct fpga_dev *priv = filp->private_data;
801
802 /* FPGA bitfiles have an exact size: disallow anything else */
803 if (priv->bytes >= priv->fw_size)
804 return -ENOSPC;
805
806 count = min_t(size_t, priv->fw_size - priv->bytes, count);
807 if (copy_from_user(priv->vaddr + priv->bytes, buf, count))
808 return -EFAULT;
809
810 priv->bytes += count;
811 return count;
812}
813
814static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
815 loff_t *f_pos)
816{
817 struct fpga_dev *priv = filp->private_data;
818 return simple_read_from_buffer(buf, count, f_pos,
819 priv->vaddr, priv->bytes);
820}
821
/**
 * fpga_llseek() - seek within the staged bitfile
 * @filp: the file being seeked
 * @offset: the requested offset
 * @origin: SEEK_SET/SEEK_CUR/SEEK_END
 *
 * Seeking is limited to read-only opens; the file size is fixed at the
 * expected firmware image size.
 */
static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
{
	struct fpga_dev *priv = filp->private_data;

	/* only read-only opens are allowed to seek */
	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		return -EINVAL;

	/* clamp seeks to the fixed firmware image size */
	return fixed_size_llseek(filp, offset, origin, priv->fw_size);
}
832
/* Character device operations; see fpga_open() for the locking scheme */
static const struct file_operations fpga_fops = {
	.open = fpga_open,
	.release = fpga_release,
	.write = fpga_write,
	.read = fpga_read,
	.llseek = fpga_llseek,
};
840
841/*
842 * Device Attributes
843 */
844
/* power_fail (RO): raw power-fail status register, printed in hex */
static ssize_t pfail_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct fpga_dev *priv = dev_get_drvdata(dev);
	u8 val;

	val = ioread8(priv->regs + CTL_PWR_FAIL);
	return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val);
}
854
/* power_good (RO): boolean result of fpga_power_good() */
static ssize_t pgood_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct fpga_dev *priv = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv));
}
861
/* power_enable (RO side): boolean result of fpga_power_enabled() */
static ssize_t penable_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct fpga_dev *priv = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv));
}
868
869static ssize_t penable_store(struct device *dev, struct device_attribute *attr,
870 const char *buf, size_t count)
871{
872 struct fpga_dev *priv = dev_get_drvdata(dev);
873 unsigned long val;
874 int ret;
875
876 ret = kstrtoul(buf, 0, &val);
877 if (ret)
878 return ret;
879
880 if (val) {
881 ret = fpga_enable_power_supplies(priv);
882 if (ret)
883 return ret;
884 } else {
885 fpga_do_stop(priv);
886 fpga_disable_power_supplies(priv);
887 }
888
889 return count;
890}
891
/* program (RO side): boolean result of fpga_running() */
static ssize_t program_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct fpga_dev *priv = dev_get_drvdata(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv));
}
898
899static ssize_t program_store(struct device *dev, struct device_attribute *attr,
900 const char *buf, size_t count)
901{
902 struct fpga_dev *priv = dev_get_drvdata(dev);
903 unsigned long val;
904 int ret;
905
906 ret = kstrtoul(buf, 0, &val);
907 if (ret)
908 return ret;
909
910 /* We can't have an image writer and be programming simultaneously */
911 if (mutex_lock_interruptible(&priv->lock))
912 return -ERESTARTSYS;
913
914 /* Program or Reset the FPGA's */
915 ret = val ? fpga_do_program(priv) : fpga_do_stop(priv);
916 if (ret)
917 goto out_unlock;
918
919 /* Success */
920 ret = count;
921
922out_unlock:
923 mutex_unlock(&priv->lock);
924 return ret;
925}
926
/* Read-only power supply status attributes */
static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL);
static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL);
/* Root-writable power supply control */
static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR,
		   penable_show, penable_store);

/* Root-writable programming trigger and running status */
static DEVICE_ATTR(program, S_IRUGO | S_IWUSR,
		   program_show, program_store);

static struct attribute *fpga_attributes[] = {
	&dev_attr_power_fail.attr,
	&dev_attr_power_good.attr,
	&dev_attr_power_enable.attr,
	&dev_attr_program.attr,
	NULL,
};

static const struct attribute_group fpga_attr_group = {
	.attrs = fpga_attributes,
};
946
947/*
948 * OpenFirmware Device Subsystem
949 */
950
951#define SYS_REG_VERSION 0x00
952#define SYS_REG_GEOGRAPHIC 0x10
953
954static bool dma_filter(struct dma_chan *chan, void *data)
955{
956 /*
957 * DMA Channel #0 is the only acceptable device
958 *
959 * This probably won't survive an unload/load cycle of the Freescale
960 * DMAEngine driver, but that won't be a problem
961 */
962 return chan->chan_id == 0 && chan->device->dev_id == 0;
963}
964
/**
 * fpga_of_remove() - tear down the FPGA programmer device
 * @op: the platform device being removed
 *
 * Teardown mirrors the tail of fpga_of_probe() in reverse: sysfs group,
 * miscdevice, IRQ, power supplies, register mappings, DMA channel, and
 * finally the reference on the private data (which may free it).
 */
static int fpga_of_remove(struct platform_device *op)
{
	struct fpga_dev *priv = platform_get_drvdata(op);
	struct device *this_device = priv->miscdev.this_device;

	sysfs_remove_group(&this_device->kobj, &fpga_attr_group);
	misc_deregister(&priv->miscdev);

	free_irq(priv->irq, priv);
	irq_dispose_mapping(priv->irq);

	/* make sure the power supplies are off */
	fpga_disable_power_supplies(priv);

	/* unmap registers */
	iounmap(priv->immr);
	iounmap(priv->regs);

	dma_release_channel(priv->chan);

	/* drop our reference to the private data structure */
	kref_put(&priv->ref, fpga_dev_remove);
	return 0;
}
989
990/* CTL-CPLD Version Register */
991#define CTL_CPLD_VERSION 0x2000
992
/**
 * fpga_of_probe() - set up the FPGA programmer device
 * @op: the platform device being probed
 *
 * Allocates the private data, acquires DMA channel #0, maps the device
 * registers and the IMMR block, validates the hardware configuration
 * (external DMA setup, CTL-CPLD version), wires up the IRQ, and
 * registers the miscdevice and its sysfs attribute group. Errors unwind
 * through the goto chain at the bottom, in reverse acquisition order.
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int fpga_of_probe(struct platform_device *op)
{
	struct device_node *of_node = op->dev.of_node;
	struct device *this_device;
	struct fpga_dev *priv;
	dma_cap_mask_t mask;
	u32 ver;
	int ret;

	/* Allocate private data */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&op->dev, "Unable to allocate private data\n");
		ret = -ENOMEM;
		goto out_return;
	}

	/* Setup the miscdevice */
	priv->miscdev.minor = MISC_DYNAMIC_MINOR;
	priv->miscdev.name = drv_name;
	priv->miscdev.fops = &fpga_fops;

	kref_init(&priv->ref);

	platform_set_drvdata(op, priv);
	priv->dev = &op->dev;
	mutex_init(&priv->lock);
	init_completion(&priv->completion);

	dev_set_drvdata(priv->dev, priv);
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_SG, mask);

	/* Get control of DMA channel #0 */
	priv->chan = dma_request_channel(mask, dma_filter, NULL);
	if (!priv->chan) {
		dev_err(&op->dev, "Unable to acquire DMA channel #0\n");
		ret = -ENODEV;
		goto out_free_priv;
	}

	/* Remap the registers for use */
	priv->regs = of_iomap(of_node, 0);
	if (!priv->regs) {
		dev_err(&op->dev, "Unable to ioremap registers\n");
		ret = -ENOMEM;
		goto out_dma_release_channel;
	}

	/* Remap the IMMR for use */
	priv->immr = ioremap(get_immrbase(), 0x100000);
	if (!priv->immr) {
		dev_err(&op->dev, "Unable to ioremap IMMR\n");
		ret = -ENOMEM;
		goto out_unmap_regs;
	}

	/*
	 * Check that external DMA is configured
	 *
	 * U-Boot does this for us, but we should check it and bail out if
	 * there is a problem. Failing to have this register setup correctly
	 * will cause the DMA controller to transfer a single cacheline
	 * worth of data, then wedge itself.
	 */
	if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) {
		dev_err(&op->dev, "External DMA control not configured\n");
		ret = -ENODEV;
		goto out_unmap_immr;
	}

	/*
	 * Check the CTL-CPLD version
	 *
	 * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we
	 * don't want to run on any version of the CTL-CPLD that does not use
	 * a compatible register layout.
	 *
	 * v2: changed register layout, added power sequencer
	 * v3: added glitch filter on the i2c overcurrent/overtemp outputs
	 */
	ver = ioread8(priv->regs + CTL_CPLD_VERSION);
	if (ver != 0x02 && ver != 0x03) {
		dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n");
		ret = -ENODEV;
		goto out_unmap_immr;
	}

	/* Set the exact size that the firmware image should be */
	ver = ioread32be(priv->regs + SYS_REG_VERSION);
	priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90;

	/* Find the correct IRQ number */
	priv->irq = irq_of_parse_and_map(of_node, 0);
	if (priv->irq == NO_IRQ) {
		dev_err(&op->dev, "Unable to find IRQ line\n");
		ret = -ENODEV;
		goto out_unmap_immr;
	}

	/* Request the IRQ */
	ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv);
	if (ret) {
		dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq);
		ret = -ENODEV;
		goto out_irq_dispose_mapping;
	}

	/* Reset and stop the FPGA's, just in case */
	fpga_do_stop(priv);

	/* Register the miscdevice */
	ret = misc_register(&priv->miscdev);
	if (ret) {
		dev_err(&op->dev, "Unable to register miscdevice\n");
		goto out_free_irq;
	}

	/* Create the sysfs files */
	this_device = priv->miscdev.this_device;
	dev_set_drvdata(this_device, priv);
	ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group);
	if (ret) {
		dev_err(&op->dev, "Unable to create sysfs files\n");
		goto out_misc_deregister;
	}

	dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n",
		 (ver & (1 << 17)) ? "Correlator" : "Digitizer",
		 (ver & (1 << 16)) ? "B" : "A",
		 (ver & (1 << 18)) ? "EP2S130" : "EP2S90");

	return 0;

out_misc_deregister:
	misc_deregister(&priv->miscdev);
out_free_irq:
	free_irq(priv->irq, priv);
out_irq_dispose_mapping:
	irq_dispose_mapping(priv->irq);
out_unmap_immr:
	iounmap(priv->immr);
out_unmap_regs:
	iounmap(priv->regs);
out_dma_release_channel:
	dma_release_channel(priv->chan);
out_free_priv:
	kref_put(&priv->ref, fpga_dev_remove);
out_return:
	return ret;
}
1146
/* Match the CARMA FPGA programmer node in the device tree */
static const struct of_device_id fpga_of_match[] = {
	{ .compatible = "carma,fpga-programmer", },
	{},
};

static struct platform_driver fpga_of_driver = {
	.probe = fpga_of_probe,
	.remove = fpga_of_remove,
	.driver = {
		.name = drv_name,
		.of_match_table = fpga_of_match,
	},
};
1160
1161/*
1162 * Module Init / Exit
1163 */
1164
1165static int __init fpga_init(void)
1166{
1167 led_trigger_register_simple("fpga", &ledtrig_fpga);
1168 return platform_driver_register(&fpga_of_driver);
1169}
1170
/**
 * fpga_exit() - module exit point
 *
 * Unregisters the platform driver first (no more users of the LED
 * trigger), then the trigger registered in fpga_init().
 */
static void __exit fpga_exit(void)
{
	platform_driver_unregister(&fpga_of_driver);
	led_trigger_unregister_simple(ledtrig_fpga);
}
1176
1177MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
1178MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer");
1179MODULE_LICENSE("GPL");
1180
1181module_init(fpga_init);
1182module_exit(fpga_exit);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
deleted file mode 100644
index 5aba3fd789de..000000000000
--- a/drivers/misc/carma/carma-fpga.c
+++ /dev/null
@@ -1,1507 +0,0 @@
1/*
2 * CARMA DATA-FPGA Access Driver
3 *
4 * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/*
13 * FPGA Memory Dump Format
14 *
15 * FPGA #0 control registers (32 x 32-bit words)
16 * FPGA #1 control registers (32 x 32-bit words)
17 * FPGA #2 control registers (32 x 32-bit words)
18 * FPGA #3 control registers (32 x 32-bit words)
19 * SYSFPGA control registers (32 x 32-bit words)
20 * FPGA #0 correlation array (NUM_CORL0 correlation blocks)
21 * FPGA #1 correlation array (NUM_CORL1 correlation blocks)
22 * FPGA #2 correlation array (NUM_CORL2 correlation blocks)
23 * FPGA #3 correlation array (NUM_CORL3 correlation blocks)
24 *
25 * Each correlation array consists of:
26 *
27 * Correlation Data (2 x NUM_LAGSn x 32-bit words)
28 * Pipeline Metadata (2 x NUM_METAn x 32-bit words)
29 * Quantization Counters (2 x NUM_QCNTn x 32-bit words)
30 *
31 * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from
32 * the FPGA configuration registers. They do not change once the FPGA's
33 * have been programmed, they only change on re-programming.
34 */
35
36/*
37 * Basic Description:
38 *
39 * This driver is used to capture correlation spectra off of the four data
40 * processing FPGAs. The FPGAs are often reprogrammed at runtime, therefore
41 * this driver supports dynamic enable/disable of capture while the device
42 * remains open.
43 *
44 * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast
45 * capture rate, all buffers are pre-allocated to avoid any potentially long
46 * running memory allocations while capturing.
47 *
48 * There are two lists and one pointer which are used to keep track of the
49 * different states of data buffers.
50 *
51 * 1) free list
52 * This list holds all empty data buffers which are ready to receive data.
53 *
54 * 2) inflight pointer
55 * This pointer holds the currently inflight data buffer. This buffer is having
56 * data copied into it by the DMA engine.
57 *
58 * 3) used list
59 * This list holds data buffers which have been filled, and are waiting to be
60 * read by userspace.
61 *
62 * All buffers start life on the free list, then move successively to the
63 * inflight pointer, and then to the used list. After they have been read by
64 * userspace, they are moved back to the free list. The cycle repeats as long
65 * as necessary.
66 *
67 * It should be noted that all buffers are mapped and ready for DMA when they
68 * are on any of the three lists. They are only unmapped when they are in the
69 * process of being read by userspace.
70 */
71
72/*
73 * Notes on the IRQ masking scheme:
74 *
75 * The IRQ masking scheme here is different than most other hardware. The only
76 * way for the DATA-FPGAs to detect if the kernel has taken too long to copy
77 * the data is if the status registers are not cleared before the next
78 * correlation data dump is ready.
79 *
80 * The interrupt line is connected to the status registers, such that when they
81 * are cleared, the interrupt is de-asserted. Therein lies our problem. We need
82 * to schedule a long-running DMA operation and return from the interrupt
83 * handler quickly, but we cannot clear the status registers.
84 *
85 * To handle this, the system controller FPGA has the capability to connect the
86 * interrupt line to a user-controlled GPIO pin. This pin is driven high
87 * (unasserted) and left that way. To mask the interrupt, we change the
88 * interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
89 */
90
91#include <linux/of_address.h>
92#include <linux/of_irq.h>
93#include <linux/of_platform.h>
94#include <linux/dma-mapping.h>
95#include <linux/miscdevice.h>
96#include <linux/interrupt.h>
97#include <linux/dmaengine.h>
98#include <linux/seq_file.h>
99#include <linux/highmem.h>
100#include <linux/debugfs.h>
101#include <linux/vmalloc.h>
102#include <linux/kernel.h>
103#include <linux/module.h>
104#include <linux/poll.h>
105#include <linux/slab.h>
106#include <linux/kref.h>
107#include <linux/io.h>
108
109/* system controller registers */
110#define SYS_IRQ_SOURCE_CTL 0x24
111#define SYS_IRQ_OUTPUT_EN 0x28
112#define SYS_IRQ_OUTPUT_DATA 0x2C
113#define SYS_IRQ_INPUT_DATA 0x30
114#define SYS_FPGA_CONFIG_STATUS 0x44
115
116/* GPIO IRQ line assignment */
117#define IRQ_CORL_DONE 0x10
118
119/* FPGA registers */
120#define MMAP_REG_VERSION 0x00
121#define MMAP_REG_CORL_CONF1 0x08
122#define MMAP_REG_CORL_CONF2 0x0C
123#define MMAP_REG_STATUS 0x48
124
125#define SYS_FPGA_BLOCK 0xF0000000
126
127#define DATA_FPGA_START 0x400000
128#define DATA_FPGA_SIZE 0x80000
129
130static const char drv_name[] = "carma-fpga";
131
132#define NUM_FPGA 4
133
134#define MIN_DATA_BUFS 8
135#define MAX_DATA_BUFS 64
136
/* Per-FPGA correlation geometry read from the configuration registers */
struct fpga_info {
	unsigned int num_lag_ram;	/* number of LAG RAM blocks */
	unsigned int blk_size;		/* bytes per correlation block */
};

/* One correlation data buffer plus its DMA scatterlist */
struct data_buf {
	struct list_head entry;		/* membership in the free/used lists */
	void *vaddr;			/* vmalloc'd CPU mapping of the pages */
	struct scatterlist *sglist;	/* one entry per backing page */
	int sglen;			/* mapped entries; 0 when unmapped */
	int nr_pages;			/* number of backing pages */
	size_t size;			/* requested payload size, bytes */
};
150
/*
 * Driver state for one CARMA DATA-FPGA capture device. Everything
 * declared after the spinlock is protected by it (see the comment at
 * that member); the earlier members are set up at probe time.
 */
struct fpga_device {
	/* character device */
	struct miscdevice miscdev;
	struct device *dev;
	struct mutex mutex;

	/* reference count */
	struct kref ref;

	/* FPGA registers and information */
	struct fpga_info info[NUM_FPGA];
	void __iomem *regs;
	int irq;

	/* FPGA Physical Address/Size Information */
	resource_size_t phys_addr;
	size_t phys_size;

	/* DMA structures */
	struct sg_table corl_table;	/* reusable correlation-dump sg table */
	unsigned int corl_nents;
	struct dma_chan *chan;

	/* Protection for all members below */
	spinlock_t lock;

	/* Device enable/disable flag */
	bool enabled;

	/* Correlation data buffers (see the buffer life-cycle notes above) */
	wait_queue_head_t wait;
	struct list_head free;		/* empty buffers ready for DMA */
	struct list_head used;		/* filled buffers awaiting userspace */
	struct data_buf *inflight;	/* buffer currently being DMA-filled */

	/* Information about data buffers */
	unsigned int num_dropped;
	unsigned int num_buffers;
	size_t bufsize;
	struct dentry *dbg_entry;
};
192
/* Per-open reader state for the capture device */
struct fpga_reader {
	struct fpga_device *priv;
	struct data_buf *buf;	/* buffer currently held by this reader */
	off_t buf_start;	/* read cursor into @buf (managed by the read path) */
};
198
/**
 * fpga_device_release() - kref release callback for struct fpga_device
 * @ref: the embedded reference count
 *
 * Called when the last reference is dropped; destroys the mutex and
 * frees the structure.
 */
static void fpga_device_release(struct kref *ref)
{
	struct fpga_device *priv = container_of(ref, struct fpga_device, ref);

	/* the last reader has exited, cleanup the last bits */
	mutex_destroy(&priv->mutex);
	kfree(priv);
}
207
208/*
209 * Data Buffer Allocation Helpers
210 */
211
/**
 * carma_dma_init() - allocate a data buffer and build its page scatterlist
 * @buf: the data buffer to initialize
 * @nr_pages: the number of whole pages to allocate
 *
 * Allocates a zeroed buffer with vmalloc_32() (32-bit addressable;
 * presumably a DMA addressing limit of the hardware — confirm against
 * the DMA engine in use) and a scatterlist with one entry per page,
 * ready for carma_dma_map().
 *
 * Returns 0 on success, -ENOMEM otherwise
 */
static int carma_dma_init(struct data_buf *buf, int nr_pages)
{
	struct page *pg;
	int i;

	buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
	if (NULL == buf->vaddr) {
		pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
		return -ENOMEM;
	}

	pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
		 (unsigned long)buf->vaddr,
		 nr_pages << PAGE_SHIFT);

	memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
	buf->nr_pages = nr_pages;

	/* one scatterlist entry per backing page */
	buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
	if (NULL == buf->sglist)
		goto vzalloc_err;

	sg_init_table(buf->sglist, buf->nr_pages);
	for (i = 0; i < buf->nr_pages; i++) {
		pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
		if (NULL == pg)
			goto vmalloc_to_page_err;
		sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
	}
	return 0;

	/* unwind in reverse allocation order; NULL the pointers so a later
	 * data_free_buffer() does not double-free */
vmalloc_to_page_err:
	vfree(buf->sglist);
	buf->sglist = NULL;
vzalloc_err:
	vfree(buf->vaddr);
	buf->vaddr = NULL;
	return -ENOMEM;
}
251
252static int carma_dma_map(struct device *dev, struct data_buf *buf)
253{
254 buf->sglen = dma_map_sg(dev, buf->sglist,
255 buf->nr_pages, DMA_FROM_DEVICE);
256
257 if (0 == buf->sglen) {
258 pr_warn("%s: dma_map_sg failed\n", __func__);
259 return -ENOMEM;
260 }
261 return 0;
262}
263
264static int carma_dma_unmap(struct device *dev, struct data_buf *buf)
265{
266 if (!buf->sglen)
267 return 0;
268
269 dma_unmap_sg(dev, buf->sglist, buf->sglen, DMA_FROM_DEVICE);
270 buf->sglen = 0;
271 return 0;
272}
273
274/**
275 * data_free_buffer() - free a single data buffer and all allocated memory
276 * @buf: the buffer to free
277 *
278 * This will free all of the pages allocated to the given data buffer, and
279 * then free the structure itself
280 */
281static void data_free_buffer(struct data_buf *buf)
282{
283 /* It is ok to free a NULL buffer */
284 if (!buf)
285 return;
286
287 /* free all memory */
288 vfree(buf->sglist);
289 vfree(buf->vaddr);
290 kfree(buf);
291}
292
293/**
294 * data_alloc_buffer() - allocate and fill a data buffer with pages
295 * @bytes: the number of bytes required
296 *
297 * This allocates all space needed for a data buffer. It must be mapped before
298 * use in a DMA transaction using carma_dma_map().
299 *
300 * Returns NULL on failure
301 */
302static struct data_buf *data_alloc_buffer(const size_t bytes)
303{
304 unsigned int nr_pages;
305 struct data_buf *buf;
306 int ret;
307
308 /* calculate the number of pages necessary */
309 nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
310
311 /* allocate the buffer structure */
312 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
313 if (!buf)
314 goto out_return;
315
316 /* initialize internal fields */
317 INIT_LIST_HEAD(&buf->entry);
318 buf->size = bytes;
319
320 /* allocate the buffer */
321 ret = carma_dma_init(buf, nr_pages);
322 if (ret)
323 goto out_free_buf;
324
325 return buf;
326
327out_free_buf:
328 kfree(buf);
329out_return:
330 return NULL;
331}
332
333/**
334 * data_free_buffers() - free all allocated buffers
335 * @priv: the driver's private data structure
336 *
337 * Free all buffers allocated by the driver (except those currently in the
338 * process of being read by userspace).
339 *
340 * LOCKING: must hold dev->mutex
341 * CONTEXT: user
342 */
343static void data_free_buffers(struct fpga_device *priv)
344{
345 struct data_buf *buf, *tmp;
346
347 /* the device should be stopped, no DMA in progress */
348 BUG_ON(priv->inflight != NULL);
349
350 list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
351 list_del_init(&buf->entry);
352 carma_dma_unmap(priv->dev, buf);
353 data_free_buffer(buf);
354 }
355
356 list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
357 list_del_init(&buf->entry);
358 carma_dma_unmap(priv->dev, buf);
359 data_free_buffer(buf);
360 }
361
362 priv->num_buffers = 0;
363 priv->bufsize = 0;
364}
365
366/**
367 * data_alloc_buffers() - allocate 1 seconds worth of data buffers
368 * @priv: the driver's private data structure
369 *
370 * Allocate enough buffers for a whole second worth of data
371 *
372 * This routine will attempt to degrade nicely by succeeding even if a full
373 * second worth of data buffers could not be allocated, as long as a minimum
374 * number were allocated. In this case, it will print a message to the kernel
375 * log.
376 *
377 * The device must not be modifying any lists when this is called.
378 *
379 * CONTEXT: user
380 * LOCKING: must hold dev->mutex
381 *
382 * Returns 0 on success, -ERRNO otherwise
383 */
384static int data_alloc_buffers(struct fpga_device *priv)
385{
386 struct data_buf *buf;
387 int i, ret;
388
389 for (i = 0; i < MAX_DATA_BUFS; i++) {
390
391 /* allocate a buffer */
392 buf = data_alloc_buffer(priv->bufsize);
393 if (!buf)
394 break;
395
396 /* map it for DMA */
397 ret = carma_dma_map(priv->dev, buf);
398 if (ret) {
399 data_free_buffer(buf);
400 break;
401 }
402
403 /* add it to the list of free buffers */
404 list_add_tail(&buf->entry, &priv->free);
405 priv->num_buffers++;
406 }
407
408 /* Make sure we allocated the minimum required number of buffers */
409 if (priv->num_buffers < MIN_DATA_BUFS) {
410 dev_err(priv->dev, "Unable to allocate enough data buffers\n");
411 data_free_buffers(priv);
412 return -ENOMEM;
413 }
414
415 /* Warn if we are running in a degraded state, but do not fail */
416 if (priv->num_buffers < MAX_DATA_BUFS) {
417 dev_warn(priv->dev,
418 "Unable to allocate %d buffers, using %d buffers instead\n",
419 MAX_DATA_BUFS, i);
420 }
421
422 return 0;
423}
424
425/*
426 * DMA Operations Helpers
427 */
428
429/**
430 * fpga_start_addr() - get the physical address a DATA-FPGA
431 * @priv: the driver's private data structure
432 * @fpga: the DATA-FPGA number (zero based)
433 */
434static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga)
435{
436 return priv->phys_addr + 0x400000 + (0x80000 * fpga);
437}
438
439/**
440 * fpga_block_addr() - get the physical address of a correlation data block
441 * @priv: the driver's private data structure
442 * @fpga: the DATA-FPGA number (zero based)
443 * @blknum: the correlation block number (zero based)
444 */
445static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga,
446 unsigned int blknum)
447{
448 return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum));
449}
450
451#define REG_BLOCK_SIZE (32 * 4)
452
/**
 * data_setup_corl_table() - create the scatterlist for correlation dumps
 * @priv: the driver's private data structure
 *
 * Create the scatterlist for transferring a correlation dump from the
 * DATA FPGAs. This structure will be reused for each buffer that needs
 * to be filled with correlation data.
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_setup_corl_table(struct fpga_device *priv)
{
	struct sg_table *table = &priv->corl_table;
	struct scatterlist *sg;
	struct fpga_info *info;
	int i, j, ret;

	/*
	 * Calculate the number of entries needed
	 *
	 * NOTE(review): this counts (1 + NUM_FPGA) * REG_BLOCK_SIZE
	 * entries for the register areas, yet only (1 + NUM_FPGA) register
	 * entries are populated below — the table looks over-allocated by
	 * a factor of REG_BLOCK_SIZE for that portion. Verify against the
	 * DMA submission path before changing.
	 */
	priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
	for (i = 0; i < NUM_FPGA; i++)
		priv->corl_nents += priv->info[i].num_lag_ram;

	/* Allocate the scatterlist table */
	ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
	if (ret) {
		dev_err(priv->dev, "unable to allocate DMA table\n");
		return ret;
	}

	/* Add the DATA FPGA registers to the scatterlist */
	sg = table->sgl;
	for (i = 0; i < NUM_FPGA; i++) {
		sg_dma_address(sg) = fpga_start_addr(priv, i);
		sg_dma_len(sg) = REG_BLOCK_SIZE;
		sg = sg_next(sg);
	}

	/* Add the SYS-FPGA registers to the scatterlist */
	sg_dma_address(sg) = SYS_FPGA_BLOCK;
	sg_dma_len(sg) = REG_BLOCK_SIZE;
	sg = sg_next(sg);

	/* Add the FPGA correlation data blocks to the scatterlist */
	for (i = 0; i < NUM_FPGA; i++) {
		info = &priv->info[i];
		for (j = 0; j < info->num_lag_ram; j++) {
			sg_dma_address(sg) = fpga_block_addr(priv, i, j);
			sg_dma_len(sg) = info->blk_size;
			sg = sg_next(sg);
		}
	}

	/*
	 * All physical addresses and lengths are present in the structure
	 * now. It can be reused for every FPGA DATA interrupt
	 */
	return 0;
}
511
512/*
513 * FPGA Register Access Helpers
514 */
515
516static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga,
517 unsigned int reg, u32 val)
518{
519 const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
520 iowrite32be(val, priv->regs + fpga_start + reg);
521}
522
523static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga,
524 unsigned int reg)
525{
526 const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
527 return ioread32be(priv->regs + fpga_start + reg);
528}
529
530/**
531 * data_calculate_bufsize() - calculate the data buffer size required
532 * @priv: the driver's private data structure
533 *
534 * Calculate the total buffer size needed to hold a single block
535 * of correlation data
536 *
537 * CONTEXT: user
538 *
539 * Returns 0 on success, -ERRNO otherwise
540 */
541static int data_calculate_bufsize(struct fpga_device *priv)
542{
543 u32 num_corl, num_lags, num_meta, num_qcnt, num_pack;
544 u32 conf1, conf2, version;
545 u32 num_lag_ram, blk_size;
546 int i;
547
548 /* Each buffer starts with the 5 FPGA register areas */
549 priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
550
551 /* Read and store the configuration data for each FPGA */
552 for (i = 0; i < NUM_FPGA; i++) {
553 version = fpga_read_reg(priv, i, MMAP_REG_VERSION);
554 conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1);
555 conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2);
556
557 /* minor version 2 and later */
558 if ((version & 0x000000FF) >= 2) {
559 num_corl = (conf1 & 0x000000F0) >> 4;
560 num_pack = (conf1 & 0x00000F00) >> 8;
561 num_lags = (conf1 & 0x00FFF000) >> 12;
562 num_meta = (conf1 & 0x7F000000) >> 24;
563 num_qcnt = (conf2 & 0x00000FFF) >> 0;
564 } else {
565 num_corl = (conf1 & 0x000000F0) >> 4;
566 num_pack = 1; /* implied */
567 num_lags = (conf1 & 0x000FFF00) >> 8;
568 num_meta = (conf1 & 0x7FF00000) >> 20;
569 num_qcnt = (conf2 & 0x00000FFF) >> 0;
570 }
571
572 num_lag_ram = (num_corl + num_pack - 1) / num_pack;
573 blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8;
574
575 priv->info[i].num_lag_ram = num_lag_ram;
576 priv->info[i].blk_size = blk_size;
577 priv->bufsize += num_lag_ram * blk_size;
578
579 dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl);
580 dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack);
581 dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags);
582 dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta);
583 dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt);
584 dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size);
585 }
586
587 dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize);
588 return 0;
589}
590
591/*
592 * Interrupt Handling
593 */
594
/**
 * data_disable_interrupts() - stop the device from generating interrupts
 * @priv: the driver's private data structure
 *
 * Hide interrupts by switching to GPIO interrupt source; see "Notes on
 * the IRQ masking scheme" at the top of this file for why the status
 * registers cannot simply be cleared here.
 *
 * LOCKING: must hold dev->lock
 */
static void data_disable_interrupts(struct fpga_device *priv)
{
	/* hide the interrupt by switching the IRQ driver to GPIO */
	iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);
}
608
609/**
610 * data_enable_interrupts() - allow the device to generate interrupts
611 * @priv: the driver's private data structure
612 *
613 * Unhide interrupts by switching to the FPGA interrupt source. At the
614 * same time, clear the DATA-FPGA status registers.
615 *
616 * LOCKING: must hold dev->lock
617 */
618static void data_enable_interrupts(struct fpga_device *priv)
619{
620 /* clear the actual FPGA corl_done interrupt */
621 fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0);
622 fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0);
623 fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0);
624 fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0);
625
626 /* flush the writes */
627 fpga_read_reg(priv, 0, MMAP_REG_STATUS);
628 fpga_read_reg(priv, 1, MMAP_REG_STATUS);
629 fpga_read_reg(priv, 2, MMAP_REG_STATUS);
630 fpga_read_reg(priv, 3, MMAP_REG_STATUS);
631
632 /* switch back to the external interrupt source */
633 iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
634}
635
636/**
637 * data_dma_cb() - DMAEngine callback for DMA completion
638 * @data: the driver's private data structure
639 *
640 * Complete a DMA transfer from the DATA-FPGA's
641 *
642 * This is called via the DMA callback mechanism, and will handle moving the
643 * completed DMA transaction to the used list, and then wake any processes
644 * waiting for new data
645 *
646 * CONTEXT: any, softirq expected
647 */
648static void data_dma_cb(void *data)
649{
650 struct fpga_device *priv = data;
651 unsigned long flags;
652
653 spin_lock_irqsave(&priv->lock, flags);
654
655 /* If there is no inflight buffer, we've got a bug */
656 BUG_ON(priv->inflight == NULL);
657
658 /* Move the inflight buffer onto the used list */
659 list_move_tail(&priv->inflight->entry, &priv->used);
660 priv->inflight = NULL;
661
662 /*
663 * If data dumping is still enabled, then clear the FPGA
664 * status registers and re-enable FPGA interrupts
665 */
666 if (priv->enabled)
667 data_enable_interrupts(priv);
668
669 spin_unlock_irqrestore(&priv->lock, flags);
670
671 /*
672 * We've changed both the inflight and used lists, so we need
673 * to wake up any processes that are blocking for those events
674 */
675 wake_up(&priv->wait);
676}
677
678/**
679 * data_submit_dma() - prepare and submit the required DMA to fill a buffer
680 * @priv: the driver's private data structure
681 * @buf: the data buffer
682 *
683 * Prepare and submit the necessary DMA transactions to fill a correlation
684 * data buffer.
685 *
686 * LOCKING: must hold dev->lock
687 * CONTEXT: hardirq only
688 *
689 * Returns 0 on success, -ERRNO otherwise
690 */
691static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
692{
693 struct scatterlist *dst_sg, *src_sg;
694 unsigned int dst_nents, src_nents;
695 struct dma_chan *chan = priv->chan;
696 struct dma_async_tx_descriptor *tx;
697 dma_cookie_t cookie;
698 dma_addr_t dst, src;
699 unsigned long dma_flags = 0;
700
701 dst_sg = buf->sglist;
702 dst_nents = buf->sglen;
703
704 src_sg = priv->corl_table.sgl;
705 src_nents = priv->corl_nents;
706
707 /*
708 * All buffers passed to this function should be ready and mapped
709 * for DMA already. Therefore, we don't need to do anything except
710 * submit it to the Freescale DMA Engine for processing
711 */
712
713 /* setup the scatterlist to scatterlist transfer */
714 tx = chan->device->device_prep_dma_sg(chan,
715 dst_sg, dst_nents,
716 src_sg, src_nents,
717 0);
718 if (!tx) {
719 dev_err(priv->dev, "unable to prep scatterlist DMA\n");
720 return -ENOMEM;
721 }
722
723 /* submit the transaction to the DMA controller */
724 cookie = tx->tx_submit(tx);
725 if (dma_submit_error(cookie)) {
726 dev_err(priv->dev, "unable to submit scatterlist DMA\n");
727 return -ENOMEM;
728 }
729
730 /* Prepare the re-read of the SYS-FPGA block */
731 dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE);
732 src = SYS_FPGA_BLOCK;
733 tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
734 REG_BLOCK_SIZE,
735 dma_flags);
736 if (!tx) {
737 dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
738 return -ENOMEM;
739 }
740
741 /* Setup the callback */
742 tx->callback = data_dma_cb;
743 tx->callback_param = priv;
744
745 /* submit the transaction to the DMA controller */
746 cookie = tx->tx_submit(tx);
747 if (dma_submit_error(cookie)) {
748 dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n");
749 return -ENOMEM;
750 }
751
752 return 0;
753}
754
755#define CORL_DONE 0x1
756#define CORL_ERR 0x2
757
/**
 * data_irq() - shared interrupt handler for correlation-done events
 * @irq: the irq number
 * @dev_id: the driver's private data structure
 *
 * Validate that the interrupt really came from the DATA-FPGAs (the line is
 * shared), then grab a free buffer and submit the DMA that drains the
 * correlation data into it. FPGA interrupts stay masked until the DMA
 * callback (or this function, on failure) re-enables them.
 *
 * CONTEXT: hardirq
 */
static irqreturn_t data_irq(int irq, void *dev_id)
{
	struct fpga_device *priv = dev_id;
	bool submitted = false;
	struct data_buf *buf;
	u32 status;
	int i;

	/* detect spurious interrupts via FPGA status: every FPGA must
	 * report either completion or error for this to be ours */
	for (i = 0; i < 4; i++) {
		status = fpga_read_reg(priv, i, MMAP_REG_STATUS);
		if (!(status & (CORL_DONE | CORL_ERR))) {
			dev_err(priv->dev, "spurious irq detected (FPGA)\n");
			return IRQ_NONE;
		}
	}

	/* detect spurious interrupts via raw IRQ pin readback */
	/* NOTE(review): the pin appears to idle high (probe drives the GPIO
	 * high for "no interrupt"), so a set bit means no FPGA asserted —
	 * confirm polarity against the hardware spec */
	status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA);
	if (status & IRQ_CORL_DONE) {
		dev_err(priv->dev, "spurious irq detected (IRQ)\n");
		return IRQ_NONE;
	}

	spin_lock(&priv->lock);

	/*
	 * This is an error case that should never happen.
	 *
	 * If this driver has a bug and manages to re-enable interrupts while
	 * a DMA is in progress, then we will hit this statement and should
	 * start paying attention immediately.
	 */
	BUG_ON(priv->inflight != NULL);

	/* hide the interrupt by switching the IRQ driver to GPIO */
	data_disable_interrupts(priv);

	/* If there are no free buffers, drop this data */
	if (list_empty(&priv->free)) {
		priv->num_dropped++;
		goto out;
	}

	buf = list_first_entry(&priv->free, struct data_buf, entry);
	list_del_init(&buf->entry);
	BUG_ON(buf->size != priv->bufsize);

	/* Submit a DMA transfer to get the correlation data */
	if (data_submit_dma(priv, buf)) {
		dev_err(priv->dev, "Unable to setup DMA transfer\n");
		list_move_tail(&buf->entry, &priv->free);
		goto out;
	}

	/* Save the buffer for the DMA callback */
	priv->inflight = buf;
	submitted = true;

	/* Start the DMA Engine */
	dma_async_issue_pending(priv->chan);

out:
	/* If no DMA was submitted, re-enable interrupts: the DMA callback
	 * will never run, so nobody else would unmask them */
	if (!submitted)
		data_enable_interrupts(priv);

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}
828
829/*
830 * Realtime Device Enable Helpers
831 */
832
833/**
834 * data_device_enable() - enable the device for buffered dumping
835 * @priv: the driver's private data structure
836 *
837 * Enable the device for buffered dumping. Allocates buffers and hooks up
838 * the interrupt handler. When this finishes, data will come pouring in.
839 *
840 * LOCKING: must hold dev->mutex
841 * CONTEXT: user context only
842 *
843 * Returns 0 on success, -ERRNO otherwise
844 */
845static int data_device_enable(struct fpga_device *priv)
846{
847 bool enabled;
848 u32 val;
849 int ret;
850
851 /* multiple enables are safe: they do nothing */
852 spin_lock_irq(&priv->lock);
853 enabled = priv->enabled;
854 spin_unlock_irq(&priv->lock);
855 if (enabled)
856 return 0;
857
858 /* check that the FPGAs are programmed */
859 val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS);
860 if (!(val & (1 << 18))) {
861 dev_err(priv->dev, "DATA-FPGAs are not enabled\n");
862 return -ENODATA;
863 }
864
865 /* read the FPGAs to calculate the buffer size */
866 ret = data_calculate_bufsize(priv);
867 if (ret) {
868 dev_err(priv->dev, "unable to calculate buffer size\n");
869 goto out_error;
870 }
871
872 /* allocate the correlation data buffers */
873 ret = data_alloc_buffers(priv);
874 if (ret) {
875 dev_err(priv->dev, "unable to allocate buffers\n");
876 goto out_error;
877 }
878
879 /* setup the source scatterlist for dumping correlation data */
880 ret = data_setup_corl_table(priv);
881 if (ret) {
882 dev_err(priv->dev, "unable to setup correlation DMA table\n");
883 goto out_error;
884 }
885
886 /* prevent the FPGAs from generating interrupts */
887 data_disable_interrupts(priv);
888
889 /* hookup the irq handler */
890 ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
891 if (ret) {
892 dev_err(priv->dev, "unable to request IRQ handler\n");
893 goto out_error;
894 }
895
896 /* allow the DMA callback to re-enable FPGA interrupts */
897 spin_lock_irq(&priv->lock);
898 priv->enabled = true;
899 spin_unlock_irq(&priv->lock);
900
901 /* allow the FPGAs to generate interrupts */
902 data_enable_interrupts(priv);
903 return 0;
904
905out_error:
906 sg_free_table(&priv->corl_table);
907 priv->corl_nents = 0;
908
909 data_free_buffers(priv);
910 return ret;
911}
912
913/**
914 * data_device_disable() - disable the device for buffered dumping
915 * @priv: the driver's private data structure
916 *
917 * Disable the device for buffered dumping. Stops new DMA transactions from
918 * being generated, waits for all outstanding DMA to complete, and then frees
919 * all buffers.
920 *
921 * LOCKING: must hold dev->mutex
922 * CONTEXT: user only
923 *
924 * Returns 0 on success, -ERRNO otherwise
925 */
926static int data_device_disable(struct fpga_device *priv)
927{
928 spin_lock_irq(&priv->lock);
929
930 /* allow multiple disable */
931 if (!priv->enabled) {
932 spin_unlock_irq(&priv->lock);
933 return 0;
934 }
935
936 /*
937 * Mark the device disabled
938 *
939 * This stops DMA callbacks from re-enabling interrupts
940 */
941 priv->enabled = false;
942
943 /* prevent the FPGAs from generating interrupts */
944 data_disable_interrupts(priv);
945
946 /* wait until all ongoing DMA has finished */
947 while (priv->inflight != NULL) {
948 spin_unlock_irq(&priv->lock);
949 wait_event(priv->wait, priv->inflight == NULL);
950 spin_lock_irq(&priv->lock);
951 }
952
953 spin_unlock_irq(&priv->lock);
954
955 /* unhook the irq handler */
956 free_irq(priv->irq, priv);
957
958 /* free the correlation table */
959 sg_free_table(&priv->corl_table);
960 priv->corl_nents = 0;
961
962 /* free all buffers: the free and used lists are not being changed */
963 data_free_buffers(priv);
964 return 0;
965}
966
967/*
968 * DEBUGFS Interface
969 */
970#ifdef CONFIG_DEBUG_FS
971
972/*
973 * Count the number of entries in the given list
974 */
975static unsigned int list_num_entries(struct list_head *list)
976{
977 struct list_head *entry;
978 unsigned int ret = 0;
979
980 list_for_each(entry, list)
981 ret++;
982
983 return ret;
984}
985
986static int data_debug_show(struct seq_file *f, void *offset)
987{
988 struct fpga_device *priv = f->private;
989
990 spin_lock_irq(&priv->lock);
991
992 seq_printf(f, "enabled: %d\n", priv->enabled);
993 seq_printf(f, "bufsize: %d\n", priv->bufsize);
994 seq_printf(f, "num_buffers: %d\n", priv->num_buffers);
995 seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free));
996 seq_printf(f, "inflight: %d\n", priv->inflight != NULL);
997 seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used));
998 seq_printf(f, "num_dropped: %d\n", priv->num_dropped);
999
1000 spin_unlock_irq(&priv->lock);
1001 return 0;
1002}
1003
/* debugfs open: bind data_debug_show() to the seq_file machinery */
static int data_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, data_debug_show, inode->i_private);
}
1008
/* file operations for the debugfs statistics file */
static const struct file_operations data_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= data_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1016
/*
 * Create the debugfs statistics file.
 *
 * NOTE(review): PTR_ERR_OR_ZERO() maps a NULL return (debugfs disabled or
 * create failure on older kernels) to success — presumably intentional,
 * since debugfs is best-effort; confirm.
 */
static int data_debugfs_init(struct fpga_device *priv)
{
	priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
					      &data_debug_fops);
	return PTR_ERR_OR_ZERO(priv->dbg_entry);
}
1023
/* Remove the debugfs statistics file */
static void data_debugfs_exit(struct fpga_device *priv)
{
	debugfs_remove(priv->dbg_entry);
}
1028
1029#else
1030
/* no-op stubs used when CONFIG_DEBUG_FS is not set */
static inline int data_debugfs_init(struct fpga_device *priv)
{
	return 0;
}

static inline void data_debugfs_exit(struct fpga_device *priv)
{
}
1039
1040#endif /* CONFIG_DEBUG_FS */
1041
1042/*
1043 * SYSFS Attributes
1044 */
1045
1046static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
1047 char *buf)
1048{
1049 struct fpga_device *priv = dev_get_drvdata(dev);
1050 int ret;
1051
1052 spin_lock_irq(&priv->lock);
1053 ret = snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
1054 spin_unlock_irq(&priv->lock);
1055
1056 return ret;
1057}
1058
1059static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
1060 const char *buf, size_t count)
1061{
1062 struct fpga_device *priv = dev_get_drvdata(dev);
1063 unsigned long enable;
1064 int ret;
1065
1066 ret = kstrtoul(buf, 0, &enable);
1067 if (ret) {
1068 dev_err(priv->dev, "unable to parse enable input\n");
1069 return ret;
1070 }
1071
1072 /* protect against concurrent enable/disable */
1073 ret = mutex_lock_interruptible(&priv->mutex);
1074 if (ret)
1075 return ret;
1076
1077 if (enable)
1078 ret = data_device_enable(priv);
1079 else
1080 ret = data_device_disable(priv);
1081
1082 if (ret) {
1083 dev_err(priv->dev, "device %s failed\n",
1084 enable ? "enable" : "disable");
1085 count = ret;
1086 goto out_unlock;
1087 }
1088
1089out_unlock:
1090 mutex_unlock(&priv->mutex);
1091 return count;
1092}
1093
/* "enable" attribute: root-writable, world-readable */
static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set);

static struct attribute *data_sysfs_attrs[] = {
	&dev_attr_enable.attr,
	NULL,
};

/* attribute group registered on the miscdevice in probe */
static const struct attribute_group rt_sysfs_attr_group = {
	.attrs = data_sysfs_attrs,
};
1104
1105/*
1106 * FPGA Realtime Data Character Device
1107 */
1108
/*
 * data_open() - open the realtime data character device
 *
 * Allocates a per-reader structure and pins the driver's private data
 * with a kref so it survives device unbind while readers remain.
 *
 * Returns 0 on success, -ERRNO otherwise.
 */
static int data_open(struct inode *inode, struct file *filp)
{
	/*
	 * The miscdevice layer puts our struct miscdevice into the
	 * filp->private_data field. We use this to find our private
	 * data and then overwrite it with our own private structure.
	 */
	struct fpga_device *priv = container_of(filp->private_data,
						struct fpga_device, miscdev);
	struct fpga_reader *reader;
	int ret;

	/* allocate private data */
	reader = kzalloc(sizeof(*reader), GFP_KERNEL);
	if (!reader)
		return -ENOMEM;

	reader->priv = priv;
	reader->buf = NULL;

	filp->private_data = reader;
	ret = nonseekable_open(inode, filp);
	if (ret) {
		dev_err(priv->dev, "nonseekable-open failed\n");
		kfree(reader);
		return ret;
	}

	/*
	 * success, increase the reference count of the private data structure
	 * so that it doesn't disappear if the device is unbound
	 */
	kref_get(&priv->ref);
	return 0;
}
1144
/*
 * data_release() - close the realtime data character device
 *
 * Frees the reader's partially-consumed buffer (if any) and drops the
 * reference to the driver data taken in data_open().
 */
static int data_release(struct inode *inode, struct file *filp)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;

	/* free the per-reader structure */
	data_free_buffer(reader->buf);
	kfree(reader);
	filp->private_data = NULL;

	/* decrement our reference count to the private data */
	kref_put(&priv->ref, fpga_device_release);
	return 0;
}
1159
/*
 * data_read() - read correlation data from a completed buffer
 *
 * Blocks (unless O_NONBLOCK) until a buffer appears on the used list,
 * unmaps it, and copies as much as fits to userspace. A partially
 * consumed buffer is stashed in the reader and resumed on the next
 * call. A fully consumed buffer is re-mapped for DMA and either
 * returned to the free list or dropped if the device was disabled or
 * reconfigured in the meantime.
 *
 * Returns the number of bytes copied, or -ERRNO.
 */
static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
			 loff_t *f_pos)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;
	struct list_head *used = &priv->used;
	bool drop_buffer = false;
	struct data_buf *dbuf;
	size_t avail;
	void *data;
	int ret;

	/* check if we already have a partial buffer */
	if (reader->buf) {
		dbuf = reader->buf;
		goto have_buffer;
	}

	spin_lock_irq(&priv->lock);

	/* Block until there is at least one buffer on the used list */
	while (list_empty(used)) {
		spin_unlock_irq(&priv->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(priv->wait, !list_empty(used));
		if (ret)
			return ret;

		spin_lock_irq(&priv->lock);
	}

	/* Grab the first buffer off of the used list */
	dbuf = list_first_entry(used, struct data_buf, entry);
	list_del_init(&dbuf->entry);

	spin_unlock_irq(&priv->lock);

	/* Buffers are always mapped: unmap it */
	carma_dma_unmap(priv->dev, dbuf);

	/* save the buffer for later */
	reader->buf = dbuf;
	reader->buf_start = 0;

have_buffer:
	/* Get the number of bytes available */
	avail = dbuf->size - reader->buf_start;
	data = dbuf->vaddr + reader->buf_start;

	/* Get the number of bytes we can transfer */
	count = min(count, avail);

	/* Copy the data to the userspace buffer */
	if (copy_to_user(ubuf, data, count))
		return -EFAULT;

	/* Update the amount of available space */
	avail -= count;

	/*
	 * If there is still some data available, save the buffer for the
	 * next userspace call to read() and return
	 */
	if (avail > 0) {
		reader->buf_start += count;
		reader->buf = dbuf;
		return count;
	}

	/*
	 * Get the buffer ready to be reused for DMA
	 *
	 * If it fails, we pretend that the read never happed and return
	 * -EFAULT to userspace. The read will be retried.
	 */
	ret = carma_dma_map(priv->dev, dbuf);
	if (ret) {
		dev_err(priv->dev, "unable to remap buffer for DMA\n");
		return -EFAULT;
	}

	/* Lock against concurrent enable/disable */
	spin_lock_irq(&priv->lock);

	/* the reader is finished with this buffer */
	reader->buf = NULL;

	/*
	 * One of two things has happened, the device is disabled, or the
	 * device has been reconfigured underneath us. In either case, we
	 * should just throw away the buffer.
	 *
	 * Lockdep complains if this is done under the spinlock, so we
	 * handle it during the unlock path.
	 */
	if (!priv->enabled || dbuf->size != priv->bufsize) {
		drop_buffer = true;
		goto out_unlock;
	}

	/* The buffer is safe to reuse, so add it back to the free list */
	list_add_tail(&dbuf->entry, &priv->free);

out_unlock:
	spin_unlock_irq(&priv->lock);

	if (drop_buffer) {
		carma_dma_unmap(priv->dev, dbuf);
		data_free_buffer(dbuf);
	}

	return count;
}
1276
1277static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
1278{
1279 struct fpga_reader *reader = filp->private_data;
1280 struct fpga_device *priv = reader->priv;
1281 unsigned int mask = 0;
1282
1283 poll_wait(filp, &priv->wait, tbl);
1284
1285 if (!list_empty(&priv->used))
1286 mask |= POLLIN | POLLRDNORM;
1287
1288 return mask;
1289}
1290
1291static int data_mmap(struct file *filp, struct vm_area_struct *vma)
1292{
1293 struct fpga_reader *reader = filp->private_data;
1294 struct fpga_device *priv = reader->priv;
1295 unsigned long offset, vsize, psize, addr;
1296
1297 /* VMA properties */
1298 offset = vma->vm_pgoff << PAGE_SHIFT;
1299 vsize = vma->vm_end - vma->vm_start;
1300 psize = priv->phys_size - offset;
1301 addr = (priv->phys_addr + offset) >> PAGE_SHIFT;
1302
1303 /* Check against the FPGA region's physical memory size */
1304 if (vsize > psize) {
1305 dev_err(priv->dev, "requested mmap mapping too large\n");
1306 return -EINVAL;
1307 }
1308
1309 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1310
1311 return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
1312 vma->vm_page_prot);
1313}
1314
/* character device operations for the realtime data device */
static const struct file_operations data_fops = {
	.owner		= THIS_MODULE,
	.open		= data_open,
	.release	= data_release,
	.read		= data_read,
	.poll		= data_poll,
	.mmap		= data_mmap,
	.llseek		= no_llseek,
};
1324
1325/*
1326 * OpenFirmware Device Subsystem
1327 */
1328
1329static bool dma_filter(struct dma_chan *chan, void *data)
1330{
1331 /*
1332 * DMA Channel #0 is used for the FPGA Programmer, so ignore it
1333 *
1334 * This probably won't survive an unload/load cycle of the Freescale
1335 * DMAEngine driver, but that won't be a problem
1336 */
1337 if (chan->chan_id == 0 && chan->device->dev_id == 0)
1338 return false;
1339
1340 return true;
1341}
1342
/*
 * data_of_probe() - bind the driver to a carma-fpga device node
 *
 * Allocates and initializes driver state, maps the FPGA registers,
 * acquires a DMA channel and IRQ, and registers the miscdevice plus
 * debugfs/sysfs interfaces. Resources are unwound in reverse order on
 * failure; priv itself is released through its kref.
 *
 * Returns 0 on success, -ERRNO otherwise.
 */
static int data_of_probe(struct platform_device *op)
{
	struct device_node *of_node = op->dev.of_node;
	struct device *this_device;
	struct fpga_device *priv;
	struct resource res;
	dma_cap_mask_t mask;
	int ret;

	/* Allocate private data */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&op->dev, "Unable to allocate device private data\n");
		ret = -ENOMEM;
		goto out_return;
	}

	platform_set_drvdata(op, priv);
	priv->dev = &op->dev;
	kref_init(&priv->ref);
	mutex_init(&priv->mutex);

	dev_set_drvdata(priv->dev, priv);
	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->free);
	INIT_LIST_HEAD(&priv->used);
	init_waitqueue_head(&priv->wait);

	/* Setup the misc device */
	priv->miscdev.minor = MISC_DYNAMIC_MINOR;
	priv->miscdev.name = drv_name;
	priv->miscdev.fops = &data_fops;

	/* Get the physical address of the FPGA registers */
	ret = of_address_to_resource(of_node, 0, &res);
	if (ret) {
		dev_err(&op->dev, "Unable to find FPGA physical address\n");
		ret = -ENODEV;
		goto out_free_priv;
	}

	/* keep the raw address/size around for data_mmap() */
	priv->phys_addr = res.start;
	priv->phys_size = resource_size(&res);

	/* ioremap the registers for use */
	priv->regs = of_iomap(of_node, 0);
	if (!priv->regs) {
		dev_err(&op->dev, "Unable to ioremap registers\n");
		ret = -ENOMEM;
		goto out_free_priv;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	dma_cap_set(DMA_INTERRUPT, mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_SG, mask);

	/* Request a DMA channel (dma_filter skips the programmer channel) */
	priv->chan = dma_request_channel(mask, dma_filter, NULL);
	if (!priv->chan) {
		dev_err(&op->dev, "Unable to request DMA channel\n");
		ret = -ENODEV;
		goto out_unmap_regs;
	}

	/* Find the correct IRQ number */
	priv->irq = irq_of_parse_and_map(of_node, 0);
	if (priv->irq == NO_IRQ) {
		dev_err(&op->dev, "Unable to find IRQ line\n");
		ret = -ENODEV;
		goto out_release_dma;
	}

	/* Drive the GPIO for FPGA IRQ high (no interrupt) */
	iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA);

	/* Register the miscdevice */
	ret = misc_register(&priv->miscdev);
	if (ret) {
		dev_err(&op->dev, "Unable to register miscdevice\n");
		goto out_irq_dispose_mapping;
	}

	/* Create the debugfs files */
	ret = data_debugfs_init(priv);
	if (ret) {
		dev_err(&op->dev, "Unable to create debugfs files\n");
		goto out_misc_deregister;
	}

	/* Create the sysfs files */
	this_device = priv->miscdev.this_device;
	dev_set_drvdata(this_device, priv);
	ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group);
	if (ret) {
		dev_err(&op->dev, "Unable to create sysfs files\n");
		goto out_data_debugfs_exit;
	}

	dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n");
	return 0;

out_data_debugfs_exit:
	data_debugfs_exit(priv);
out_misc_deregister:
	misc_deregister(&priv->miscdev);
out_irq_dispose_mapping:
	irq_dispose_mapping(priv->irq);
out_release_dma:
	dma_release_channel(priv->chan);
out_unmap_regs:
	iounmap(priv->regs);
out_free_priv:
	/* drop the kref_init() reference; fpga_device_release frees priv */
	kref_put(&priv->ref, fpga_device_release);
out_return:
	return ret;
}
1461
/*
 * data_of_remove() - unbind the driver from the device
 *
 * Tears down in the order: sysfs (blocks re-enable), debugfs, data
 * dumping, chardev (blocks new readers), then IRQ/DMA/register
 * resources. priv itself lives until the last open reader drops its
 * kref reference.
 */
static int data_of_remove(struct platform_device *op)
{
	struct fpga_device *priv = platform_get_drvdata(op);
	struct device *this_device = priv->miscdev.this_device;

	/* remove all sysfs files, now the device cannot be re-enabled */
	sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group);

	/* remove all debugfs files */
	data_debugfs_exit(priv);

	/* disable the device from generating data */
	data_device_disable(priv);

	/* remove the character device to stop new readers from appearing */
	misc_deregister(&priv->miscdev);

	/* cleanup everything not needed by readers */
	irq_dispose_mapping(priv->irq);
	dma_release_channel(priv->chan);
	iounmap(priv->regs);

	/* release our reference */
	kref_put(&priv->ref, fpga_device_release);
	return 0;
}
1488
/* device tree match table */
/* NOTE(review): no MODULE_DEVICE_TABLE(of, ...) is visible here, so
 * module autoloading from DT would not work — confirm intent */
static const struct of_device_id data_of_match[] = {
	{ .compatible = "carma,carma-fpga", },
	{},
};

static struct platform_driver data_of_driver = {
	.probe		= data_of_probe,
	.remove		= data_of_remove,
	.driver		= {
		.name		= drv_name,
		.of_match_table	= data_of_match,
	},
};

module_platform_driver(data_of_driver);

MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d2cd53e3fac3..1e42781592d8 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -59,46 +59,29 @@ void mei_amthif_reset_params(struct mei_device *dev)
59 * mei_amthif_host_init - mei initialization amthif client. 59 * mei_amthif_host_init - mei initialization amthif client.
60 * 60 *
61 * @dev: the device structure 61 * @dev: the device structure
62 * @me_cl: me client
62 * 63 *
63 * Return: 0 on success, <0 on failure. 64 * Return: 0 on success, <0 on failure.
64 */ 65 */
65int mei_amthif_host_init(struct mei_device *dev) 66int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
66{ 67{
67 struct mei_cl *cl = &dev->iamthif_cl; 68 struct mei_cl *cl = &dev->iamthif_cl;
68 struct mei_me_client *me_cl;
69 int ret; 69 int ret;
70 70
71 dev->iamthif_state = MEI_IAMTHIF_IDLE; 71 dev->iamthif_state = MEI_IAMTHIF_IDLE;
72 72
73 mei_cl_init(cl, dev); 73 mei_cl_init(cl, dev);
74 74
75 me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
76 if (!me_cl) {
77 dev_info(dev->dev, "amthif: failed to find the client");
78 return -ENOTTY;
79 }
80
81 cl->me_client_id = me_cl->client_id;
82 cl->cl_uuid = me_cl->props.protocol_name;
83
84 /* Assign iamthif_mtu to the value received from ME */
85
86 dev->iamthif_mtu = me_cl->props.max_msg_length;
87 dev_dbg(dev->dev, "IAMTHIF_MTU = %d\n", dev->iamthif_mtu);
88
89
90 ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); 75 ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
91 if (ret < 0) { 76 if (ret < 0) {
92 dev_err(dev->dev, "amthif: failed cl_link %d\n", ret); 77 dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
93 goto out; 78 return ret;
94 } 79 }
95 80
96 ret = mei_cl_connect(cl, NULL); 81 ret = mei_cl_connect(cl, me_cl, NULL);
97 82
98 dev->iamthif_state = MEI_IAMTHIF_IDLE; 83 dev->iamthif_state = MEI_IAMTHIF_IDLE;
99 84
100out:
101 mei_me_cl_put(me_cl);
102 return ret; 85 return ret;
103} 86}
104 87
@@ -250,7 +233,6 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
250{ 233{
251 struct mei_device *dev = cl->dev; 234 struct mei_device *dev = cl->dev;
252 struct mei_cl_cb *cb; 235 struct mei_cl_cb *cb;
253 size_t length = dev->iamthif_mtu;
254 int rets; 236 int rets;
255 237
256 cb = mei_io_cb_init(cl, MEI_FOP_READ, file); 238 cb = mei_io_cb_init(cl, MEI_FOP_READ, file);
@@ -259,7 +241,7 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
259 goto err; 241 goto err;
260 } 242 }
261 243
262 rets = mei_io_cb_alloc_buf(cb, length); 244 rets = mei_io_cb_alloc_buf(cb, mei_cl_mtu(cl));
263 if (rets) 245 if (rets)
264 goto err; 246 goto err;
265 247
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 4cf38c39878a..357b6ae4d207 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -35,18 +35,30 @@ static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
35 struct mei_cl_device *device = to_mei_cl_device(dev); 35 struct mei_cl_device *device = to_mei_cl_device(dev);
36 struct mei_cl_driver *driver = to_mei_cl_driver(drv); 36 struct mei_cl_driver *driver = to_mei_cl_driver(drv);
37 const struct mei_cl_device_id *id; 37 const struct mei_cl_device_id *id;
38 const uuid_le *uuid;
39 const char *name;
38 40
39 if (!device) 41 if (!device)
40 return 0; 42 return 0;
41 43
44 uuid = mei_me_cl_uuid(device->me_cl);
45 name = device->name;
46
42 if (!driver || !driver->id_table) 47 if (!driver || !driver->id_table)
43 return 0; 48 return 0;
44 49
45 id = driver->id_table; 50 id = driver->id_table;
46 51
47 while (id->name[0]) { 52 while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
48 if (!strncmp(dev_name(dev), id->name, sizeof(id->name))) 53
49 return 1; 54 if (!uuid_le_cmp(*uuid, id->uuid)) {
55 if (id->name[0]) {
56 if (!strncmp(name, id->name, sizeof(id->name)))
57 return 1;
58 } else {
59 return 1;
60 }
61 }
50 62
51 id++; 63 id++;
52 } 64 }
@@ -69,7 +81,7 @@ static int mei_cl_device_probe(struct device *dev)
69 81
70 dev_dbg(dev, "Device probe\n"); 82 dev_dbg(dev, "Device probe\n");
71 83
72 strlcpy(id.name, dev_name(dev), sizeof(id.name)); 84 strlcpy(id.name, device->name, sizeof(id.name));
73 85
74 return driver->probe(device, &id); 86 return driver->probe(device, &id);
75} 87}
@@ -97,18 +109,48 @@ static int mei_cl_device_remove(struct device *dev)
97 return driver->remove(device); 109 return driver->remove(device);
98} 110}
99 111
112static ssize_t name_show(struct device *dev, struct device_attribute *a,
113 char *buf)
114{
115 struct mei_cl_device *device = to_mei_cl_device(dev);
116 size_t len;
117
118 len = snprintf(buf, PAGE_SIZE, "%s", device->name);
119
120 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
121}
122static DEVICE_ATTR_RO(name);
123
124static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
125 char *buf)
126{
127 struct mei_cl_device *device = to_mei_cl_device(dev);
128 const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
129 size_t len;
130
131 len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
132
133 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
134}
135static DEVICE_ATTR_RO(uuid);
136
100static ssize_t modalias_show(struct device *dev, struct device_attribute *a, 137static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
101 char *buf) 138 char *buf)
102{ 139{
103 int len; 140 struct mei_cl_device *device = to_mei_cl_device(dev);
141 const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
142 size_t len;
104 143
105 len = snprintf(buf, PAGE_SIZE, "mei:%s\n", dev_name(dev)); 144 len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":",
145 device->name, MEI_CL_UUID_ARGS(uuid->b));
106 146
107 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; 147 return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
108} 148}
109static DEVICE_ATTR_RO(modalias); 149static DEVICE_ATTR_RO(modalias);
110 150
111static struct attribute *mei_cl_dev_attrs[] = { 151static struct attribute *mei_cl_dev_attrs[] = {
152 &dev_attr_name.attr,
153 &dev_attr_uuid.attr,
112 &dev_attr_modalias.attr, 154 &dev_attr_modalias.attr,
113 NULL, 155 NULL,
114}; 156};
@@ -116,7 +158,17 @@ ATTRIBUTE_GROUPS(mei_cl_dev);
116 158
117static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env) 159static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
118{ 160{
119 if (add_uevent_var(env, "MODALIAS=mei:%s", dev_name(dev))) 161 struct mei_cl_device *device = to_mei_cl_device(dev);
162 const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
163
164 if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
165 return -ENOMEM;
166
167 if (add_uevent_var(env, "MEI_CL_NAME=%s", device->name))
168 return -ENOMEM;
169
170 if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":",
171 device->name, MEI_CL_UUID_ARGS(uuid->b)))
120 return -ENOMEM; 172 return -ENOMEM;
121 173
122 return 0; 174 return 0;
@@ -133,7 +185,13 @@ static struct bus_type mei_cl_bus_type = {
133 185
134static void mei_cl_dev_release(struct device *dev) 186static void mei_cl_dev_release(struct device *dev)
135{ 187{
136 kfree(to_mei_cl_device(dev)); 188 struct mei_cl_device *device = to_mei_cl_device(dev);
189
190 if (!device)
191 return;
192
193 mei_me_cl_put(device->me_cl);
194 kfree(device);
137} 195}
138 196
139static struct device_type mei_cl_device_type = { 197static struct device_type mei_cl_device_type = {
@@ -141,45 +199,50 @@ static struct device_type mei_cl_device_type = {
141}; 199};
142 200
143struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, 201struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev,
144 uuid_le uuid) 202 uuid_le uuid)
145{ 203{
146 struct mei_cl *cl; 204 struct mei_cl *cl;
147 205
148 list_for_each_entry(cl, &dev->device_list, device_link) { 206 list_for_each_entry(cl, &dev->device_list, device_link) {
149 if (!uuid_le_cmp(uuid, cl->cl_uuid)) 207 if (cl->device && cl->device->me_cl &&
208 !uuid_le_cmp(uuid, *mei_me_cl_uuid(cl->device->me_cl)))
150 return cl; 209 return cl;
151 } 210 }
152 211
153 return NULL; 212 return NULL;
154} 213}
214
155struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, 215struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
156 uuid_le uuid, char *name, 216 struct mei_me_client *me_cl,
157 struct mei_cl_ops *ops) 217 struct mei_cl *cl,
218 char *name)
158{ 219{
159 struct mei_cl_device *device; 220 struct mei_cl_device *device;
160 struct mei_cl *cl;
161 int status; 221 int status;
162 222
163 cl = mei_cl_bus_find_cl_by_uuid(dev, uuid);
164 if (cl == NULL)
165 return NULL;
166
167 device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL); 223 device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
168 if (!device) 224 if (!device)
169 return NULL; 225 return NULL;
170 226
171 device->cl = cl; 227 device->me_cl = mei_me_cl_get(me_cl);
172 device->ops = ops; 228 if (!device->me_cl) {
229 kfree(device);
230 return NULL;
231 }
173 232
233 device->cl = cl;
174 device->dev.parent = dev->dev; 234 device->dev.parent = dev->dev;
175 device->dev.bus = &mei_cl_bus_type; 235 device->dev.bus = &mei_cl_bus_type;
176 device->dev.type = &mei_cl_device_type; 236 device->dev.type = &mei_cl_device_type;
177 237
178 dev_set_name(&device->dev, "%s", name); 238 strlcpy(device->name, name, sizeof(device->name));
239
240 dev_set_name(&device->dev, "mei:%s:%pUl", name, mei_me_cl_uuid(me_cl));
179 241
180 status = device_register(&device->dev); 242 status = device_register(&device->dev);
181 if (status) { 243 if (status) {
182 dev_err(dev->dev, "Failed to register MEI device\n"); 244 dev_err(dev->dev, "Failed to register MEI device\n");
245 mei_me_cl_put(device->me_cl);
183 kfree(device); 246 kfree(device);
184 return NULL; 247 return NULL;
185 } 248 }
@@ -224,11 +287,10 @@ void mei_cl_driver_unregister(struct mei_cl_driver *driver)
224} 287}
225EXPORT_SYMBOL_GPL(mei_cl_driver_unregister); 288EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
226 289
227static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, 290ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
228 bool blocking) 291 bool blocking)
229{ 292{
230 struct mei_device *dev; 293 struct mei_device *dev;
231 struct mei_me_client *me_cl = NULL;
232 struct mei_cl_cb *cb = NULL; 294 struct mei_cl_cb *cb = NULL;
233 ssize_t rets; 295 ssize_t rets;
234 296
@@ -244,13 +306,12 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
244 } 306 }
245 307
246 /* Check if we have an ME client device */ 308 /* Check if we have an ME client device */
247 me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id); 309 if (!mei_me_cl_is_active(cl->me_cl)) {
248 if (!me_cl) {
249 rets = -ENOTTY; 310 rets = -ENOTTY;
250 goto out; 311 goto out;
251 } 312 }
252 313
253 if (length > me_cl->props.max_msg_length) { 314 if (length > mei_cl_mtu(cl)) {
254 rets = -EFBIG; 315 rets = -EFBIG;
255 goto out; 316 goto out;
256 } 317 }
@@ -266,7 +327,6 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
266 rets = mei_cl_write(cl, cb, blocking); 327 rets = mei_cl_write(cl, cb, blocking);
267 328
268out: 329out:
269 mei_me_cl_put(me_cl);
270 mutex_unlock(&dev->device_lock); 330 mutex_unlock(&dev->device_lock);
271 if (rets < 0) 331 if (rets < 0)
272 mei_io_cb_free(cb); 332 mei_io_cb_free(cb);
@@ -341,16 +401,6 @@ out:
341 return rets; 401 return rets;
342} 402}
343 403
344inline ssize_t __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length)
345{
346 return ___mei_cl_send(cl, buf, length, 0);
347}
348
349inline ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length)
350{
351 return ___mei_cl_send(cl, buf, length, 1);
352}
353
354ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length) 404ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
355{ 405{
356 struct mei_cl *cl = device->cl; 406 struct mei_cl *cl = device->cl;
@@ -358,23 +408,17 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
358 if (cl == NULL) 408 if (cl == NULL)
359 return -ENODEV; 409 return -ENODEV;
360 410
361 if (device->ops && device->ops->send) 411 return __mei_cl_send(cl, buf, length, 1);
362 return device->ops->send(device, buf, length);
363
364 return __mei_cl_send(cl, buf, length);
365} 412}
366EXPORT_SYMBOL_GPL(mei_cl_send); 413EXPORT_SYMBOL_GPL(mei_cl_send);
367 414
368ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length) 415ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
369{ 416{
370 struct mei_cl *cl = device->cl; 417 struct mei_cl *cl = device->cl;
371 418
372 if (cl == NULL) 419 if (cl == NULL)
373 return -ENODEV; 420 return -ENODEV;
374 421
375 if (device->ops && device->ops->recv)
376 return device->ops->recv(device, buf, length);
377
378 return __mei_cl_recv(cl, buf, length); 422 return __mei_cl_recv(cl, buf, length);
379} 423}
380EXPORT_SYMBOL_GPL(mei_cl_recv); 424EXPORT_SYMBOL_GPL(mei_cl_recv);
@@ -436,7 +480,13 @@ int mei_cl_enable_device(struct mei_cl_device *device)
436 480
437 mutex_lock(&dev->device_lock); 481 mutex_lock(&dev->device_lock);
438 482
439 err = mei_cl_connect(cl, NULL); 483 if (mei_cl_is_connected(cl)) {
484 mutex_unlock(&dev->device_lock);
485 dev_warn(dev->dev, "Already connected");
486 return -EBUSY;
487 }
488
489 err = mei_cl_connect(cl, device->me_cl, NULL);
440 if (err < 0) { 490 if (err < 0) {
441 mutex_unlock(&dev->device_lock); 491 mutex_unlock(&dev->device_lock);
442 dev_err(dev->dev, "Could not connect to the ME client"); 492 dev_err(dev->dev, "Could not connect to the ME client");
@@ -449,10 +499,7 @@ int mei_cl_enable_device(struct mei_cl_device *device)
449 if (device->event_cb) 499 if (device->event_cb)
450 mei_cl_read_start(device->cl, 0, NULL); 500 mei_cl_read_start(device->cl, 0, NULL);
451 501
452 if (!device->ops || !device->ops->enable) 502 return 0;
453 return 0;
454
455 return device->ops->enable(device);
456} 503}
457EXPORT_SYMBOL_GPL(mei_cl_enable_device); 504EXPORT_SYMBOL_GPL(mei_cl_enable_device);
458 505
@@ -467,9 +514,6 @@ int mei_cl_disable_device(struct mei_cl_device *device)
467 514
468 dev = cl->dev; 515 dev = cl->dev;
469 516
470 if (device->ops && device->ops->disable)
471 device->ops->disable(device);
472
473 device->event_cb = NULL; 517 device->event_cb = NULL;
474 518
475 mutex_lock(&dev->device_lock); 519 mutex_lock(&dev->device_lock);
@@ -480,8 +524,6 @@ int mei_cl_disable_device(struct mei_cl_device *device)
480 goto out; 524 goto out;
481 } 525 }
482 526
483 cl->state = MEI_FILE_DISCONNECTING;
484
485 err = mei_cl_disconnect(cl); 527 err = mei_cl_disconnect(cl);
486 if (err < 0) { 528 if (err < 0) {
487 dev_err(dev->dev, "Could not disconnect from the ME client"); 529 dev_err(dev->dev, "Could not disconnect from the ME client");
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1e99ef6a54a2..6decbe136ea7 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -83,7 +83,7 @@ void mei_me_cl_put(struct mei_me_client *me_cl)
83} 83}
84 84
85/** 85/**
86 * __mei_me_cl_del - delete me client form the list and decrease 86 * __mei_me_cl_del - delete me client from the list and decrease
87 * reference counter 87 * reference counter
88 * 88 *
89 * @dev: mei device 89 * @dev: mei device
@@ -96,11 +96,25 @@ static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
96 if (!me_cl) 96 if (!me_cl)
97 return; 97 return;
98 98
99 list_del(&me_cl->list); 99 list_del_init(&me_cl->list);
100 mei_me_cl_put(me_cl); 100 mei_me_cl_put(me_cl);
101} 101}
102 102
103/** 103/**
104 * mei_me_cl_del - delete me client from the list and decrease
105 * reference counter
106 *
107 * @dev: mei device
108 * @me_cl: me client
109 */
110void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
111{
112 down_write(&dev->me_clients_rwsem);
113 __mei_me_cl_del(dev, me_cl);
114 up_write(&dev->me_clients_rwsem);
115}
116
117/**
104 * mei_me_cl_add - add me client to the list 118 * mei_me_cl_add - add me client to the list
105 * 119 *
106 * @dev: mei device 120 * @dev: mei device
@@ -317,7 +331,7 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
317{ 331{
318 return cl1 && cl2 && 332 return cl1 && cl2 &&
319 (cl1->host_client_id == cl2->host_client_id) && 333 (cl1->host_client_id == cl2->host_client_id) &&
320 (cl1->me_client_id == cl2->me_client_id); 334 (mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
321} 335}
322 336
323/** 337/**
@@ -546,6 +560,7 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
546 INIT_LIST_HEAD(&cl->link); 560 INIT_LIST_HEAD(&cl->link);
547 INIT_LIST_HEAD(&cl->device_link); 561 INIT_LIST_HEAD(&cl->device_link);
548 cl->writing_state = MEI_IDLE; 562 cl->writing_state = MEI_IDLE;
563 cl->state = MEI_FILE_INITIALIZING;
549 cl->dev = dev; 564 cl->dev = dev;
550} 565}
551 566
@@ -619,7 +634,7 @@ int mei_cl_link(struct mei_cl *cl, int id)
619} 634}
620 635
621/** 636/**
622 * mei_cl_unlink - remove me_cl from the list 637 * mei_cl_unlink - remove host client from the list
623 * 638 *
624 * @cl: host client 639 * @cl: host client
625 * 640 *
@@ -667,17 +682,17 @@ void mei_host_client_init(struct work_struct *work)
667 682
668 me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid); 683 me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
669 if (me_cl) 684 if (me_cl)
670 mei_amthif_host_init(dev); 685 mei_amthif_host_init(dev, me_cl);
671 mei_me_cl_put(me_cl); 686 mei_me_cl_put(me_cl);
672 687
673 me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid); 688 me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
674 if (me_cl) 689 if (me_cl)
675 mei_wd_host_init(dev); 690 mei_wd_host_init(dev, me_cl);
676 mei_me_cl_put(me_cl); 691 mei_me_cl_put(me_cl);
677 692
678 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid); 693 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
679 if (me_cl) 694 if (me_cl)
680 mei_nfc_host_init(dev); 695 mei_nfc_host_init(dev, me_cl);
681 mei_me_cl_put(me_cl); 696 mei_me_cl_put(me_cl);
682 697
683 698
@@ -699,7 +714,7 @@ void mei_host_client_init(struct work_struct *work)
699bool mei_hbuf_acquire(struct mei_device *dev) 714bool mei_hbuf_acquire(struct mei_device *dev)
700{ 715{
701 if (mei_pg_state(dev) == MEI_PG_ON || 716 if (mei_pg_state(dev) == MEI_PG_ON ||
702 dev->pg_event == MEI_PG_EVENT_WAIT) { 717 mei_pg_in_transition(dev)) {
703 dev_dbg(dev->dev, "device is in pg\n"); 718 dev_dbg(dev->dev, "device is in pg\n");
704 return false; 719 return false;
705 } 720 }
@@ -715,6 +730,120 @@ bool mei_hbuf_acquire(struct mei_device *dev)
715} 730}
716 731
717/** 732/**
733 * mei_cl_set_disconnected - set disconnected state and clear
734 * associated states and resources
735 *
736 * @cl: host client
737 */
738void mei_cl_set_disconnected(struct mei_cl *cl)
739{
740 struct mei_device *dev = cl->dev;
741
742 if (cl->state == MEI_FILE_DISCONNECTED ||
743 cl->state == MEI_FILE_INITIALIZING)
744 return;
745
746 cl->state = MEI_FILE_DISCONNECTED;
747 mei_io_list_flush(&dev->ctrl_rd_list, cl);
748 mei_io_list_flush(&dev->ctrl_wr_list, cl);
749 cl->mei_flow_ctrl_creds = 0;
750 cl->timer_count = 0;
751
752 if (!cl->me_cl)
753 return;
754
755 if (!WARN_ON(cl->me_cl->connect_count == 0))
756 cl->me_cl->connect_count--;
757
758 if (cl->me_cl->connect_count == 0)
759 cl->me_cl->mei_flow_ctrl_creds = 0;
760
761 mei_me_cl_put(cl->me_cl);
762 cl->me_cl = NULL;
763}
764
765static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
766{
767 if (!mei_me_cl_get(me_cl))
768 return -ENOENT;
769
770 /* only one connection is allowed for fixed address clients */
771 if (me_cl->props.fixed_address) {
772 if (me_cl->connect_count) {
773 mei_me_cl_put(me_cl);
774 return -EBUSY;
775 }
776 }
777
778 cl->me_cl = me_cl;
779 cl->state = MEI_FILE_CONNECTING;
780 cl->me_cl->connect_count++;
781
782 return 0;
783}
784
785/*
786 * mei_cl_send_disconnect - send disconnect request
787 *
788 * @cl: host client
789 * @cb: callback block
790 *
791 * Return: 0, OK; otherwise, error.
792 */
793static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
794{
795 struct mei_device *dev;
796 int ret;
797
798 dev = cl->dev;
799
800 ret = mei_hbm_cl_disconnect_req(dev, cl);
801 cl->status = ret;
802 if (ret) {
803 cl->state = MEI_FILE_DISCONNECT_REPLY;
804 return ret;
805 }
806
807 list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
808 cl->timer_count = MEI_CONNECT_TIMEOUT;
809
810 return 0;
811}
812
813/**
814 * mei_cl_irq_disconnect - processes close related operation from
815 * interrupt thread context - send disconnect request
816 *
817 * @cl: client
818 * @cb: callback block.
819 * @cmpl_list: complete list.
820 *
821 * Return: 0, OK; otherwise, error.
822 */
823int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
824 struct mei_cl_cb *cmpl_list)
825{
826 struct mei_device *dev = cl->dev;
827 u32 msg_slots;
828 int slots;
829 int ret;
830
831 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
832 slots = mei_hbuf_empty_slots(dev);
833
834 if (slots < msg_slots)
835 return -EMSGSIZE;
836
837 ret = mei_cl_send_disconnect(cl, cb);
838 if (ret)
839 list_move_tail(&cb->list, &cmpl_list->list);
840
841 return ret;
842}
843
844
845
846/**
718 * mei_cl_disconnect - disconnect host client from the me one 847 * mei_cl_disconnect - disconnect host client from the me one
719 * 848 *
720 * @cl: host client 849 * @cl: host client
@@ -736,8 +865,13 @@ int mei_cl_disconnect(struct mei_cl *cl)
736 865
737 cl_dbg(dev, cl, "disconnecting"); 866 cl_dbg(dev, cl, "disconnecting");
738 867
739 if (cl->state != MEI_FILE_DISCONNECTING) 868 if (!mei_cl_is_connected(cl))
869 return 0;
870
871 if (mei_cl_is_fixed_address(cl)) {
872 mei_cl_set_disconnected(cl);
740 return 0; 873 return 0;
874 }
741 875
742 rets = pm_runtime_get(dev->dev); 876 rets = pm_runtime_get(dev->dev);
743 if (rets < 0 && rets != -EINPROGRESS) { 877 if (rets < 0 && rets != -EINPROGRESS) {
@@ -746,44 +880,41 @@ int mei_cl_disconnect(struct mei_cl *cl)
746 return rets; 880 return rets;
747 } 881 }
748 882
883 cl->state = MEI_FILE_DISCONNECTING;
884
749 cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL); 885 cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
750 rets = cb ? 0 : -ENOMEM; 886 rets = cb ? 0 : -ENOMEM;
751 if (rets) 887 if (rets)
752 goto free; 888 goto out;
889
890 cl_dbg(dev, cl, "add disconnect cb to control write list\n");
891 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
753 892
754 if (mei_hbuf_acquire(dev)) { 893 if (mei_hbuf_acquire(dev)) {
755 if (mei_hbm_cl_disconnect_req(dev, cl)) { 894 rets = mei_cl_send_disconnect(cl, cb);
756 rets = -ENODEV; 895 if (rets) {
757 cl_err(dev, cl, "failed to disconnect.\n"); 896 cl_err(dev, cl, "failed to disconnect.\n");
758 goto free; 897 goto out;
759 } 898 }
760 cl->timer_count = MEI_CONNECT_TIMEOUT;
761 mdelay(10); /* Wait for hardware disconnection ready */
762 list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
763 } else {
764 cl_dbg(dev, cl, "add disconnect cb to control write list\n");
765 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
766
767 } 899 }
768 mutex_unlock(&dev->device_lock);
769
770 wait_event_timeout(cl->wait,
771 MEI_FILE_DISCONNECTED == cl->state,
772 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
773 900
901 mutex_unlock(&dev->device_lock);
902 wait_event_timeout(cl->wait, cl->state == MEI_FILE_DISCONNECT_REPLY,
903 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
774 mutex_lock(&dev->device_lock); 904 mutex_lock(&dev->device_lock);
775 905
776 if (MEI_FILE_DISCONNECTED == cl->state) { 906 rets = cl->status;
777 rets = 0; 907 if (cl->state != MEI_FILE_DISCONNECT_REPLY) {
778 cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
779 } else {
780 cl_dbg(dev, cl, "timeout on disconnect from FW client.\n"); 908 cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
781 rets = -ETIME; 909 rets = -ETIME;
782 } 910 }
783 911
784 mei_io_list_flush(&dev->ctrl_rd_list, cl); 912out:
785 mei_io_list_flush(&dev->ctrl_wr_list, cl); 913 /* we disconnect also on error */
786free: 914 mei_cl_set_disconnected(cl);
915 if (!rets)
916 cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
917
787 cl_dbg(dev, cl, "rpm: autosuspend\n"); 918 cl_dbg(dev, cl, "rpm: autosuspend\n");
788 pm_runtime_mark_last_busy(dev->dev); 919 pm_runtime_mark_last_busy(dev->dev);
789 pm_runtime_put_autosuspend(dev->dev); 920 pm_runtime_put_autosuspend(dev->dev);
@@ -801,53 +932,119 @@ free:
801 * 932 *
802 * Return: true if other client is connected, false - otherwise. 933 * Return: true if other client is connected, false - otherwise.
803 */ 934 */
804bool mei_cl_is_other_connecting(struct mei_cl *cl) 935static bool mei_cl_is_other_connecting(struct mei_cl *cl)
805{ 936{
806 struct mei_device *dev; 937 struct mei_device *dev;
807 struct mei_cl *ocl; /* the other client */ 938 struct mei_cl_cb *cb;
808
809 if (WARN_ON(!cl || !cl->dev))
810 return false;
811 939
812 dev = cl->dev; 940 dev = cl->dev;
813 941
814 list_for_each_entry(ocl, &dev->file_list, link) { 942 list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) {
815 if (ocl->state == MEI_FILE_CONNECTING && 943 if (cb->fop_type == MEI_FOP_CONNECT &&
816 ocl != cl && 944 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
817 cl->me_client_id == ocl->me_client_id)
818 return true; 945 return true;
819
820 } 946 }
821 947
822 return false; 948 return false;
823} 949}
824 950
825/** 951/**
952 * mei_cl_send_connect - send connect request
953 *
954 * @cl: host client
955 * @cb: callback block
956 *
957 * Return: 0, OK; otherwise, error.
958 */
959static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
960{
961 struct mei_device *dev;
962 int ret;
963
964 dev = cl->dev;
965
966 ret = mei_hbm_cl_connect_req(dev, cl);
967 cl->status = ret;
968 if (ret) {
969 cl->state = MEI_FILE_DISCONNECT_REPLY;
970 return ret;
971 }
972
973 list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
974 cl->timer_count = MEI_CONNECT_TIMEOUT;
975 return 0;
976}
977
978/**
979 * mei_cl_irq_connect - send connect request in irq_thread context
980 *
981 * @cl: host client
982 * @cb: callback block
983 * @cmpl_list: complete list
984 *
985 * Return: 0, OK; otherwise, error.
986 */
987int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
988 struct mei_cl_cb *cmpl_list)
989{
990 struct mei_device *dev = cl->dev;
991 u32 msg_slots;
992 int slots;
993 int rets;
994
995 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
996 slots = mei_hbuf_empty_slots(dev);
997
998 if (mei_cl_is_other_connecting(cl))
999 return 0;
1000
1001 if (slots < msg_slots)
1002 return -EMSGSIZE;
1003
1004 rets = mei_cl_send_connect(cl, cb);
1005 if (rets)
1006 list_move_tail(&cb->list, &cmpl_list->list);
1007
1008 return rets;
1009}
1010
1011/**
826 * mei_cl_connect - connect host client to the me one 1012 * mei_cl_connect - connect host client to the me one
827 * 1013 *
828 * @cl: host client 1014 * @cl: host client
1015 * @me_cl: me client
829 * @file: pointer to file structure 1016 * @file: pointer to file structure
830 * 1017 *
831 * Locking: called under "dev->device_lock" lock 1018 * Locking: called under "dev->device_lock" lock
832 * 1019 *
833 * Return: 0 on success, <0 on failure. 1020 * Return: 0 on success, <0 on failure.
834 */ 1021 */
835int mei_cl_connect(struct mei_cl *cl, struct file *file) 1022int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
1023 struct file *file)
836{ 1024{
837 struct mei_device *dev; 1025 struct mei_device *dev;
838 struct mei_cl_cb *cb; 1026 struct mei_cl_cb *cb;
839 int rets; 1027 int rets;
840 1028
841 if (WARN_ON(!cl || !cl->dev)) 1029 if (WARN_ON(!cl || !cl->dev || !me_cl))
842 return -ENODEV; 1030 return -ENODEV;
843 1031
844 dev = cl->dev; 1032 dev = cl->dev;
845 1033
1034 rets = mei_cl_set_connecting(cl, me_cl);
1035 if (rets)
1036 return rets;
1037
1038 if (mei_cl_is_fixed_address(cl)) {
1039 cl->state = MEI_FILE_CONNECTED;
1040 return 0;
1041 }
1042
846 rets = pm_runtime_get(dev->dev); 1043 rets = pm_runtime_get(dev->dev);
847 if (rets < 0 && rets != -EINPROGRESS) { 1044 if (rets < 0 && rets != -EINPROGRESS) {
848 pm_runtime_put_noidle(dev->dev); 1045 pm_runtime_put_noidle(dev->dev);
849 cl_err(dev, cl, "rpm: get failed %d\n", rets); 1046 cl_err(dev, cl, "rpm: get failed %d\n", rets);
850 return rets; 1047 goto nortpm;
851 } 1048 }
852 1049
853 cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file); 1050 cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
@@ -855,45 +1052,40 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
855 if (rets) 1052 if (rets)
856 goto out; 1053 goto out;
857 1054
1055 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
1056
858 /* run hbuf acquire last so we don't have to undo */ 1057 /* run hbuf acquire last so we don't have to undo */
859 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { 1058 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
860 cl->state = MEI_FILE_CONNECTING; 1059 rets = mei_cl_send_connect(cl, cb);
861 if (mei_hbm_cl_connect_req(dev, cl)) { 1060 if (rets)
862 rets = -ENODEV;
863 goto out; 1061 goto out;
864 }
865 cl->timer_count = MEI_CONNECT_TIMEOUT;
866 list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
867 } else {
868 cl->state = MEI_FILE_INITIALIZING;
869 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
870 } 1062 }
871 1063
872 mutex_unlock(&dev->device_lock); 1064 mutex_unlock(&dev->device_lock);
873 wait_event_timeout(cl->wait, 1065 wait_event_timeout(cl->wait,
874 (cl->state == MEI_FILE_CONNECTED || 1066 (cl->state == MEI_FILE_CONNECTED ||
875 cl->state == MEI_FILE_DISCONNECTED), 1067 cl->state == MEI_FILE_DISCONNECT_REPLY),
876 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 1068 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
877 mutex_lock(&dev->device_lock); 1069 mutex_lock(&dev->device_lock);
878 1070
879 if (!mei_cl_is_connected(cl)) { 1071 if (!mei_cl_is_connected(cl)) {
880 cl->state = MEI_FILE_DISCONNECTED; 1072 /* timeout or something went really wrong */
881 /* something went really wrong */
882 if (!cl->status) 1073 if (!cl->status)
883 cl->status = -EFAULT; 1074 cl->status = -EFAULT;
884
885 mei_io_list_flush(&dev->ctrl_rd_list, cl);
886 mei_io_list_flush(&dev->ctrl_wr_list, cl);
887 } 1075 }
888 1076
889 rets = cl->status; 1077 rets = cl->status;
890
891out: 1078out:
892 cl_dbg(dev, cl, "rpm: autosuspend\n"); 1079 cl_dbg(dev, cl, "rpm: autosuspend\n");
893 pm_runtime_mark_last_busy(dev->dev); 1080 pm_runtime_mark_last_busy(dev->dev);
894 pm_runtime_put_autosuspend(dev->dev); 1081 pm_runtime_put_autosuspend(dev->dev);
895 1082
896 mei_io_cb_free(cb); 1083 mei_io_cb_free(cb);
1084
1085nortpm:
1086 if (!mei_cl_is_connected(cl))
1087 mei_cl_set_disconnected(cl);
1088
897 return rets; 1089 return rets;
898} 1090}
899 1091
@@ -934,36 +1126,29 @@ err:
934 * @cl: private data of the file object 1126 * @cl: private data of the file object
935 * 1127 *
936 * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise. 1128 * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
937 * -ENOENT if mei_cl is not present
938 * -EINVAL if single_recv_buf == 0
939 */ 1129 */
940int mei_cl_flow_ctrl_creds(struct mei_cl *cl) 1130int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
941{ 1131{
942 struct mei_device *dev; 1132 int rets;
943 struct mei_me_client *me_cl;
944 int rets = 0;
945 1133
946 if (WARN_ON(!cl || !cl->dev)) 1134 if (WARN_ON(!cl || !cl->me_cl))
947 return -EINVAL; 1135 return -EINVAL;
948 1136
949 dev = cl->dev;
950
951 if (cl->mei_flow_ctrl_creds > 0) 1137 if (cl->mei_flow_ctrl_creds > 0)
952 return 1; 1138 return 1;
953 1139
954 me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id); 1140 if (mei_cl_is_fixed_address(cl)) {
955 if (!me_cl) { 1141 rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL);
956 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); 1142 if (rets && rets != -EBUSY)
957 return -ENOENT; 1143 return rets;
1144 return 1;
958 } 1145 }
959 1146
960 if (me_cl->mei_flow_ctrl_creds > 0) { 1147 if (mei_cl_is_single_recv_buf(cl)) {
961 rets = 1; 1148 if (cl->me_cl->mei_flow_ctrl_creds > 0)
962 if (WARN_ON(me_cl->props.single_recv_buf == 0)) 1149 return 1;
963 rets = -EINVAL;
964 } 1150 }
965 mei_me_cl_put(me_cl); 1151 return 0;
966 return rets;
967} 1152}
968 1153
969/** 1154/**
@@ -973,43 +1158,26 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
973 * 1158 *
974 * Return: 1159 * Return:
975 * 0 on success 1160 * 0 on success
976 * -ENOENT when me client is not found
977 * -EINVAL when ctrl credits are <= 0 1161 * -EINVAL when ctrl credits are <= 0
978 */ 1162 */
979int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) 1163int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
980{ 1164{
981 struct mei_device *dev; 1165 if (WARN_ON(!cl || !cl->me_cl))
982 struct mei_me_client *me_cl;
983 int rets;
984
985 if (WARN_ON(!cl || !cl->dev))
986 return -EINVAL; 1166 return -EINVAL;
987 1167
988 dev = cl->dev; 1168 if (mei_cl_is_fixed_address(cl))
989 1169 return 0;
990 me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
991 if (!me_cl) {
992 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
993 return -ENOENT;
994 }
995 1170
996 if (me_cl->props.single_recv_buf) { 1171 if (mei_cl_is_single_recv_buf(cl)) {
997 if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) { 1172 if (WARN_ON(cl->me_cl->mei_flow_ctrl_creds <= 0))
998 rets = -EINVAL; 1173 return -EINVAL;
999 goto out; 1174 cl->me_cl->mei_flow_ctrl_creds--;
1000 }
1001 me_cl->mei_flow_ctrl_creds--;
1002 } else { 1175 } else {
1003 if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) { 1176 if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
1004 rets = -EINVAL; 1177 return -EINVAL;
1005 goto out;
1006 }
1007 cl->mei_flow_ctrl_creds--; 1178 cl->mei_flow_ctrl_creds--;
1008 } 1179 }
1009 rets = 0; 1180 return 0;
1010out:
1011 mei_me_cl_put(me_cl);
1012 return rets;
1013} 1181}
1014 1182
1015/** 1183/**
@@ -1025,7 +1193,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
1025{ 1193{
1026 struct mei_device *dev; 1194 struct mei_device *dev;
1027 struct mei_cl_cb *cb; 1195 struct mei_cl_cb *cb;
1028 struct mei_me_client *me_cl;
1029 int rets; 1196 int rets;
1030 1197
1031 if (WARN_ON(!cl || !cl->dev)) 1198 if (WARN_ON(!cl || !cl->dev))
@@ -1040,27 +1207,29 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
1040 if (!list_empty(&cl->rd_pending)) 1207 if (!list_empty(&cl->rd_pending))
1041 return -EBUSY; 1208 return -EBUSY;
1042 1209
1043 me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id); 1210 if (!mei_me_cl_is_active(cl->me_cl)) {
1044 if (!me_cl) { 1211 cl_err(dev, cl, "no such me client\n");
1045 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
1046 return -ENOTTY; 1212 return -ENOTTY;
1047 } 1213 }
1214
1048 /* always allocate at least client max message */ 1215 /* always allocate at least client max message */
1049 length = max_t(size_t, length, me_cl->props.max_msg_length); 1216 length = max_t(size_t, length, mei_cl_mtu(cl));
1050 mei_me_cl_put(me_cl); 1217 cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
1218 if (!cb)
1219 return -ENOMEM;
1220
1221 if (mei_cl_is_fixed_address(cl)) {
1222 list_add_tail(&cb->list, &cl->rd_pending);
1223 return 0;
1224 }
1051 1225
1052 rets = pm_runtime_get(dev->dev); 1226 rets = pm_runtime_get(dev->dev);
1053 if (rets < 0 && rets != -EINPROGRESS) { 1227 if (rets < 0 && rets != -EINPROGRESS) {
1054 pm_runtime_put_noidle(dev->dev); 1228 pm_runtime_put_noidle(dev->dev);
1055 cl_err(dev, cl, "rpm: get failed %d\n", rets); 1229 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1056 return rets; 1230 goto nortpm;
1057 } 1231 }
1058 1232
1059 cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
1060 rets = cb ? 0 : -ENOMEM;
1061 if (rets)
1062 goto out;
1063
1064 if (mei_hbuf_acquire(dev)) { 1233 if (mei_hbuf_acquire(dev)) {
1065 rets = mei_hbm_cl_flow_control_req(dev, cl); 1234 rets = mei_hbm_cl_flow_control_req(dev, cl);
1066 if (rets < 0) 1235 if (rets < 0)
@@ -1068,6 +1237,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
1068 1237
1069 list_add_tail(&cb->list, &cl->rd_pending); 1238 list_add_tail(&cb->list, &cl->rd_pending);
1070 } else { 1239 } else {
1240 rets = 0;
1071 list_add_tail(&cb->list, &dev->ctrl_wr_list.list); 1241 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
1072 } 1242 }
1073 1243
@@ -1075,7 +1245,7 @@ out:
1075 cl_dbg(dev, cl, "rpm: autosuspend\n"); 1245 cl_dbg(dev, cl, "rpm: autosuspend\n");
1076 pm_runtime_mark_last_busy(dev->dev); 1246 pm_runtime_mark_last_busy(dev->dev);
1077 pm_runtime_put_autosuspend(dev->dev); 1247 pm_runtime_put_autosuspend(dev->dev);
1078 1248nortpm:
1079 if (rets) 1249 if (rets)
1080 mei_io_cb_free(cb); 1250 mei_io_cb_free(cb);
1081 1251
@@ -1102,6 +1272,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1102 u32 msg_slots; 1272 u32 msg_slots;
1103 int slots; 1273 int slots;
1104 int rets; 1274 int rets;
1275 bool first_chunk;
1105 1276
1106 if (WARN_ON(!cl || !cl->dev)) 1277 if (WARN_ON(!cl || !cl->dev))
1107 return -ENODEV; 1278 return -ENODEV;
@@ -1110,7 +1281,9 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1110 1281
1111 buf = &cb->buf; 1282 buf = &cb->buf;
1112 1283
1113 rets = mei_cl_flow_ctrl_creds(cl); 1284 first_chunk = cb->buf_idx == 0;
1285
1286 rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1;
1114 if (rets < 0) 1287 if (rets < 0)
1115 return rets; 1288 return rets;
1116 1289
@@ -1123,8 +1296,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1123 len = buf->size - cb->buf_idx; 1296 len = buf->size - cb->buf_idx;
1124 msg_slots = mei_data2slots(len); 1297 msg_slots = mei_data2slots(len);
1125 1298
1126 mei_hdr.host_addr = cl->host_client_id; 1299 mei_hdr.host_addr = mei_cl_host_addr(cl);
1127 mei_hdr.me_addr = cl->me_client_id; 1300 mei_hdr.me_addr = mei_cl_me_id(cl);
1128 mei_hdr.reserved = 0; 1301 mei_hdr.reserved = 0;
1129 mei_hdr.internal = cb->internal; 1302 mei_hdr.internal = cb->internal;
1130 1303
@@ -1157,12 +1330,14 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1157 cb->buf_idx += mei_hdr.length; 1330 cb->buf_idx += mei_hdr.length;
1158 cb->completed = mei_hdr.msg_complete == 1; 1331 cb->completed = mei_hdr.msg_complete == 1;
1159 1332
1160 if (mei_hdr.msg_complete) { 1333 if (first_chunk) {
1161 if (mei_cl_flow_ctrl_reduce(cl)) 1334 if (mei_cl_flow_ctrl_reduce(cl))
1162 return -EIO; 1335 return -EIO;
1163 list_move_tail(&cb->list, &dev->write_waiting_list.list);
1164 } 1336 }
1165 1337
1338 if (mei_hdr.msg_complete)
1339 list_move_tail(&cb->list, &dev->write_waiting_list.list);
1340
1166 return 0; 1341 return 0;
1167} 1342}
1168 1343
@@ -1207,8 +1382,8 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
1207 cb->buf_idx = 0; 1382 cb->buf_idx = 0;
1208 cl->writing_state = MEI_IDLE; 1383 cl->writing_state = MEI_IDLE;
1209 1384
1210 mei_hdr.host_addr = cl->host_client_id; 1385 mei_hdr.host_addr = mei_cl_host_addr(cl);
1211 mei_hdr.me_addr = cl->me_client_id; 1386 mei_hdr.me_addr = mei_cl_me_id(cl);
1212 mei_hdr.reserved = 0; 1387 mei_hdr.reserved = 0;
1213 mei_hdr.msg_complete = 0; 1388 mei_hdr.msg_complete = 0;
1214 mei_hdr.internal = cb->internal; 1389 mei_hdr.internal = cb->internal;
@@ -1241,21 +1416,19 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
1241 if (rets) 1416 if (rets)
1242 goto err; 1417 goto err;
1243 1418
1419 rets = mei_cl_flow_ctrl_reduce(cl);
1420 if (rets)
1421 goto err;
1422
1244 cl->writing_state = MEI_WRITING; 1423 cl->writing_state = MEI_WRITING;
1245 cb->buf_idx = mei_hdr.length; 1424 cb->buf_idx = mei_hdr.length;
1246 cb->completed = mei_hdr.msg_complete == 1; 1425 cb->completed = mei_hdr.msg_complete == 1;
1247 1426
1248out: 1427out:
1249 if (mei_hdr.msg_complete) { 1428 if (mei_hdr.msg_complete)
1250 rets = mei_cl_flow_ctrl_reduce(cl);
1251 if (rets < 0)
1252 goto err;
1253
1254 list_add_tail(&cb->list, &dev->write_waiting_list.list); 1429 list_add_tail(&cb->list, &dev->write_waiting_list.list);
1255 } else { 1430 else
1256 list_add_tail(&cb->list, &dev->write_list.list); 1431 list_add_tail(&cb->list, &dev->write_list.list);
1257 }
1258
1259 1432
1260 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { 1433 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
1261 1434
@@ -1289,20 +1462,36 @@ err:
1289 */ 1462 */
1290void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) 1463void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1291{ 1464{
1292 if (cb->fop_type == MEI_FOP_WRITE) { 1465 struct mei_device *dev = cl->dev;
1466
1467 switch (cb->fop_type) {
1468 case MEI_FOP_WRITE:
1293 mei_io_cb_free(cb); 1469 mei_io_cb_free(cb);
1294 cb = NULL;
1295 cl->writing_state = MEI_WRITE_COMPLETE; 1470 cl->writing_state = MEI_WRITE_COMPLETE;
1296 if (waitqueue_active(&cl->tx_wait)) 1471 if (waitqueue_active(&cl->tx_wait)) {
1297 wake_up_interruptible(&cl->tx_wait); 1472 wake_up_interruptible(&cl->tx_wait);
1473 } else {
1474 pm_runtime_mark_last_busy(dev->dev);
1475 pm_request_autosuspend(dev->dev);
1476 }
1477 break;
1298 1478
1299 } else if (cb->fop_type == MEI_FOP_READ) { 1479 case MEI_FOP_READ:
1300 list_add_tail(&cb->list, &cl->rd_completed); 1480 list_add_tail(&cb->list, &cl->rd_completed);
1301 if (waitqueue_active(&cl->rx_wait)) 1481 if (waitqueue_active(&cl->rx_wait))
1302 wake_up_interruptible_all(&cl->rx_wait); 1482 wake_up_interruptible_all(&cl->rx_wait);
1303 else 1483 else
1304 mei_cl_bus_rx_event(cl); 1484 mei_cl_bus_rx_event(cl);
1485 break;
1486
1487 case MEI_FOP_CONNECT:
1488 case MEI_FOP_DISCONNECT:
1489 if (waitqueue_active(&cl->wait))
1490 wake_up(&cl->wait);
1305 1491
1492 break;
1493 default:
1494 BUG_ON(0);
1306 } 1495 }
1307} 1496}
1308 1497
@@ -1312,16 +1501,12 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1312 * 1501 *
1313 * @dev: mei device 1502 * @dev: mei device
1314 */ 1503 */
1315
1316void mei_cl_all_disconnect(struct mei_device *dev) 1504void mei_cl_all_disconnect(struct mei_device *dev)
1317{ 1505{
1318 struct mei_cl *cl; 1506 struct mei_cl *cl;
1319 1507
1320 list_for_each_entry(cl, &dev->file_list, link) { 1508 list_for_each_entry(cl, &dev->file_list, link)
1321 cl->state = MEI_FILE_DISCONNECTED; 1509 mei_cl_set_disconnected(cl);
1322 cl->mei_flow_ctrl_creds = 0;
1323 cl->timer_count = 0;
1324 }
1325} 1510}
1326 1511
1327 1512
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 0a39e5d45171..8d7f057f1045 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -44,6 +44,30 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev,
44 const uuid_le *uuid, u8 id); 44 const uuid_le *uuid, u8 id);
45void mei_me_cl_rm_all(struct mei_device *dev); 45void mei_me_cl_rm_all(struct mei_device *dev);
46 46
47/**
48 * mei_me_cl_is_active - check whether me client is active in the fw
49 *
50 * @me_cl: me client
51 *
52 * Return: true if the me client is active in the firmware
53 */
54static inline bool mei_me_cl_is_active(const struct mei_me_client *me_cl)
55{
56 return !list_empty_careful(&me_cl->list);
57}
58
59/**
60 * mei_me_cl_uuid - return me client protocol name (uuid)
61 *
62 * @me_cl: me client
63 *
64 * Return: me client protocol name
65 */
66static inline const uuid_le *mei_me_cl_uuid(const struct mei_me_client *me_cl)
67{
68 return &me_cl->props.protocol_name;
69}
70
47/* 71/*
48 * MEI IO Functions 72 * MEI IO Functions
49 */ 73 */
@@ -94,18 +118,96 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
94/** 118/**
95 * mei_cl_is_connected - host client is connected 119 * mei_cl_is_connected - host client is connected
96 * 120 *
97 * @cl: host clinet 121 * @cl: host client
98 * 122 *
99 * Return: true if the host clinet is connected 123 * Return: true if the host client is connected
100 */ 124 */
101static inline bool mei_cl_is_connected(struct mei_cl *cl) 125static inline bool mei_cl_is_connected(struct mei_cl *cl)
102{ 126{
103 return cl->state == MEI_FILE_CONNECTED; 127 return cl->state == MEI_FILE_CONNECTED;
104} 128}
105 129
106bool mei_cl_is_other_connecting(struct mei_cl *cl); 130/**
131 * mei_cl_me_id - me client id
132 *
133 * @cl: host client
134 *
135 * Return: me client id or 0 if client is not connected
136 */
137static inline u8 mei_cl_me_id(const struct mei_cl *cl)
138{
139 return cl->me_cl ? cl->me_cl->client_id : 0;
140}
141
142/**
143 * mei_cl_mtu - maximal message that client can send and receive
144 *
145 * @cl: host client
146 *
147 * Return: mtu
148 */
149static inline size_t mei_cl_mtu(const struct mei_cl *cl)
150{
151 return cl->me_cl->props.max_msg_length;
152}
153
154/**
155 * mei_cl_is_fixed_address - check whether the me client uses fixed address
156 *
157 * @cl: host client
158 *
159 * Return: true if the client is connected and it has fixed me address
160 */
161static inline bool mei_cl_is_fixed_address(const struct mei_cl *cl)
162{
163 return cl->me_cl && cl->me_cl->props.fixed_address;
164}
165
166/**
167 * mei_cl_is_single_recv_buf- check whether the me client
168 * uses single receiving buffer
169 *
170 * @cl: host client
171 *
172 * Return: true if single_recv_buf == 1; 0 otherwise
173 */
174static inline bool mei_cl_is_single_recv_buf(const struct mei_cl *cl)
175{
176 return cl->me_cl->props.single_recv_buf;
177}
178
179/**
180 * mei_cl_uuid - client's uuid
181 *
182 * @cl: host client
183 *
184 * Return: return uuid of connected me client
185 */
186static inline const uuid_le *mei_cl_uuid(const struct mei_cl *cl)
187{
188 return mei_me_cl_uuid(cl->me_cl);
189}
190
191/**
192 * mei_cl_host_addr - client's host address
193 *
194 * @cl: host client
195 *
196 * Return: 0 for fixed address client, host address for dynamic client
197 */
198static inline u8 mei_cl_host_addr(const struct mei_cl *cl)
199{
200 return mei_cl_is_fixed_address(cl) ? 0 : cl->host_client_id;
201}
202
107int mei_cl_disconnect(struct mei_cl *cl); 203int mei_cl_disconnect(struct mei_cl *cl);
108int mei_cl_connect(struct mei_cl *cl, struct file *file); 204void mei_cl_set_disconnected(struct mei_cl *cl);
205int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
206 struct mei_cl_cb *cmpl_list);
207int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
208 struct file *file);
209int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
210 struct mei_cl_cb *cmpl_list);
109int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp); 211int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
110int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr, 212int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
111 struct mei_cl_cb *cmpl_list); 213 struct mei_cl_cb *cmpl_list);
@@ -117,14 +219,12 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
117 219
118void mei_host_client_init(struct work_struct *work); 220void mei_host_client_init(struct work_struct *work);
119 221
120
121
122void mei_cl_all_disconnect(struct mei_device *dev); 222void mei_cl_all_disconnect(struct mei_device *dev);
123void mei_cl_all_wakeup(struct mei_device *dev); 223void mei_cl_all_wakeup(struct mei_device *dev);
124void mei_cl_all_write_clear(struct mei_device *dev); 224void mei_cl_all_write_clear(struct mei_device *dev);
125 225
126#define MEI_CL_FMT "cl:host=%02d me=%02d " 226#define MEI_CL_FMT "cl:host=%02d me=%02d "
127#define MEI_CL_PRM(cl) (cl)->host_client_id, (cl)->me_client_id 227#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
128 228
129#define cl_dbg(dev, cl, format, arg...) \ 229#define cl_dbg(dev, cl, format, arg...) \
130 dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) 230 dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index d9cd7e6ee484..eb868341247f 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -116,7 +116,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
116 116
117 pos += scnprintf(buf + pos, bufsz - pos, 117 pos += scnprintf(buf + pos, bufsz - pos,
118 "%2d|%2d|%4d|%5d|%2d|%2d|\n", 118 "%2d|%2d|%4d|%5d|%2d|%2d|\n",
119 i, cl->me_client_id, cl->host_client_id, cl->state, 119 i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
120 !list_empty(&cl->rd_completed), cl->writing_state); 120 !list_empty(&cl->rd_completed), cl->writing_state);
121 i++; 121 i++;
122 } 122 }
@@ -149,6 +149,13 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
149 mei_dev_state_str(dev->dev_state)); 149 mei_dev_state_str(dev->dev_state));
150 pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n", 150 pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n",
151 mei_hbm_state_str(dev->hbm_state)); 151 mei_hbm_state_str(dev->hbm_state));
152
153 if (dev->hbm_state == MEI_HBM_STARTED) {
154 pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
155 pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
156 dev->hbm_f_pg_supported);
157 }
158
152 pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", 159 pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
153 mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED", 160 mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED",
154 mei_pg_state_str(mei_pg_state(dev))); 161 mei_pg_state_str(mei_pg_state(dev)));
@@ -209,6 +216,12 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
209 dev_err(dev->dev, "devstate: registration failed\n"); 216 dev_err(dev->dev, "devstate: registration failed\n");
210 goto err; 217 goto err;
211 } 218 }
219 f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
220 &dev->allow_fixed_address);
221 if (!f) {
222 dev_err(dev->dev, "allow_fixed_address: registration failed\n");
223 goto err;
224 }
212 dev->dbgfs_dir = dir; 225 dev->dbgfs_dir = dir;
213 return 0; 226 return 0;
214err: 227err:
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 58da92565c5e..a4f283165a33 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -150,8 +150,8 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
150 memset(cmd, 0, len); 150 memset(cmd, 0, len);
151 151
152 cmd->hbm_cmd = hbm_cmd; 152 cmd->hbm_cmd = hbm_cmd;
153 cmd->host_addr = cl->host_client_id; 153 cmd->host_addr = mei_cl_host_addr(cl);
154 cmd->me_addr = cl->me_client_id; 154 cmd->me_addr = mei_cl_me_id(cl);
155} 155}
156 156
157/** 157/**
@@ -188,8 +188,8 @@ int mei_hbm_cl_write(struct mei_device *dev,
188static inline 188static inline
189bool mei_hbm_cl_addr_equal(struct mei_cl *cl, struct mei_hbm_cl_cmd *cmd) 189bool mei_hbm_cl_addr_equal(struct mei_cl *cl, struct mei_hbm_cl_cmd *cmd)
190{ 190{
191 return cl->host_client_id == cmd->host_addr && 191 return mei_cl_host_addr(cl) == cmd->host_addr &&
192 cl->me_client_id == cmd->me_addr; 192 mei_cl_me_id(cl) == cmd->me_addr;
193} 193}
194 194
195/** 195/**
@@ -572,7 +572,7 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
572 cl_dbg(dev, cl, "hbm: disconnect response status=%d\n", rs->status); 572 cl_dbg(dev, cl, "hbm: disconnect response status=%d\n", rs->status);
573 573
574 if (rs->status == MEI_CL_DISCONN_SUCCESS) 574 if (rs->status == MEI_CL_DISCONN_SUCCESS)
575 cl->state = MEI_FILE_DISCONNECTED; 575 cl->state = MEI_FILE_DISCONNECT_REPLY;
576 cl->status = 0; 576 cl->status = 0;
577} 577}
578 578
@@ -611,7 +611,7 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
611 if (rs->status == MEI_CL_CONN_SUCCESS) 611 if (rs->status == MEI_CL_CONN_SUCCESS)
612 cl->state = MEI_FILE_CONNECTED; 612 cl->state = MEI_FILE_CONNECTED;
613 else 613 else
614 cl->state = MEI_FILE_DISCONNECTED; 614 cl->state = MEI_FILE_DISCONNECT_REPLY;
615 cl->status = mei_cl_conn_status_to_errno(rs->status); 615 cl->status = mei_cl_conn_status_to_errno(rs->status);
616} 616}
617 617
@@ -680,8 +680,8 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
680 680
681 cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req); 681 cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
682 if (cl) { 682 if (cl) {
683 cl_dbg(dev, cl, "disconnect request received\n"); 683 cl_dbg(dev, cl, "fw disconnect request received\n");
684 cl->state = MEI_FILE_DISCONNECTED; 684 cl->state = MEI_FILE_DISCONNECTING;
685 cl->timer_count = 0; 685 cl->timer_count = 0;
686 686
687 cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL); 687 cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 6fb75e62a764..43d7101ff993 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -663,11 +663,27 @@ int mei_me_pg_exit_sync(struct mei_device *dev)
663 mutex_lock(&dev->device_lock); 663 mutex_lock(&dev->device_lock);
664 664
665reply: 665reply:
666 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) 666 if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
667 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD); 667 ret = -ETIME;
668 goto out;
669 }
670
671 dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
672 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
673 if (ret)
674 return ret;
675
676 mutex_unlock(&dev->device_lock);
677 wait_event_timeout(dev->wait_pg,
678 dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
679 mutex_lock(&dev->device_lock);
680
681 if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
682 ret = 0;
668 else 683 else
669 ret = -ETIME; 684 ret = -ETIME;
670 685
686out:
671 dev->pg_event = MEI_PG_EVENT_IDLE; 687 dev->pg_event = MEI_PG_EVENT_IDLE;
672 hw->pg_state = MEI_PG_OFF; 688 hw->pg_state = MEI_PG_OFF;
673 689
@@ -675,6 +691,19 @@ reply:
675} 691}
676 692
677/** 693/**
694 * mei_me_pg_in_transition - is device now in pg transition
695 *
696 * @dev: the device structure
697 *
698 * Return: true if in pg transition, false otherwise
699 */
700static bool mei_me_pg_in_transition(struct mei_device *dev)
701{
702 return dev->pg_event >= MEI_PG_EVENT_WAIT &&
703 dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
704}
705
706/**
678 * mei_me_pg_is_enabled - detect if PG is supported by HW 707 * mei_me_pg_is_enabled - detect if PG is supported by HW
679 * 708 *
680 * @dev: the device structure 709 * @dev: the device structure
@@ -705,6 +734,24 @@ notsupported:
705} 734}
706 735
707/** 736/**
737 * mei_me_pg_intr - perform pg processing in interrupt thread handler
738 *
739 * @dev: the device structure
740 */
741static void mei_me_pg_intr(struct mei_device *dev)
742{
743 struct mei_me_hw *hw = to_me_hw(dev);
744
745 if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
746 return;
747
748 dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
749 hw->pg_state = MEI_PG_OFF;
750 if (waitqueue_active(&dev->wait_pg))
751 wake_up(&dev->wait_pg);
752}
753
754/**
708 * mei_me_irq_quick_handler - The ISR of the MEI device 755 * mei_me_irq_quick_handler - The ISR of the MEI device
709 * 756 *
710 * @irq: The irq number 757 * @irq: The irq number
@@ -761,6 +808,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
761 goto end; 808 goto end;
762 } 809 }
763 810
811 mei_me_pg_intr(dev);
812
764 /* check if we need to start the dev */ 813 /* check if we need to start the dev */
765 if (!mei_host_is_ready(dev)) { 814 if (!mei_host_is_ready(dev)) {
766 if (mei_hw_is_ready(dev)) { 815 if (mei_hw_is_ready(dev)) {
@@ -797,9 +846,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
797 /* 846 /*
798 * During PG handshake only allowed write is the replay to the 847 * During PG handshake only allowed write is the replay to the
799 * PG exit message, so block calling write function 848 * PG exit message, so block calling write function
800 * if the pg state is not idle 849 * if the pg event is in PG handshake
801 */ 850 */
802 if (dev->pg_event == MEI_PG_EVENT_IDLE) { 851 if (dev->pg_event != MEI_PG_EVENT_WAIT &&
852 dev->pg_event != MEI_PG_EVENT_RECEIVED) {
803 rets = mei_irq_write_handler(dev, &complete_list); 853 rets = mei_irq_write_handler(dev, &complete_list);
804 dev->hbuf_is_ready = mei_hbuf_is_ready(dev); 854 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
805 } 855 }
@@ -824,6 +874,7 @@ static const struct mei_hw_ops mei_me_hw_ops = {
824 .hw_config = mei_me_hw_config, 874 .hw_config = mei_me_hw_config,
825 .hw_start = mei_me_hw_start, 875 .hw_start = mei_me_hw_start,
826 876
877 .pg_in_transition = mei_me_pg_in_transition,
827 .pg_is_enabled = mei_me_pg_is_enabled, 878 .pg_is_enabled = mei_me_pg_is_enabled,
828 879
829 .intr_clear = mei_me_intr_clear, 880 .intr_clear = mei_me_intr_clear,
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 7abafe7d120d..bae680c648ff 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/jiffies.h> 18#include <linux/jiffies.h>
19#include <linux/ktime.h>
19#include <linux/delay.h> 20#include <linux/delay.h>
20#include <linux/kthread.h> 21#include <linux/kthread.h>
21#include <linux/irqreturn.h> 22#include <linux/irqreturn.h>
@@ -218,26 +219,25 @@ static u32 mei_txe_aliveness_get(struct mei_device *dev)
218 * 219 *
219 * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set 220 * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
220 * 221 *
221 * Return: > 0 if the expected value was received, -ETIME otherwise 222 * Return: 0 if the expected value was received, -ETIME otherwise
222 */ 223 */
223static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected) 224static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
224{ 225{
225 struct mei_txe_hw *hw = to_txe_hw(dev); 226 struct mei_txe_hw *hw = to_txe_hw(dev);
226 int t = 0; 227 ktime_t stop, start;
227 228
229 start = ktime_get();
230 stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
228 do { 231 do {
229 hw->aliveness = mei_txe_aliveness_get(dev); 232 hw->aliveness = mei_txe_aliveness_get(dev);
230 if (hw->aliveness == expected) { 233 if (hw->aliveness == expected) {
231 dev->pg_event = MEI_PG_EVENT_IDLE; 234 dev->pg_event = MEI_PG_EVENT_IDLE;
232 dev_dbg(dev->dev, 235 dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
233 "aliveness settled after %d msecs\n", t); 236 ktime_to_us(ktime_sub(ktime_get(), start)));
234 return t; 237 return 0;
235 } 238 }
236 mutex_unlock(&dev->device_lock); 239 usleep_range(20, 50);
237 msleep(MSEC_PER_SEC / 5); 240 } while (ktime_before(ktime_get(), stop));
238 mutex_lock(&dev->device_lock);
239 t += MSEC_PER_SEC / 5;
240 } while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
241 241
242 dev->pg_event = MEI_PG_EVENT_IDLE; 242 dev->pg_event = MEI_PG_EVENT_IDLE;
243 dev_err(dev->dev, "aliveness timed out\n"); 243 dev_err(dev->dev, "aliveness timed out\n");
@@ -302,6 +302,18 @@ int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
302} 302}
303 303
304/** 304/**
305 * mei_txe_pg_in_transition - is device now in pg transition
306 *
307 * @dev: the device structure
308 *
309 * Return: true if in pg transition, false otherwise
310 */
311static bool mei_txe_pg_in_transition(struct mei_device *dev)
312{
313 return dev->pg_event == MEI_PG_EVENT_WAIT;
314}
315
316/**
305 * mei_txe_pg_is_enabled - detect if PG is supported by HW 317 * mei_txe_pg_is_enabled - detect if PG is supported by HW
306 * 318 *
307 * @dev: the device structure 319 * @dev: the device structure
@@ -1138,6 +1150,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
1138 .hw_config = mei_txe_hw_config, 1150 .hw_config = mei_txe_hw_config,
1139 .hw_start = mei_txe_hw_start, 1151 .hw_start = mei_txe_hw_start,
1140 1152
1153 .pg_in_transition = mei_txe_pg_in_transition,
1141 .pg_is_enabled = mei_txe_pg_is_enabled, 1154 .pg_is_enabled = mei_txe_pg_is_enabled,
1142 1155
1143 .intr_clear = mei_txe_intr_clear, 1156 .intr_clear = mei_txe_intr_clear,
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 97353cf8d9b6..94514b2c7a50 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -361,13 +361,15 @@ bool mei_write_is_idle(struct mei_device *dev)
361{ 361{
362 bool idle = (dev->dev_state == MEI_DEV_ENABLED && 362 bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
363 list_empty(&dev->ctrl_wr_list.list) && 363 list_empty(&dev->ctrl_wr_list.list) &&
364 list_empty(&dev->write_list.list)); 364 list_empty(&dev->write_list.list) &&
365 list_empty(&dev->write_waiting_list.list));
365 366
366 dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%d write=%d\n", 367 dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
367 idle, 368 idle,
368 mei_dev_state_str(dev->dev_state), 369 mei_dev_state_str(dev->dev_state),
369 list_empty(&dev->ctrl_wr_list.list), 370 list_empty(&dev->ctrl_wr_list.list),
370 list_empty(&dev->write_list.list)); 371 list_empty(&dev->write_list.list),
372 list_empty(&dev->write_waiting_list.list));
371 373
372 return idle; 374 return idle;
373} 375}
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 3f84d2edcde4..3f3405269c39 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -65,8 +65,8 @@ EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
65static inline int mei_cl_hbm_equal(struct mei_cl *cl, 65static inline int mei_cl_hbm_equal(struct mei_cl *cl,
66 struct mei_msg_hdr *mei_hdr) 66 struct mei_msg_hdr *mei_hdr)
67{ 67{
68 return cl->host_client_id == mei_hdr->host_addr && 68 return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
69 cl->me_client_id == mei_hdr->me_addr; 69 mei_cl_me_id(cl) == mei_hdr->me_addr;
70} 70}
71 71
72/** 72/**
@@ -180,56 +180,14 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
180 return -EMSGSIZE; 180 return -EMSGSIZE;
181 181
182 ret = mei_hbm_cl_disconnect_rsp(dev, cl); 182 ret = mei_hbm_cl_disconnect_rsp(dev, cl);
183 183 mei_cl_set_disconnected(cl);
184 cl->state = MEI_FILE_DISCONNECTED;
185 cl->status = 0;
186 mei_io_cb_free(cb); 184 mei_io_cb_free(cb);
185 mei_me_cl_put(cl->me_cl);
186 cl->me_cl = NULL;
187 187
188 return ret; 188 return ret;
189} 189}
190 190
191
192
193/**
194 * mei_cl_irq_disconnect - processes close related operation from
195 * interrupt thread context - send disconnect request
196 *
197 * @cl: client
198 * @cb: callback block.
199 * @cmpl_list: complete list.
200 *
201 * Return: 0, OK; otherwise, error.
202 */
203static int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
204 struct mei_cl_cb *cmpl_list)
205{
206 struct mei_device *dev = cl->dev;
207 u32 msg_slots;
208 int slots;
209
210 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
211 slots = mei_hbuf_empty_slots(dev);
212
213 if (slots < msg_slots)
214 return -EMSGSIZE;
215
216 if (mei_hbm_cl_disconnect_req(dev, cl)) {
217 cl->status = 0;
218 cb->buf_idx = 0;
219 list_move_tail(&cb->list, &cmpl_list->list);
220 return -EIO;
221 }
222
223 cl->state = MEI_FILE_DISCONNECTING;
224 cl->status = 0;
225 cb->buf_idx = 0;
226 list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
227 cl->timer_count = MEI_CONNECT_TIMEOUT;
228
229 return 0;
230}
231
232
233/** 191/**
234 * mei_cl_irq_read - processes client read related operation from the 192 * mei_cl_irq_read - processes client read related operation from the
235 * interrupt thread context - request for flow control credits 193 * interrupt thread context - request for flow control credits
@@ -267,49 +225,6 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
267 return 0; 225 return 0;
268} 226}
269 227
270
271/**
272 * mei_cl_irq_connect - send connect request in irq_thread context
273 *
274 * @cl: client
275 * @cb: callback block.
276 * @cmpl_list: complete list.
277 *
278 * Return: 0, OK; otherwise, error.
279 */
280static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
281 struct mei_cl_cb *cmpl_list)
282{
283 struct mei_device *dev = cl->dev;
284 u32 msg_slots;
285 int slots;
286 int ret;
287
288 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
289 slots = mei_hbuf_empty_slots(dev);
290
291 if (mei_cl_is_other_connecting(cl))
292 return 0;
293
294 if (slots < msg_slots)
295 return -EMSGSIZE;
296
297 cl->state = MEI_FILE_CONNECTING;
298
299 ret = mei_hbm_cl_connect_req(dev, cl);
300 if (ret) {
301 cl->status = ret;
302 cb->buf_idx = 0;
303 list_del_init(&cb->list);
304 return ret;
305 }
306
307 list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
308 cl->timer_count = MEI_CONNECT_TIMEOUT;
309 return 0;
310}
311
312
313/** 228/**
314 * mei_irq_read_handler - bottom half read routine after ISR to 229 * mei_irq_read_handler - bottom half read routine after ISR to
315 * handle the read processing. 230 * handle the read processing.
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 3e2968159506..8eb0a9500a90 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -94,7 +94,7 @@ static int mei_release(struct inode *inode, struct file *file)
94{ 94{
95 struct mei_cl *cl = file->private_data; 95 struct mei_cl *cl = file->private_data;
96 struct mei_device *dev; 96 struct mei_device *dev;
97 int rets = 0; 97 int rets;
98 98
99 if (WARN_ON(!cl || !cl->dev)) 99 if (WARN_ON(!cl || !cl->dev))
100 return -ENODEV; 100 return -ENODEV;
@@ -106,11 +106,8 @@ static int mei_release(struct inode *inode, struct file *file)
106 rets = mei_amthif_release(dev, file); 106 rets = mei_amthif_release(dev, file);
107 goto out; 107 goto out;
108 } 108 }
109 if (mei_cl_is_connected(cl)) { 109 rets = mei_cl_disconnect(cl);
110 cl->state = MEI_FILE_DISCONNECTING; 110
111 cl_dbg(dev, cl, "disconnecting\n");
112 rets = mei_cl_disconnect(cl);
113 }
114 mei_cl_flush_queues(cl, file); 111 mei_cl_flush_queues(cl, file);
115 cl_dbg(dev, cl, "removing\n"); 112 cl_dbg(dev, cl, "removing\n");
116 113
@@ -186,8 +183,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
186 183
187 err = mei_cl_read_start(cl, length, file); 184 err = mei_cl_read_start(cl, length, file);
188 if (err && err != -EBUSY) { 185 if (err && err != -EBUSY) {
189 dev_dbg(dev->dev, 186 cl_dbg(dev, cl, "mei start read failure status = %d\n", err);
190 "mei start read failure with status = %d\n", err);
191 rets = err; 187 rets = err;
192 goto out; 188 goto out;
193 } 189 }
@@ -218,6 +214,11 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
218 214
219 cb = mei_cl_read_cb(cl, file); 215 cb = mei_cl_read_cb(cl, file);
220 if (!cb) { 216 if (!cb) {
217 if (mei_cl_is_fixed_address(cl) && dev->allow_fixed_address) {
218 cb = mei_cl_read_cb(cl, NULL);
219 if (cb)
220 goto copy_buffer;
221 }
221 rets = 0; 222 rets = 0;
222 goto out; 223 goto out;
223 } 224 }
@@ -226,11 +227,11 @@ copy_buffer:
226 /* now copy the data to user space */ 227 /* now copy the data to user space */
227 if (cb->status) { 228 if (cb->status) {
228 rets = cb->status; 229 rets = cb->status;
229 dev_dbg(dev->dev, "read operation failed %d\n", rets); 230 cl_dbg(dev, cl, "read operation failed %d\n", rets);
230 goto free; 231 goto free;
231 } 232 }
232 233
233 dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n", 234 cl_dbg(dev, cl, "buf.size = %d buf.idx = %ld\n",
234 cb->buf.size, cb->buf_idx); 235 cb->buf.size, cb->buf_idx);
235 if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) { 236 if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
236 rets = -EMSGSIZE; 237 rets = -EMSGSIZE;
@@ -256,7 +257,7 @@ free:
256 mei_io_cb_free(cb); 257 mei_io_cb_free(cb);
257 258
258out: 259out:
259 dev_dbg(dev->dev, "end mei read rets= %d\n", rets); 260 cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
260 mutex_unlock(&dev->device_lock); 261 mutex_unlock(&dev->device_lock);
261 return rets; 262 return rets;
262} 263}
@@ -274,7 +275,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
274 size_t length, loff_t *offset) 275 size_t length, loff_t *offset)
275{ 276{
276 struct mei_cl *cl = file->private_data; 277 struct mei_cl *cl = file->private_data;
277 struct mei_me_client *me_cl = NULL;
278 struct mei_cl_cb *write_cb = NULL; 278 struct mei_cl_cb *write_cb = NULL;
279 struct mei_device *dev; 279 struct mei_device *dev;
280 unsigned long timeout = 0; 280 unsigned long timeout = 0;
@@ -292,27 +292,27 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
292 goto out; 292 goto out;
293 } 293 }
294 294
295 me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id); 295 if (!mei_cl_is_connected(cl)) {
296 if (!me_cl) { 296 cl_err(dev, cl, "is not connected");
297 rets = -ENOTTY; 297 rets = -ENODEV;
298 goto out; 298 goto out;
299 } 299 }
300 300
301 if (length == 0) { 301 if (!mei_me_cl_is_active(cl->me_cl)) {
302 rets = 0; 302 rets = -ENOTTY;
303 goto out; 303 goto out;
304 } 304 }
305 305
306 if (length > me_cl->props.max_msg_length) { 306 if (length > mei_cl_mtu(cl)) {
307 rets = -EFBIG; 307 rets = -EFBIG;
308 goto out; 308 goto out;
309 } 309 }
310 310
311 if (!mei_cl_is_connected(cl)) { 311 if (length == 0) {
312 cl_err(dev, cl, "is not connected"); 312 rets = 0;
313 rets = -ENODEV;
314 goto out; 313 goto out;
315 } 314 }
315
316 if (cl == &dev->iamthif_cl) { 316 if (cl == &dev->iamthif_cl) {
317 write_cb = mei_amthif_find_read_list_entry(dev, file); 317 write_cb = mei_amthif_find_read_list_entry(dev, file);
318 318
@@ -350,14 +350,12 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
350 "amthif write failed with status = %d\n", rets); 350 "amthif write failed with status = %d\n", rets);
351 goto out; 351 goto out;
352 } 352 }
353 mei_me_cl_put(me_cl);
354 mutex_unlock(&dev->device_lock); 353 mutex_unlock(&dev->device_lock);
355 return length; 354 return length;
356 } 355 }
357 356
358 rets = mei_cl_write(cl, write_cb, false); 357 rets = mei_cl_write(cl, write_cb, false);
359out: 358out:
360 mei_me_cl_put(me_cl);
361 mutex_unlock(&dev->device_lock); 359 mutex_unlock(&dev->device_lock);
362 if (rets < 0) 360 if (rets < 0)
363 mei_io_cb_free(write_cb); 361 mei_io_cb_free(write_cb);
@@ -395,17 +393,16 @@ static int mei_ioctl_connect_client(struct file *file,
395 393
396 /* find ME client we're trying to connect to */ 394 /* find ME client we're trying to connect to */
397 me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid); 395 me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
398 if (!me_cl || me_cl->props.fixed_address) { 396 if (!me_cl ||
397 (me_cl->props.fixed_address && !dev->allow_fixed_address)) {
399 dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", 398 dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
400 &data->in_client_uuid); 399 &data->in_client_uuid);
400 mei_me_cl_put(me_cl);
401 return -ENOTTY; 401 return -ENOTTY;
402 } 402 }
403 403
404 cl->me_client_id = me_cl->client_id;
405 cl->cl_uuid = me_cl->props.protocol_name;
406
407 dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", 404 dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
408 cl->me_client_id); 405 me_cl->client_id);
409 dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n", 406 dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
410 me_cl->props.protocol_version); 407 me_cl->props.protocol_version);
411 dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n", 408 dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
@@ -441,7 +438,7 @@ static int mei_ioctl_connect_client(struct file *file,
441 client->protocol_version = me_cl->props.protocol_version; 438 client->protocol_version = me_cl->props.protocol_version;
442 dev_dbg(dev->dev, "Can connect?\n"); 439 dev_dbg(dev->dev, "Can connect?\n");
443 440
444 rets = mei_cl_connect(cl, file); 441 rets = mei_cl_connect(cl, me_cl, file);
445 442
446end: 443end:
447 mei_me_cl_put(me_cl); 444 mei_me_cl_put(me_cl);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index f066ecd71939..453f6a333b42 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -88,7 +88,8 @@ enum file_state {
88 MEI_FILE_CONNECTING, 88 MEI_FILE_CONNECTING,
89 MEI_FILE_CONNECTED, 89 MEI_FILE_CONNECTED,
90 MEI_FILE_DISCONNECTING, 90 MEI_FILE_DISCONNECTING,
91 MEI_FILE_DISCONNECTED 91 MEI_FILE_DISCONNECT_REPLY,
92 MEI_FILE_DISCONNECTED,
92}; 93};
93 94
94/* MEI device states */ 95/* MEI device states */
@@ -176,6 +177,8 @@ struct mei_fw_status {
176 * @props: client properties 177 * @props: client properties
177 * @client_id: me client id 178 * @client_id: me client id
178 * @mei_flow_ctrl_creds: flow control credits 179 * @mei_flow_ctrl_creds: flow control credits
180 * @connect_count: number connections to this client
181 * @reserved: reserved
179 */ 182 */
180struct mei_me_client { 183struct mei_me_client {
181 struct list_head list; 184 struct list_head list;
@@ -183,6 +186,8 @@ struct mei_me_client {
183 struct mei_client_properties props; 186 struct mei_client_properties props;
184 u8 client_id; 187 u8 client_id;
185 u8 mei_flow_ctrl_creds; 188 u8 mei_flow_ctrl_creds;
189 u8 connect_count;
190 u8 reserved;
186}; 191};
187 192
188 193
@@ -226,11 +231,11 @@ struct mei_cl_cb {
226 * @rx_wait: wait queue for rx completion 231 * @rx_wait: wait queue for rx completion
227 * @wait: wait queue for management operation 232 * @wait: wait queue for management operation
228 * @status: connection status 233 * @status: connection status
229 * @cl_uuid: client uuid name 234 * @me_cl: fw client connected
230 * @host_client_id: host id 235 * @host_client_id: host id
231 * @me_client_id: me/fw id
232 * @mei_flow_ctrl_creds: transmit flow credentials 236 * @mei_flow_ctrl_creds: transmit flow credentials
233 * @timer_count: watchdog timer for operation completion 237 * @timer_count: watchdog timer for operation completion
238 * @reserved: reserved for alignment
234 * @writing_state: state of the tx 239 * @writing_state: state of the tx
235 * @rd_pending: pending read credits 240 * @rd_pending: pending read credits
236 * @rd_completed: completed read 241 * @rd_completed: completed read
@@ -246,11 +251,11 @@ struct mei_cl {
246 wait_queue_head_t rx_wait; 251 wait_queue_head_t rx_wait;
247 wait_queue_head_t wait; 252 wait_queue_head_t wait;
248 int status; 253 int status;
249 uuid_le cl_uuid; 254 struct mei_me_client *me_cl;
250 u8 host_client_id; 255 u8 host_client_id;
251 u8 me_client_id;
252 u8 mei_flow_ctrl_creds; 256 u8 mei_flow_ctrl_creds;
253 u8 timer_count; 257 u8 timer_count;
258 u8 reserved;
254 enum mei_file_transaction_states writing_state; 259 enum mei_file_transaction_states writing_state;
255 struct list_head rd_pending; 260 struct list_head rd_pending;
256 struct list_head rd_completed; 261 struct list_head rd_completed;
@@ -271,6 +276,7 @@ struct mei_cl {
271 276
272 * @fw_status : get fw status registers 277 * @fw_status : get fw status registers
273 * @pg_state : power gating state of the device 278 * @pg_state : power gating state of the device
279 * @pg_in_transition : is device now in pg transition
274 * @pg_is_enabled : is power gating enabled 280 * @pg_is_enabled : is power gating enabled
275 281
276 * @intr_clear : clear pending interrupts 282 * @intr_clear : clear pending interrupts
@@ -300,6 +306,7 @@ struct mei_hw_ops {
300 306
301 int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts); 307 int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
302 enum mei_pg_state (*pg_state)(struct mei_device *dev); 308 enum mei_pg_state (*pg_state)(struct mei_device *dev);
309 bool (*pg_in_transition)(struct mei_device *dev);
303 bool (*pg_is_enabled)(struct mei_device *dev); 310 bool (*pg_is_enabled)(struct mei_device *dev);
304 311
305 void (*intr_clear)(struct mei_device *dev); 312 void (*intr_clear)(struct mei_device *dev);
@@ -323,34 +330,14 @@ struct mei_hw_ops {
323 330
324/* MEI bus API*/ 331/* MEI bus API*/
325 332
326/**
327 * struct mei_cl_ops - MEI CL device ops
328 * This structure allows ME host clients to implement technology
329 * specific operations.
330 *
331 * @enable: Enable an MEI CL device. Some devices require specific
332 * HECI commands to initialize completely.
333 * @disable: Disable an MEI CL device.
334 * @send: Tx hook for the device. This allows ME host clients to trap
335 * the device driver buffers before actually physically
336 * pushing it to the ME.
337 * @recv: Rx hook for the device. This allows ME host clients to trap the
338 * ME buffers before forwarding them to the device driver.
339 */
340struct mei_cl_ops {
341 int (*enable)(struct mei_cl_device *device);
342 int (*disable)(struct mei_cl_device *device);
343 int (*send)(struct mei_cl_device *device, u8 *buf, size_t length);
344 int (*recv)(struct mei_cl_device *device, u8 *buf, size_t length);
345};
346
347struct mei_cl_device *mei_cl_add_device(struct mei_device *dev, 333struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
348 uuid_le uuid, char *name, 334 struct mei_me_client *me_cl,
349 struct mei_cl_ops *ops); 335 struct mei_cl *cl,
336 char *name);
350void mei_cl_remove_device(struct mei_cl_device *device); 337void mei_cl_remove_device(struct mei_cl_device *device);
351 338
352ssize_t __mei_cl_async_send(struct mei_cl *cl, u8 *buf, size_t length); 339ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
353ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length); 340 bool blocking);
354ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); 341ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
355void mei_cl_bus_rx_event(struct mei_cl *cl); 342void mei_cl_bus_rx_event(struct mei_cl *cl);
356void mei_cl_bus_remove_devices(struct mei_device *dev); 343void mei_cl_bus_remove_devices(struct mei_device *dev);
@@ -358,51 +345,21 @@ int mei_cl_bus_init(void);
358void mei_cl_bus_exit(void); 345void mei_cl_bus_exit(void);
359struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid); 346struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid);
360 347
361
362/**
363 * struct mei_cl_device - MEI device handle
364 * An mei_cl_device pointer is returned from mei_add_device()
365 * and links MEI bus clients to their actual ME host client pointer.
366 * Drivers for MEI devices will get an mei_cl_device pointer
367 * when being probed and shall use it for doing ME bus I/O.
368 *
369 * @dev: linux driver model device pointer
370 * @cl: mei client
371 * @ops: ME transport ops
372 * @event_work: async work to execute event callback
373 * @event_cb: Drivers register this callback to get asynchronous ME
374 * events (e.g. Rx buffer pending) notifications.
375 * @event_context: event callback run context
376 * @events: Events bitmask sent to the driver.
377 * @priv_data: client private data
378 */
379struct mei_cl_device {
380 struct device dev;
381
382 struct mei_cl *cl;
383
384 const struct mei_cl_ops *ops;
385
386 struct work_struct event_work;
387 mei_cl_event_cb_t event_cb;
388 void *event_context;
389 unsigned long events;
390
391 void *priv_data;
392};
393
394
395/** 348/**
396 * enum mei_pg_event - power gating transition events 349 * enum mei_pg_event - power gating transition events
397 * 350 *
398 * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition 351 * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition
399 * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete 352 * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete
400 * @MEI_PG_EVENT_RECEIVED: the driver received pg event 353 * @MEI_PG_EVENT_RECEIVED: the driver received pg event
354 * @MEI_PG_EVENT_INTR_WAIT: the driver is waiting for a pg event interrupt
355 * @MEI_PG_EVENT_INTR_RECEIVED: the driver received pg event interrupt
401 */ 356 */
402enum mei_pg_event { 357enum mei_pg_event {
403 MEI_PG_EVENT_IDLE, 358 MEI_PG_EVENT_IDLE,
404 MEI_PG_EVENT_WAIT, 359 MEI_PG_EVENT_WAIT,
405 MEI_PG_EVENT_RECEIVED, 360 MEI_PG_EVENT_RECEIVED,
361 MEI_PG_EVENT_INTR_WAIT,
362 MEI_PG_EVENT_INTR_RECEIVED,
406}; 363};
407 364
408/** 365/**
@@ -467,6 +424,8 @@ const char *mei_pg_state_str(enum mei_pg_state state);
467 * @host_clients_map : host clients id pool 424 * @host_clients_map : host clients id pool
468 * @me_client_index : last FW client index in enumeration 425 * @me_client_index : last FW client index in enumeration
469 * 426 *
427 * @allow_fixed_address: allow user space to connect a fixed client
428 *
470 * @wd_cl : watchdog client 429 * @wd_cl : watchdog client
471 * @wd_state : watchdog client state 430 * @wd_state : watchdog client state
472 * @wd_pending : watchdog command is pending 431 * @wd_pending : watchdog command is pending
@@ -479,7 +438,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
479 * @iamthif_cl : amthif host client 438 * @iamthif_cl : amthif host client
480 * @iamthif_current_cb : amthif current operation callback 439 * @iamthif_current_cb : amthif current operation callback
481 * @iamthif_open_count : number of opened amthif connections 440 * @iamthif_open_count : number of opened amthif connections
482 * @iamthif_mtu : amthif client max message length
483 * @iamthif_timer : time stamp of current amthif command completion 441 * @iamthif_timer : time stamp of current amthif command completion
484 * @iamthif_stall_timer : timer to detect amthif hang 442 * @iamthif_stall_timer : timer to detect amthif hang
485 * @iamthif_state : amthif processor state 443 * @iamthif_state : amthif processor state
@@ -558,6 +516,8 @@ struct mei_device {
558 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); 516 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
559 unsigned long me_client_index; 517 unsigned long me_client_index;
560 518
519 u32 allow_fixed_address;
520
561 struct mei_cl wd_cl; 521 struct mei_cl wd_cl;
562 enum mei_wd_states wd_state; 522 enum mei_wd_states wd_state;
563 bool wd_pending; 523 bool wd_pending;
@@ -573,7 +533,6 @@ struct mei_device {
573 struct mei_cl iamthif_cl; 533 struct mei_cl iamthif_cl;
574 struct mei_cl_cb *iamthif_current_cb; 534 struct mei_cl_cb *iamthif_current_cb;
575 long iamthif_open_count; 535 long iamthif_open_count;
576 int iamthif_mtu;
577 unsigned long iamthif_timer; 536 unsigned long iamthif_timer;
578 u32 iamthif_stall_timer; 537 u32 iamthif_stall_timer;
579 enum iamthif_states iamthif_state; 538 enum iamthif_states iamthif_state;
@@ -652,7 +611,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
652 */ 611 */
653void mei_amthif_reset_params(struct mei_device *dev); 612void mei_amthif_reset_params(struct mei_device *dev);
654 613
655int mei_amthif_host_init(struct mei_device *dev); 614int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
656 615
657int mei_amthif_read(struct mei_device *dev, struct file *file, 616int mei_amthif_read(struct mei_device *dev, struct file *file,
658 char __user *ubuf, size_t length, loff_t *offset); 617 char __user *ubuf, size_t length, loff_t *offset);
@@ -679,7 +638,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
679/* 638/*
680 * NFC functions 639 * NFC functions
681 */ 640 */
682int mei_nfc_host_init(struct mei_device *dev); 641int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
683void mei_nfc_host_exit(struct mei_device *dev); 642void mei_nfc_host_exit(struct mei_device *dev);
684 643
685/* 644/*
@@ -689,7 +648,7 @@ extern const uuid_le mei_nfc_guid;
689 648
690int mei_wd_send(struct mei_device *dev); 649int mei_wd_send(struct mei_device *dev);
691int mei_wd_stop(struct mei_device *dev); 650int mei_wd_stop(struct mei_device *dev);
692int mei_wd_host_init(struct mei_device *dev); 651int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
693/* 652/*
694 * mei_watchdog_register - Registering watchdog interface 653 * mei_watchdog_register - Registering watchdog interface
695 * once we got connection to the WD Client 654 * once we got connection to the WD Client
@@ -717,6 +676,11 @@ static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
717 return dev->ops->pg_state(dev); 676 return dev->ops->pg_state(dev);
718} 677}
719 678
679static inline bool mei_pg_in_transition(struct mei_device *dev)
680{
681 return dev->ops->pg_in_transition(dev);
682}
683
720static inline bool mei_pg_is_enabled(struct mei_device *dev) 684static inline bool mei_pg_is_enabled(struct mei_device *dev)
721{ 685{
722 return dev->ops->pg_is_enabled(dev); 686 return dev->ops->pg_is_enabled(dev);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index c3bcb63686d7..b983c4ecad38 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -91,30 +91,25 @@ struct mei_nfc_hci_hdr {
91/** 91/**
92 * struct mei_nfc_dev - NFC mei device 92 * struct mei_nfc_dev - NFC mei device
93 * 93 *
94 * @me_cl: NFC me client
94 * @cl: NFC host client 95 * @cl: NFC host client
95 * @cl_info: NFC info host client 96 * @cl_info: NFC info host client
96 * @init_work: perform connection to the info client 97 * @init_work: perform connection to the info client
97 * @send_wq: send completion wait queue
98 * @fw_ivn: NFC Interface Version Number 98 * @fw_ivn: NFC Interface Version Number
99 * @vendor_id: NFC manufacturer ID 99 * @vendor_id: NFC manufacturer ID
100 * @radio_type: NFC radio type 100 * @radio_type: NFC radio type
101 * @bus_name: bus name 101 * @bus_name: bus name
102 * 102 *
103 * @req_id: message counter
104 * @recv_req_id: reception message counter
105 */ 103 */
106struct mei_nfc_dev { 104struct mei_nfc_dev {
105 struct mei_me_client *me_cl;
107 struct mei_cl *cl; 106 struct mei_cl *cl;
108 struct mei_cl *cl_info; 107 struct mei_cl *cl_info;
109 struct work_struct init_work; 108 struct work_struct init_work;
110 wait_queue_head_t send_wq;
111 u8 fw_ivn; 109 u8 fw_ivn;
112 u8 vendor_id; 110 u8 vendor_id;
113 u8 radio_type; 111 u8 radio_type;
114 char *bus_name; 112 char *bus_name;
115
116 u16 req_id;
117 u16 recv_req_id;
118}; 113};
119 114
120/* UUIDs for NFC F/W clients */ 115/* UUIDs for NFC F/W clients */
@@ -151,6 +146,7 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
151 kfree(ndev->cl_info); 146 kfree(ndev->cl_info);
152 } 147 }
153 148
149 mei_me_cl_put(ndev->me_cl);
154 kfree(ndev); 150 kfree(ndev);
155} 151}
156 152
@@ -199,73 +195,6 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
199 return 0; 195 return 0;
200} 196}
201 197
202static int mei_nfc_connect(struct mei_nfc_dev *ndev)
203{
204 struct mei_device *dev;
205 struct mei_cl *cl;
206 struct mei_nfc_cmd *cmd, *reply;
207 struct mei_nfc_connect *connect;
208 struct mei_nfc_connect_resp *connect_resp;
209 size_t connect_length, connect_resp_length;
210 int bytes_recv, ret;
211
212 cl = ndev->cl;
213 dev = cl->dev;
214
215 connect_length = sizeof(struct mei_nfc_cmd) +
216 sizeof(struct mei_nfc_connect);
217
218 connect_resp_length = sizeof(struct mei_nfc_cmd) +
219 sizeof(struct mei_nfc_connect_resp);
220
221 cmd = kzalloc(connect_length, GFP_KERNEL);
222 if (!cmd)
223 return -ENOMEM;
224 connect = (struct mei_nfc_connect *)cmd->data;
225
226 reply = kzalloc(connect_resp_length, GFP_KERNEL);
227 if (!reply) {
228 kfree(cmd);
229 return -ENOMEM;
230 }
231
232 connect_resp = (struct mei_nfc_connect_resp *)reply->data;
233
234 cmd->command = MEI_NFC_CMD_MAINTENANCE;
235 cmd->data_size = 3;
236 cmd->sub_command = MEI_NFC_SUBCMD_CONNECT;
237 connect->fw_ivn = ndev->fw_ivn;
238 connect->vendor_id = ndev->vendor_id;
239
240 ret = __mei_cl_send(cl, (u8 *)cmd, connect_length);
241 if (ret < 0) {
242 dev_err(dev->dev, "Could not send connect cmd\n");
243 goto err;
244 }
245
246 bytes_recv = __mei_cl_recv(cl, (u8 *)reply, connect_resp_length);
247 if (bytes_recv < 0) {
248 dev_err(dev->dev, "Could not read connect response\n");
249 ret = bytes_recv;
250 goto err;
251 }
252
253 dev_info(dev->dev, "IVN 0x%x Vendor ID 0x%x\n",
254 connect_resp->fw_ivn, connect_resp->vendor_id);
255
256 dev_info(dev->dev, "ME FW %d.%d.%d.%d\n",
257 connect_resp->me_major, connect_resp->me_minor,
258 connect_resp->me_hotfix, connect_resp->me_build);
259
260 ret = 0;
261
262err:
263 kfree(reply);
264 kfree(cmd);
265
266 return ret;
267}
268
269static int mei_nfc_if_version(struct mei_nfc_dev *ndev) 198static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
270{ 199{
271 struct mei_device *dev; 200 struct mei_device *dev;
@@ -285,7 +214,7 @@ static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
285 cmd.data_size = 1; 214 cmd.data_size = 1;
286 cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION; 215 cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
287 216
288 ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd)); 217 ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
289 if (ret < 0) { 218 if (ret < 0) {
290 dev_err(dev->dev, "Could not send IF version cmd\n"); 219 dev_err(dev->dev, "Could not send IF version cmd\n");
291 return ret; 220 return ret;
@@ -317,106 +246,13 @@ err:
317 return ret; 246 return ret;
318} 247}
319 248
320static int mei_nfc_enable(struct mei_cl_device *cldev)
321{
322 struct mei_device *dev;
323 struct mei_nfc_dev *ndev;
324 int ret;
325
326 ndev = (struct mei_nfc_dev *)cldev->priv_data;
327 dev = ndev->cl->dev;
328
329 ret = mei_nfc_connect(ndev);
330 if (ret < 0) {
331 dev_err(dev->dev, "Could not connect to NFC");
332 return ret;
333 }
334
335 return 0;
336}
337
338static int mei_nfc_disable(struct mei_cl_device *cldev)
339{
340 return 0;
341}
342
343static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
344{
345 struct mei_device *dev;
346 struct mei_nfc_dev *ndev;
347 struct mei_nfc_hci_hdr *hdr;
348 u8 *mei_buf;
349 int err;
350
351 ndev = (struct mei_nfc_dev *) cldev->priv_data;
352 dev = ndev->cl->dev;
353
354 err = -ENOMEM;
355 mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
356 if (!mei_buf)
357 goto out;
358
359 hdr = (struct mei_nfc_hci_hdr *) mei_buf;
360 hdr->cmd = MEI_NFC_CMD_HCI_SEND;
361 hdr->status = 0;
362 hdr->req_id = ndev->req_id;
363 hdr->reserved = 0;
364 hdr->data_size = length;
365
366 memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
367 err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
368 if (err < 0)
369 goto out;
370
371 if (!wait_event_interruptible_timeout(ndev->send_wq,
372 ndev->recv_req_id == ndev->req_id, HZ)) {
373 dev_err(dev->dev, "NFC MEI command timeout\n");
374 err = -ETIME;
375 } else {
376 ndev->req_id++;
377 }
378out:
379 kfree(mei_buf);
380 return err;
381}
382
383static int mei_nfc_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
384{
385 struct mei_nfc_dev *ndev;
386 struct mei_nfc_hci_hdr *hci_hdr;
387 int received_length;
388
389 ndev = (struct mei_nfc_dev *)cldev->priv_data;
390
391 received_length = __mei_cl_recv(ndev->cl, buf, length);
392 if (received_length < 0)
393 return received_length;
394
395 hci_hdr = (struct mei_nfc_hci_hdr *) buf;
396
397 if (hci_hdr->cmd == MEI_NFC_CMD_HCI_SEND) {
398 ndev->recv_req_id = hci_hdr->req_id;
399 wake_up(&ndev->send_wq);
400
401 return 0;
402 }
403
404 return received_length;
405}
406
407static struct mei_cl_ops nfc_ops = {
408 .enable = mei_nfc_enable,
409 .disable = mei_nfc_disable,
410 .send = mei_nfc_send,
411 .recv = mei_nfc_recv,
412};
413
414static void mei_nfc_init(struct work_struct *work) 249static void mei_nfc_init(struct work_struct *work)
415{ 250{
416 struct mei_device *dev; 251 struct mei_device *dev;
417 struct mei_cl_device *cldev; 252 struct mei_cl_device *cldev;
418 struct mei_nfc_dev *ndev; 253 struct mei_nfc_dev *ndev;
419 struct mei_cl *cl_info; 254 struct mei_cl *cl_info;
255 struct mei_me_client *me_cl_info;
420 256
421 ndev = container_of(work, struct mei_nfc_dev, init_work); 257 ndev = container_of(work, struct mei_nfc_dev, init_work);
422 258
@@ -425,13 +261,22 @@ static void mei_nfc_init(struct work_struct *work)
425 261
426 mutex_lock(&dev->device_lock); 262 mutex_lock(&dev->device_lock);
427 263
428 if (mei_cl_connect(cl_info, NULL) < 0) { 264 /* check for valid client id */
265 me_cl_info = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
266 if (!me_cl_info) {
267 mutex_unlock(&dev->device_lock);
268 dev_info(dev->dev, "nfc: failed to find the info client\n");
269 goto err;
270 }
271
272 if (mei_cl_connect(cl_info, me_cl_info, NULL) < 0) {
273 mei_me_cl_put(me_cl_info);
429 mutex_unlock(&dev->device_lock); 274 mutex_unlock(&dev->device_lock);
430 dev_err(dev->dev, "Could not connect to the NFC INFO ME client"); 275 dev_err(dev->dev, "Could not connect to the NFC INFO ME client");
431 276
432 goto err; 277 goto err;
433 } 278 }
434 279 mei_me_cl_put(me_cl_info);
435 mutex_unlock(&dev->device_lock); 280 mutex_unlock(&dev->device_lock);
436 281
437 if (mei_nfc_if_version(ndev) < 0) { 282 if (mei_nfc_if_version(ndev) < 0) {
@@ -459,7 +304,8 @@ static void mei_nfc_init(struct work_struct *work)
459 return; 304 return;
460 } 305 }
461 306
462 cldev = mei_cl_add_device(dev, mei_nfc_guid, ndev->bus_name, &nfc_ops); 307 cldev = mei_cl_add_device(dev, ndev->me_cl, ndev->cl,
308 ndev->bus_name);
463 if (!cldev) { 309 if (!cldev) {
464 dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n"); 310 dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n");
465 311
@@ -479,11 +325,10 @@ err:
479} 325}
480 326
481 327
482int mei_nfc_host_init(struct mei_device *dev) 328int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
483{ 329{
484 struct mei_nfc_dev *ndev; 330 struct mei_nfc_dev *ndev;
485 struct mei_cl *cl_info, *cl; 331 struct mei_cl *cl_info, *cl;
486 struct mei_me_client *me_cl = NULL;
487 int ret; 332 int ret;
488 333
489 334
@@ -500,11 +345,9 @@ int mei_nfc_host_init(struct mei_device *dev)
500 goto err; 345 goto err;
501 } 346 }
502 347
503 /* check for valid client id */ 348 ndev->me_cl = mei_me_cl_get(me_cl);
504 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); 349 if (!ndev->me_cl) {
505 if (!me_cl) { 350 ret = -ENODEV;
506 dev_info(dev->dev, "nfc: failed to find the client\n");
507 ret = -ENOTTY;
508 goto err; 351 goto err;
509 } 352 }
510 353
@@ -514,48 +357,26 @@ int mei_nfc_host_init(struct mei_device *dev)
514 goto err; 357 goto err;
515 } 358 }
516 359
517 cl_info->me_client_id = me_cl->client_id;
518 cl_info->cl_uuid = me_cl->props.protocol_name;
519 mei_me_cl_put(me_cl);
520 me_cl = NULL;
521
522 list_add_tail(&cl_info->device_link, &dev->device_list); 360 list_add_tail(&cl_info->device_link, &dev->device_list);
523 361
524 ndev->cl_info = cl_info; 362 ndev->cl_info = cl_info;
525 363
526 /* check for valid client id */
527 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
528 if (!me_cl) {
529 dev_info(dev->dev, "nfc: failed to find the client\n");
530 ret = -ENOTTY;
531 goto err;
532 }
533
534 cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY); 364 cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
535 if (IS_ERR(cl)) { 365 if (IS_ERR(cl)) {
536 ret = PTR_ERR(cl); 366 ret = PTR_ERR(cl);
537 goto err; 367 goto err;
538 } 368 }
539 369
540 cl->me_client_id = me_cl->client_id;
541 cl->cl_uuid = me_cl->props.protocol_name;
542 mei_me_cl_put(me_cl);
543 me_cl = NULL;
544
545 list_add_tail(&cl->device_link, &dev->device_list); 370 list_add_tail(&cl->device_link, &dev->device_list);
546 371
547 ndev->cl = cl; 372 ndev->cl = cl;
548 373
549 ndev->req_id = 1;
550
551 INIT_WORK(&ndev->init_work, mei_nfc_init); 374 INIT_WORK(&ndev->init_work, mei_nfc_init);
552 init_waitqueue_head(&ndev->send_wq);
553 schedule_work(&ndev->init_work); 375 schedule_work(&ndev->init_work);
554 376
555 return 0; 377 return 0;
556 378
557err: 379err:
558 mei_me_cl_put(me_cl);
559 mei_nfc_free(ndev); 380 mei_nfc_free(ndev);
560 381
561 return ret; 382 return ret;
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index dcfcba44b6f7..0882c0201907 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -338,7 +338,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
338 * However if device is not wakeable we do not enter 338 * However if device is not wakeable we do not enter
339 * D-low state and we need to keep the interrupt kicking 339 * D-low state and we need to keep the interrupt kicking
340 */ 340 */
341 if (!ret && pci_dev_run_wake(pdev)) 341 if (!ret && pci_dev_run_wake(pdev))
342 mei_disable_interrupts(dev); 342 mei_disable_interrupts(dev);
343 343
344 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); 344 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 2725f865c3d6..2bc0f5089f82 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -50,15 +50,15 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
50 * mei_wd_host_init - connect to the watchdog client 50 * mei_wd_host_init - connect to the watchdog client
51 * 51 *
52 * @dev: the device structure 52 * @dev: the device structure
53 * @me_cl: me client
53 * 54 *
54 * Return: -ENOTTY if wd client cannot be found 55 * Return: -ENOTTY if wd client cannot be found
55 * -EIO if write has failed 56 * -EIO if write has failed
56 * 0 on success 57 * 0 on success
57 */ 58 */
58int mei_wd_host_init(struct mei_device *dev) 59int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
59{ 60{
60 struct mei_cl *cl = &dev->wd_cl; 61 struct mei_cl *cl = &dev->wd_cl;
61 struct mei_me_client *me_cl;
62 int ret; 62 int ret;
63 63
64 mei_cl_init(cl, dev); 64 mei_cl_init(cl, dev);
@@ -66,27 +66,13 @@ int mei_wd_host_init(struct mei_device *dev)
66 dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT; 66 dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
67 dev->wd_state = MEI_WD_IDLE; 67 dev->wd_state = MEI_WD_IDLE;
68 68
69
70 /* check for valid client id */
71 me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
72 if (!me_cl) {
73 dev_info(dev->dev, "wd: failed to find the client\n");
74 return -ENOTTY;
75 }
76
77 cl->me_client_id = me_cl->client_id;
78 cl->cl_uuid = me_cl->props.protocol_name;
79 mei_me_cl_put(me_cl);
80
81 ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID); 69 ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
82
83 if (ret < 0) { 70 if (ret < 0) {
84 dev_info(dev->dev, "wd: failed link client\n"); 71 dev_info(dev->dev, "wd: failed link client\n");
85 return ret; 72 return ret;
86 } 73 }
87 74
88 ret = mei_cl_connect(cl, NULL); 75 ret = mei_cl_connect(cl, me_cl, NULL);
89
90 if (ret) { 76 if (ret) {
91 dev_err(dev->dev, "wd: failed to connect = %d\n", ret); 77 dev_err(dev->dev, "wd: failed to connect = %d\n", ret);
92 mei_cl_unlink(cl); 78 mei_cl_unlink(cl);
@@ -118,7 +104,7 @@ int mei_wd_send(struct mei_device *dev)
118 int ret; 104 int ret;
119 105
120 hdr.host_addr = cl->host_client_id; 106 hdr.host_addr = cl->host_client_id;
121 hdr.me_addr = cl->me_client_id; 107 hdr.me_addr = mei_cl_me_id(cl);
122 hdr.msg_complete = 1; 108 hdr.msg_complete = 1;
123 hdr.reserved = 0; 109 hdr.reserved = 0;
124 hdr.internal = 0; 110 hdr.internal = 0;
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index cc4eef040c14..e9f2f56c370d 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -15,11 +15,28 @@ config INTEL_MIC_BUS
15 OS and tools for MIC to use with this driver are available from 15 OS and tools for MIC to use with this driver are available from
16 <http://software.intel.com/en-us/mic-developer>. 16 <http://software.intel.com/en-us/mic-developer>.
17 17
18comment "SCIF Bus Driver"
19
20config SCIF_BUS
21 tristate "SCIF Bus Driver"
22 depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
23 help
24 This option is selected by any driver which registers a
25 device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
26 and CONFIG_INTEL_MIC_CARD.
27
28 If you are building a host/card kernel with an Intel MIC device
29 then say M (recommended) or Y, else say N. If unsure say N.
30
31 More information about the Intel MIC family as well as the Linux
32 OS and tools for MIC to use with this driver are available from
33 <http://software.intel.com/en-us/mic-developer>.
34
18comment "Intel MIC Host Driver" 35comment "Intel MIC Host Driver"
19 36
20config INTEL_MIC_HOST 37config INTEL_MIC_HOST
21 tristate "Intel MIC Host Driver" 38 tristate "Intel MIC Host Driver"
22 depends on 64BIT && PCI && X86 && INTEL_MIC_BUS 39 depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS
23 select VHOST_RING 40 select VHOST_RING
24 help 41 help
25 This enables Host Driver support for the Intel Many Integrated 42 This enables Host Driver support for the Intel Many Integrated
@@ -39,7 +56,7 @@ comment "Intel MIC Card Driver"
39 56
40config INTEL_MIC_CARD 57config INTEL_MIC_CARD
41 tristate "Intel MIC Card Driver" 58 tristate "Intel MIC Card Driver"
42 depends on 64BIT && X86 && INTEL_MIC_BUS 59 depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS
43 select VIRTIO 60 select VIRTIO
44 help 61 help
45 This enables card driver support for the Intel Many Integrated 62 This enables card driver support for the Intel Many Integrated
@@ -52,3 +69,22 @@ config INTEL_MIC_CARD
52 69
53 For more information see 70 For more information see
54 <http://software.intel.com/en-us/mic-developer>. 71 <http://software.intel.com/en-us/mic-developer>.
72
73comment "SCIF Driver"
74
75config SCIF
76 tristate "SCIF Driver"
77 depends on 64BIT && PCI && X86 && SCIF_BUS
78 help
79 This enables SCIF Driver support for the Intel Many Integrated
80 Core (MIC) family of PCIe form factor coprocessor devices that
81 run a 64 bit Linux OS. The Symmetric Communication Interface
82 (SCIF (pronounced as skiff)) is a low level communications API
83 across PCIe currently implemented for MIC.
84
85 If you are building a host kernel with an Intel MIC device then
86 say M (recommended) or Y, else say N. If unsure say N.
87
88 More information about the Intel MIC family as well as the Linux
89 OS and tools for MIC to use with this driver are available from
90 <http://software.intel.com/en-us/mic-developer>.
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
index e9bf148755e2..a74042c58649 100644
--- a/drivers/misc/mic/Makefile
+++ b/drivers/misc/mic/Makefile
@@ -4,4 +4,5 @@
4# 4#
5obj-$(CONFIG_INTEL_MIC_HOST) += host/ 5obj-$(CONFIG_INTEL_MIC_HOST) += host/
6obj-$(CONFIG_INTEL_MIC_CARD) += card/ 6obj-$(CONFIG_INTEL_MIC_CARD) += card/
7obj-$(CONFIG_INTEL_MIC_BUS) += bus/ 7obj-y += bus/
8obj-$(CONFIG_SCIF) += scif/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
index d85c7f2a0af4..1ed37e234c96 100644
--- a/drivers/misc/mic/bus/Makefile
+++ b/drivers/misc/mic/bus/Makefile
@@ -3,3 +3,4 @@
3# Copyright(c) 2014, Intel Corporation. 3# Copyright(c) 2014, Intel Corporation.
4# 4#
5obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o 5obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
6obj-$(CONFIG_SCIF_BUS) += scif_bus.o
diff --git a/drivers/misc/mic/bus/scif_bus.c b/drivers/misc/mic/bus/scif_bus.c
new file mode 100644
index 000000000000..2da7ceed015d
--- /dev/null
+++ b/drivers/misc/mic/bus/scif_bus.c
@@ -0,0 +1,210 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel Symmetric Communications Interface Bus driver.
16 */
17#include <linux/slab.h>
18#include <linux/module.h>
19#include <linux/idr.h>
20#include <linux/dma-mapping.h>
21
22#include "scif_bus.h"
23
24static ssize_t device_show(struct device *d,
25 struct device_attribute *attr, char *buf)
26{
27 struct scif_hw_dev *dev = dev_to_scif(d);
28
29 return sprintf(buf, "0x%04x\n", dev->id.device);
30}
31
32static DEVICE_ATTR_RO(device);
33
34static ssize_t vendor_show(struct device *d,
35 struct device_attribute *attr, char *buf)
36{
37 struct scif_hw_dev *dev = dev_to_scif(d);
38
39 return sprintf(buf, "0x%04x\n", dev->id.vendor);
40}
41
42static DEVICE_ATTR_RO(vendor);
43
44static ssize_t modalias_show(struct device *d,
45 struct device_attribute *attr, char *buf)
46{
47 struct scif_hw_dev *dev = dev_to_scif(d);
48
49 return sprintf(buf, "scif:d%08Xv%08X\n",
50 dev->id.device, dev->id.vendor);
51}
52
53static DEVICE_ATTR_RO(modalias);
54
55static struct attribute *scif_dev_attrs[] = {
56 &dev_attr_device.attr,
57 &dev_attr_vendor.attr,
58 &dev_attr_modalias.attr,
59 NULL,
60};
61ATTRIBUTE_GROUPS(scif_dev);
62
63static inline int scif_id_match(const struct scif_hw_dev *dev,
64 const struct scif_hw_dev_id *id)
65{
66 if (id->device != dev->id.device && id->device != SCIF_DEV_ANY_ID)
67 return 0;
68
69 return id->vendor == SCIF_DEV_ANY_ID || id->vendor == dev->id.vendor;
70}
71
72/*
73 * This looks through all the IDs a driver claims to support. If any of them
74 * match, we return 1 and the kernel will call scif_dev_probe().
75 */
76static int scif_dev_match(struct device *dv, struct device_driver *dr)
77{
78 unsigned int i;
79 struct scif_hw_dev *dev = dev_to_scif(dv);
80 const struct scif_hw_dev_id *ids;
81
82 ids = drv_to_scif(dr)->id_table;
83 for (i = 0; ids[i].device; i++)
84 if (scif_id_match(dev, &ids[i]))
85 return 1;
86 return 0;
87}
88
89static int scif_uevent(struct device *dv, struct kobj_uevent_env *env)
90{
91 struct scif_hw_dev *dev = dev_to_scif(dv);
92
93 return add_uevent_var(env, "MODALIAS=scif:d%08Xv%08X",
94 dev->id.device, dev->id.vendor);
95}
96
97static int scif_dev_probe(struct device *d)
98{
99 struct scif_hw_dev *dev = dev_to_scif(d);
100 struct scif_driver *drv = drv_to_scif(dev->dev.driver);
101
102 return drv->probe(dev);
103}
104
105static int scif_dev_remove(struct device *d)
106{
107 struct scif_hw_dev *dev = dev_to_scif(d);
108 struct scif_driver *drv = drv_to_scif(dev->dev.driver);
109
110 drv->remove(dev);
111 return 0;
112}
113
114static struct bus_type scif_bus = {
115 .name = "scif_bus",
116 .match = scif_dev_match,
117 .dev_groups = scif_dev_groups,
118 .uevent = scif_uevent,
119 .probe = scif_dev_probe,
120 .remove = scif_dev_remove,
121};
122
123int scif_register_driver(struct scif_driver *driver)
124{
125 driver->driver.bus = &scif_bus;
126 return driver_register(&driver->driver);
127}
128EXPORT_SYMBOL_GPL(scif_register_driver);
129
130void scif_unregister_driver(struct scif_driver *driver)
131{
132 driver_unregister(&driver->driver);
133}
134EXPORT_SYMBOL_GPL(scif_unregister_driver);
135
136static void scif_release_dev(struct device *d)
137{
138 struct scif_hw_dev *sdev = dev_to_scif(d);
139
140 kfree(sdev);
141}
142
143struct scif_hw_dev *
144scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
145 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
146 struct mic_mw *mmio, struct mic_mw *aper, void *dp,
147 void __iomem *rdp, struct dma_chan **chan, int num_chan)
148{
149 int ret;
150 struct scif_hw_dev *sdev;
151
152 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
153 if (!sdev)
154 return ERR_PTR(-ENOMEM);
155
156 sdev->dev.parent = pdev;
157 sdev->id.device = id;
158 sdev->id.vendor = SCIF_DEV_ANY_ID;
159 sdev->dev.archdata.dma_ops = dma_ops;
160 sdev->dev.release = scif_release_dev;
161 sdev->hw_ops = hw_ops;
162 sdev->dnode = dnode;
163 sdev->snode = snode;
164 dev_set_drvdata(&sdev->dev, sdev);
165 sdev->dev.bus = &scif_bus;
166 sdev->mmio = mmio;
167 sdev->aper = aper;
168 sdev->dp = dp;
169 sdev->rdp = rdp;
170 sdev->dev.dma_mask = &sdev->dev.coherent_dma_mask;
171 dma_set_mask(&sdev->dev, DMA_BIT_MASK(64));
172 sdev->dma_ch = chan;
173 sdev->num_dma_ch = num_chan;
174 dev_set_name(&sdev->dev, "scif-dev%u", sdev->dnode);
175 /*
176 * device_register() causes the bus infrastructure to look for a
177 * matching driver.
178 */
179 ret = device_register(&sdev->dev);
180 if (ret)
181 goto free_sdev;
182 return sdev;
183free_sdev:
184 kfree(sdev);
185 return ERR_PTR(ret);
186}
187EXPORT_SYMBOL_GPL(scif_register_device);
188
189void scif_unregister_device(struct scif_hw_dev *sdev)
190{
191 device_unregister(&sdev->dev);
192}
193EXPORT_SYMBOL_GPL(scif_unregister_device);
194
195static int __init scif_init(void)
196{
197 return bus_register(&scif_bus);
198}
199
200static void __exit scif_exit(void)
201{
202 bus_unregister(&scif_bus);
203}
204
205core_initcall(scif_init);
206module_exit(scif_exit);
207
208MODULE_AUTHOR("Intel Corporation");
209MODULE_DESCRIPTION("Intel(R) SCIF Bus driver");
210MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h
new file mode 100644
index 000000000000..335a228a8236
--- /dev/null
+++ b/drivers/misc/mic/bus/scif_bus.h
@@ -0,0 +1,129 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel Symmetric Communications Interface Bus driver.
16 */
17#ifndef _SCIF_BUS_H_
18#define _SCIF_BUS_H_
19/*
20 * Everything a scif driver needs to work with any particular scif
21 * hardware abstraction layer.
22 */
23#include <linux/dma-mapping.h>
24
25#include <linux/mic_common.h>
26#include "../common/mic_dev.h"
27
28struct scif_hw_dev_id {
29 u32 device;
30 u32 vendor;
31};
32
33#define MIC_SCIF_DEV 1
34#define SCIF_DEV_ANY_ID 0xffffffff
35
36/**
37 * scif_hw_dev - representation of a hardware device abstracted for scif
38 * @hw_ops: the hardware ops supported by this device
39 * @id: the device type identification (used to match it with a driver)
40 * @mmio: MMIO memory window
41 * @aper: Aperture memory window
42 * @dev: underlying device
43 * @dnode - The destination node which this device will communicate with.
44 * @snode - The source node for this device.
45 * @dp - Self device page
46 * @rdp - Remote device page
47 * @dma_ch - Array of DMA channels
48 * @num_dma_ch - Number of DMA channels available
49 */
50struct scif_hw_dev {
51 struct scif_hw_ops *hw_ops;
52 struct scif_hw_dev_id id;
53 struct mic_mw *mmio;
54 struct mic_mw *aper;
55 struct device dev;
56 u8 dnode;
57 u8 snode;
58 void *dp;
59 void __iomem *rdp;
60 struct dma_chan **dma_ch;
61 int num_dma_ch;
62};
63
64/**
65 * scif_driver - operations for a scif I/O driver
66 * @driver: underlying device driver (populate name and owner).
67 * @id_table: the ids serviced by this driver.
68 * @probe: the function to call when a device is found. Returns 0 or -errno.
69 * @remove: the function to call when a device is removed.
70 */
71struct scif_driver {
72 struct device_driver driver;
73 const struct scif_hw_dev_id *id_table;
74 int (*probe)(struct scif_hw_dev *dev);
75 void (*remove)(struct scif_hw_dev *dev);
76};
77
78/**
79 * scif_hw_ops - Hardware operations for accessing a SCIF device on the SCIF bus.
80 *
81 * @next_db: Obtain the next available doorbell.
82 * @request_irq: Request an interrupt on a particular doorbell.
83 * @free_irq: Free an interrupt requested previously.
84 * @ack_interrupt: acknowledge an interrupt in the ISR.
85 * @send_intr: Send an interrupt to the remote node on a specified doorbell.
86 * @send_p2p_intr: Send an interrupt to the peer node on a specified doorbell
87 * which is specifically targeted for a peer to peer node.
88 * @ioremap: Map a buffer with the specified physical address and length.
89 * @iounmap: Unmap a buffer previously mapped.
90 */
91struct scif_hw_ops {
92 int (*next_db)(struct scif_hw_dev *sdev);
93 struct mic_irq * (*request_irq)(struct scif_hw_dev *sdev,
94 irqreturn_t (*func)(int irq,
95 void *data),
96 const char *name, void *data,
97 int db);
98 void (*free_irq)(struct scif_hw_dev *sdev,
99 struct mic_irq *cookie, void *data);
100 void (*ack_interrupt)(struct scif_hw_dev *sdev, int num);
101 void (*send_intr)(struct scif_hw_dev *sdev, int db);
102 void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db,
103 struct mic_mw *mw);
104 void __iomem * (*ioremap)(struct scif_hw_dev *sdev,
105 phys_addr_t pa, size_t len);
106 void (*iounmap)(struct scif_hw_dev *sdev, void __iomem *va);
107};
108
109int scif_register_driver(struct scif_driver *driver);
110void scif_unregister_driver(struct scif_driver *driver);
111struct scif_hw_dev *
112scif_register_device(struct device *pdev, int id,
113 struct dma_map_ops *dma_ops,
114 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
115 struct mic_mw *mmio, struct mic_mw *aper,
116 void *dp, void __iomem *rdp,
117 struct dma_chan **chan, int num_chan);
118void scif_unregister_device(struct scif_hw_dev *sdev);
119
120static inline struct scif_hw_dev *dev_to_scif(struct device *dev)
121{
122 return container_of(dev, struct scif_hw_dev, dev);
123}
124
125static inline struct scif_driver *drv_to_scif(struct device_driver *drv)
126{
127 return container_of(drv, struct scif_driver, driver);
128}
129#endif /* _SCIF_BUS_H */
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index 83819eee553b..6338908b2252 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -28,6 +28,8 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/reboot.h> 30#include <linux/reboot.h>
31#include <linux/dmaengine.h>
32#include <linux/kmod.h>
31 33
32#include <linux/mic_common.h> 34#include <linux/mic_common.h>
33#include "../common/mic_dev.h" 35#include "../common/mic_dev.h"
@@ -240,6 +242,111 @@ static void mic_uninit_irq(void)
240 kfree(mdrv->irq_info.irq_usage_count); 242 kfree(mdrv->irq_info.irq_usage_count);
241} 243}
242 244
245static inline struct mic_driver *scdev_to_mdrv(struct scif_hw_dev *scdev)
246{
247 return dev_get_drvdata(scdev->dev.parent);
248}
249
250static struct mic_irq *
251___mic_request_irq(struct scif_hw_dev *scdev,
252 irqreturn_t (*func)(int irq, void *data),
253 const char *name, void *data,
254 int db)
255{
256 return mic_request_card_irq(func, NULL, name, data, db);
257}
258
259static void
260___mic_free_irq(struct scif_hw_dev *scdev,
261 struct mic_irq *cookie, void *data)
262{
263 return mic_free_card_irq(cookie, data);
264}
265
266static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
267{
268 struct mic_driver *mdrv = scdev_to_mdrv(scdev);
269
270 mic_ack_interrupt(&mdrv->mdev);
271}
272
273static int ___mic_next_db(struct scif_hw_dev *scdev)
274{
275 return mic_next_card_db();
276}
277
278static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
279{
280 struct mic_driver *mdrv = scdev_to_mdrv(scdev);
281
282 mic_send_intr(&mdrv->mdev, db);
283}
284
285static void ___mic_send_p2p_intr(struct scif_hw_dev *scdev, int db,
286 struct mic_mw *mw)
287{
288 mic_send_p2p_intr(db, mw);
289}
290
291static void __iomem *
292___mic_ioremap(struct scif_hw_dev *scdev,
293 phys_addr_t pa, size_t len)
294{
295 struct mic_driver *mdrv = scdev_to_mdrv(scdev);
296
297 return mic_card_map(&mdrv->mdev, pa, len);
298}
299
300static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
301{
302 struct mic_driver *mdrv = scdev_to_mdrv(scdev);
303
304 mic_card_unmap(&mdrv->mdev, va);
305}
306
307static struct scif_hw_ops scif_hw_ops = {
308 .request_irq = ___mic_request_irq,
309 .free_irq = ___mic_free_irq,
310 .ack_interrupt = ___mic_ack_interrupt,
311 .next_db = ___mic_next_db,
312 .send_intr = ___mic_send_intr,
313 .send_p2p_intr = ___mic_send_p2p_intr,
314 .ioremap = ___mic_ioremap,
315 .iounmap = ___mic_iounmap,
316};
317
318static int mic_request_dma_chans(struct mic_driver *mdrv)
319{
320 dma_cap_mask_t mask;
321 struct dma_chan *chan;
322
323 request_module("mic_x100_dma");
324 dma_cap_zero(mask);
325 dma_cap_set(DMA_MEMCPY, mask);
326
327 do {
328 chan = dma_request_channel(mask, NULL, NULL);
329 if (chan) {
330 mdrv->dma_ch[mdrv->num_dma_ch++] = chan;
331 if (mdrv->num_dma_ch >= MIC_MAX_DMA_CHAN)
332 break;
333 }
334 } while (chan);
335 dev_info(mdrv->dev, "DMA channels # %d\n", mdrv->num_dma_ch);
336 return mdrv->num_dma_ch;
337}
338
339static void mic_free_dma_chans(struct mic_driver *mdrv)
340{
341 int i = 0;
342
343 for (i = 0; i < mdrv->num_dma_ch; i++) {
344 dma_release_channel(mdrv->dma_ch[i]);
345 mdrv->dma_ch[i] = NULL;
346 }
347 mdrv->num_dma_ch = 0;
348}
349
243/* 350/*
244 * mic_driver_init - MIC driver initialization tasks. 351 * mic_driver_init - MIC driver initialization tasks.
245 * 352 *
@@ -248,6 +355,8 @@ static void mic_uninit_irq(void)
248int __init mic_driver_init(struct mic_driver *mdrv) 355int __init mic_driver_init(struct mic_driver *mdrv)
249{ 356{
250 int rc; 357 int rc;
358 struct mic_bootparam __iomem *bootparam;
359 u8 node_id;
251 360
252 g_drv = mdrv; 361 g_drv = mdrv;
253 /* 362 /*
@@ -268,13 +377,32 @@ int __init mic_driver_init(struct mic_driver *mdrv)
268 rc = mic_shutdown_init(); 377 rc = mic_shutdown_init();
269 if (rc) 378 if (rc)
270 goto irq_uninit; 379 goto irq_uninit;
380 if (!mic_request_dma_chans(mdrv)) {
381 rc = -ENODEV;
382 goto shutdown_uninit;
383 }
271 rc = mic_devices_init(mdrv); 384 rc = mic_devices_init(mdrv);
272 if (rc) 385 if (rc)
273 goto shutdown_uninit; 386 goto dma_free;
387 bootparam = mdrv->dp;
388 node_id = ioread8(&bootparam->node_id);
389 mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV,
390 NULL, &scif_hw_ops,
391 0, node_id, &mdrv->mdev.mmio, NULL,
392 NULL, mdrv->dp, mdrv->dma_ch,
393 mdrv->num_dma_ch);
394 if (IS_ERR(mdrv->scdev)) {
395 rc = PTR_ERR(mdrv->scdev);
396 goto device_uninit;
397 }
274 mic_create_card_debug_dir(mdrv); 398 mic_create_card_debug_dir(mdrv);
275 atomic_notifier_chain_register(&panic_notifier_list, &mic_panic); 399 atomic_notifier_chain_register(&panic_notifier_list, &mic_panic);
276done: 400done:
277 return rc; 401 return rc;
402device_uninit:
403 mic_devices_uninit(mdrv);
404dma_free:
405 mic_free_dma_chans(mdrv);
278shutdown_uninit: 406shutdown_uninit:
279 mic_shutdown_uninit(); 407 mic_shutdown_uninit();
280irq_uninit: 408irq_uninit:
@@ -294,7 +422,9 @@ put:
294void mic_driver_uninit(struct mic_driver *mdrv) 422void mic_driver_uninit(struct mic_driver *mdrv)
295{ 423{
296 mic_delete_card_debug_dir(mdrv); 424 mic_delete_card_debug_dir(mdrv);
425 scif_unregister_device(mdrv->scdev);
297 mic_devices_uninit(mdrv); 426 mic_devices_uninit(mdrv);
427 mic_free_dma_chans(mdrv);
298 /* 428 /*
299 * Inform the host about the shutdown status i.e. poweroff/restart etc. 429 * Inform the host about the shutdown status i.e. poweroff/restart etc.
300 * The module cannot be unloaded so the only code path to call 430 * The module cannot be unloaded so the only code path to call
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
index 844be8fc9b22..1dbf83c41289 100644
--- a/drivers/misc/mic/card/mic_device.h
+++ b/drivers/misc/mic/card/mic_device.h
@@ -29,9 +29,9 @@
29 29
30#include <linux/workqueue.h> 30#include <linux/workqueue.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/irqreturn.h>
33#include <linux/interrupt.h> 32#include <linux/interrupt.h>
34#include <linux/mic_bus.h> 33#include <linux/mic_bus.h>
34#include "../bus/scif_bus.h"
35 35
36/** 36/**
37 * struct mic_intr_info - Contains h/w specific interrupt sources info 37 * struct mic_intr_info - Contains h/w specific interrupt sources info
@@ -73,6 +73,9 @@ struct mic_device {
73 * @irq_info: The OS specific irq information 73 * @irq_info: The OS specific irq information
74 * @intr_info: H/W specific interrupt information. 74 * @intr_info: H/W specific interrupt information.
75 * @dma_mbdev: dma device on the MIC virtual bus. 75 * @dma_mbdev: dma device on the MIC virtual bus.
76 * @dma_ch - Array of DMA channels
77 * @num_dma_ch - Number of DMA channels available
78 * @scdev: SCIF device on the SCIF virtual bus.
76 */ 79 */
77struct mic_driver { 80struct mic_driver {
78 char name[20]; 81 char name[20];
@@ -84,6 +87,9 @@ struct mic_driver {
84 struct mic_irq_info irq_info; 87 struct mic_irq_info irq_info;
85 struct mic_intr_info intr_info; 88 struct mic_intr_info intr_info;
86 struct mbus_device *dma_mbdev; 89 struct mbus_device *dma_mbdev;
90 struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
91 int num_dma_ch;
92 struct scif_hw_dev *scdev;
87}; 93};
88 94
89/** 95/**
@@ -122,10 +128,11 @@ void mic_driver_uninit(struct mic_driver *mdrv);
122int mic_next_card_db(void); 128int mic_next_card_db(void);
123struct mic_irq * 129struct mic_irq *
124mic_request_card_irq(irq_handler_t handler, irq_handler_t thread_fn, 130mic_request_card_irq(irq_handler_t handler, irq_handler_t thread_fn,
125 const char *name, void *data, int intr_src); 131 const char *name, void *data, int db);
126void mic_free_card_irq(struct mic_irq *cookie, void *data); 132void mic_free_card_irq(struct mic_irq *cookie, void *data);
127u32 mic_read_spad(struct mic_device *mdev, unsigned int idx); 133u32 mic_read_spad(struct mic_device *mdev, unsigned int idx);
128void mic_send_intr(struct mic_device *mdev, int doorbell); 134void mic_send_intr(struct mic_device *mdev, int doorbell);
135void mic_send_p2p_intr(int doorbell, struct mic_mw *mw);
129int mic_db_to_irq(struct mic_driver *mdrv, int db); 136int mic_db_to_irq(struct mic_driver *mdrv, int db);
130u32 mic_ack_interrupt(struct mic_device *mdev); 137u32 mic_ack_interrupt(struct mic_device *mdev);
131void mic_hw_intr_init(struct mic_driver *mdrv); 138void mic_hw_intr_init(struct mic_driver *mdrv);
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index e98e537d68e3..77fd41781c2e 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -70,6 +70,41 @@ void mic_send_intr(struct mic_device *mdev, int doorbell)
70 (MIC_X100_SBOX_SDBIC0 + (4 * doorbell))); 70 (MIC_X100_SBOX_SDBIC0 + (4 * doorbell)));
71} 71}
72 72
73/*
74 * mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
75 */
76static void mic_x100_send_sbox_intr(struct mic_mw *mw, int doorbell)
77{
78 u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8;
79 u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS +
80 apic_icr_offset);
81
82 /* for MIC we need to make sure we "hit" the send_icr bit (13) */
83 apicicr_low = (apicicr_low | (1 << 13));
84 /*
85 * Ensure that the interrupt is ordered w.r.t. previous stores
86 * to main memory. Fence instructions are not implemented in X100
87 * since execution is in order but a compiler barrier is still
88 * required.
89 */
90 wmb();
91 mic_mmio_write(mw, apicicr_low,
92 MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
93}
94
95static void mic_x100_send_rdmasr_intr(struct mic_mw *mw, int doorbell)
96{
97 int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2);
98 /*
99 * Ensure that the interrupt is ordered w.r.t. previous stores
100 * to main memory. Fence instructions are not implemented in X100
101 * since execution is in order but a compiler barrier is still
102 * required.
103 */
104 wmb();
105 mic_mmio_write(mw, 0, MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset);
106}
107
73/** 108/**
74 * mic_ack_interrupt - Device specific interrupt handling. 109 * mic_ack_interrupt - Device specific interrupt handling.
75 * @mdev: pointer to mic_device instance 110 * @mdev: pointer to mic_device instance
@@ -91,6 +126,18 @@ static inline int mic_get_rdmasr_irq(int index)
91 return MIC_X100_RDMASR_IRQ_BASE + index; 126 return MIC_X100_RDMASR_IRQ_BASE + index;
92} 127}
93 128
129void mic_send_p2p_intr(int db, struct mic_mw *mw)
130{
131 int rdmasr_index;
132
133 if (db < MIC_X100_NUM_SBOX_IRQ) {
134 mic_x100_send_sbox_intr(mw, db);
135 } else {
136 rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
137 mic_x100_send_rdmasr_intr(mw, rdmasr_index);
138 }
139}
140
94/** 141/**
95 * mic_hw_intr_init - Initialize h/w specific interrupt 142 * mic_hw_intr_init - Initialize h/w specific interrupt
96 * information. 143 * information.
@@ -113,11 +160,15 @@ void mic_hw_intr_init(struct mic_driver *mdrv)
113int mic_db_to_irq(struct mic_driver *mdrv, int db) 160int mic_db_to_irq(struct mic_driver *mdrv, int db)
114{ 161{
115 int rdmasr_index; 162 int rdmasr_index;
163
164 /*
165 * The total number of doorbell interrupts on the card are 16. Indices
166 * 0-8 falls in the SBOX category and 8-15 fall in the RDMASR category.
167 */
116 if (db < MIC_X100_NUM_SBOX_IRQ) { 168 if (db < MIC_X100_NUM_SBOX_IRQ) {
117 return mic_get_sbox_irq(db); 169 return mic_get_sbox_irq(db);
118 } else { 170 } else {
119 rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ + 171 rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
120 MIC_X100_RDMASR_IRQ_BASE;
121 return mic_get_rdmasr_irq(rdmasr_index); 172 return mic_get_rdmasr_irq(rdmasr_index);
122 } 173 }
123} 174}
@@ -243,10 +294,16 @@ static void mic_platform_shutdown(struct platform_device *pdev)
243 mic_remove(pdev); 294 mic_remove(pdev);
244} 295}
245 296
297static u64 mic_dma_mask = DMA_BIT_MASK(64);
298
246static struct platform_device mic_platform_dev = { 299static struct platform_device mic_platform_dev = {
247 .name = mic_driver_name, 300 .name = mic_driver_name,
248 .id = 0, 301 .id = 0,
249 .num_resources = 0, 302 .num_resources = 0,
303 .dev = {
304 .dma_mask = &mic_dma_mask,
305 .coherent_dma_mask = DMA_BIT_MASK(64),
306 },
250}; 307};
251 308
252static struct platform_driver __refdata mic_platform_driver = { 309static struct platform_driver __refdata mic_platform_driver = {
diff --git a/drivers/misc/mic/card/mic_x100.h b/drivers/misc/mic/card/mic_x100.h
index d66ea55639c3..7e2224934ba8 100644
--- a/drivers/misc/mic/card/mic_x100.h
+++ b/drivers/misc/mic/card/mic_x100.h
@@ -35,6 +35,7 @@
35#define MIC_X100_SBOX_SDBIC0 0x0000CC90 35#define MIC_X100_SBOX_SDBIC0 0x0000CC90
36#define MIC_X100_SBOX_SDBIC0_DBREQ_BIT 0x80000000 36#define MIC_X100_SBOX_SDBIC0_DBREQ_BIT 0x80000000
37#define MIC_X100_SBOX_RDMASR0 0x0000B180 37#define MIC_X100_SBOX_RDMASR0 0x0000B180
38#define MIC_X100_SBOX_APICICR0 0x0000A9D0
38 39
39#define MIC_X100_MAX_DOORBELL_IDX 8 40#define MIC_X100_MAX_DOORBELL_IDX 8
40 41
diff --git a/drivers/misc/mic/common/mic_dev.h b/drivers/misc/mic/common/mic_dev.h
index 92999c2bbf82..0b58c46045dc 100644
--- a/drivers/misc/mic/common/mic_dev.h
+++ b/drivers/misc/mic/common/mic_dev.h
@@ -48,4 +48,7 @@ struct mic_mw {
48#define MIC_VIRTIO_PARAM_DEV_REMOVE 0x1 48#define MIC_VIRTIO_PARAM_DEV_REMOVE 0x1
49#define MIC_VIRTIO_PARAM_CONFIG_CHANGED 0x2 49#define MIC_VIRTIO_PARAM_CONFIG_CHANGED 0x2
50 50
51/* Maximum number of DMA channels */
52#define MIC_MAX_DMA_CHAN 4
53
51#endif 54#endif
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index d9fa609da061..e5f6a5e7bca1 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -21,6 +21,7 @@
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/kmod.h>
24 25
25#include <linux/mic_common.h> 26#include <linux/mic_common.h>
26#include <linux/mic_bus.h> 27#include <linux/mic_bus.h>
@@ -29,6 +30,188 @@
29#include "mic_smpt.h" 30#include "mic_smpt.h"
30#include "mic_virtio.h" 31#include "mic_virtio.h"
31 32
33static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
34{
35 return dev_get_drvdata(scdev->dev.parent);
36}
37
38static void *__mic_dma_alloc(struct device *dev, size_t size,
39 dma_addr_t *dma_handle, gfp_t gfp,
40 struct dma_attrs *attrs)
41{
42 struct scif_hw_dev *scdev = dev_get_drvdata(dev);
43 struct mic_device *mdev = scdev_to_mdev(scdev);
44 dma_addr_t tmp;
45 void *va = kmalloc(size, gfp);
46
47 if (va) {
48 tmp = mic_map_single(mdev, va, size);
49 if (dma_mapping_error(dev, tmp)) {
50 kfree(va);
51 va = NULL;
52 } else {
53 *dma_handle = tmp;
54 }
55 }
56 return va;
57}
58
59static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
60 dma_addr_t dma_handle, struct dma_attrs *attrs)
61{
62 struct scif_hw_dev *scdev = dev_get_drvdata(dev);
63 struct mic_device *mdev = scdev_to_mdev(scdev);
64
65 mic_unmap_single(mdev, dma_handle, size);
66 kfree(vaddr);
67}
68
69static dma_addr_t
70__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
71 size_t size, enum dma_data_direction dir,
72 struct dma_attrs *attrs)
73{
74 void *va = phys_to_virt(page_to_phys(page)) + offset;
75 struct scif_hw_dev *scdev = dev_get_drvdata(dev);
76 struct mic_device *mdev = scdev_to_mdev(scdev);
77
78 return mic_map_single(mdev, va, size);
79}
80
81static void
82__mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
83 size_t size, enum dma_data_direction dir,
84 struct dma_attrs *attrs)
85{
86 struct scif_hw_dev *scdev = dev_get_drvdata(dev);
87 struct mic_device *mdev = scdev_to_mdev(scdev);
88
89 mic_unmap_single(mdev, dma_addr, size);
90}
91
92static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
93 int nents, enum dma_data_direction dir,
94 struct dma_attrs *attrs)
95{
96 struct scif_hw_dev *scdev = dev_get_drvdata(dev);
97 struct mic_device *mdev = scdev_to_mdev(scdev);
98 struct scatterlist *s;
99 int i, j, ret;
100 dma_addr_t da;
101
102 ret = dma_map_sg(mdev->sdev->parent, sg, nents, dir);
103 if (ret <= 0)
104 return 0;
105
106 for_each_sg(sg, s, nents, i) {
107 da = mic_map(mdev, sg_dma_address(s) + s->offset, s->length);
108 if (!da)
109 goto err;
110 sg_dma_address(s) = da;
111 }
112 return nents;
113err:
114 for_each_sg(sg, s, i, j) {
115 mic_unmap(mdev, sg_dma_address(s), s->length);
116 sg_dma_address(s) = mic_to_dma_addr(mdev, sg_dma_address(s));
117 }
118 dma_unmap_sg(mdev->sdev->parent, sg, nents, dir);
119 return 0;
120}
121
122static void __mic_dma_unmap_sg(struct device *dev,
123 struct scatterlist *sg, int nents,
124 enum dma_data_direction dir,
125 struct dma_attrs *attrs)
126{
127 struct scif_hw_dev *scdev = dev_get_drvdata(dev);
128 struct mic_device *mdev = scdev_to_mdev(scdev);
129 struct scatterlist *s;
130 dma_addr_t da;
131 int i;
132
133 for_each_sg(sg, s, nents, i) {
134 da = mic_to_dma_addr(mdev, sg_dma_address(s));
135 mic_unmap(mdev, sg_dma_address(s), s->length);
136 sg_dma_address(s) = da;
137 }
138 dma_unmap_sg(mdev->sdev->parent, sg, nents, dir);
139}
140
141static struct dma_map_ops __mic_dma_ops = {
142 .alloc = __mic_dma_alloc,
143 .free = __mic_dma_free,
144 .map_page = __mic_dma_map_page,
145 .unmap_page = __mic_dma_unmap_page,
146 .map_sg = __mic_dma_map_sg,
147 .unmap_sg = __mic_dma_unmap_sg,
148};
149
150static struct mic_irq *
151___mic_request_irq(struct scif_hw_dev *scdev,
152 irqreturn_t (*func)(int irq, void *data),
153 const char *name,
154 void *data, int db)
155{
156 struct mic_device *mdev = scdev_to_mdev(scdev);
157
158 return mic_request_threaded_irq(mdev, func, NULL, name, data,
159 db, MIC_INTR_DB);
160}
161
162static void
163___mic_free_irq(struct scif_hw_dev *scdev,
164 struct mic_irq *cookie, void *data)
165{
166 struct mic_device *mdev = scdev_to_mdev(scdev);
167
168 return mic_free_irq(mdev, cookie, data);
169}
170
171static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
172{
173 struct mic_device *mdev = scdev_to_mdev(scdev);
174
175 mdev->ops->intr_workarounds(mdev);
176}
177
178static int ___mic_next_db(struct scif_hw_dev *scdev)
179{
180 struct mic_device *mdev = scdev_to_mdev(scdev);
181
182 return mic_next_db(mdev);
183}
184
185static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
186{
187 struct mic_device *mdev = scdev_to_mdev(scdev);
188
189 mdev->ops->send_intr(mdev, db);
190}
191
192static void __iomem *___mic_ioremap(struct scif_hw_dev *scdev,
193 phys_addr_t pa, size_t len)
194{
195 struct mic_device *mdev = scdev_to_mdev(scdev);
196
197 return mdev->aper.va + pa;
198}
199
200static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
201{
202 /* nothing to do */
203}
204
205static struct scif_hw_ops scif_hw_ops = {
206 .request_irq = ___mic_request_irq,
207 .free_irq = ___mic_free_irq,
208 .ack_interrupt = ___mic_ack_interrupt,
209 .next_db = ___mic_next_db,
210 .send_intr = ___mic_send_intr,
211 .ioremap = ___mic_ioremap,
212 .iounmap = ___mic_iounmap,
213};
214
32static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev) 215static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
33{ 216{
34 return dev_get_drvdata(mbdev->dev.parent); 217 return dev_get_drvdata(mbdev->dev.parent);
@@ -127,6 +310,58 @@ void mic_bootparam_init(struct mic_device *mdev)
127 bootparam->h2c_config_db = -1; 310 bootparam->h2c_config_db = -1;
128 bootparam->shutdown_status = 0; 311 bootparam->shutdown_status = 0;
129 bootparam->shutdown_card = 0; 312 bootparam->shutdown_card = 0;
313 /* Total nodes = number of MICs + 1 for self node */
314 bootparam->tot_nodes = atomic_read(&g_num_mics) + 1;
315 bootparam->node_id = mdev->id + 1;
316 bootparam->scif_host_dma_addr = 0x0;
317 bootparam->scif_card_dma_addr = 0x0;
318 bootparam->c2h_scif_db = -1;
319 bootparam->h2c_scif_db = -1;
320}
321
322/**
323 * mic_request_dma_chans - Request DMA channels
324 * @mdev: pointer to mic_device instance
325 *
326 * returns number of DMA channels acquired
327 */
328static int mic_request_dma_chans(struct mic_device *mdev)
329{
330 dma_cap_mask_t mask;
331 struct dma_chan *chan;
332
333 request_module("mic_x100_dma");
334 dma_cap_zero(mask);
335 dma_cap_set(DMA_MEMCPY, mask);
336
337 do {
338 chan = dma_request_channel(mask, mdev->ops->dma_filter,
339 mdev->sdev->parent);
340 if (chan) {
341 mdev->dma_ch[mdev->num_dma_ch++] = chan;
342 if (mdev->num_dma_ch >= MIC_MAX_DMA_CHAN)
343 break;
344 }
345 } while (chan);
346 dev_info(mdev->sdev->parent, "DMA channels # %d\n", mdev->num_dma_ch);
347 return mdev->num_dma_ch;
348}
349
350/**
351 * mic_free_dma_chans - release DMA channels
352 * @mdev: pointer to mic_device instance
353 *
354 * returns none
355 */
356static void mic_free_dma_chans(struct mic_device *mdev)
357{
358 int i = 0;
359
360 for (i = 0; i < mdev->num_dma_ch; i++) {
361 dma_release_channel(mdev->dma_ch[i]);
362 mdev->dma_ch[i] = NULL;
363 }
364 mdev->num_dma_ch = 0;
130} 365}
131 366
132/** 367/**
@@ -141,6 +376,7 @@ int mic_start(struct mic_device *mdev, const char *buf)
141{ 376{
142 int rc; 377 int rc;
143 mutex_lock(&mdev->mic_mutex); 378 mutex_lock(&mdev->mic_mutex);
379 mic_bootparam_init(mdev);
144retry: 380retry:
145 if (MIC_OFFLINE != mdev->state) { 381 if (MIC_OFFLINE != mdev->state) {
146 rc = -EINVAL; 382 rc = -EINVAL;
@@ -161,14 +397,22 @@ retry:
161 rc = PTR_ERR(mdev->dma_mbdev); 397 rc = PTR_ERR(mdev->dma_mbdev);
162 goto unlock_ret; 398 goto unlock_ret;
163 } 399 }
164 mdev->dma_ch = mic_request_dma_chan(mdev); 400 if (!mic_request_dma_chans(mdev)) {
165 if (!mdev->dma_ch) { 401 rc = -ENODEV;
166 rc = -ENXIO;
167 goto dma_remove; 402 goto dma_remove;
168 } 403 }
404 mdev->scdev = scif_register_device(mdev->sdev->parent, MIC_SCIF_DEV,
405 &__mic_dma_ops, &scif_hw_ops,
406 mdev->id + 1, 0, &mdev->mmio,
407 &mdev->aper, mdev->dp, NULL,
408 mdev->dma_ch, mdev->num_dma_ch);
409 if (IS_ERR(mdev->scdev)) {
410 rc = PTR_ERR(mdev->scdev);
411 goto dma_free;
412 }
169 rc = mdev->ops->load_mic_fw(mdev, buf); 413 rc = mdev->ops->load_mic_fw(mdev, buf);
170 if (rc) 414 if (rc)
171 goto dma_release; 415 goto scif_remove;
172 mic_smpt_restore(mdev); 416 mic_smpt_restore(mdev);
173 mic_intr_restore(mdev); 417 mic_intr_restore(mdev);
174 mdev->intr_ops->enable_interrupts(mdev); 418 mdev->intr_ops->enable_interrupts(mdev);
@@ -177,8 +421,10 @@ retry:
177 mdev->ops->send_firmware_intr(mdev); 421 mdev->ops->send_firmware_intr(mdev);
178 mic_set_state(mdev, MIC_ONLINE); 422 mic_set_state(mdev, MIC_ONLINE);
179 goto unlock_ret; 423 goto unlock_ret;
180dma_release: 424scif_remove:
181 dma_release_channel(mdev->dma_ch); 425 scif_unregister_device(mdev->scdev);
426dma_free:
427 mic_free_dma_chans(mdev);
182dma_remove: 428dma_remove:
183 mbus_unregister_device(mdev->dma_mbdev); 429 mbus_unregister_device(mdev->dma_mbdev);
184unlock_ret: 430unlock_ret:
@@ -197,11 +443,9 @@ void mic_stop(struct mic_device *mdev, bool force)
197{ 443{
198 mutex_lock(&mdev->mic_mutex); 444 mutex_lock(&mdev->mic_mutex);
199 if (MIC_OFFLINE != mdev->state || force) { 445 if (MIC_OFFLINE != mdev->state || force) {
446 scif_unregister_device(mdev->scdev);
200 mic_virtio_reset_devices(mdev); 447 mic_virtio_reset_devices(mdev);
201 if (mdev->dma_ch) { 448 mic_free_dma_chans(mdev);
202 dma_release_channel(mdev->dma_ch);
203 mdev->dma_ch = NULL;
204 }
205 mbus_unregister_device(mdev->dma_mbdev); 449 mbus_unregister_device(mdev->dma_mbdev);
206 mic_bootparam_init(mdev); 450 mic_bootparam_init(mdev);
207 mic_reset(mdev); 451 mic_reset(mdev);
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 687e9aacf3bb..3c9ea4896f3c 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -214,6 +214,19 @@ static int mic_dp_show(struct seq_file *s, void *pos)
214 bootparam->shutdown_status); 214 bootparam->shutdown_status);
215 seq_printf(s, "Bootparam: shutdown_card %d\n", 215 seq_printf(s, "Bootparam: shutdown_card %d\n",
216 bootparam->shutdown_card); 216 bootparam->shutdown_card);
217 seq_printf(s, "Bootparam: tot_nodes %d\n",
218 bootparam->tot_nodes);
219 seq_printf(s, "Bootparam: node_id %d\n",
220 bootparam->node_id);
221 seq_printf(s, "Bootparam: c2h_scif_db %d\n",
222 bootparam->c2h_scif_db);
223 seq_printf(s, "Bootparam: h2c_scif_db %d\n",
224 bootparam->h2c_scif_db);
225 seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
226 bootparam->scif_host_dma_addr);
227 seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
228 bootparam->scif_card_dma_addr);
229
217 230
218 for (i = sizeof(*bootparam); i < MIC_DP_SIZE; 231 for (i = sizeof(*bootparam); i < MIC_DP_SIZE;
219 i += mic_total_desc_size(d)) { 232 i += mic_total_desc_size(d)) {
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 016bd15a7bd1..01a7555aa648 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -27,7 +27,7 @@
27#include <linux/irqreturn.h> 27#include <linux/irqreturn.h>
28#include <linux/dmaengine.h> 28#include <linux/dmaengine.h>
29#include <linux/mic_bus.h> 29#include <linux/mic_bus.h>
30 30#include "../bus/scif_bus.h"
31#include "mic_intr.h" 31#include "mic_intr.h"
32 32
33/* The maximum number of MIC devices supported in a single host system. */ 33/* The maximum number of MIC devices supported in a single host system. */
@@ -90,7 +90,9 @@ enum mic_stepping {
90 * @vdev_list: list of virtio devices. 90 * @vdev_list: list of virtio devices.
91 * @pm_notifier: Handles PM notifications from the OS. 91 * @pm_notifier: Handles PM notifications from the OS.
92 * @dma_mbdev: MIC BUS DMA device. 92 * @dma_mbdev: MIC BUS DMA device.
93 * @dma_ch: DMA channel reserved by this driver for use by virtio devices. 93 * @dma_ch - Array of DMA channels
94 * @num_dma_ch - Number of DMA channels available
95 * @scdev: SCIF device on the SCIF virtual bus.
94 */ 96 */
95struct mic_device { 97struct mic_device {
96 struct mic_mw mmio; 98 struct mic_mw mmio;
@@ -129,7 +131,9 @@ struct mic_device {
129 struct list_head vdev_list; 131 struct list_head vdev_list;
130 struct notifier_block pm_notifier; 132 struct notifier_block pm_notifier;
131 struct mbus_device *dma_mbdev; 133 struct mbus_device *dma_mbdev;
132 struct dma_chan *dma_ch; 134 struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
135 int num_dma_ch;
136 struct scif_hw_dev *scdev;
133}; 137};
134 138
135/** 139/**
@@ -228,4 +232,5 @@ void mic_exit_debugfs(void);
228void mic_prepare_suspend(struct mic_device *mdev); 232void mic_prepare_suspend(struct mic_device *mdev);
229void mic_complete_resume(struct mic_device *mdev); 233void mic_complete_resume(struct mic_device *mdev);
230void mic_suspend(struct mic_device *mdev); 234void mic_suspend(struct mic_device *mdev);
235extern atomic_t g_num_mics;
231#endif 236#endif
diff --git a/drivers/misc/mic/host/mic_intr.h b/drivers/misc/mic/host/mic_intr.h
index 9f783d4ad7f1..cce28824db8a 100644
--- a/drivers/misc/mic/host/mic_intr.h
+++ b/drivers/misc/mic/host/mic_intr.h
@@ -28,8 +28,9 @@
28 * 3 for virtio network, console and block devices. 28 * 3 for virtio network, console and block devices.
29 * 1 for card shutdown notifications. 29 * 1 for card shutdown notifications.
30 * 4 for host owned DMA channels. 30 * 4 for host owned DMA channels.
31 * 1 for SCIF
31 */ 32 */
32#define MIC_MIN_MSIX 8 33#define MIC_MIN_MSIX 9
33#define MIC_NUM_OFFSETS 32 34#define MIC_NUM_OFFSETS 32
34 35
35/** 36/**
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index ab37a3117d23..456462932151 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -67,6 +67,8 @@ static struct ida g_mic_ida;
67static struct class *g_mic_class; 67static struct class *g_mic_class;
68/* Base device node number for MIC devices */ 68/* Base device node number for MIC devices */
69static dev_t g_mic_devno; 69static dev_t g_mic_devno;
70/* Track the total number of MIC devices */
71atomic_t g_num_mics;
70 72
71static const struct file_operations mic_fops = { 73static const struct file_operations mic_fops = {
72 .open = mic_open, 74 .open = mic_open,
@@ -408,6 +410,7 @@ static int mic_probe(struct pci_dev *pdev,
408 dev_err(&pdev->dev, "cdev_add err id %d rc %d\n", mdev->id, rc); 410 dev_err(&pdev->dev, "cdev_add err id %d rc %d\n", mdev->id, rc);
409 goto cleanup_debug_dir; 411 goto cleanup_debug_dir;
410 } 412 }
413 atomic_inc(&g_num_mics);
411 return 0; 414 return 0;
412cleanup_debug_dir: 415cleanup_debug_dir:
413 mic_delete_debug_dir(mdev); 416 mic_delete_debug_dir(mdev);
@@ -459,6 +462,7 @@ static void mic_remove(struct pci_dev *pdev)
459 return; 462 return;
460 463
461 mic_stop(mdev, false); 464 mic_stop(mdev, false);
465 atomic_dec(&g_num_mics);
462 cdev_del(&mdev->cdev); 466 cdev_del(&mdev->cdev);
463 mic_delete_debug_dir(mdev); 467 mic_delete_debug_dir(mdev);
464 mutex_lock(&mdev->mic_mutex); 468 mutex_lock(&mdev->mic_mutex);
@@ -478,6 +482,7 @@ static void mic_remove(struct pci_dev *pdev)
478 ida_simple_remove(&g_mic_ida, mdev->id); 482 ida_simple_remove(&g_mic_ida, mdev->id);
479 kfree(mdev); 483 kfree(mdev);
480} 484}
485
481static struct pci_driver mic_driver = { 486static struct pci_driver mic_driver = {
482 .name = mic_driver_name, 487 .name = mic_driver_name,
483 .id_table = mic_pci_tbl, 488 .id_table = mic_pci_tbl,
@@ -512,6 +517,7 @@ static int __init mic_init(void)
512 } 517 }
513 return ret; 518 return ret;
514cleanup_debugfs: 519cleanup_debugfs:
520 ida_destroy(&g_mic_ida);
515 mic_exit_debugfs(); 521 mic_exit_debugfs();
516 class_destroy(g_mic_class); 522 class_destroy(g_mic_class);
517cleanup_chrdev: 523cleanup_chrdev:
diff --git a/drivers/misc/mic/host/mic_smpt.c b/drivers/misc/mic/host/mic_smpt.c
index fae474c4899e..cec82034875f 100644
--- a/drivers/misc/mic/host/mic_smpt.c
+++ b/drivers/misc/mic/host/mic_smpt.c
@@ -174,8 +174,7 @@ static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr,
174 * 174 *
175 * returns a DMA address. 175 * returns a DMA address.
176 */ 176 */
177static dma_addr_t 177dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
178mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
179{ 178{
180 struct mic_smpt_info *smpt_info = mdev->smpt; 179 struct mic_smpt_info *smpt_info = mdev->smpt;
181 int spt; 180 int spt;
@@ -214,7 +213,7 @@ dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
214 if (!size || size > mic_max_system_memory(mdev)) 213 if (!size || size > mic_max_system_memory(mdev))
215 return mic_addr; 214 return mic_addr;
216 215
217 ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL); 216 ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
218 if (!ref) 217 if (!ref)
219 return mic_addr; 218 return mic_addr;
220 219
@@ -271,7 +270,7 @@ void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
271 } 270 }
272 271
273 spt = mic_sys_addr_to_smpt(mdev, mic_addr); 272 spt = mic_sys_addr_to_smpt(mdev, mic_addr);
274 ref = kmalloc(mdev->smpt->info.num_reg * sizeof(s64), GFP_KERNEL); 273 ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
275 if (!ref) 274 if (!ref)
276 return; 275 return;
277 276
diff --git a/drivers/misc/mic/host/mic_smpt.h b/drivers/misc/mic/host/mic_smpt.h
index 51970abfe7df..68721c6e7455 100644
--- a/drivers/misc/mic/host/mic_smpt.h
+++ b/drivers/misc/mic/host/mic_smpt.h
@@ -78,6 +78,7 @@ void mic_unmap_single(struct mic_device *mdev,
78dma_addr_t mic_map(struct mic_device *mdev, 78dma_addr_t mic_map(struct mic_device *mdev,
79 dma_addr_t dma_addr, size_t size); 79 dma_addr_t dma_addr, size_t size);
80void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size); 80void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size);
81dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr);
81 82
82/** 83/**
83 * mic_map_error - Check a MIC address for errors. 84 * mic_map_error - Check a MIC address for errors.
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index a020e4eb435a..cc08e9f733c9 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -40,7 +40,7 @@ static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
40{ 40{
41 int err = 0; 41 int err = 0;
42 struct dma_async_tx_descriptor *tx; 42 struct dma_async_tx_descriptor *tx;
43 struct dma_chan *mic_ch = mdev->dma_ch; 43 struct dma_chan *mic_ch = mdev->dma_ch[0];
44 44
45 if (!mic_ch) { 45 if (!mic_ch) {
46 err = -EBUSY; 46 err = -EBUSY;
@@ -80,7 +80,7 @@ static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
80 struct mic_device *mdev = mvdev->mdev; 80 struct mic_device *mdev = mvdev->mdev;
81 void __iomem *dbuf = mdev->aper.va + daddr; 81 void __iomem *dbuf = mdev->aper.va + daddr;
82 struct mic_vringh *mvr = &mvdev->mvr[vr_idx]; 82 struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
83 size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align; 83 size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
84 size_t dma_offset; 84 size_t dma_offset;
85 size_t partlen; 85 size_t partlen;
86 int err; 86 int err;
@@ -129,7 +129,7 @@ static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
129 struct mic_device *mdev = mvdev->mdev; 129 struct mic_device *mdev = mvdev->mdev;
130 void __iomem *dbuf = mdev->aper.va + daddr; 130 void __iomem *dbuf = mdev->aper.va + daddr;
131 struct mic_vringh *mvr = &mvdev->mvr[vr_idx]; 131 struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
132 size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align; 132 size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
133 size_t partlen; 133 size_t partlen;
134 int err; 134 int err;
135 135
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index b7a21e11dcdf..3341e90dede4 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -167,8 +167,7 @@ static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
167 if (doorbell < MIC_X100_NUM_SBOX_IRQ) { 167 if (doorbell < MIC_X100_NUM_SBOX_IRQ) {
168 mic_x100_send_sbox_intr(mdev, doorbell); 168 mic_x100_send_sbox_intr(mdev, doorbell);
169 } else { 169 } else {
170 rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ + 170 rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ;
171 MIC_X100_RDMASR_IRQ_BASE;
172 mic_x100_send_rdmasr_intr(mdev, rdmasr_db); 171 mic_x100_send_rdmasr_intr(mdev, rdmasr_db);
173 } 172 }
174} 173}
diff --git a/drivers/misc/mic/scif/Makefile b/drivers/misc/mic/scif/Makefile
new file mode 100644
index 000000000000..bf10bb7e2b91
--- /dev/null
+++ b/drivers/misc/mic/scif/Makefile
@@ -0,0 +1,15 @@
1#
2# Makefile - SCIF driver.
3# Copyright(c) 2014, Intel Corporation.
4#
5obj-$(CONFIG_SCIF) += scif.o
6scif-objs := scif_main.o
7scif-objs += scif_peer_bus.o
8scif-objs += scif_ports.o
9scif-objs += scif_debugfs.o
10scif-objs += scif_fd.o
11scif-objs += scif_api.o
12scif-objs += scif_epd.o
13scif-objs += scif_rb.o
14scif-objs += scif_nodeqp.o
15scif-objs += scif_nm.o
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
new file mode 100644
index 000000000000..f39d3135a9ef
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -0,0 +1,1276 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include <linux/scif.h>
19#include "scif_main.h"
20#include "scif_map.h"
21
22static const char * const scif_ep_states[] = {
23 "Unbound",
24 "Bound",
25 "Listening",
26 "Connected",
27 "Connecting",
28 "Mapping",
29 "Closing",
30 "Close Listening",
31 "Disconnected",
32 "Zombie"};
33
34enum conn_async_state {
35 ASYNC_CONN_IDLE = 1, /* ep setup for async connect */
36 ASYNC_CONN_INPROGRESS, /* async connect in progress */
37 ASYNC_CONN_FLUSH_WORK /* async work flush in progress */
38};
39
40scif_epd_t scif_open(void)
41{
42 struct scif_endpt *ep;
43
44 might_sleep();
45 ep = kzalloc(sizeof(*ep), GFP_KERNEL);
46 if (!ep)
47 goto err_ep_alloc;
48
49 ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL);
50 if (!ep->qp_info.qp)
51 goto err_qp_alloc;
52
53 spin_lock_init(&ep->lock);
54 mutex_init(&ep->sendlock);
55 mutex_init(&ep->recvlock);
56
57 ep->state = SCIFEP_UNBOUND;
58 dev_dbg(scif_info.mdev.this_device,
59 "SCIFAPI open: ep %p success\n", ep);
60 return ep;
61
62err_qp_alloc:
63 kfree(ep);
64err_ep_alloc:
65 return NULL;
66}
67EXPORT_SYMBOL_GPL(scif_open);
68
69/*
70 * scif_disconnect_ep - Disconnects the endpoint if found
71 * @epd: The end point returned from scif_open()
72 */
73static struct scif_endpt *scif_disconnect_ep(struct scif_endpt *ep)
74{
75 struct scifmsg msg;
76 struct scif_endpt *fep = NULL;
77 struct scif_endpt *tmpep;
78 struct list_head *pos, *tmpq;
79 int err;
80
81 /*
82 * Wake up any threads blocked in send()/recv() before closing
83 * out the connection. Grabbing and releasing the send/recv lock
84 * will ensure that any blocked senders/receivers have exited for
85 * Ring 0 endpoints. It is a Ring 0 bug to call send/recv after
86 * close. Ring 3 endpoints are not affected since close will not
87 * be called while there are IOCTLs executing.
88 */
89 wake_up_interruptible(&ep->sendwq);
90 wake_up_interruptible(&ep->recvwq);
91 mutex_lock(&ep->sendlock);
92 mutex_unlock(&ep->sendlock);
93 mutex_lock(&ep->recvlock);
94 mutex_unlock(&ep->recvlock);
95
96 /* Remove from the connected list */
97 mutex_lock(&scif_info.connlock);
98 list_for_each_safe(pos, tmpq, &scif_info.connected) {
99 tmpep = list_entry(pos, struct scif_endpt, list);
100 if (tmpep == ep) {
101 list_del(pos);
102 fep = tmpep;
103 spin_lock(&ep->lock);
104 break;
105 }
106 }
107
108 if (!fep) {
109 /*
110 * The other side has completed the disconnect before
111 * the end point can be removed from the list. Therefore
112 * the ep lock is not locked, traverse the disconnected
113 * list to find the endpoint and release the conn lock.
114 */
115 list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
116 tmpep = list_entry(pos, struct scif_endpt, list);
117 if (tmpep == ep) {
118 list_del(pos);
119 break;
120 }
121 }
122 mutex_unlock(&scif_info.connlock);
123 return NULL;
124 }
125
126 init_completion(&ep->discon);
127 msg.uop = SCIF_DISCNCT;
128 msg.src = ep->port;
129 msg.dst = ep->peer;
130 msg.payload[0] = (u64)ep;
131 msg.payload[1] = ep->remote_ep;
132
133 err = scif_nodeqp_send(ep->remote_dev, &msg);
134 spin_unlock(&ep->lock);
135 mutex_unlock(&scif_info.connlock);
136
137 if (!err)
138 /* Wait for the remote node to respond with SCIF_DISCNT_ACK */
139 wait_for_completion_timeout(&ep->discon,
140 SCIF_NODE_ALIVE_TIMEOUT);
141 return ep;
142}
143
144int scif_close(scif_epd_t epd)
145{
146 struct scif_endpt *ep = (struct scif_endpt *)epd;
147 struct scif_endpt *tmpep;
148 struct list_head *pos, *tmpq;
149 enum scif_epd_state oldstate;
150 bool flush_conn;
151
152 dev_dbg(scif_info.mdev.this_device, "SCIFAPI close: ep %p %s\n",
153 ep, scif_ep_states[ep->state]);
154 might_sleep();
155 spin_lock(&ep->lock);
156 flush_conn = (ep->conn_async_state == ASYNC_CONN_INPROGRESS);
157 spin_unlock(&ep->lock);
158
159 if (flush_conn)
160 flush_work(&scif_info.conn_work);
161
162 spin_lock(&ep->lock);
163 oldstate = ep->state;
164
165 ep->state = SCIFEP_CLOSING;
166
167 switch (oldstate) {
168 case SCIFEP_ZOMBIE:
169 case SCIFEP_DISCONNECTED:
170 spin_unlock(&ep->lock);
171 /* Remove from the disconnected list */
172 mutex_lock(&scif_info.connlock);
173 list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
174 tmpep = list_entry(pos, struct scif_endpt, list);
175 if (tmpep == ep) {
176 list_del(pos);
177 break;
178 }
179 }
180 mutex_unlock(&scif_info.connlock);
181 break;
182 case SCIFEP_UNBOUND:
183 case SCIFEP_BOUND:
184 case SCIFEP_CONNECTING:
185 spin_unlock(&ep->lock);
186 break;
187 case SCIFEP_MAPPING:
188 case SCIFEP_CONNECTED:
189 case SCIFEP_CLOSING:
190 {
191 spin_unlock(&ep->lock);
192 scif_disconnect_ep(ep);
193 break;
194 }
195 case SCIFEP_LISTENING:
196 case SCIFEP_CLLISTEN:
197 {
198 struct scif_conreq *conreq;
199 struct scifmsg msg;
200 struct scif_endpt *aep;
201
202 spin_unlock(&ep->lock);
203 spin_lock(&scif_info.eplock);
204
205 /* remove from listen list */
206 list_for_each_safe(pos, tmpq, &scif_info.listen) {
207 tmpep = list_entry(pos, struct scif_endpt, list);
208 if (tmpep == ep)
209 list_del(pos);
210 }
211 /* Remove any dangling accepts */
212 while (ep->acceptcnt) {
213 aep = list_first_entry(&ep->li_accept,
214 struct scif_endpt, liacceptlist);
215 list_del(&aep->liacceptlist);
216 scif_put_port(aep->port.port);
217 list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
218 tmpep = list_entry(pos, struct scif_endpt,
219 miacceptlist);
220 if (tmpep == aep) {
221 list_del(pos);
222 break;
223 }
224 }
225 spin_unlock(&scif_info.eplock);
226 mutex_lock(&scif_info.connlock);
227 list_for_each_safe(pos, tmpq, &scif_info.connected) {
228 tmpep = list_entry(pos,
229 struct scif_endpt, list);
230 if (tmpep == aep) {
231 list_del(pos);
232 break;
233 }
234 }
235 list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
236 tmpep = list_entry(pos,
237 struct scif_endpt, list);
238 if (tmpep == aep) {
239 list_del(pos);
240 break;
241 }
242 }
243 mutex_unlock(&scif_info.connlock);
244 scif_teardown_ep(aep);
245 spin_lock(&scif_info.eplock);
246 scif_add_epd_to_zombie_list(aep, SCIF_EPLOCK_HELD);
247 ep->acceptcnt--;
248 }
249
250 spin_lock(&ep->lock);
251 spin_unlock(&scif_info.eplock);
252
253 /* Remove and reject any pending connection requests. */
254 while (ep->conreqcnt) {
255 conreq = list_first_entry(&ep->conlist,
256 struct scif_conreq, list);
257 list_del(&conreq->list);
258
259 msg.uop = SCIF_CNCT_REJ;
260 msg.dst.node = conreq->msg.src.node;
261 msg.dst.port = conreq->msg.src.port;
262 msg.payload[0] = conreq->msg.payload[0];
263 msg.payload[1] = conreq->msg.payload[1];
264 /*
265 * No Error Handling on purpose for scif_nodeqp_send().
266 * If the remote node is lost we still want free the
267 * connection requests on the self node.
268 */
269 scif_nodeqp_send(&scif_dev[conreq->msg.src.node],
270 &msg);
271 ep->conreqcnt--;
272 kfree(conreq);
273 }
274
275 spin_unlock(&ep->lock);
276 /* If a kSCIF accept is waiting wake it up */
277 wake_up_interruptible(&ep->conwq);
278 break;
279 }
280 }
281 scif_put_port(ep->port.port);
282 scif_teardown_ep(ep);
283 scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD);
284 return 0;
285}
286EXPORT_SYMBOL_GPL(scif_close);
287
288/**
289 * scif_flush() - Wakes up any blocking accepts. The endpoint will no longer
290 * accept new connections.
291 * @epd: The end point returned from scif_open()
292 */
293int __scif_flush(scif_epd_t epd)
294{
295 struct scif_endpt *ep = (struct scif_endpt *)epd;
296
297 switch (ep->state) {
298 case SCIFEP_LISTENING:
299 {
300 ep->state = SCIFEP_CLLISTEN;
301
302 /* If an accept is waiting wake it up */
303 wake_up_interruptible(&ep->conwq);
304 break;
305 }
306 default:
307 break;
308 }
309 return 0;
310}
311
312int scif_bind(scif_epd_t epd, u16 pn)
313{
314 struct scif_endpt *ep = (struct scif_endpt *)epd;
315 int ret = 0;
316 int tmp;
317
318 dev_dbg(scif_info.mdev.this_device,
319 "SCIFAPI bind: ep %p %s requested port number %d\n",
320 ep, scif_ep_states[ep->state], pn);
321 if (pn) {
322 /*
323 * Similar to IETF RFC 1700, SCIF ports below
324 * SCIF_ADMIN_PORT_END can only be bound by system (or root)
325 * processes or by processes executed by privileged users.
326 */
327 if (pn < SCIF_ADMIN_PORT_END && !capable(CAP_SYS_ADMIN)) {
328 ret = -EACCES;
329 goto scif_bind_admin_exit;
330 }
331 }
332
333 spin_lock(&ep->lock);
334 if (ep->state == SCIFEP_BOUND) {
335 ret = -EINVAL;
336 goto scif_bind_exit;
337 } else if (ep->state != SCIFEP_UNBOUND) {
338 ret = -EISCONN;
339 goto scif_bind_exit;
340 }
341
342 if (pn) {
343 tmp = scif_rsrv_port(pn);
344 if (tmp != pn) {
345 ret = -EINVAL;
346 goto scif_bind_exit;
347 }
348 } else {
349 pn = scif_get_new_port();
350 if (!pn) {
351 ret = -ENOSPC;
352 goto scif_bind_exit;
353 }
354 }
355
356 ep->state = SCIFEP_BOUND;
357 ep->port.node = scif_info.nodeid;
358 ep->port.port = pn;
359 ep->conn_async_state = ASYNC_CONN_IDLE;
360 ret = pn;
361 dev_dbg(scif_info.mdev.this_device,
362 "SCIFAPI bind: bound to port number %d\n", pn);
363scif_bind_exit:
364 spin_unlock(&ep->lock);
365scif_bind_admin_exit:
366 return ret;
367}
368EXPORT_SYMBOL_GPL(scif_bind);
369
370int scif_listen(scif_epd_t epd, int backlog)
371{
372 struct scif_endpt *ep = (struct scif_endpt *)epd;
373
374 dev_dbg(scif_info.mdev.this_device,
375 "SCIFAPI listen: ep %p %s\n", ep, scif_ep_states[ep->state]);
376 spin_lock(&ep->lock);
377 switch (ep->state) {
378 case SCIFEP_ZOMBIE:
379 case SCIFEP_CLOSING:
380 case SCIFEP_CLLISTEN:
381 case SCIFEP_UNBOUND:
382 case SCIFEP_DISCONNECTED:
383 spin_unlock(&ep->lock);
384 return -EINVAL;
385 case SCIFEP_LISTENING:
386 case SCIFEP_CONNECTED:
387 case SCIFEP_CONNECTING:
388 case SCIFEP_MAPPING:
389 spin_unlock(&ep->lock);
390 return -EISCONN;
391 case SCIFEP_BOUND:
392 break;
393 }
394
395 ep->state = SCIFEP_LISTENING;
396 ep->backlog = backlog;
397
398 ep->conreqcnt = 0;
399 ep->acceptcnt = 0;
400 INIT_LIST_HEAD(&ep->conlist);
401 init_waitqueue_head(&ep->conwq);
402 INIT_LIST_HEAD(&ep->li_accept);
403 spin_unlock(&ep->lock);
404
405 /*
406 * Listen status is complete so delete the qp information not needed
407 * on a listen before placing on the list of listening ep's
408 */
409 scif_teardown_ep(ep);
410 ep->qp_info.qp = NULL;
411
412 spin_lock(&scif_info.eplock);
413 list_add_tail(&ep->list, &scif_info.listen);
414 spin_unlock(&scif_info.eplock);
415 return 0;
416}
417EXPORT_SYMBOL_GPL(scif_listen);
418
419/*
420 ************************************************************************
421 * SCIF connection flow:
422 *
423 * 1) A SCIF listening endpoint can call scif_accept(..) to wait for SCIF
424 * connections via a SCIF_CNCT_REQ message
425 * 2) A SCIF endpoint can initiate a SCIF connection by calling
426 * scif_connect(..) which calls scif_setup_qp_connect(..) which
427 * allocates the local qp for the endpoint ring buffer and then sends
428 * a SCIF_CNCT_REQ to the remote node and waits for a SCIF_CNCT_GNT or
429 * a SCIF_CNCT_REJ message
430 * 3) The peer node handles a SCIF_CNCT_REQ via scif_cnctreq_resp(..) which
431 * wakes up any threads blocked in step 1 or sends a SCIF_CNCT_REJ
432 * message otherwise
433 * 4) A thread blocked waiting for incoming connections allocates its local
434 * endpoint QP and ring buffer following which it sends a SCIF_CNCT_GNT
435 * and waits for a SCIF_CNCT_GNT(N)ACK. If the allocation fails then
436 * the node sends a SCIF_CNCT_REJ message
437 * 5) Upon receipt of a SCIF_CNCT_GNT or a SCIF_CNCT_REJ message the
438 * connecting endpoint is woken up as part of handling
439 * scif_cnctgnt_resp(..) following which it maps the remote endpoints'
440 * QP, updates its outbound QP and sends a SCIF_CNCT_GNTACK message on
441 * success or a SCIF_CNCT_GNTNACK message on failure and completes
442 * the scif_connect(..) API
443 * 6) Upon receipt of a SCIF_CNCT_GNT(N)ACK the accepting endpoint blocked
444 * in step 4 is woken up and completes the scif_accept(..) API
445 * 7) The SCIF connection is now established between the two SCIF endpoints.
446 */
447static int scif_conn_func(struct scif_endpt *ep)
448{
449 int err = 0;
450 struct scifmsg msg;
451 struct device *spdev;
452
453 /* Initiate the first part of the endpoint QP setup */
454 err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset,
455 SCIF_ENDPT_QP_SIZE, ep->remote_dev);
456 if (err) {
457 dev_err(&ep->remote_dev->sdev->dev,
458 "%s err %d qp_offset 0x%llx\n",
459 __func__, err, ep->qp_info.qp_offset);
460 ep->state = SCIFEP_BOUND;
461 goto connect_error_simple;
462 }
463
464 spdev = scif_get_peer_dev(ep->remote_dev);
465 if (IS_ERR(spdev)) {
466 err = PTR_ERR(spdev);
467 goto cleanup_qp;
468 }
469 /* Format connect message and send it */
470 msg.src = ep->port;
471 msg.dst = ep->conn_port;
472 msg.uop = SCIF_CNCT_REQ;
473 msg.payload[0] = (u64)ep;
474 msg.payload[1] = ep->qp_info.qp_offset;
475 err = _scif_nodeqp_send(ep->remote_dev, &msg);
476 if (err)
477 goto connect_error_dec;
478 scif_put_peer_dev(spdev);
479 /*
480 * Wait for the remote node to respond with SCIF_CNCT_GNT or
481 * SCIF_CNCT_REJ message.
482 */
483 err = wait_event_timeout(ep->conwq, ep->state != SCIFEP_CONNECTING,
484 SCIF_NODE_ALIVE_TIMEOUT);
485 if (!err) {
486 dev_err(&ep->remote_dev->sdev->dev,
487 "%s %d timeout\n", __func__, __LINE__);
488 ep->state = SCIFEP_BOUND;
489 }
490 spdev = scif_get_peer_dev(ep->remote_dev);
491 if (IS_ERR(spdev)) {
492 err = PTR_ERR(spdev);
493 goto cleanup_qp;
494 }
495 if (ep->state == SCIFEP_MAPPING) {
496 err = scif_setup_qp_connect_response(ep->remote_dev,
497 ep->qp_info.qp,
498 ep->qp_info.gnt_pld);
499 /*
500 * If the resource to map the queue are not available then
501 * we need to tell the other side to terminate the accept
502 */
503 if (err) {
504 dev_err(&ep->remote_dev->sdev->dev,
505 "%s %d err %d\n", __func__, __LINE__, err);
506 msg.uop = SCIF_CNCT_GNTNACK;
507 msg.payload[0] = ep->remote_ep;
508 _scif_nodeqp_send(ep->remote_dev, &msg);
509 ep->state = SCIFEP_BOUND;
510 goto connect_error_dec;
511 }
512
513 msg.uop = SCIF_CNCT_GNTACK;
514 msg.payload[0] = ep->remote_ep;
515 err = _scif_nodeqp_send(ep->remote_dev, &msg);
516 if (err) {
517 ep->state = SCIFEP_BOUND;
518 goto connect_error_dec;
519 }
520 ep->state = SCIFEP_CONNECTED;
521 mutex_lock(&scif_info.connlock);
522 list_add_tail(&ep->list, &scif_info.connected);
523 mutex_unlock(&scif_info.connlock);
524 dev_dbg(&ep->remote_dev->sdev->dev,
525 "SCIFAPI connect: ep %p connected\n", ep);
526 } else if (ep->state == SCIFEP_BOUND) {
527 dev_dbg(&ep->remote_dev->sdev->dev,
528 "SCIFAPI connect: ep %p connection refused\n", ep);
529 err = -ECONNREFUSED;
530 goto connect_error_dec;
531 }
532 scif_put_peer_dev(spdev);
533 return err;
534connect_error_dec:
535 scif_put_peer_dev(spdev);
536cleanup_qp:
537 scif_cleanup_ep_qp(ep);
538connect_error_simple:
539 return err;
540}
541
542/*
543 * scif_conn_handler:
544 *
545 * Workqueue handler for servicing non-blocking SCIF connect
546 *
547 */
548void scif_conn_handler(struct work_struct *work)
549{
550 struct scif_endpt *ep;
551
552 do {
553 ep = NULL;
554 spin_lock(&scif_info.nb_connect_lock);
555 if (!list_empty(&scif_info.nb_connect_list)) {
556 ep = list_first_entry(&scif_info.nb_connect_list,
557 struct scif_endpt, conn_list);
558 list_del(&ep->conn_list);
559 }
560 spin_unlock(&scif_info.nb_connect_lock);
561 if (ep)
562 ep->conn_err = scif_conn_func(ep);
563 } while (ep);
564}
565
566int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
567{
568 struct scif_endpt *ep = (struct scif_endpt *)epd;
569 int err = 0;
570 struct scif_dev *remote_dev;
571 struct device *spdev;
572
573 dev_dbg(scif_info.mdev.this_device, "SCIFAPI connect: ep %p %s\n", ep,
574 scif_ep_states[ep->state]);
575
576 if (!scif_dev || dst->node > scif_info.maxid)
577 return -ENODEV;
578
579 might_sleep();
580
581 remote_dev = &scif_dev[dst->node];
582 spdev = scif_get_peer_dev(remote_dev);
583 if (IS_ERR(spdev)) {
584 err = PTR_ERR(spdev);
585 return err;
586 }
587
588 spin_lock(&ep->lock);
589 switch (ep->state) {
590 case SCIFEP_ZOMBIE:
591 case SCIFEP_CLOSING:
592 err = -EINVAL;
593 break;
594 case SCIFEP_DISCONNECTED:
595 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
596 ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
597 else
598 err = -EINVAL;
599 break;
600 case SCIFEP_LISTENING:
601 case SCIFEP_CLLISTEN:
602 err = -EOPNOTSUPP;
603 break;
604 case SCIFEP_CONNECTING:
605 case SCIFEP_MAPPING:
606 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
607 err = -EINPROGRESS;
608 else
609 err = -EISCONN;
610 break;
611 case SCIFEP_CONNECTED:
612 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
613 ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
614 else
615 err = -EISCONN;
616 break;
617 case SCIFEP_UNBOUND:
618 ep->port.port = scif_get_new_port();
619 if (!ep->port.port) {
620 err = -ENOSPC;
621 } else {
622 ep->port.node = scif_info.nodeid;
623 ep->conn_async_state = ASYNC_CONN_IDLE;
624 }
625 /* Fall through */
626 case SCIFEP_BOUND:
627 /*
628 * If a non-blocking connect has been already initiated
629 * (conn_async_state is either ASYNC_CONN_INPROGRESS or
630 * ASYNC_CONN_FLUSH_WORK), the end point could end up in
631 * SCIF_BOUND due an error in the connection process
632 * (e.g., connection refused) If conn_async_state is
633 * ASYNC_CONN_INPROGRESS - transition to ASYNC_CONN_FLUSH_WORK
634 * so that the error status can be collected. If the state is
635 * already ASYNC_CONN_FLUSH_WORK - then set the error to
636 * EINPROGRESS since some other thread is waiting to collect
637 * error status.
638 */
639 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
640 ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
641 } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
642 err = -EINPROGRESS;
643 } else {
644 ep->conn_port = *dst;
645 init_waitqueue_head(&ep->sendwq);
646 init_waitqueue_head(&ep->recvwq);
647 init_waitqueue_head(&ep->conwq);
648 ep->conn_async_state = 0;
649
650 if (unlikely(non_block))
651 ep->conn_async_state = ASYNC_CONN_INPROGRESS;
652 }
653 break;
654 }
655
656 if (err || ep->conn_async_state == ASYNC_CONN_FLUSH_WORK)
657 goto connect_simple_unlock1;
658
659 ep->state = SCIFEP_CONNECTING;
660 ep->remote_dev = &scif_dev[dst->node];
661 ep->qp_info.qp->magic = SCIFEP_MAGIC;
662 if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
663 spin_lock(&scif_info.nb_connect_lock);
664 list_add_tail(&ep->conn_list, &scif_info.nb_connect_list);
665 spin_unlock(&scif_info.nb_connect_lock);
666 err = -EINPROGRESS;
667 schedule_work(&scif_info.conn_work);
668 }
669connect_simple_unlock1:
670 spin_unlock(&ep->lock);
671 scif_put_peer_dev(spdev);
672 if (err) {
673 return err;
674 } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
675 flush_work(&scif_info.conn_work);
676 err = ep->conn_err;
677 spin_lock(&ep->lock);
678 ep->conn_async_state = ASYNC_CONN_IDLE;
679 spin_unlock(&ep->lock);
680 } else {
681 err = scif_conn_func(ep);
682 }
683 return err;
684}
685
/**
 * scif_connect() - Request a connection to a remote node port
 * @epd: The end point returned from scif_open()
 * @dst: Remote node/port pair to connect to
 *
 * Blocking wrapper around __scif_connect() (non_block == false); sleeps
 * until the connection attempt completes or fails.
 */
int scif_connect(scif_epd_t epd, struct scif_port_id *dst)
{
	return __scif_connect(epd, dst, false);
}
690EXPORT_SYMBOL_GPL(scif_connect);
691
/**
 * scif_accept() - Accept a connection request from the remote node
 * @epd: The listening end point returned from scif_open()
 * @peer: Filled in with the connecting peer's node/port on success
 * @newepd: Filled in with the newly created, connected end point
 * @flags: 0 or SCIF_ACCEPT_SYNC to block until a request arrives
 *
 * The function accepts a connection request from the remote node. Successful
 * completion is indicated by a new end point being created and passed back
 * to the caller for future reference.
 *
 * Upon successful completion a zero will be returned and the peer information
 * will be filled in.
 *
 * If the end point is not in the listening state -EINVAL will be returned.
 *
 * If during the connection sequence resource allocation fails the -ENOMEM
 * will be returned.
 *
 * If the function is called without the SCIF_ACCEPT_SYNC flag set and no
 * connection requests are pending it will return -EAGAIN.
 *
 * If the remote side is not sending any connection requests the caller may
 * terminate this function with a signal. If so a -EINTR will be returned.
 */
int scif_accept(scif_epd_t epd, struct scif_port_id *peer,
		scif_epd_t *newepd, int flags)
{
	struct scif_endpt *lep = (struct scif_endpt *)epd;
	struct scif_endpt *cep;
	struct scif_conreq *conreq;
	struct scifmsg msg;
	int err;
	struct device *spdev;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI accept: ep %p %s\n", lep, scif_ep_states[lep->state]);

	/* SCIF_ACCEPT_SYNC is the only supported flag */
	if (flags & ~SCIF_ACCEPT_SYNC)
		return -EINVAL;

	if (!peer || !newepd)
		return -EINVAL;

	might_sleep();
	spin_lock(&lep->lock);
	if (lep->state != SCIFEP_LISTENING) {
		spin_unlock(&lep->lock);
		return -EINVAL;
	}

	if (!lep->conreqcnt && !(flags & SCIF_ACCEPT_SYNC)) {
		/* No connection request present and we do not want to wait */
		spin_unlock(&lep->lock);
		return -EAGAIN;
	}

	lep->files = current->files;
retry_connection:
	/* Entered with lep->lock held; it is dropped across the wait */
	spin_unlock(&lep->lock);
	/* Wait for the remote node to send us a SCIF_CNCT_REQ */
	err = wait_event_interruptible(lep->conwq,
				       (lep->conreqcnt ||
				       (lep->state != SCIFEP_LISTENING)));
	if (err)
		return err;

	/* The listening endpoint was closed while we slept */
	if (lep->state != SCIFEP_LISTENING)
		return -EINTR;

	spin_lock(&lep->lock);

	/* Another accept() may have consumed the request; wait again */
	if (!lep->conreqcnt)
		goto retry_connection;

	/* Get the first connect request off the list */
	conreq = list_first_entry(&lep->conlist, struct scif_conreq, list);
	list_del(&conreq->list);
	lep->conreqcnt--;
	spin_unlock(&lep->lock);

	/* Fill in the peer information */
	peer->node = conreq->msg.src.node;
	peer->port = conreq->msg.src.port;

	/* Allocate and initialize the new (connected) endpoint */
	cep = kzalloc(sizeof(*cep), GFP_KERNEL);
	if (!cep) {
		err = -ENOMEM;
		goto scif_accept_error_epalloc;
	}
	spin_lock_init(&cep->lock);
	mutex_init(&cep->sendlock);
	mutex_init(&cep->recvlock);
	cep->state = SCIFEP_CONNECTING;
	cep->remote_dev = &scif_dev[peer->node];
	cep->remote_ep = conreq->msg.payload[0];

	cep->qp_info.qp = kzalloc(sizeof(*cep->qp_info.qp), GFP_KERNEL);
	if (!cep->qp_info.qp) {
		err = -ENOMEM;
		goto scif_accept_error_qpalloc;
	}

	cep->qp_info.qp->magic = SCIFEP_MAGIC;
	spdev = scif_get_peer_dev(cep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		goto scif_accept_error_map;
	}
	/* Map the peer's queue pair (offset carried in the CNCT_REQ payload) */
	err = scif_setup_qp_accept(cep->qp_info.qp, &cep->qp_info.qp_offset,
				   conreq->msg.payload[1], SCIF_ENDPT_QP_SIZE,
				   cep->remote_dev);
	if (err) {
		dev_dbg(&cep->remote_dev->sdev->dev,
			"SCIFAPI accept: ep %p new %p scif_setup_qp_accept %d qp_offset 0x%llx\n",
			lep, cep, err, cep->qp_info.qp_offset);
		scif_put_peer_dev(spdev);
		goto scif_accept_error_map;
	}

	cep->port.node = lep->port.node;
	cep->port.port = lep->port.port;
	cep->peer.node = peer->node;
	cep->peer.port = peer->port;
	init_waitqueue_head(&cep->sendwq);
	init_waitqueue_head(&cep->recvwq);
	init_waitqueue_head(&cep->conwq);

	/* Grant the connection; payload[2] lets the peer reference cep back */
	msg.uop = SCIF_CNCT_GNT;
	msg.src = cep->port;
	msg.payload[0] = cep->remote_ep;
	msg.payload[1] = cep->qp_info.qp_offset;
	msg.payload[2] = (u64)cep;

	err = _scif_nodeqp_send(cep->remote_dev, &msg);
	scif_put_peer_dev(spdev);
	if (err)
		goto scif_accept_error_map;
retry:
	/* Wait for the remote node to respond with SCIF_CNCT_GNT(N)ACK */
	err = wait_event_timeout(cep->conwq, cep->state != SCIFEP_CONNECTING,
				 SCIF_NODE_ACCEPT_TIMEOUT);
	if (!err && scifdev_alive(cep))
		goto retry;
	/* Timeout with a dead peer device is a hard failure */
	err = !err ? -ENODEV : 0;
	if (err)
		goto scif_accept_error_map;
	kfree(conreq);

	spin_lock(&cep->lock);

	if (cep->state == SCIFEP_CLOSING) {
		/*
		 * Remote failed to allocate resources and NAKed the grant.
		 * There is at this point nothing referencing the new end point.
		 */
		spin_unlock(&cep->lock);
		scif_teardown_ep(cep);
		kfree(cep);

		/* If call with sync flag then go back and wait. */
		if (flags & SCIF_ACCEPT_SYNC) {
			spin_lock(&lep->lock);
			goto retry_connection;
		}
		return -EAGAIN;
	}

	scif_get_port(cep->port.port);
	*newepd = (scif_epd_t)cep;
	spin_unlock(&cep->lock);
	return 0;
scif_accept_error_map:
	scif_teardown_ep(cep);
scif_accept_error_qpalloc:
	kfree(cep);
scif_accept_error_epalloc:
	/* Reject the original request so the peer's connect() can fail */
	msg.uop = SCIF_CNCT_REJ;
	msg.dst.node = conreq->msg.src.node;
	msg.dst.port = conreq->msg.src.port;
	msg.payload[0] = conreq->msg.payload[0];
	msg.payload[1] = conreq->msg.payload[1];
	scif_nodeqp_send(&scif_dev[conreq->msg.src.node], &msg);
	kfree(conreq);
	return err;
}
874EXPORT_SYMBOL_GPL(scif_accept);
875
876/*
877 * scif_msg_param_check:
878 * @epd: The end point returned from scif_open()
879 * @len: Length to receive
880 * @flags: blocking or non blocking
881 *
882 * Validate parameters for messaging APIs scif_send(..)/scif_recv(..).
883 */
884static inline int scif_msg_param_check(scif_epd_t epd, int len, int flags)
885{
886 int ret = -EINVAL;
887
888 if (len < 0)
889 goto err_ret;
890 if (flags && (!(flags & SCIF_RECV_BLOCK)))
891 goto err_ret;
892 ret = 0;
893err_ret:
894 return ret;
895}
896
/*
 * _scif_send() - Copy a message from a kernel buffer into the endpoint's
 * outbound ring buffer and notify the peer.
 * @epd: connected end point
 * @msg: kernel buffer holding the data
 * @len: number of bytes to send
 * @flags: 0 or SCIF_SEND_BLOCK
 *
 * Returns the number of bytes sent on (partial) success, 0 if nothing
 * could be sent in the non-blocking case, or a negative errno when the
 * endpoint is not/no longer connected and no progress was made.
 */
static int _scif_send(scif_epd_t epd, void *msg, int len, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scifmsg notif_msg;
	int curr_xfer_len = 0, sent_len = 0, write_count;
	int ret = 0;
	struct scif_qp *qp = ep->qp_info.qp;

	if (flags & SCIF_SEND_BLOCK)
		might_sleep();

	spin_lock(&ep->lock);
	while (sent_len != len && SCIFEP_CONNECTED == ep->state) {
		write_count = scif_rb_space(&qp->outbound_q);
		if (write_count) {
			/* Best effort to send as much data as possible */
			curr_xfer_len = min(len - sent_len, write_count);
			ret = scif_rb_write(&qp->outbound_q, msg,
					    curr_xfer_len);
			if (ret < 0)
				break;
			/* Success. Update write pointer */
			scif_rb_commit(&qp->outbound_q);
			/*
			 * Send a notification to the peer about the
			 * produced data message.
			 */
			notif_msg.src = ep->port;
			notif_msg.uop = SCIF_CLIENT_SENT;
			notif_msg.payload[0] = ep->remote_ep;
			ret = _scif_nodeqp_send(ep->remote_dev, &notif_msg);
			if (ret)
				break;
			sent_len += curr_xfer_len;
			msg = msg + curr_xfer_len;
			continue;
		}
		/* Cap the wakeup threshold at the maximum possible RB space */
		curr_xfer_len = min(len - sent_len, SCIF_ENDPT_QP_SIZE - 1);
		/* Not enough RB space. return for the Non Blocking case */
		if (!(flags & SCIF_SEND_BLOCK))
			break;

		/* ep->lock is dropped while sleeping; state re-checked after */
		spin_unlock(&ep->lock);
		/* Wait for a SCIF_CLIENT_RCVD message in the Blocking case */
		ret =
		wait_event_interruptible(ep->sendwq,
					 (SCIFEP_CONNECTED != ep->state) ||
					 (scif_rb_space(&qp->outbound_q) >=
					 curr_xfer_len));
		spin_lock(&ep->lock);
		if (ret)
			break;
	}
	/* Partial progress wins over a late error or disconnect */
	if (sent_len)
		ret = sent_len;
	else if (!ret && SCIFEP_CONNECTED != ep->state)
		ret = SCIFEP_DISCONNECTED == ep->state ?
			-ECONNRESET : -ENOTCONN;
	spin_unlock(&ep->lock);
	return ret;
}
958
/*
 * _scif_recv() - Copy data from the endpoint's inbound ring buffer into a
 * kernel buffer and (while connected) notify the peer of consumed space.
 * @epd: end point (connected or freshly disconnected with buffered data)
 * @msg: kernel buffer to place the data in
 * @len: maximum number of bytes to receive
 * @flags: 0 or SCIF_RECV_BLOCK
 *
 * Returns the number of bytes received on (partial) success, 0 if no data
 * was available in the non-blocking case, or a negative errno when the
 * endpoint is not/no longer connected and nothing was read.
 */
static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
{
	int read_size;
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scifmsg notif_msg;
	int curr_recv_len = 0, remaining_len = len, read_count;
	int ret = 0;
	struct scif_qp *qp = ep->qp_info.qp;

	if (flags & SCIF_RECV_BLOCK)
		might_sleep();
	spin_lock(&ep->lock);
	/* DISCONNECTED is allowed so already-buffered data can be drained */
	while (remaining_len && (SCIFEP_CONNECTED == ep->state ||
				 SCIFEP_DISCONNECTED == ep->state)) {
		read_count = scif_rb_count(&qp->inbound_q, remaining_len);
		if (read_count) {
			/*
			 * Best effort to recv as much data as there
			 * are bytes to read in the RB particularly
			 * important for the Non Blocking case.
			 */
			curr_recv_len = min(remaining_len, read_count);
			/* NOTE(review): read_size is never consulted */
			read_size = scif_rb_get_next(&qp->inbound_q,
						     msg, curr_recv_len);
			if (ep->state == SCIFEP_CONNECTED) {
				/*
				 * Update the read pointer only if the endpoint
				 * is still connected else the read pointer
				 * might no longer exist since the peer has
				 * freed resources!
				 */
				scif_rb_update_read_ptr(&qp->inbound_q);
				/*
				 * Send a notification to the peer about the
				 * consumed data message only if the EP is in
				 * SCIFEP_CONNECTED state.
				 */
				notif_msg.src = ep->port;
				notif_msg.uop = SCIF_CLIENT_RCVD;
				notif_msg.payload[0] = ep->remote_ep;
				ret = _scif_nodeqp_send(ep->remote_dev,
							&notif_msg);
				if (ret)
					break;
			}
			remaining_len -= curr_recv_len;
			msg = msg + curr_recv_len;
			continue;
		}
		/*
		 * Bail out now if the EP is in SCIFEP_DISCONNECTED state else
		 * we will keep looping forever.
		 */
		if (ep->state == SCIFEP_DISCONNECTED)
			break;
		/*
		 * Return in the Non Blocking case if there is no data
		 * to read in this iteration.
		 */
		if (!(flags & SCIF_RECV_BLOCK))
			break;
		/* Cap the wakeup threshold at the maximum possible RB count */
		curr_recv_len = min(remaining_len, SCIF_ENDPT_QP_SIZE - 1);
		spin_unlock(&ep->lock);
		/*
		 * Wait for a SCIF_CLIENT_SEND message in the blocking case
		 * or until other side disconnects.
		 */
		ret =
		wait_event_interruptible(ep->recvwq,
					 SCIFEP_CONNECTED != ep->state ||
					 scif_rb_count(&qp->inbound_q,
						       curr_recv_len)
					 >= curr_recv_len);
		spin_lock(&ep->lock);
		if (ret)
			break;
	}
	/* Partial progress wins over a late error or disconnect */
	if (len - remaining_len)
		ret = len - remaining_len;
	else if (!ret && ep->state != SCIFEP_CONNECTED)
		ret = ep->state == SCIFEP_DISCONNECTED ?
			-ECONNRESET : -ENOTCONN;
	spin_unlock(&ep->lock);
	return ret;
}
1044
1045/**
1046 * scif_user_send() - Send data to connection queue
1047 * @epd: The end point returned from scif_open()
1048 * @msg: Address to place data
1049 * @len: Length to receive
1050 * @flags: blocking or non blocking
1051 *
1052 * This function is called from the driver IOCTL entry point
1053 * only and is a wrapper for _scif_send().
1054 */
1055int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags)
1056{
1057 struct scif_endpt *ep = (struct scif_endpt *)epd;
1058 int err = 0;
1059 int sent_len = 0;
1060 char *tmp;
1061 int loop_len;
1062 int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
1063
1064 dev_dbg(scif_info.mdev.this_device,
1065 "SCIFAPI send (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
1066 if (!len)
1067 return 0;
1068
1069 err = scif_msg_param_check(epd, len, flags);
1070 if (err)
1071 goto send_err;
1072
1073 tmp = kmalloc(chunk_len, GFP_KERNEL);
1074 if (!tmp) {
1075 err = -ENOMEM;
1076 goto send_err;
1077 }
1078 /*
1079 * Grabbing the lock before breaking up the transfer in
1080 * multiple chunks is required to ensure that messages do
1081 * not get fragmented and reordered.
1082 */
1083 mutex_lock(&ep->sendlock);
1084 while (sent_len != len) {
1085 loop_len = len - sent_len;
1086 loop_len = min(chunk_len, loop_len);
1087 if (copy_from_user(tmp, msg, loop_len)) {
1088 err = -EFAULT;
1089 goto send_free_err;
1090 }
1091 err = _scif_send(epd, tmp, loop_len, flags);
1092 if (err < 0)
1093 goto send_free_err;
1094 sent_len += err;
1095 msg += err;
1096 if (err != loop_len)
1097 goto send_free_err;
1098 }
1099send_free_err:
1100 mutex_unlock(&ep->sendlock);
1101 kfree(tmp);
1102send_err:
1103 return err < 0 ? err : sent_len;
1104}
1105
1106/**
1107 * scif_user_recv() - Receive data from connection queue
1108 * @epd: The end point returned from scif_open()
1109 * @msg: Address to place data
1110 * @len: Length to receive
1111 * @flags: blocking or non blocking
1112 *
1113 * This function is called from the driver IOCTL entry point
1114 * only and is a wrapper for _scif_recv().
1115 */
1116int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags)
1117{
1118 struct scif_endpt *ep = (struct scif_endpt *)epd;
1119 int err = 0;
1120 int recv_len = 0;
1121 char *tmp;
1122 int loop_len;
1123 int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
1124
1125 dev_dbg(scif_info.mdev.this_device,
1126 "SCIFAPI recv (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
1127 if (!len)
1128 return 0;
1129
1130 err = scif_msg_param_check(epd, len, flags);
1131 if (err)
1132 goto recv_err;
1133
1134 tmp = kmalloc(chunk_len, GFP_KERNEL);
1135 if (!tmp) {
1136 err = -ENOMEM;
1137 goto recv_err;
1138 }
1139 /*
1140 * Grabbing the lock before breaking up the transfer in
1141 * multiple chunks is required to ensure that messages do
1142 * not get fragmented and reordered.
1143 */
1144 mutex_lock(&ep->recvlock);
1145 while (recv_len != len) {
1146 loop_len = len - recv_len;
1147 loop_len = min(chunk_len, loop_len);
1148 err = _scif_recv(epd, tmp, loop_len, flags);
1149 if (err < 0)
1150 goto recv_free_err;
1151 if (copy_to_user(msg, tmp, err)) {
1152 err = -EFAULT;
1153 goto recv_free_err;
1154 }
1155 recv_len += err;
1156 msg += err;
1157 if (err != loop_len)
1158 goto recv_free_err;
1159 }
1160recv_free_err:
1161 mutex_unlock(&ep->recvlock);
1162 kfree(tmp);
1163recv_err:
1164 return err < 0 ? err : recv_len;
1165}
1166
1167/**
1168 * scif_send() - Send data to connection queue
1169 * @epd: The end point returned from scif_open()
1170 * @msg: Address to place data
1171 * @len: Length to receive
1172 * @flags: blocking or non blocking
1173 *
1174 * This function is called from the kernel mode only and is
1175 * a wrapper for _scif_send().
1176 */
1177int scif_send(scif_epd_t epd, void *msg, int len, int flags)
1178{
1179 struct scif_endpt *ep = (struct scif_endpt *)epd;
1180 int ret;
1181
1182 dev_dbg(scif_info.mdev.this_device,
1183 "SCIFAPI send (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
1184 if (!len)
1185 return 0;
1186
1187 ret = scif_msg_param_check(epd, len, flags);
1188 if (ret)
1189 return ret;
1190 if (!ep->remote_dev)
1191 return -ENOTCONN;
1192 /*
1193 * Grab the mutex lock in the blocking case only
1194 * to ensure messages do not get fragmented/reordered.
1195 * The non blocking mode is protected using spin locks
1196 * in _scif_send().
1197 */
1198 if (flags & SCIF_SEND_BLOCK)
1199 mutex_lock(&ep->sendlock);
1200
1201 ret = _scif_send(epd, msg, len, flags);
1202
1203 if (flags & SCIF_SEND_BLOCK)
1204 mutex_unlock(&ep->sendlock);
1205 return ret;
1206}
1207EXPORT_SYMBOL_GPL(scif_send);
1208
1209/**
1210 * scif_recv() - Receive data from connection queue
1211 * @epd: The end point returned from scif_open()
1212 * @msg: Address to place data
1213 * @len: Length to receive
1214 * @flags: blocking or non blocking
1215 *
1216 * This function is called from the kernel mode only and is
1217 * a wrapper for _scif_recv().
1218 */
1219int scif_recv(scif_epd_t epd, void *msg, int len, int flags)
1220{
1221 struct scif_endpt *ep = (struct scif_endpt *)epd;
1222 int ret;
1223
1224 dev_dbg(scif_info.mdev.this_device,
1225 "SCIFAPI recv (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
1226 if (!len)
1227 return 0;
1228
1229 ret = scif_msg_param_check(epd, len, flags);
1230 if (ret)
1231 return ret;
1232 /*
1233 * Grab the mutex lock in the blocking case only
1234 * to ensure messages do not get fragmented/reordered.
1235 * The non blocking mode is protected using spin locks
1236 * in _scif_send().
1237 */
1238 if (flags & SCIF_RECV_BLOCK)
1239 mutex_lock(&ep->recvlock);
1240
1241 ret = _scif_recv(epd, msg, len, flags);
1242
1243 if (flags & SCIF_RECV_BLOCK)
1244 mutex_unlock(&ep->recvlock);
1245
1246 return ret;
1247}
1248EXPORT_SYMBOL_GPL(scif_recv);
1249
1250int scif_get_node_ids(u16 *nodes, int len, u16 *self)
1251{
1252 int online = 0;
1253 int offset = 0;
1254 int node;
1255
1256 if (!scif_is_mgmt_node())
1257 scif_get_node_info();
1258
1259 *self = scif_info.nodeid;
1260 mutex_lock(&scif_info.conflock);
1261 len = min_t(int, len, scif_info.total);
1262 for (node = 0; node <= scif_info.maxid; node++) {
1263 if (_scifdev_alive(&scif_dev[node])) {
1264 online++;
1265 if (offset < len)
1266 nodes[offset++] = node;
1267 }
1268 }
1269 dev_dbg(scif_info.mdev.this_device,
1270 "SCIFAPI get_node_ids total %d online %d filled in %d nodes\n",
1271 scif_info.total, online, offset);
1272 mutex_unlock(&scif_info.conflock);
1273
1274 return online;
1275}
1276EXPORT_SYMBOL_GPL(scif_get_node_ids);
diff --git a/drivers/misc/mic/scif/scif_debugfs.c b/drivers/misc/mic/scif/scif_debugfs.c
new file mode 100644
index 000000000000..51f14e2a1196
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_debugfs.c
@@ -0,0 +1,85 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include <linux/debugfs.h>
19#include <linux/seq_file.h>
20
21#include "../common/mic_dev.h"
22#include "scif_main.h"
23
24/* Debugfs parent dir */
25static struct dentry *scif_dbg;
26
27static int scif_dev_test(struct seq_file *s, void *unused)
28{
29 int node;
30
31 seq_printf(s, "Total Nodes %d Self Node Id %d Maxid %d\n",
32 scif_info.total, scif_info.nodeid,
33 scif_info.maxid);
34
35 if (!scif_dev)
36 return 0;
37
38 seq_printf(s, "%-16s\t%-16s\n", "node_id", "state");
39
40 for (node = 0; node <= scif_info.maxid; node++)
41 seq_printf(s, "%-16d\t%-16s\n", scif_dev[node].node,
42 _scifdev_alive(&scif_dev[node]) ?
43 "Running" : "Offline");
44 return 0;
45}
46
/* Open handler: bind the single-shot seq_file show routine */
static int scif_dev_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, scif_dev_test, inode->i_private);
}
51
/* Release handler: free the seq_file state set up by single_open() */
static int scif_dev_test_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}
56
/* seq_file-backed fops for the read-only "scif_dev" debugfs entry */
static const struct file_operations scif_dev_ops = {
	.owner   = THIS_MODULE,
	.open    = scif_dev_test_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = scif_dev_test_release
};
64
65void __init scif_init_debugfs(void)
66{
67 struct dentry *d;
68
69 scif_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
70 if (!scif_dbg) {
71 dev_err(scif_info.mdev.this_device,
72 "can't create debugfs dir scif\n");
73 return;
74 }
75
76 d = debugfs_create_file("scif_dev", 0444, scif_dbg,
77 NULL, &scif_dev_ops);
78 debugfs_create_u8("en_msg_log", 0666, scif_dbg, &scif_info.en_msg_log);
79 debugfs_create_u8("p2p_enable", 0666, scif_dbg, &scif_info.p2p_enable);
80}
81
/* Remove the SCIF debugfs directory and everything under it */
void scif_exit_debugfs(void)
{
	debugfs_remove_recursive(scif_dbg);
}
diff --git a/drivers/misc/mic/scif/scif_epd.c b/drivers/misc/mic/scif/scif_epd.c
new file mode 100644
index 000000000000..b4bfbb08a8e3
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_epd.c
@@ -0,0 +1,353 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include "scif_main.h"
19#include "scif_map.h"
20
/*
 * scif_cleanup_ep_qp - Release the queue pair resources held by @ep
 *
 * Unmaps the remote-facing mappings (outbound ring buffer base and the
 * remote QP) via scif_iounmap() and drops the local DMA mappings of the
 * QP and its buffer. Each resource is checked before release so a
 * partially set-up QP is handled safely. Caller holds ep->lock (see
 * scif_teardown_ep()).
 */
void scif_cleanup_ep_qp(struct scif_endpt *ep)
{
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp->outbound_q.rb_base) {
		scif_iounmap((void *)qp->outbound_q.rb_base,
			     qp->outbound_q.size, ep->remote_dev);
		qp->outbound_q.rb_base = NULL;
	}
	if (qp->remote_qp) {
		scif_iounmap((void *)qp->remote_qp,
			     sizeof(struct scif_qp), ep->remote_dev);
		qp->remote_qp = NULL;
	}
	if (qp->local_qp) {
		scif_unmap_single(qp->local_qp, ep->remote_dev,
				  sizeof(struct scif_qp));
		qp->local_qp = 0x0;
	}
	if (qp->local_buf) {
		scif_unmap_single(qp->local_buf, ep->remote_dev,
				  SCIF_ENDPT_QP_SIZE);
		qp->local_buf = 0;
	}
}
46
/*
 * scif_teardown_ep - Tear down an endpoint's queue pair
 * @endpt: the endpoint (void * so message handlers can pass payloads)
 *
 * Cleans up the QP mappings under the endpoint lock, then frees the
 * inbound ring buffer backing store and the QP itself. A NULL qp means
 * QP setup never completed and there is nothing to do.
 */
void scif_teardown_ep(void *endpt)
{
	struct scif_endpt *ep = endpt;
	struct scif_qp *qp = ep->qp_info.qp;

	if (qp) {
		spin_lock(&ep->lock);
		scif_cleanup_ep_qp(ep);
		spin_unlock(&ep->lock);
		kfree(qp->inbound_q.rb_base);
		kfree(qp);
	}
}
60
/*
 * scif_add_epd_to_zombie_list - Enqueue the endpoint for deferred cleanup
 * @ep: the endpoint to retire
 * @eplock_held: true if the caller already holds scif_info.eplock
 *
 * Marks the endpoint a ZOMBIE, moves it onto the global zombie list and
 * kicks the misc work item which performs the actual cleanup. The
 * endpoint must not be accessed once this API returns.
 */
void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
{
	if (!eplock_held)
		spin_lock(&scif_info.eplock);
	spin_lock(&ep->lock);
	ep->state = SCIFEP_ZOMBIE;
	spin_unlock(&ep->lock);
	list_add_tail(&ep->list, &scif_info.zombie);
	scif_info.nr_zombies++;
	if (!eplock_held)
		spin_unlock(&scif_info.eplock);
	/* Actual freeing happens in scif_info.misc_work's handler */
	schedule_work(&scif_info.misc_work);
}
78
/*
 * scif_find_listen_ep - Find the endpoint listening on @port
 *
 * Returns the matching endpoint with its ep->lock HELD so the caller
 * can safely queue a connection request onto it; returns NULL if no
 * endpoint is listening on @port.
 */
static struct scif_endpt *scif_find_listen_ep(u16 port)
{
	struct scif_endpt *ep = NULL;
	struct list_head *pos, *tmpq;

	spin_lock(&scif_info.eplock);
	list_for_each_safe(pos, tmpq, &scif_info.listen) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->port.port == port) {
			/* ep->lock is taken before eplock is dropped */
			spin_lock(&ep->lock);
			spin_unlock(&scif_info.eplock);
			return ep;
		}
	}
	spin_unlock(&scif_info.eplock);
	return NULL;
}
96
97void scif_cleanup_zombie_epd(void)
98{
99 struct list_head *pos, *tmpq;
100 struct scif_endpt *ep;
101
102 spin_lock(&scif_info.eplock);
103 list_for_each_safe(pos, tmpq, &scif_info.zombie) {
104 ep = list_entry(pos, struct scif_endpt, list);
105 list_del(pos);
106 scif_info.nr_zombies--;
107 kfree(ep);
108 }
109 spin_unlock(&scif_info.eplock);
110}
111
/**
 * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * This message is initiated by the remote node to request a connection
 * to the local node. This function looks for an end point in the
 * listen state on the requested port id.
 *
 * If it finds a listening port it places the connect request on the
 * listening end points queue and wakes up any pending accept calls.
 *
 * If it does not find a listening end point it sends a connection
 * reject message to the remote node.
 */
void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_conreq *conreq;

	conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
	if (!conreq)
		/* Lack of resources so reject the request. */
		goto conreq_sendrej;

	/* On success ep is returned with ep->lock held */
	ep = scif_find_listen_ep(msg->dst.port);
	if (!ep)
		/* Send reject due to no listening ports */
		goto conreq_sendrej_free;

	if (ep->backlog <= ep->conreqcnt) {
		/* Send reject due to too many pending requests */
		spin_unlock(&ep->lock);
		goto conreq_sendrej_free;
	}

	conreq->msg = *msg;
	list_add_tail(&conreq->list, &ep->conlist);
	ep->conreqcnt++;
	/* Wake any accept() blocked on this listening endpoint */
	wake_up_interruptible(&ep->conwq);
	spin_unlock(&ep->lock);
	return;

conreq_sendrej_free:
	kfree(conreq);
conreq_sendrej:
	msg->uop = SCIF_CNCT_REJ;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}
160
161/**
162 * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
163 * @msg: Interrupt message
164 *
165 * An accept() on the remote node has occurred and sent this message
166 * to indicate success. Place the end point in the MAPPING state and
167 * save the remote nodes memory information. Then wake up the connect
168 * request so it can finish.
169 */
170void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
171{
172 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
173
174 spin_lock(&ep->lock);
175 if (SCIFEP_CONNECTING == ep->state) {
176 ep->peer.node = msg->src.node;
177 ep->peer.port = msg->src.port;
178 ep->qp_info.gnt_pld = msg->payload[1];
179 ep->remote_ep = msg->payload[2];
180 ep->state = SCIFEP_MAPPING;
181
182 wake_up(&ep->conwq);
183 }
184 spin_unlock(&ep->lock);
185}
186
/**
 * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * The remote connection request has finished mapping the local memory.
 * Place the connection in the connected state, put it on the global
 * connected list and wake up the pending accept() call. Lock order:
 * connlock is taken before ep->lock.
 */
void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];

	mutex_lock(&scif_info.connlock);
	spin_lock(&ep->lock);
	/* New ep is now connected with all resources set. */
	ep->state = SCIFEP_CONNECTED;
	list_add_tail(&ep->list, &scif_info.connected);
	wake_up(&ep->conwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);
}
208
209/**
210 * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
211 * @msg: Interrupt message
212 *
213 * The remote connection request failed to map the local memory it was sent.
214 * Place the end point in the CLOSING state to indicate it and wake up
215 * the pending accept();
216 */
217void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
218{
219 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
220
221 spin_lock(&ep->lock);
222 ep->state = SCIFEP_CLOSING;
223 wake_up(&ep->conwq);
224 spin_unlock(&ep->lock);
225}
226
227/**
228 * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
229 * @msg: Interrupt message
230 *
231 * The remote end has rejected the connection request. Set the end
232 * point back to the bound state and wake up the pending connect().
233 */
234void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
235{
236 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
237
238 spin_lock(&ep->lock);
239 if (SCIFEP_CONNECTING == ep->state) {
240 ep->state = SCIFEP_BOUND;
241 wake_up(&ep->conwq);
242 }
243 spin_unlock(&ep->lock);
244}
245
/**
 * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * The remote node has indicated close() has been called on its end
 * point. Remove the local end point from the connected list, set its
 * state to disconnected and ensure accesses to the remote node are
 * shutdown.
 *
 * When all accesses to the remote end have completed then send a
 * DISCNT_ACK to indicate it can remove its resources and complete
 * the close routine.
 */
void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = NULL;
	struct scif_endpt *tmpep;
	struct list_head *pos, *tmpq;

	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		tmpep = list_entry(pos, struct scif_endpt, list);
		/*
		 * The local ep may have sent a disconnect and been closed
		 * due to a message response time out. It may have been
		 * allocated again and formed a new connection so we want to
		 * check if the remote ep matches
		 */
		if (((u64)tmpep == msg->payload[1]) &&
		    ((u64)tmpep->remote_ep == msg->payload[0])) {
			list_del(pos);
			ep = tmpep;
			/* ep->lock is held until after the state change below */
			spin_lock(&ep->lock);
			break;
		}
	}

	/*
	 * If the terminated end is not found then this side started closing
	 * before the other side sent the disconnect. If so the ep will no
	 * longer be on the connected list. Regardless the other side
	 * needs to be acked to let it know close is complete.
	 */
	if (!ep) {
		mutex_unlock(&scif_info.connlock);
		goto discnct_ack;
	}

	ep->state = SCIFEP_DISCONNECTED;
	list_add_tail(&ep->list, &scif_info.disconnected);

	/* Unblock any sender/receiver sleeping on this endpoint */
	wake_up_interruptible(&ep->sendwq);
	wake_up_interruptible(&ep->recvwq);
	spin_unlock(&ep->lock);
	mutex_unlock(&scif_info.connlock);

discnct_ack:
	msg->uop = SCIF_DISCNT_ACK;
	scif_nodeqp_send(&scif_dev[msg->src.node], msg);
}
306
307/**
308 * scif_discnct_ack() - Respond to SCIF_DISCNT_ACK interrupt message
309 * @msg: Interrupt message
310 *
311 * Remote side has indicated it has not more references to local resources
312 */
313void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
314{
315 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
316
317 spin_lock(&ep->lock);
318 ep->state = SCIFEP_DISCONNECTED;
319 spin_unlock(&ep->lock);
320 complete(&ep->discon);
321}
322
323/**
324 * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
325 * @msg: Interrupt message
326 *
327 * Remote side is confirming send or receive interrupt handling is complete.
328 */
329void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
330{
331 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
332
333 spin_lock(&ep->lock);
334 if (SCIFEP_CONNECTED == ep->state)
335 wake_up_interruptible(&ep->recvwq);
336 spin_unlock(&ep->lock);
337}
338
339/**
340 * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
341 * @msg: Interrupt message
342 *
343 * Remote side is confirming send or receive interrupt handling is complete.
344 */
345void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
346{
347 struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
348
349 spin_lock(&ep->lock);
350 if (SCIFEP_CONNECTED == ep->state)
351 wake_up_interruptible(&ep->sendwq);
352 spin_unlock(&ep->lock);
353}
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
new file mode 100644
index 000000000000..331322a25213
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -0,0 +1,160 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#ifndef SCIF_EPD_H
19#define SCIF_EPD_H
20
21#include <linux/delay.h>
22#include <linux/scif.h>
23#include <linux/scif_ioctl.h>
24
25#define SCIF_EPLOCK_HELD true
26
/* Lifecycle states of a SCIF endpoint */
enum scif_epd_state {
	SCIFEP_UNBOUND,		/* created, no port bound yet */
	SCIFEP_BOUND,		/* bound to a local port */
	SCIFEP_LISTENING,	/* listening for connection requests */
	SCIFEP_CONNECTED,	/* connected to a peer endpoint */
	SCIFEP_CONNECTING,	/* connection attempt in progress */
	SCIFEP_MAPPING,		/* QP mapping in progress during connect */
	SCIFEP_CLOSING,		/* close initiated */
	SCIFEP_CLLISTEN,	/* listening endpoint being closed */
	SCIFEP_DISCONNECTED,	/* disconnect handshake completed */
	SCIFEP_ZOMBIE		/* closed but pinned by pending RMAs */
};
39
/*
 * struct scif_conreq - Data structure added to the connection list of a
 * listening endpoint, one per pending SCIF_CNCT_REQ.
 *
 * @msg: connection request message received
 * @list: link to list of connection requests
 */
struct scif_conreq {
	struct scifmsg msg;
	struct list_head list;
};
50
/* Size of the RB (ring buffer) for the Endpoint QP */
#define SCIF_ENDPT_QP_SIZE 0x1000

/*
 * struct scif_endpt_qp_info - SCIF endpoint queue pair
 *
 * @qp: Qpair for this endpoint
 * @qp_offset: DMA address of the QP
 * @gnt_pld: Payload in a SCIF_CNCT_GNT message containing the
 *	     physical address of the remote_qp.
 */
struct scif_endpt_qp_info {
	struct scif_qp *qp;
	dma_addr_t qp_offset;
	dma_addr_t gnt_pld;
};
67
/*
 * struct scif_endpt - The SCIF endpoint data structure
 *
 * @state: end point state
 * @lock: lock synchronizing access to endpoint fields like state etc
 * @port: self port information
 * @peer: peer port information
 * @backlog: maximum pending connection requests
 * @qp_info: Endpoint QP information for SCIF messaging
 * @remote_dev: scifdev used by this endpt to communicate with remote node.
 * @remote_ep: remote endpoint
 * @conreqcnt: Keep track of number of connection requests.
 * @files: Open file information used to match the id passed in with
 *         the flush routine.
 * @conlist: list of connection requests
 * @conwq: waitqueue for connection processing
 * @discon: completion used during disconnection
 * @sendwq: waitqueue used during sending messages
 * @recvwq: waitqueue used during message receipt
 * @sendlock: Synchronize ordering of messages sent
 * @recvlock: Synchronize ordering of messages received
 * @list: link to list of various endpoints like connected, listening etc
 * @li_accept: pending ACCEPTREG
 * @acceptcnt: pending ACCEPTREG cnt
 * @liacceptlist: link to listen accept
 * @miacceptlist: link to uaccept
 * @listenep: associated listen ep
 * @conn_port: Connection port
 * @conn_err: Errors during connection
 * @conn_async_state: Async connection
 * @conn_list: List of async connection requests
 */
struct scif_endpt {
	enum scif_epd_state state;
	spinlock_t lock;
	struct scif_port_id port;
	struct scif_port_id peer;
	int backlog;
	struct scif_endpt_qp_info qp_info;
	struct scif_dev *remote_dev;
	u64 remote_ep;
	int conreqcnt;
	struct files_struct *files;
	struct list_head conlist;
	wait_queue_head_t conwq;
	struct completion discon;
	wait_queue_head_t sendwq;
	wait_queue_head_t recvwq;
	struct mutex sendlock;
	struct mutex recvlock;
	struct list_head list;
	struct list_head li_accept;
	int acceptcnt;
	struct list_head liacceptlist;
	struct list_head miacceptlist;
	struct scif_endpt *listenep;
	struct scif_port_id conn_port;
	int conn_err;
	int conn_async_state;
	struct list_head conn_list;
};
130
/* Returns true if the remote device backing @ep is still registered */
static inline int scifdev_alive(struct scif_endpt *ep)
{
	return _scifdev_alive(ep->remote_dev);
}
135
136void scif_cleanup_zombie_epd(void);
137void scif_teardown_ep(void *endpt);
138void scif_cleanup_ep_qp(struct scif_endpt *ep);
139void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held);
140void scif_get_node_info(void);
141void scif_send_acks(struct scif_dev *dev);
142void scif_conn_handler(struct work_struct *work);
143int scif_rsrv_port(u16 port);
144void scif_get_port(u16 port);
145int scif_get_new_port(void);
146void scif_put_port(u16 port);
147int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags);
148int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags);
149void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg);
150void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg);
151void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
152void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg);
153void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg);
154void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg);
155void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
156void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg);
157void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
158int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
159int __scif_flush(scif_epd_t epd);
160#endif /* SCIF_EPD_H */
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
new file mode 100644
index 000000000000..eccf7e7135f9
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_fd.c
@@ -0,0 +1,303 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include "scif_main.h"
19
20static int scif_fdopen(struct inode *inode, struct file *f)
21{
22 struct scif_endpt *priv = scif_open();
23
24 if (!priv)
25 return -ENOMEM;
26 f->private_data = priv;
27 return 0;
28}
29
30static int scif_fdclose(struct inode *inode, struct file *f)
31{
32 struct scif_endpt *priv = f->private_data;
33
34 return scif_close(priv);
35}
36
/**
 * scif_fdflush() - flush file operation for SCIF endpoints
 * @f: file backing a SCIF endpoint
 * @id: owner identifier of the close(2) caller
 *
 * Always returns 0.
 */
static int scif_fdflush(struct file *f, fl_owner_t id)
{
	struct scif_endpt *ep = f->private_data;

	spin_lock(&ep->lock);
	/*
	 * The listening endpoint stashes the open file information before
	 * waiting for incoming connections. The release callback would never be
	 * called if the application closed the endpoint, while waiting for
	 * incoming connections from a separate thread since the file descriptor
	 * reference count is bumped up in the accept IOCTL. Call the flush
	 * routine if the id matches the endpoint open file information so that
	 * the listening endpoint can be woken up and the fd released.
	 */
	if (ep->files == id)
		__scif_flush(ep);
	spin_unlock(&ep->lock);
	return 0;
}
56
57static __always_inline void scif_err_debug(int err, const char *str)
58{
59 /*
60 * ENOTCONN is a common uninteresting error which is
61 * flooding debug messages to the console unnecessarily.
62 */
63 if (err < 0 && err != -ENOTCONN)
64 dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
65}
66
/**
 * scif_fdioctl() - ioctl dispatcher for SCIF endpoint files
 * @f: file backing a SCIF endpoint (private_data is the struct scif_endpt)
 * @cmd: SCIF ioctl command
 * @arg: user pointer to the command-specific argument structure
 *
 * Returns 0 on success, a negative errno on failure; unknown commands
 * return -EINVAL.
 */
static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct scif_endpt *priv = f->private_data;
	void __user *argp = (void __user *)arg;
	int err = 0;
	struct scifioctl_msg request;
	bool non_block = false;

	/* O_NONBLOCK on the fd selects non-blocking connect/send/recv */
	non_block = !!(f->f_flags & O_NONBLOCK);

	switch (cmd) {
	case SCIF_BIND:
	{
		int pn;

		if (copy_from_user(&pn, argp, sizeof(pn)))
			return -EFAULT;

		/* scif_bind() returns the bound port number (>= 0) */
		pn = scif_bind(priv, pn);
		if (pn < 0)
			return pn;

		if (copy_to_user(argp, &pn, sizeof(pn)))
			return -EFAULT;

		return 0;
	}
	case SCIF_LISTEN:
		return scif_listen(priv, arg);
	case SCIF_CONNECT:
	{
		struct scifioctl_connect req;
		struct scif_endpt *ep = (struct scif_endpt *)priv;

		if (copy_from_user(&req, argp, sizeof(req)))
			return -EFAULT;

		err = __scif_connect(priv, &req.peer, non_block);
		if (err < 0)
			return err;

		/* report back the local (self) port actually used */
		req.self.node = ep->port.node;
		req.self.port = ep->port.port;

		if (copy_to_user(argp, &req, sizeof(req)))
			return -EFAULT;

		return 0;
	}
	/*
	 * Accept is done in two halves. The request ioctl does the basic
	 * functionality of accepting the request and returning the information
	 * about it including the internal ID of the end point. The register
	 * is done with the internal ID on a new file descriptor opened by the
	 * requesting process.
	 */
	case SCIF_ACCEPTREQ:
	{
		struct scifioctl_accept request;
		scif_epd_t *ep = (scif_epd_t *)&request.endpt;

		if (copy_from_user(&request, argp, sizeof(request)))
			return -EFAULT;

		err = scif_accept(priv, &request.peer, ep, request.flags);
		if (err < 0)
			return err;

		if (copy_to_user(argp, &request, sizeof(request))) {
			/* user copy failed: undo the accept */
			scif_close(*ep);
			return -EFAULT;
		}
		/*
		 * Add to the list of user mode eps where the second half
		 * of the accept is not yet completed.
		 */
		spin_lock(&scif_info.eplock);
		list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
		list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
		(*ep)->listenep = priv;
		priv->acceptcnt++;
		spin_unlock(&scif_info.eplock);

		return 0;
	}
	case SCIF_ACCEPTREG:
	{
		struct scif_endpt *priv = f->private_data;
		struct scif_endpt *newep;
		struct scif_endpt *lisep;
		struct scif_endpt *fep = NULL;
		struct scif_endpt *tmpep;
		struct list_head *pos, *tmpq;

		/* Finally replace the pointer to the accepted endpoint */
		if (copy_from_user(&newep, argp, sizeof(void *)))
			return -EFAULT;

		/*
		 * Remove from the user accept queue; newep is only
		 * trusted if it is actually found on that list.
		 */
		spin_lock(&scif_info.eplock);
		list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, miacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				fep = tmpep;
				break;
			}
		}

		if (!fep) {
			spin_unlock(&scif_info.eplock);
			return -ENOENT;
		}

		/* drop it from the listening endpoint's pending list too */
		lisep = newep->listenep;
		list_for_each_safe(pos, tmpq, &lisep->li_accept) {
			tmpep = list_entry(pos,
					   struct scif_endpt, liacceptlist);
			if (tmpep == newep) {
				list_del(pos);
				lisep->acceptcnt--;
				break;
			}
		}

		spin_unlock(&scif_info.eplock);

		/* Free the resources automatically created from the open. */
		scif_teardown_ep(priv);
		scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
		f->private_data = newep;
		return 0;
	}
	case SCIF_SEND:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto send_err;
		}
		/* positive err is the byte count actually sent */
		err = scif_user_send(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto send_err;
		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto send_err;
		}
		err = 0;
send_err:
		scif_err_debug(err, "scif_send");
		return err;
	}
	case SCIF_RECV:
	{
		struct scif_endpt *priv = f->private_data;

		if (copy_from_user(&request, argp,
				   sizeof(struct scifioctl_msg))) {
			err = -EFAULT;
			goto recv_err;
		}

		/* positive err is the byte count actually received */
		err = scif_user_recv(priv, (void __user *)request.msg,
				     request.len, request.flags);
		if (err < 0)
			goto recv_err;

		if (copy_to_user(&
				 ((struct scifioctl_msg __user *)argp)->out_len,
				 &err, sizeof(err))) {
			err = -EFAULT;
			goto recv_err;
		}
		err = 0;
recv_err:
		scif_err_debug(err, "scif_recv");
		return err;
	}
	case SCIF_GET_NODEIDS:
	{
		struct scifioctl_node_ids node_ids;
		int entries;
		u16 *nodes;
		void __user *unodes, *uself;
		u16 self;

		if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err2;
		}

		/*
		 * NOTE(review): node IDs run 0..maxid inclusive, so the
		 * node count is maxid + 1 — confirm whether this bound
		 * should be scif_info.maxid + 1.
		 */
		entries = min_t(int, scif_info.maxid, node_ids.len);
		nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
		if (entries && !nodes) {
			err = -ENOMEM;
			goto getnodes_err2;
		}
		node_ids.len = scif_get_node_ids(nodes, entries, &self);

		unodes = (void __user *)node_ids.nodes;
		if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		uself = (void __user *)node_ids.self;
		if (copy_to_user(uself, &self, sizeof(u16))) {
			err = -EFAULT;
			goto getnodes_err1;
		}

		if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
			err = -EFAULT;
			goto getnodes_err1;
		}
getnodes_err1:
		kfree(nodes);
getnodes_err2:
		return err;
	}
	}
	return -EINVAL;
}
296
/* File operations for /dev/scif; each open file owns one SCIF endpoint */
const struct file_operations scif_fops = {
	.open = scif_fdopen,
	.release = scif_fdclose,
	.unlocked_ioctl = scif_fdioctl,
	.flush = scif_fdflush,
	.owner = THIS_MODULE,
};
diff --git a/drivers/misc/mic/scif/scif_main.c b/drivers/misc/mic/scif/scif_main.c
new file mode 100644
index 000000000000..6ce851f5c7e6
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_main.c
@@ -0,0 +1,388 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include <linux/module.h>
19#include <linux/idr.h>
20
21#include <linux/mic_common.h>
22#include "../common/mic_dev.h"
23#include "../bus/scif_bus.h"
24#include "scif_peer_bus.h"
25#include "scif_main.h"
26#include "scif_map.h"
27
/* Global SCIF driver state; the misc device appears as /dev/scif */
struct scif_info scif_info = {
	.mdev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name = "scif",
		.fops = &scif_fops,
	}
};

/* Per-node device table, allocated in scif_setup_scifdev() */
struct scif_dev *scif_dev;
/* Number of probed SCIF hw devices; the first probe sets up loopback */
static atomic_t g_loopb_cnt;
38
/*
 * Bottom half for doorbell interrupts; runs in the context of intr_wq.
 * Loopback (self) devices route messages through the loopback handler,
 * everything else goes through the node QP interrupt handler.
 */
static void scif_intr_bh_handler(struct work_struct *work)
{
	struct scif_dev *scifdev =
		container_of(work, struct scif_dev, intr_bh);

	if (scifdev_self(scifdev))
		scif_loopb_msg_handler(scifdev, scifdev->qpairs);
	else
		scif_nodeqp_intrhandler(scifdev, scifdev->qpairs);
}
50
51int scif_setup_intr_wq(struct scif_dev *scifdev)
52{
53 if (!scifdev->intr_wq) {
54 snprintf(scifdev->intr_wqname, sizeof(scifdev->intr_wqname),
55 "SCIF INTR %d", scifdev->node);
56 scifdev->intr_wq =
57 alloc_ordered_workqueue(scifdev->intr_wqname, 0);
58 if (!scifdev->intr_wq)
59 return -ENOMEM;
60 INIT_WORK(&scifdev->intr_bh, scif_intr_bh_handler);
61 }
62 return 0;
63}
64
65void scif_destroy_intr_wq(struct scif_dev *scifdev)
66{
67 if (scifdev->intr_wq) {
68 destroy_workqueue(scifdev->intr_wq);
69 scifdev->intr_wq = NULL;
70 }
71}
72
/*
 * Hard IRQ handler for the SCIF doorbell: ack the interrupt and defer
 * message processing to the ordered workqueue (scif_intr_bh_handler).
 */
irqreturn_t scif_intr_handler(int irq, void *data)
{
	struct scif_dev *scifdev = data;
	struct scif_hw_dev *sdev = scifdev->sdev;

	sdev->hw_ops->ack_interrupt(sdev, scifdev->db);
	queue_work(scifdev->intr_wq, &scifdev->intr_bh);
	return IRQ_HANDLED;
}
82
/*
 * Peer bus probe: account for the new peer node under conflock and
 * publish the peer device pointer for RCU readers (_scifdev_alive()).
 */
static int scif_peer_probe(struct scif_peer_dev *spdev)
{
	struct scif_dev *scifdev = &scif_dev[spdev->dnode];

	mutex_lock(&scif_info.conflock);
	scif_info.total++;
	scif_info.maxid = max_t(u32, spdev->dnode, scif_info.maxid);
	mutex_unlock(&scif_info.conflock);
	/* publish: readers see a fully initialized spdev from here on */
	rcu_assign_pointer(scifdev->spdev, spdev);

	/* In the future SCIF kernel client devices will be added here */
	return 0;
}
96
/*
 * Peer bus remove: unpublish the peer device pointer, wait for RCU
 * readers to drain, then drop the node from the global accounting.
 */
static void scif_peer_remove(struct scif_peer_dev *spdev)
{
	struct scif_dev *scifdev = &scif_dev[spdev->dnode];

	/* In the future SCIF kernel client devices will be removed here */
	/*
	 * NOTE(review): rcu_dereference() outside an rcu_read_lock()
	 * section — likely should be rcu_dereference_protected() since
	 * this is the updater path; confirm against lockdep-RCU.
	 */
	spdev = rcu_dereference(scifdev->spdev);
	if (spdev)
		RCU_INIT_POINTER(scifdev->spdev, NULL);
	synchronize_rcu();

	mutex_lock(&scif_info.conflock);
	scif_info.total--;
	mutex_unlock(&scif_info.conflock);
}
111
/*
 * Delayed work polling for the remote QP information published in the
 * bootparam page. Once the peer's QP DMA address appears, complete QP
 * setup via scif_qp_response(); otherwise re-arm and poll again in 1s.
 */
static void scif_qp_setup_handler(struct work_struct *work)
{
	struct scif_dev *scifdev = container_of(work, struct scif_dev,
						qp_dwork.work);
	struct scif_hw_dev *sdev = scifdev->sdev;
	dma_addr_t da = 0;
	int err;

	if (scif_is_mgmt_node()) {
		/* host side: bootparam page is in local memory */
		struct mic_bootparam *bp = sdev->dp;

		da = bp->scif_card_dma_addr;
		scifdev->rdb = bp->h2c_scif_db;
	} else {
		/* card side: bootparam page is remote, access via MMIO */
		struct mic_bootparam __iomem *bp = sdev->rdp;

		da = readq(&bp->scif_host_dma_addr);
		scifdev->rdb = ioread8(&bp->c2h_scif_db);
	}
	if (da) {
		err = scif_qp_response(da, scifdev);
		if (err)
			dev_err(&scifdev->sdev->dev,
				"scif_qp_response err %d\n", err);
	} else {
		/* peer not ready yet; poll again in a second */
		schedule_delayed_work(&scifdev->qp_dwork,
				      msecs_to_jiffies(1000));
	}
}
141
/*
 * Allocate and initialize the global per-node scif_dev table. The node
 * count is read from the bootparam page: locally on the host (snode == 0),
 * via MMIO on the card. Returns 0 on success, -ENOMEM on failure.
 */
static int scif_setup_scifdev(struct scif_hw_dev *sdev)
{
	int i;
	u8 num_nodes;

	if (sdev->snode) {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		num_nodes = ioread8(&bp->tot_nodes);
	} else {
		struct mic_bootparam *bp = sdev->dp;

		num_nodes = bp->tot_nodes;
	}
	scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
	if (!scif_dev)
		return -ENOMEM;
	for (i = 0; i < num_nodes; i++) {
		struct scif_dev *scifdev = &scif_dev[i];

		scifdev->node = i;
		scifdev->exit = OP_IDLE;
		init_waitqueue_head(&scifdev->disconn_wq);
		mutex_init(&scifdev->lock);
		INIT_WORK(&scifdev->init_msg_work, scif_qp_response_ack);
		INIT_DELAYED_WORK(&scifdev->p2p_dwork,
				  scif_poll_qp_state);
		INIT_DELAYED_WORK(&scifdev->qp_dwork,
				  scif_qp_setup_handler);
		INIT_LIST_HEAD(&scifdev->p2p);
		RCU_INIT_POINTER(scifdev->spdev, NULL);
	}
	return 0;
}
176
177static void scif_destroy_scifdev(void)
178{
179 kfree(scif_dev);
180}
181
/*
 * SCIF hw bus probe. The first probed device additionally allocates the
 * global scif_dev table and brings up the loopback QP (tracked via
 * g_loopb_cnt). Every device then gets its interrupt workqueue, node QP,
 * doorbell and IRQ, and publishes its QP DMA address/doorbell in the
 * bootparam page before polling for the peer QP via qp_dwork.
 */
static int scif_probe(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev;
	int rc;

	dev_set_drvdata(&sdev->dev, sdev);
	if (1 == atomic_add_return(1, &g_loopb_cnt)) {
		/* first device: set up the global table and loopback */
		struct scif_dev *loopb_dev;

		rc = scif_setup_scifdev(sdev);
		if (rc)
			goto exit;
		scifdev = &scif_dev[sdev->dnode];
		scifdev->sdev = sdev;
		loopb_dev = &scif_dev[sdev->snode];
		loopb_dev->sdev = sdev;
		rc = scif_setup_loopback_qp(loopb_dev);
		if (rc)
			goto free_sdev;
	} else {
		scifdev = &scif_dev[sdev->dnode];
		scifdev->sdev = sdev;
	}
	rc = scif_setup_intr_wq(scifdev);
	if (rc)
		goto destroy_loopb;
	rc = scif_setup_qp(scifdev);
	if (rc)
		goto destroy_intr;
	scifdev->db = sdev->hw_ops->next_db(sdev);
	scifdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
						    "SCIF_INTR", scifdev,
						    scifdev->db);
	if (IS_ERR(scifdev->cookie)) {
		rc = PTR_ERR(scifdev->cookie);
		goto free_qp;
	}
	if (scif_is_mgmt_node()) {
		/* host: write doorbell/QP address directly to bootparam */
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = scifdev->db;
		bp->scif_host_dma_addr = scifdev->qp_dma_addr;
	} else {
		/* card: publish via MMIO into the remote bootparam page */
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(scifdev->db, &bp->h2c_scif_db);
		writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr);
	}
	schedule_delayed_work(&scifdev->qp_dwork,
			      msecs_to_jiffies(1000));
	return rc;
free_qp:
	scif_free_qp(scifdev);
destroy_intr:
	scif_destroy_intr_wq(scifdev);
destroy_loopb:
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
	/*
	 * NOTE(review): the fallthrough into free_sdev frees the shared
	 * scif_dev table even when a *later* device fails while earlier
	 * devices are still using it — confirm this error path against
	 * multi-device configurations.
	 */
free_sdev:
	scif_destroy_scifdev();
exit:
	return rc;
}
245
246void scif_stop(struct scif_dev *scifdev)
247{
248 struct scif_dev *dev;
249 int i;
250
251 for (i = scif_info.maxid; i >= 0; i--) {
252 dev = &scif_dev[i];
253 if (scifdev_self(dev))
254 continue;
255 scif_handle_remove_node(i);
256 }
257}
258
/*
 * SCIF hw bus remove: invalidate the doorbell/QP address published in the
 * bootparam page, disconnect from peers (management node) or stop the
 * whole network (card-initiated exit), then release loopback, IRQ,
 * workqueue and QP resources for this device.
 */
static void scif_remove(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];

	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = -1;
		bp->scif_host_dma_addr = 0x0;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(-1, &bp->h2c_scif_db);
		writeq(0x0, &bp->scif_card_dma_addr);
	}
	if (scif_is_mgmt_node()) {
		scif_disconnect_node(scifdev->node, true);
	} else {
		scif_info.card_initiated_exit = true;
		scif_stop(scifdev);
	}
	/* last device going away also tears down the loopback QP */
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
	if (scifdev->cookie) {
		sdev->hw_ops->free_irq(sdev, scifdev->cookie, scifdev);
		scifdev->cookie = NULL;
	}
	scif_destroy_intr_wq(scifdev);
	cancel_delayed_work(&scifdev->qp_dwork);
	scif_free_qp(scifdev);
	scifdev->rdb = -1;
	scifdev->sdev = NULL;
}
292
293static struct scif_peer_driver scif_peer_driver = {
294 .driver.name = KBUILD_MODNAME,
295 .driver.owner = THIS_MODULE,
296 .probe = scif_peer_probe,
297 .remove = scif_peer_remove,
298};
299
300static struct scif_hw_dev_id id_table[] = {
301 { MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
302 { 0 },
303};
304
305static struct scif_driver scif_driver = {
306 .driver.name = KBUILD_MODNAME,
307 .driver.owner = THIS_MODULE,
308 .id_table = id_table,
309 .probe = scif_probe,
310 .remove = scif_remove,
311};
312
/*
 * Initialize all global SCIF state: locks, endpoint lists, wait queues,
 * work items and the port IDR. Always returns 0.
 */
static int _scif_init(void)
{
	spin_lock_init(&scif_info.eplock);
	spin_lock_init(&scif_info.nb_connect_lock);
	spin_lock_init(&scif_info.port_lock);
	mutex_init(&scif_info.conflock);
	mutex_init(&scif_info.connlock);
	INIT_LIST_HEAD(&scif_info.uaccept);
	INIT_LIST_HEAD(&scif_info.listen);
	INIT_LIST_HEAD(&scif_info.zombie);
	INIT_LIST_HEAD(&scif_info.connected);
	INIT_LIST_HEAD(&scif_info.disconnected);
	INIT_LIST_HEAD(&scif_info.nb_connect_list);
	init_waitqueue_head(&scif_info.exitwq);
	scif_info.en_msg_log = 0;	/* debug message logging off */
	scif_info.p2p_enable = 1;	/* peer-to-peer network on */
	INIT_WORK(&scif_info.misc_work, scif_misc_handler);
	INIT_WORK(&scif_info.conn_work, scif_conn_handler);
	idr_init(&scif_ports);
	return 0;
}
334
/* Release global state: the port IDR and the per-node device table */
static void _scif_exit(void)
{
	idr_destroy(&scif_ports);
	scif_destroy_scifdev();
}
340
/*
 * Module init: set up global state, register the peer bus and both bus
 * drivers, then expose /dev/scif and the debugfs entries. Each failure
 * unwinds everything registered before it, in reverse order.
 */
static int __init scif_init(void)
{
	struct miscdevice *mdev = &scif_info.mdev;
	int rc;

	_scif_init();
	rc = scif_peer_bus_init();
	if (rc)
		goto exit;
	rc = scif_peer_register_driver(&scif_peer_driver);
	if (rc)
		goto peer_bus_exit;
	rc = scif_register_driver(&scif_driver);
	if (rc)
		goto unreg_scif_peer;
	rc = misc_register(mdev);
	if (rc)
		goto unreg_scif;
	scif_init_debugfs();
	return 0;
unreg_scif:
	scif_unregister_driver(&scif_driver);
unreg_scif_peer:
	scif_peer_unregister_driver(&scif_peer_driver);
peer_bus_exit:
	scif_peer_bus_exit();
exit:
	_scif_exit();
	return rc;
}
371
/* Module exit: tear down in exact reverse order of scif_init() */
static void __exit scif_exit(void)
{
	scif_exit_debugfs();
	misc_deregister(&scif_info.mdev);
	scif_unregister_driver(&scif_driver);
	scif_peer_unregister_driver(&scif_peer_driver);
	scif_peer_bus_exit();
	_scif_exit();
}
381
382module_init(scif_init);
383module_exit(scif_exit);
384
385MODULE_DEVICE_TABLE(scif, id_table);
386MODULE_AUTHOR("Intel Corporation");
387MODULE_DESCRIPTION("Intel(R) SCIF driver");
388MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/scif/scif_main.h b/drivers/misc/mic/scif/scif_main.h
new file mode 100644
index 000000000000..580bc63e1b23
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_main.h
@@ -0,0 +1,254 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#ifndef SCIF_MAIN_H
19#define SCIF_MAIN_H
20
21#include <linux/sched.h>
22#include <linux/pci.h>
23#include <linux/miscdevice.h>
24#include <linux/dmaengine.h>
25#include <linux/file.h>
26#include <linux/scif.h>
27
28#include "../common/mic_dev.h"
29
30#define SCIF_MGMT_NODE 0
31#define SCIF_DEFAULT_WATCHDOG_TO 30
32#define SCIF_NODE_ACCEPT_TIMEOUT (3 * HZ)
33#define SCIF_NODE_ALIVE_TIMEOUT (SCIF_DEFAULT_WATCHDOG_TO * HZ)
34
/*
 * Generic state used for certain node QP message exchanges
 * like Unregister, Alloc etc.
 */
enum scif_msg_state {
	OP_IDLE = 1,		/* no exchange in flight */
	OP_IN_PROGRESS,		/* request sent, awaiting response */
	OP_COMPLETED,		/* response received, success */
	OP_FAILED		/* response received, failure */
};
45
/*
 * struct scif_info - Global SCIF information
 *
 * @nodeid: Node ID this node is to others
 * @maxid: Max known node ID
 * @total: Total number of SCIF nodes
 * @nr_zombies: number of zombie endpoints
 * @eplock: Lock to synchronize listening, zombie endpoint lists
 * @connlock: Lock to synchronize connected and disconnected lists
 * @nb_connect_lock: Synchronize non blocking connect operations
 * @port_lock: Synchronize access to SCIF ports
 * @uaccept: List of user acceptreq waiting for acceptreg
 * @listen: List of listening end points
 * @zombie: List of zombie end points with pending RMA's
 * @connected: List of end points in connected state
 * @disconnected: List of end points in disconnected state
 * @nb_connect_list: List for non blocking connections
 * @misc_work: miscellaneous SCIF tasks
 * @conflock: Lock to synchronize SCIF node configuration changes
 * @en_msg_log: Enable debug message logging
 * @p2p_enable: Enable P2P SCIF network
 * @mdev: The MISC device
 * @conn_work: Work for workqueue handling all connections
 * @exitwq: Wait queue for waiting for an EXIT node QP message response
 * @loopb_dev: Dummy SCIF device used for loopback
 * @loopb_wq: Workqueue used for handling loopback messages
 * @loopb_wqname: Name of loopback workqueue
 * @loopb_work: Used for submitting work to loopb_wq
 * @loopb_recv_q: List of messages received on the loopb_wq
 * @card_initiated_exit: set when the card has initiated the exit
 */
struct scif_info {
	u8 nodeid;
	u8 maxid;
	u8 total;
	u32 nr_zombies;
	spinlock_t eplock;
	struct mutex connlock;
	spinlock_t nb_connect_lock;
	spinlock_t port_lock;
	struct list_head uaccept;
	struct list_head listen;
	struct list_head zombie;
	struct list_head connected;
	struct list_head disconnected;
	struct list_head nb_connect_list;
	struct work_struct misc_work;
	struct mutex conflock;
	u8 en_msg_log;
	u8 p2p_enable;
	struct miscdevice mdev;
	struct work_struct conn_work;
	wait_queue_head_t exitwq;
	struct scif_dev *loopb_dev;
	struct workqueue_struct *loopb_wq;
	char loopb_wqname[16];
	struct work_struct loopb_work;
	struct list_head loopb_recv_q;
	bool card_initiated_exit;
};
106
/*
 * struct scif_p2p_info - SCIF mapping information used for P2P
 *
 * @ppi_peer_id: SCIF peer node id
 * @ppi_sg: Scatter list for bar information (One for mmio and one for aper)
 * @sg_nentries: Number of entries in the scatterlist
 * @ppi_da: DMA address for MMIO and APER bars
 * @ppi_len: Length of MMIO and APER bars
 * @ppi_list: Link in list of mapping information
 */
struct scif_p2p_info {
	u8 ppi_peer_id;
	struct scatterlist *ppi_sg[2];
	u64 sg_nentries[2];
	dma_addr_t ppi_da[2];
	u64 ppi_len[2];
/* Indices into the two-element arrays above */
#define SCIF_PPI_MMIO 0
#define SCIF_PPI_APER 1
	struct list_head ppi_list;
};
127
/*
 * struct scif_dev - SCIF remote device specific fields
 *
 * @node: Node id
 * @p2p: List of P2P mapping information
 * @qpairs: The node queue pair for exchanging control messages
 * @intr_wq: Workqueue for handling Node QP messages
 * @intr_wqname: Name of node QP workqueue for handling interrupts
 * @intr_bh: Used for submitting work to intr_wq
 * @lock: Lock used for synchronizing access to the scif device
 * @sdev: SCIF hardware device on the SCIF hardware bus
 * @db: doorbell the peer will trigger to generate an interrupt on self
 * @rdb: Doorbell to trigger on the peer to generate an interrupt on the peer
 * @cookie: Cookie received while registering the interrupt handler
 * @init_msg_work: work scheduled for SCIF_INIT message processing
 * @p2p_dwork: Delayed work to enable polling for P2P state
 * @qp_dwork: Delayed work for enabling polling for remote QP information
 * @p2p_retry: Number of times to retry polling of P2P state
 * @base_addr: P2P aperture bar base address
 * @mmio: The peer MMIO information used for P2P
 * @spdev: SCIF peer device on the SCIF peer bus
 * @node_remove_ack_pending: True if a node_remove_ack is pending
 * @exit_ack_pending: true if an exit_ack is pending
 * @disconn_wq: Used while waiting for a node remove response
 * @disconn_rescnt: Keeps track of number of node remove requests sent
 * @exit: Status of exit message
 * @qp_dma_addr: Queue pair DMA address passed to the peer
 */
struct scif_dev {
	u8 node;
	struct list_head p2p;
	struct scif_qp *qpairs;
	struct workqueue_struct *intr_wq;
	char intr_wqname[16];
	struct work_struct intr_bh;
	struct mutex lock;
	struct scif_hw_dev *sdev;
	int db;
	int rdb;
	struct mic_irq *cookie;
	struct work_struct init_msg_work;
	struct delayed_work p2p_dwork;
	struct delayed_work qp_dwork;
	int p2p_retry;
	dma_addr_t base_addr;
	struct mic_mw mmio;
	struct scif_peer_dev __rcu *spdev;
	bool node_remove_ack_pending;
	bool exit_ack_pending;
	wait_queue_head_t disconn_wq;
	atomic_t disconn_rescnt;
	enum scif_msg_state exit;
	dma_addr_t qp_dma_addr;
};
182
183extern struct scif_info scif_info;
184extern struct idr scif_ports;
185extern struct scif_dev *scif_dev;
186extern const struct file_operations scif_fops;
187
188/* Size of the RB for the Node QP */
189#define SCIF_NODE_QP_SIZE 0x10000
190
191#include "scif_nodeqp.h"
192
/*
 * scifdev_self:
 * @dev: The remote SCIF Device
 *
 * Returns true if the SCIF Device passed is the self aka Loopback SCIF
 * device, i.e. its node ID matches our own.
 */
static inline int scifdev_self(struct scif_dev *dev)
{
	return dev->node == scif_info.nodeid;
}
203
204static inline bool scif_is_mgmt_node(void)
205{
206 return !scif_info.nodeid;
207}
208
209/*
210 * scifdev_is_p2p:
211 * @dev: The remote SCIF Device
212 *
213 * Returns true if the SCIF Device is a MIC Peer to Peer SCIF device.
214 */
215static inline bool scifdev_is_p2p(struct scif_dev *dev)
216{
217 if (scif_is_mgmt_node())
218 return false;
219 else
220 return dev != &scif_dev[SCIF_MGMT_NODE] &&
221 !scifdev_self(dev);
222}
223
/*
 * _scifdev_alive:
 * @scifdev: The remote SCIF Device
 *
 * Returns true if the remote SCIF Device is running or sleeping for
 * this endpoint, i.e. its peer device is still published on the peer bus.
 */
static inline int _scifdev_alive(struct scif_dev *scifdev)
{
	struct scif_peer_dev *spdev;

	/* only a liveness snapshot is needed, so the pointer itself
	 * is not used past the read-side critical section */
	rcu_read_lock();
	spdev = rcu_dereference(scifdev->spdev);
	rcu_read_unlock();
	return !!spdev;
}
240
241#include "scif_epd.h"
242
243void __init scif_init_debugfs(void);
244void scif_exit_debugfs(void);
245int scif_setup_intr_wq(struct scif_dev *scifdev);
246void scif_destroy_intr_wq(struct scif_dev *scifdev);
247void scif_cleanup_scifdev(struct scif_dev *dev);
248void scif_handle_remove_node(int node);
249void scif_disconnect_node(u32 node_id, bool mgmt_initiated);
250void scif_free_qp(struct scif_dev *dev);
251void scif_misc_handler(struct work_struct *work);
252void scif_stop(struct scif_dev *scifdev);
253irqreturn_t scif_intr_handler(int irq, void *data);
254#endif /* SCIF_MAIN_H */
diff --git a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h
new file mode 100644
index 000000000000..20e50b4e19b2
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_map.h
@@ -0,0 +1,113 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#ifndef SCIF_MAP_H
19#define SCIF_MAP_H
20
21#include "../bus/scif_bus.h"
22
/*
 * scif_alloc_coherent - allocate a buffer usable for messaging with @scifdev
 * @dma_handle: returns the DMA address of the allocation
 * @scifdev: the remote SCIF device the buffer will be shared with
 * @size: allocation size in bytes
 * @gfp: allocation flags
 *
 * For the loopback device a plain kmalloc suffices and the "DMA" address
 * is just the physical address. Otherwise a coherent DMA buffer is
 * allocated; for P2P peers the returned handle is offset by the peer's
 * aperture base so the remote side can address it directly.
 *
 * Returns the virtual address of the buffer, or NULL on failure.
 */
static __always_inline void *
scif_alloc_coherent(dma_addr_t *dma_handle,
		    struct scif_dev *scifdev, size_t size,
		    gfp_t gfp)
{
	void *va;

	if (scifdev_self(scifdev)) {
		va = kmalloc(size, gfp);
		if (va)
			*dma_handle = virt_to_phys(va);
	} else {
		va = dma_alloc_coherent(&scifdev->sdev->dev,
					size, dma_handle, gfp);
		if (va && scifdev_is_p2p(scifdev))
			*dma_handle = *dma_handle + scifdev->base_addr;
	}
	return va;
}
42
/*
 * scif_free_coherent - free a buffer obtained from scif_alloc_coherent()
 * @va: virtual address of the buffer
 * @local: the DMA handle returned by scif_alloc_coherent()
 * @scifdev: the remote SCIF device the buffer was allocated for
 * @size: allocation size in bytes
 *
 * Undoes the P2P aperture offset applied at allocation time before handing
 * the handle back to the DMA API.
 *
 * NOTE(review): the offset is removed only when local > base_addr while
 * the alloc path adds it unconditionally for P2P -- confirm a handle can
 * never legitimately equal base_addr, otherwise this check is off by one.
 */
static __always_inline void
scif_free_coherent(void *va, dma_addr_t local,
		   struct scif_dev *scifdev, size_t size)
{
	if (scifdev_self(scifdev)) {
		kfree(va);
	} else {
		if (scifdev_is_p2p(scifdev) && local > scifdev->base_addr)
			local = local - scifdev->base_addr;
		dma_free_coherent(&scifdev->sdev->dev,
				  size, va, local);
	}
}
56
57static __always_inline int
58scif_map_single(dma_addr_t *dma_handle,
59 void *local, struct scif_dev *scifdev, size_t size)
60{
61 int err = 0;
62
63 if (scifdev_self(scifdev)) {
64 *dma_handle = virt_to_phys((local));
65 } else {
66 *dma_handle = dma_map_single(&scifdev->sdev->dev,
67 local, size, DMA_BIDIRECTIONAL);
68 if (dma_mapping_error(&scifdev->sdev->dev, *dma_handle))
69 err = -ENOMEM;
70 else if (scifdev_is_p2p(scifdev))
71 *dma_handle = *dma_handle + scifdev->base_addr;
72 }
73 if (err)
74 *dma_handle = 0;
75 return err;
76}
77
78static __always_inline void
79scif_unmap_single(dma_addr_t local, struct scif_dev *scifdev,
80 size_t size)
81{
82 if (!scifdev_self(scifdev)) {
83 if (scifdev_is_p2p(scifdev) && local > scifdev->base_addr)
84 local = local - scifdev->base_addr;
85 dma_unmap_single(&scifdev->sdev->dev, local,
86 size, DMA_BIDIRECTIONAL);
87 }
88}
89
90static __always_inline void *
91scif_ioremap(dma_addr_t phys, size_t size, struct scif_dev *scifdev)
92{
93 void *out_virt;
94 struct scif_hw_dev *sdev = scifdev->sdev;
95
96 if (scifdev_self(scifdev))
97 out_virt = phys_to_virt(phys);
98 else
99 out_virt = (void __force *)
100 sdev->hw_ops->ioremap(sdev, phys, size);
101 return out_virt;
102}
103
104static __always_inline void
105scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev)
106{
107 if (!scifdev_self(scifdev)) {
108 struct scif_hw_dev *sdev = scifdev->sdev;
109
110 sdev->hw_ops->iounmap(sdev, (void __force __iomem *)virt);
111 }
112}
113#endif /* SCIF_MAP_H */
diff --git a/drivers/misc/mic/scif/scif_nm.c b/drivers/misc/mic/scif/scif_nm.c
new file mode 100644
index 000000000000..9b4c5382d6a7
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_nm.c
@@ -0,0 +1,237 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include "scif_peer_bus.h"
19
20#include "scif_main.h"
21#include "scif_map.h"
22
/**
 * scif_invalidate_ep() - Set state for all connected endpoints
 * to disconnected and wake up all send/recv waitqueues
 * @node: id of the node whose endpoints are being invalidated
 *
 * Walks both the disconnected and connected endpoint lists under connlock
 * and tears down the QP of every endpoint bound to @node. Connected
 * endpoints are additionally moved to the disconnected list, marked
 * SCIFEP_DISCONNECTED and their send/recv waiters woken.
 */
static void scif_invalidate_ep(int node)
{
	struct scif_endpt *ep;
	struct list_head *pos, *tmpq;

	/* Let any in-flight connection work finish before invalidating */
	flush_work(&scif_info.conn_work);
	mutex_lock(&scif_info.connlock);
	list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->remote_dev->node == node) {
			spin_lock(&ep->lock);
			scif_cleanup_ep_qp(ep);
			spin_unlock(&ep->lock);
		}
	}
	list_for_each_safe(pos, tmpq, &scif_info.connected) {
		ep = list_entry(pos, struct scif_endpt, list);
		if (ep->remote_dev->node == node) {
			/* Move the endpoint onto the disconnected list */
			list_del(pos);
			spin_lock(&ep->lock);
			ep->state = SCIFEP_DISCONNECTED;
			list_add_tail(&ep->list, &scif_info.disconnected);
			scif_cleanup_ep_qp(ep);
			wake_up_interruptible(&ep->sendwq);
			wake_up_interruptible(&ep->recvwq);
			spin_unlock(&ep->lock);
		}
	}
	mutex_unlock(&scif_info.connlock);
}
57
58void scif_free_qp(struct scif_dev *scifdev)
59{
60 struct scif_qp *qp = scifdev->qpairs;
61
62 if (!qp)
63 return;
64 scif_free_coherent((void *)qp->inbound_q.rb_base,
65 qp->local_buf, scifdev, qp->inbound_q.size);
66 scif_unmap_single(qp->local_qp, scifdev, sizeof(struct scif_qp));
67 kfree(scifdev->qpairs);
68 scifdev->qpairs = NULL;
69}
70
71static void scif_cleanup_qp(struct scif_dev *dev)
72{
73 struct scif_qp *qp = &dev->qpairs[0];
74
75 if (!qp)
76 return;
77 scif_iounmap((void *)qp->remote_qp, sizeof(struct scif_qp), dev);
78 scif_iounmap((void *)qp->outbound_q.rb_base,
79 sizeof(struct scif_qp), dev);
80 qp->remote_qp = NULL;
81 qp->local_write = 0;
82 qp->inbound_q.current_write_offset = 0;
83 qp->inbound_q.current_read_offset = 0;
84 if (scifdev_is_p2p(dev))
85 scif_free_qp(dev);
86}
87
/*
 * scif_send_acks - flush pending teardown acknowledgements for @dev
 * @dev: Remote SCIF device.
 *
 * Sends a SCIF_NODE_REMOVE_ACK to the mgmt node and/or a SCIF_EXIT_ACK to
 * @dev when the corresponding *_ack_pending flags are set, clearing the
 * flags afterwards.
 *
 * NOTE(review): msg is stack allocated and only the fields below are set;
 * the remaining payload words are sent uninitialized -- confirm receivers
 * only consume the fields filled in here.
 */
void scif_send_acks(struct scif_dev *dev)
{
	struct scifmsg msg;

	if (dev->node_remove_ack_pending) {
		/* Acknowledge the node removal to the mgmt node */
		msg.uop = SCIF_NODE_REMOVE_ACK;
		msg.src.node = scif_info.nodeid;
		msg.dst.node = SCIF_MGMT_NODE;
		msg.payload[0] = dev->node;
		scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg);
		dev->node_remove_ack_pending = false;
	}
	if (dev->exit_ack_pending) {
		/* Acknowledge the SCIF_EXIT received from the peer */
		msg.uop = SCIF_EXIT_ACK;
		msg.src.node = scif_info.nodeid;
		msg.dst.node = dev->node;
		scif_nodeqp_send(dev, &msg);
		dev->exit_ack_pending = false;
	}
}
108
/*
 * scif_cleanup_scifdev
 *
 * @dev: Remote SCIF device.
 * Uninitialize SCIF data structures for remote SCIF device.
 *
 * Teardown order: P2P interrupt resources, P2P mappings, endpoint
 * invalidation, pending acks, an optional card-initiated SCIF_EXIT
 * handshake, and finally the queue pair itself.
 */
void scif_cleanup_scifdev(struct scif_dev *dev)
{
	struct scif_hw_dev *sdev = dev->sdev;

	/* Nothing to tear down if no hardware device was ever bound */
	if (!dev->sdev)
		return;
	if (scifdev_is_p2p(dev)) {
		/* Release the P2P interrupt before the workqueue using it */
		if (dev->cookie) {
			sdev->hw_ops->free_irq(sdev, dev->cookie, dev);
			dev->cookie = NULL;
		}
		scif_destroy_intr_wq(dev);
	}
	scif_destroy_p2p(dev);
	scif_invalidate_ep(dev->node);
	scif_send_acks(dev);
	if (!dev->node && scif_info.card_initiated_exit) {
		/*
		 * Send an SCIF_EXIT message which is the last message from MIC
		 * to the Host and wait for a SCIF_EXIT_ACK
		 */
		scif_send_exit(dev);
		scif_info.card_initiated_exit = false;
	}
	scif_cleanup_qp(dev);
}
141
142/*
143 * scif_remove_node:
144 *
145 * @node: Node to remove
146 */
147void scif_handle_remove_node(int node)
148{
149 struct scif_dev *scifdev = &scif_dev[node];
150 struct scif_peer_dev *spdev;
151
152 rcu_read_lock();
153 spdev = rcu_dereference(scifdev->spdev);
154 rcu_read_unlock();
155 if (spdev)
156 scif_peer_unregister_device(spdev);
157 else
158 scif_send_acks(scifdev);
159}
160
161static int scif_send_rmnode_msg(int node, int remove_node)
162{
163 struct scifmsg notif_msg;
164 struct scif_dev *dev = &scif_dev[node];
165
166 notif_msg.uop = SCIF_NODE_REMOVE;
167 notif_msg.src.node = scif_info.nodeid;
168 notif_msg.dst.node = node;
169 notif_msg.payload[0] = remove_node;
170 return scif_nodeqp_send(dev, &notif_msg);
171}
172
/**
 * scif_node_disconnect:
 *
 * @node_id[in]: source node id.
 * @mgmt_initiated: Disconnection initiated from the mgmt node
 *
 * Disconnect a node from the scif network. Broadcasts SCIF_NODE_REMOVE to
 * every other peer, waits (with timeout) for their acks, optionally runs
 * the SCIF_EXIT handshake with the card, and finally tells the mgmt node
 * itself to clean up.
 */
void scif_disconnect_node(u32 node_id, bool mgmt_initiated)
{
	int ret;
	int msg_cnt = 0;
	u32 i = 0;
	struct scif_dev *scifdev = &scif_dev[node_id];

	/* The mgmt node (id 0) can never be disconnected */
	if (!node_id)
		return;

	atomic_set(&scifdev->disconn_rescnt, 0);

	/* Destroy p2p network */
	for (i = 1; i <= scif_info.maxid; i++) {
		if (i == node_id)
			continue;
		ret = scif_send_rmnode_msg(i, node_id);
		/* Only count messages that were actually delivered */
		if (!ret)
			msg_cnt++;
	}
	/* Wait for the remote nodes to respond with SCIF_NODE_REMOVE_ACK */
	ret = wait_event_timeout(scifdev->disconn_wq,
				 (atomic_read(&scifdev->disconn_rescnt)
				 == msg_cnt), SCIF_NODE_ALIVE_TIMEOUT);
	/* Tell the card to clean up */
	if (mgmt_initiated && _scifdev_alive(scifdev))
		/*
		 * Send an SCIF_EXIT message which is the last message from Host
		 * to the MIC and wait for a SCIF_EXIT_ACK
		 */
		scif_send_exit(scifdev);
	atomic_set(&scifdev->disconn_rescnt, 0);
	/* Tell the mgmt node to clean up */
	ret = scif_send_rmnode_msg(SCIF_MGMT_NODE, node_id);
	if (!ret)
		/* Wait for mgmt node to respond with SCIF_NODE_REMOVE_ACK */
		wait_event_timeout(scifdev->disconn_wq,
				   (atomic_read(&scifdev->disconn_rescnt) == 1),
				   SCIF_NODE_ALIVE_TIMEOUT);
}
221
/*
 * scif_get_node_info - query the mgmt node for SCIF network information
 *
 * Sends SCIF_GET_NODE_INFO to the mgmt node, passing the address of an
 * on-stack completion in payload[3], and blocks until the response
 * handler signals that completion.
 */
void scif_get_node_info(void)
{
	struct scifmsg msg;
	DECLARE_COMPLETION_ONSTACK(node_info);

	msg.uop = SCIF_GET_NODE_INFO;
	msg.src.node = scif_info.nodeid;
	msg.dst.node = SCIF_MGMT_NODE;
	/* The reply handler completes us through this pointer */
	msg.payload[3] = (u64)&node_info;

	/* If the send failed no reply will ever arrive; don't wait */
	if ((scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg)))
		return;

	/* Wait for a response with SCIF_GET_NODE_INFO */
	wait_for_completion(&node_info);
}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
new file mode 100644
index 000000000000..41e3bdb10061
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -0,0 +1,1312 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include "../bus/scif_bus.h"
19#include "scif_peer_bus.h"
20#include "scif_main.h"
21#include "scif_nodeqp.h"
22#include "scif_map.h"
23
24/*
25 ************************************************************************
26 * SCIF node Queue Pair (QP) setup flow:
27 *
28 * 1) SCIF driver gets probed with a scif_hw_dev via the scif_hw_bus
29 * 2) scif_setup_qp(..) allocates the local qp and calls
30 * scif_setup_qp_connect(..) which allocates and maps the local
31 * buffer for the inbound QP
32 * 3) The local node updates the device page with the DMA address of the QP
33 * 4) A delayed work is scheduled (qp_dwork) which periodically reads if
34 * the peer node has updated its QP DMA address
35 * 5) Once a valid non zero address is found in the QP DMA address field
36 * in the device page, the local node maps the remote node's QP,
37 * updates its outbound QP and sends a SCIF_INIT message to the peer
38 * 6) The SCIF_INIT message is received by the peer node QP interrupt bottom
39 * half handler by calling scif_init(..)
40 * 7) scif_init(..) registers a new SCIF peer node by calling
41 * scif_peer_register_device(..) which signifies the addition of a new
42 * SCIF node
43 * 8) On the mgmt node, P2P network setup/teardown is initiated if all the
44 * remote nodes are online via scif_p2p_setup(..)
45 * 9) For P2P setup, the host maps the remote nodes' aperture and memory
46 * bars and sends a SCIF_NODE_ADD message to both nodes
47 * 10) As part of scif_nodeadd, both nodes set up their local inbound
48 * QPs and send a SCIF_NODE_ADD_ACK to the mgmt node
49 * 11) As part of scif_node_add_ack(..) the mgmt node forwards the
50 * SCIF_NODE_ADD_ACK to the remote nodes
51 * 12) As part of scif_node_add_ack(..) the remote nodes update their
52 * outbound QPs, make sure they can access memory on the remote node
53 * and then add a new SCIF peer node by calling
54 * scif_peer_register_device(..) which signifies the addition of a new
55 * SCIF node.
56 * 13) The SCIF network is now established across all nodes.
57 *
58 ************************************************************************
59 * SCIF node QP teardown flow (initiated by non mgmt node):
60 *
61 * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
62 * 2) The device page QP DMA address field is updated with 0x0
63 * 3) A non mgmt node now cleans up all local data structures and sends a
64 * SCIF_EXIT message to the peer and waits for a SCIF_EXIT_ACK
65 * 4) As part of scif_exit(..) handling scif_disconnect_node(..) is called
66 * 5) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the
67 * peers and waits for a SCIF_NODE_REMOVE_ACK
68 * 6) As part of scif_node_remove(..) a remote node unregisters the peer
69 * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
70 * 7) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
71 * it sends itself a node remove message whose handling cleans up local
72 * data structures and unregisters the peer node from the SCIF network
73 * 8) The mgmt node sends a SCIF_EXIT_ACK
74 * 9) Upon receipt of the SCIF_EXIT_ACK the node initiating the teardown
75 * completes the SCIF remove routine
76 * 10) The SCIF network is now torn down for the node initiating the
77 * teardown sequence
78 *
79 ************************************************************************
80 * SCIF node QP teardown flow (initiated by mgmt node):
81 *
82 * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
83 * 2) The device page QP DMA address field is updated with 0x0
84 * 3) The mgmt node calls scif_disconnect_node(..)
85 * 4) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the peers
86 * and waits for a SCIF_NODE_REMOVE_ACK
87 * 5) As part of scif_node_remove(..) a remote node unregisters the peer
88 * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
89 * 6) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
90 * it unregisters the peer node from the SCIF network
91 * 7) The mgmt node sends a SCIF_EXIT message and waits for a SCIF_EXIT_ACK.
92 * 8) A non mgmt node upon receipt of a SCIF_EXIT message calls scif_stop(..)
93 * which would clean up local data structures for all SCIF nodes and
94 * then send a SCIF_EXIT_ACK back to the mgmt node
95 * 9) Upon receipt of the SCIF_EXIT_ACK the the mgmt node sends itself a node
96 * remove message whose handling cleans up local data structures and
97 * destroys any P2P mappings.
98 * 10) The SCIF hardware device for which a remove callback was received is now
99 * disconnected from the SCIF network.
100 */
/*
 * Initializes "local" data structures for the QP. Allocates the QP
 * ring buffer (rb) and initializes the "in bound" queue.
 *
 * @qp: queue pair being initialized
 * @qp_offset: returns the DMA address of the mapped struct scif_qp
 * @local_size: size in bytes of the inbound ring buffer
 * @scifdev: the remote SCIF device this QP communicates with
 *
 * Returns 0 on success or a negative errno. On failure all allocations
 * and mappings made here are rolled back through the unmap/kfree labels.
 */
int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
			  int local_size, struct scif_dev *scifdev)
{
	void *local_q = NULL;
	int err = 0;
	u32 tmp_rd = 0;

	spin_lock_init(&qp->send_lock);
	spin_lock_init(&qp->recv_lock);

	local_q = kzalloc(local_size, GFP_KERNEL);
	if (!local_q) {
		err = -ENOMEM;
		return err;
	}
	err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
	if (err)
		goto kfree;
	/*
	 * To setup the inbound_q, the buffer lives locally, the read pointer
	 * is remote and the write pointer is local.
	 */
	scif_rb_init(&qp->inbound_q,
		     &tmp_rd,
		     &qp->local_write,
		     local_q, get_count_order(local_size));
	/*
	 * The read pointer is NULL initially and it is unsafe to use the ring
	 * buffer til this changes!
	 */
	qp->inbound_q.read_ptr = NULL;
	err = scif_map_single(qp_offset, qp,
			      scifdev, sizeof(struct scif_qp));
	if (err)
		goto unmap;
	qp->local_qp = *qp_offset;
	return err;
unmap:
	scif_unmap_single(qp->local_buf, scifdev, local_size);
	qp->local_buf = 0;
kfree:
	kfree(local_q);
	return err;
}
149
/*
 * When the other side has already done it's allocation, this is called.
 *
 * Maps the peer's QP control structure and its inbound ring buffer (our
 * outbound queue), then allocates and maps our own inbound ring buffer
 * and publishes the mapped QP address in @qp_offset.
 *
 * @qp: queue pair being initialized
 * @qp_offset: returns the DMA address of the mapped struct scif_qp
 * @phys: DMA address of the peer's struct scif_qp
 * @local_size: size in bytes of our inbound ring buffer
 * @scifdev: the remote SCIF device
 *
 * Returns 0 on success or a negative errno; the goto ladder unwinds every
 * mapping/allocation in reverse order on failure.
 */
int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
			 dma_addr_t phys, int local_size,
			 struct scif_dev *scifdev)
{
	void *local_q;
	void *remote_q;
	struct scif_qp *remote_qp;
	int remote_size;
	int err = 0;

	spin_lock_init(&qp->send_lock);
	spin_lock_init(&qp->recv_lock);
	/* Start by figuring out where we need to point */
	remote_qp = scif_ioremap(phys, sizeof(struct scif_qp), scifdev);
	if (!remote_qp)
		return -EIO;
	qp->remote_qp = remote_qp;
	/* A bad magic means the peer's QP was not initialized properly */
	if (qp->remote_qp->magic != SCIFEP_MAGIC) {
		err = -EIO;
		goto iounmap;
	}
	qp->remote_buf = remote_qp->local_buf;
	remote_size = qp->remote_qp->inbound_q.size;
	remote_q = scif_ioremap(qp->remote_buf, remote_size, scifdev);
	if (!remote_q) {
		err = -EIO;
		goto iounmap;
	}
	qp->remote_qp->local_write = 0;
	/*
	 * To setup the outbound_q, the buffer lives in remote memory,
	 * the read pointer is local, the write pointer is remote
	 */
	scif_rb_init(&qp->outbound_q,
		     &qp->local_read,
		     &qp->remote_qp->local_write,
		     remote_q,
		     get_count_order(remote_size));
	local_q = kzalloc(local_size, GFP_KERNEL);
	if (!local_q) {
		err = -ENOMEM;
		goto iounmap_1;
	}
	err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
	if (err)
		goto kfree;
	qp->remote_qp->local_read = 0;
	/*
	 * To setup the inbound_q, the buffer lives locally, the read pointer
	 * is remote and the write pointer is local
	 */
	scif_rb_init(&qp->inbound_q,
		     &qp->remote_qp->local_read,
		     &qp->local_write,
		     local_q, get_count_order(local_size));
	err = scif_map_single(qp_offset, qp, scifdev,
			      sizeof(struct scif_qp));
	if (err)
		goto unmap;
	qp->local_qp = *qp_offset;
	return err;
unmap:
	scif_unmap_single(qp->local_buf, scifdev, local_size);
	qp->local_buf = 0;
kfree:
	kfree(local_q);
iounmap_1:
	scif_iounmap(remote_q, remote_size, scifdev);
	qp->outbound_q.rb_base = NULL;
iounmap:
	scif_iounmap(qp->remote_qp, sizeof(struct scif_qp), scifdev);
	qp->remote_qp = NULL;
	return err;
}
225
/*
 * scif_setup_qp_connect_response - finish QP setup once the peer responded
 * @scifdev: the remote SCIF device
 * @qp: our queue pair
 * @payload: DMA address of the peer's struct scif_qp
 *
 * Maps the peer's QP and ring buffer, initializes our outbound queue over
 * the peer's buffer and re-initializes the inbound queue now that the
 * peer's read pointer location is known.
 *
 * NOTE(review): when ioremapping the remote ring buffer fails we return
 * -EIO directly and the qp->remote_qp mapping made above is not released
 * -- confirm whether the caller unwinds it or an iounmap is missing here.
 */
int scif_setup_qp_connect_response(struct scif_dev *scifdev,
				   struct scif_qp *qp, u64 payload)
{
	int err = 0;
	void *r_buf;
	int remote_size;
	phys_addr_t tmp_phys;

	qp->remote_qp = scif_ioremap(payload, sizeof(struct scif_qp), scifdev);

	if (!qp->remote_qp) {
		err = -ENOMEM;
		goto error;
	}

	/* Bad magic means the peer never finished its own QP setup */
	if (qp->remote_qp->magic != SCIFEP_MAGIC) {
		dev_err(&scifdev->sdev->dev,
			"SCIFEP_MAGIC mismatch between self %d remote %d\n",
			scif_dev[scif_info.nodeid].node, scifdev->node);
		err = -ENODEV;
		goto error;
	}

	tmp_phys = qp->remote_qp->local_buf;
	remote_size = qp->remote_qp->inbound_q.size;
	r_buf = scif_ioremap(tmp_phys, remote_size, scifdev);

	if (!r_buf)
		return -EIO;

	qp->local_read = 0;
	scif_rb_init(&qp->outbound_q,
		     &qp->local_read,
		     &qp->remote_qp->local_write,
		     r_buf,
		     get_count_order(remote_size));
	/*
	 * resetup the inbound_q now that we know where the
	 * inbound_read really is.
	 */
	scif_rb_init(&qp->inbound_q,
		     &qp->remote_qp->local_read,
		     &qp->local_write,
		     qp->inbound_q.rb_base,
		     get_count_order(qp->inbound_q.size));
error:
	return err;
}
274
275static __always_inline void
276scif_send_msg_intr(struct scif_dev *scifdev)
277{
278 struct scif_hw_dev *sdev = scifdev->sdev;
279
280 if (scifdev_is_p2p(scifdev))
281 sdev->hw_ops->send_p2p_intr(sdev, scifdev->rdb, &scifdev->mmio);
282 else
283 sdev->hw_ops->send_intr(sdev, scifdev->rdb);
284}
285
/*
 * scif_qp_response - complete QP setup and announce it to the peer
 * @phys: DMA address of the peer's struct scif_qp
 * @scifdev: the remote SCIF device
 *
 * Finishes the outbound queue setup via scif_setup_qp_connect_response()
 * and, on success, sends SCIF_INIT to the peer.
 *
 * NOTE(review): msg.src is left unset here; the SCIF_INIT handler in this
 * file ignores its message argument -- confirm no other consumer reads it.
 */
int scif_qp_response(phys_addr_t phys, struct scif_dev *scifdev)
{
	int err = 0;
	struct scifmsg msg;

	err = scif_setup_qp_connect_response(scifdev, scifdev->qpairs, phys);
	if (!err) {
		/*
		 * Now that everything is setup and mapped, we're ready
		 * to tell the peer about our queue's location
		 */
		msg.uop = SCIF_INIT;
		msg.dst.node = scifdev->node;
		err = scif_nodeqp_send(scifdev, &msg);
	}
	return err;
}
303
/*
 * scif_send_exit - run the SCIF_EXIT handshake with @scifdev
 * @scifdev: the remote SCIF device being shut down
 *
 * Marks the exit as in progress, sends SCIF_EXIT and waits (bounded by
 * SCIF_NODE_ALIVE_TIMEOUT) for the peer's SCIF_EXIT_ACK to flip the state
 * to OP_COMPLETED. The state is always restored to OP_IDLE on return,
 * whether or not the ack arrived.
 */
void scif_send_exit(struct scif_dev *scifdev)
{
	struct scifmsg msg;
	int ret;

	scifdev->exit = OP_IN_PROGRESS;
	msg.uop = SCIF_EXIT;
	msg.src.node = scif_info.nodeid;
	msg.dst.node = scifdev->node;
	ret = scif_nodeqp_send(scifdev, &msg);
	if (ret)
		goto done;
	/* Wait for a SCIF_EXIT_ACK message */
	wait_event_timeout(scif_info.exitwq, scifdev->exit == OP_COMPLETED,
			   SCIF_NODE_ALIVE_TIMEOUT);
done:
	scifdev->exit = OP_IDLE;
}
322
323int scif_setup_qp(struct scif_dev *scifdev)
324{
325 int err = 0;
326 int local_size;
327 struct scif_qp *qp;
328
329 local_size = SCIF_NODE_QP_SIZE;
330
331 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
332 if (!qp) {
333 err = -ENOMEM;
334 return err;
335 }
336 qp->magic = SCIFEP_MAGIC;
337 scifdev->qpairs = qp;
338 err = scif_setup_qp_connect(qp, &scifdev->qp_dma_addr,
339 local_size, scifdev);
340 if (err)
341 goto free_qp;
342 /*
343 * We're as setup as we can be. The inbound_q is setup, w/o a usable
344 * outbound q. When we get a message, the read_ptr will be updated,
345 * and we will pull the message.
346 */
347 return err;
348free_qp:
349 kfree(scifdev->qpairs);
350 scifdev->qpairs = NULL;
351 return err;
352}
353
/* Free a scatterlist allocated by scif_p2p_setsg(); NULL is a no-op. */
static void scif_p2p_freesg(struct scatterlist *sg)
{
	kfree(sg);
}
358
/*
 * scif_p2p_setsg - build a scatterlist covering a mapped BAR region
 * @va: starting virtual address of the region
 * @page_size: size of each scatterlist chunk in bytes
 * @page_cnt: number of chunks
 *
 * Returns a kcalloc'ed scatterlist of @page_cnt entries, or NULL on
 * allocation failure or if any address does not resolve to a page.
 *
 * NOTE(review): relies on vmalloc_to_page() resolving the __iomem BAR
 * mapping -- confirm the underlying ioremap always lands in vmalloc space
 * on the supported platforms.
 */
static struct scatterlist *
scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	sg = kcalloc(page_cnt, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;
	sg_init_table(sg, page_cnt);
	for (i = 0; i < page_cnt; i++) {
		page = vmalloc_to_page((void __force *)va);
		if (!page)
			goto p2p_sg_err;
		sg_set_page(&sg[i], page, page_size, 0);
		va += page_size;
	}
	return sg;
p2p_sg_err:
	kfree(sg);
	return NULL;
}
382
383/* Init p2p mappings required to access peerdev from scifdev */
384static struct scif_p2p_info *
385scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
386{
387 struct scif_p2p_info *p2p;
388 int num_mmio_pages, num_aper_pages, sg_page_shift, err, num_aper_chunks;
389 struct scif_hw_dev *psdev = peerdev->sdev;
390 struct scif_hw_dev *sdev = scifdev->sdev;
391
392 num_mmio_pages = psdev->mmio->len >> PAGE_SHIFT;
393 num_aper_pages = psdev->aper->len >> PAGE_SHIFT;
394
395 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
396 if (!p2p)
397 return NULL;
398 p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va,
399 PAGE_SIZE, num_mmio_pages);
400 if (!p2p->ppi_sg[SCIF_PPI_MMIO])
401 goto free_p2p;
402 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
403 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
404 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
405 p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va,
406 1 << sg_page_shift,
407 num_aper_chunks);
408 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
409 err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
410 num_mmio_pages, PCI_DMA_BIDIRECTIONAL);
411 if (err != num_mmio_pages)
412 goto scif_p2p_free;
413 err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
414 num_aper_chunks, PCI_DMA_BIDIRECTIONAL);
415 if (err != num_aper_chunks)
416 goto dma_unmap;
417 p2p->ppi_da[SCIF_PPI_MMIO] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_MMIO]);
418 p2p->ppi_da[SCIF_PPI_APER] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_APER]);
419 p2p->ppi_len[SCIF_PPI_MMIO] = num_mmio_pages;
420 p2p->ppi_len[SCIF_PPI_APER] = num_aper_pages;
421 p2p->ppi_peer_id = peerdev->node;
422 return p2p;
423dma_unmap:
424 dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
425 p2p->sg_nentries[SCIF_PPI_MMIO], DMA_BIDIRECTIONAL);
426scif_p2p_free:
427 scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
428 scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
429free_p2p:
430 kfree(p2p);
431 return NULL;
432}
433
/**
 * scif_node_connect: Respond to SCIF_NODE_CONNECT interrupt message
 * @dst: Destination node
 *
 * Connect the src and dst node by setting up the p2p connection
 * between them. Management node here acts like a proxy.
 *
 * NOTE(review): if scif_init_p2p_info() fails for p2p_ji, the already
 * allocated p2p_ij is neither freed nor list-linked -- confirm whether a
 * teardown path reclaims it or this is a leak.
 */
static void scif_node_connect(struct scif_dev *scifdev, int dst)
{
	struct scif_dev *dev_j = scifdev;
	struct scif_dev *dev_i = NULL;
	struct scif_p2p_info *p2p_ij = NULL;	/* bus addr for j from i */
	struct scif_p2p_info *p2p_ji = NULL;	/* bus addr for i from j */
	struct scif_p2p_info *p2p;
	struct list_head *pos, *tmp;
	struct scifmsg msg;
	int err;
	u64 tmppayload;

	/* Only valid peer node ids may be connected */
	if (dst < 1 || dst > scif_info.maxid)
		return;

	dev_i = &scif_dev[dst];

	if (!_scifdev_alive(dev_i))
		return;
	/*
	 * If the p2p connection is already setup or in the process of setting
	 * up then just ignore this request. The requested node will get
	 * informed by SCIF_NODE_ADD_ACK or SCIF_NODE_ADD_NACK
	 */
	if (!list_empty(&dev_i->p2p)) {
		list_for_each_safe(pos, tmp, &dev_i->p2p) {
			p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
			if (p2p->ppi_peer_id == dev_j->node)
				return;
		}
	}
	p2p_ij = scif_init_p2p_info(dev_i, dev_j);
	if (!p2p_ij)
		return;
	p2p_ji = scif_init_p2p_info(dev_j, dev_i);
	if (!p2p_ji)
		return;
	list_add_tail(&p2p_ij->ppi_list, &dev_i->p2p);
	list_add_tail(&p2p_ji->ppi_list, &dev_j->p2p);

	/*
	 * Send a SCIF_NODE_ADD to dev_i, pass it its bus address
	 * as seen from dev_j
	 */
	msg.uop = SCIF_NODE_ADD;
	msg.src.node = dev_j->node;
	msg.dst.node = dev_i->node;

	msg.payload[0] = p2p_ji->ppi_da[SCIF_PPI_APER];
	msg.payload[1] = p2p_ij->ppi_da[SCIF_PPI_MMIO];
	msg.payload[2] = p2p_ij->ppi_da[SCIF_PPI_APER];
	msg.payload[3] = p2p_ij->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;

	err = scif_nodeqp_send(dev_i,  &msg);
	if (err) {
		dev_err(&scifdev->sdev->dev,
			"%s %d error %d\n", __func__, __LINE__, err);
		return;
	}

	/* Same as above but to dev_j */
	msg.uop = SCIF_NODE_ADD;
	msg.src.node = dev_i->node;
	msg.dst.node = dev_j->node;

	/* Swap the aperture addresses so each side sees the other's */
	tmppayload = msg.payload[0];
	msg.payload[0] = msg.payload[2];
	msg.payload[2] = tmppayload;
	msg.payload[1] = p2p_ji->ppi_da[SCIF_PPI_MMIO];
	msg.payload[3] = p2p_ji->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;

	scif_nodeqp_send(dev_j, &msg);
}
514
515static void scif_p2p_setup(void)
516{
517 int i, j;
518
519 if (!scif_info.p2p_enable)
520 return;
521
522 for (i = 1; i <= scif_info.maxid; i++)
523 if (!_scifdev_alive(&scif_dev[i]))
524 return;
525
526 for (i = 1; i <= scif_info.maxid; i++) {
527 for (j = 1; j <= scif_info.maxid; j++) {
528 struct scif_dev *scifdev = &scif_dev[i];
529
530 if (i == j)
531 continue;
532 scif_node_connect(scifdev, j);
533 }
534 }
535}
536
/*
 * scif_qp_response_ack - deferred handling of a SCIF_INIT message
 * @work: the init_msg_work embedded in a struct scif_dev
 *
 * Registers the sending node as a peer device (unless it is already
 * alive, i.e. a duplicate INIT) and, on the mgmt node, kicks off P2P
 * network setup under conflock.
 */
void scif_qp_response_ack(struct work_struct *work)
{
	struct scif_dev *scifdev = container_of(work, struct scif_dev,
						init_msg_work);
	struct scif_peer_dev *spdev;

	/* Drop the INIT message if it has already been received */
	if (_scifdev_alive(scifdev))
		return;

	spdev = scif_peer_register_device(scifdev);
	if (IS_ERR(spdev))
		return;

	if (scif_is_mgmt_node()) {
		mutex_lock(&scif_info.conflock);
		scif_p2p_setup();
		mutex_unlock(&scif_info.conflock);
	}
}
557
/*
 * Human-readable names for SCIF message uops, indexed by uop value; used
 * only by scif_display_message() for debug logging.
 * NOTE(review): must stay in sync with the uop enumeration -- confirm
 * ordering whenever a new message type is added.
 */
static char *message_types[] = {"BAD",
				"INIT",
				"EXIT",
				"SCIF_EXIT_ACK",
				"SCIF_NODE_ADD",
				"SCIF_NODE_ADD_ACK",
				"SCIF_NODE_ADD_NACK",
				"REMOVE_NODE",
				"REMOVE_NODE_ACK",
				"CNCT_REQ",
				"CNCT_GNT",
				"CNCT_GNTACK",
				"CNCT_GNTNACK",
				"CNCT_REJ",
				"DISCNCT",
				"DISCNT_ACK",
				"CLIENT_SENT",
				"CLIENT_RCVD",
				"SCIF_GET_NODE_INFO"};
577
/*
 * scif_display_message - log a node QP message when message logging is on
 * @scifdev: device the message was sent to / received from
 * @msg: the message to display
 * @label: direction tag, e.g. "Sent" or "Rcvd"
 *
 * NOTE(review): a uop equal to SCIF_MAX_MSG passes this check and indexes
 * message_types -- confirm SCIF_MAX_MSG is the last valid index of that
 * array, otherwise this should be >=.
 */
static void
scif_display_message(struct scif_dev *scifdev, struct scifmsg *msg,
		     const char *label)
{
	if (!scif_info.en_msg_log)
		return;
	if (msg->uop > SCIF_MAX_MSG) {
		dev_err(&scifdev->sdev->dev,
			"%s: unknown msg type %d\n", label, msg->uop);
		return;
	}
	dev_info(&scifdev->sdev->dev,
		 "%s: msg type %s, src %d:%d, dest %d:%d payload 0x%llx:0x%llx:0x%llx:0x%llx\n",
		 label, message_types[msg->uop], msg->src.node, msg->src.port,
		 msg->dst.node, msg->dst.port, msg->payload[0], msg->payload[1],
		 msg->payload[2], msg->payload[3]);
}
595
/*
 * _scif_nodeqp_send - enqueue a message on the node QP and ring the peer
 * @scifdev: destination SCIF device
 * @msg: message to send
 *
 * Retries the ring-buffer write for up to ~3 seconds if the outbound
 * queue is full, then interrupts the peer (or, for loopback, queues the
 * interrupt bottom half directly). Returns 0 on success, -EINVAL if no
 * QP exists, or -ENODEV on timeout.
 *
 * NOTE(review): the retry loop mdelay()s while holding qp->send_lock, so
 * a full queue busy-spins other senders for up to the whole timeout.
 */
int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_qp *qp = scifdev->qpairs;
	int err = -ENOMEM, loop_cnt = 0;

	scif_display_message(scifdev, msg, "Sent");
	if (!qp) {
		err = -EINVAL;
		goto error;
	}
	spin_lock(&qp->send_lock);

	while ((err = scif_rb_write(&qp->outbound_q,
				    msg, sizeof(struct scifmsg)))) {
		mdelay(1);
#define SCIF_NODEQP_SEND_TO_MSEC (3 * 1000)
		if (loop_cnt++ > (SCIF_NODEQP_SEND_TO_MSEC)) {
			err = -ENODEV;
			break;
		}
	}
	if (!err)
		scif_rb_commit(&qp->outbound_q);
	spin_unlock(&qp->send_lock);
	if (!err) {
		if (scifdev_self(scifdev))
			/*
			 * For loopback we need to emulate an interrupt by
			 * queuing work for the queue handling real node
			 * Qp interrupts.
			 */
			queue_work(scifdev->intr_wq, &scifdev->intr_bh);
		else
			scif_send_msg_intr(scifdev);
	}
error:
	if (err)
		dev_dbg(&scifdev->sdev->dev,
			"%s %d error %d uop %d\n",
			__func__, __LINE__, err, msg->uop);
	return err;
}
638
/**
 * scif_nodeqp_send - Send a message on the node queue pair
 * @scifdev: Scif Device.
 * @msg: The message to be sent.
 *
 * For regular messages (anything past the exit handshake uops) this takes
 * a reference on the peer device around the send and refuses to send once
 * the exit flow has begun. Returns 0 or a negative errno.
 */
int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
{
	int err;
	struct device *spdev = NULL;

	if (msg->uop > SCIF_EXIT_ACK) {
		/* Dont send messages once the exit flow has begun */
		if (OP_IDLE != scifdev->exit)
			return -ENODEV;
		spdev = scif_get_peer_dev(scifdev);
		if (IS_ERR(spdev)) {
			err = PTR_ERR(spdev);
			return err;
		}
	}
	err = _scif_nodeqp_send(scifdev, msg);
	if (msg->uop > SCIF_EXIT_ACK)
		scif_put_peer_dev(spdev);
	return err;
}
664
/*
 * scif_misc_handler:
 *
 * Work queue handler for servicing miscellaneous SCIF tasks.
 * Examples include:
 * 1) Cleanup of zombie endpoints.
 */
void scif_misc_handler(struct work_struct *work)
{
	/* Currently the only deferred task: reap zombie endpoints */
	scif_cleanup_zombie_epd();
}
676
/**
 * scif_init() - Respond to SCIF_INIT interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message (unused; carries no payload consumed here)
 */
static __always_inline void
scif_init(struct scif_dev *scifdev, struct scifmsg *msg)
{
	/*
	 * Allow the thread waiting for device page updates for the peer QP DMA
	 * address to complete initializing the inbound_q.
	 */
	flush_delayed_work(&scifdev->qp_dwork);
	/*
	 * Delegate the peer device registration to a workqueue, otherwise if
	 * SCIF client probe (called during peer device registration) calls
	 * scif_connect(..), it will block the message processing thread causing
	 * a deadlock.
	 */
	schedule_work(&scifdev->init_msg_work);
}
698
/**
 * scif_exit() - Respond to SCIF_EXIT interrupt message
 * @scifdev: Remote SCIF device node
 * @unused: Interrupt message (ignored)
 *
 * This function stops the SCIF interface for the node which sent
 * the SCIF_EXIT message and starts waiting for that node to
 * resetup the queue pair again.
 */
static __always_inline void
scif_exit(struct scif_dev *scifdev, struct scifmsg *unused)
{
	/* The ack is sent later by scif_send_acks() during cleanup */
	scifdev->exit_ack_pending = true;
	if (scif_is_mgmt_node())
		scif_disconnect_node(scifdev->node, false);
	else
		scif_stop(scifdev);
	/* Poll the device page for the peer to come back up */
	schedule_delayed_work(&scifdev->qp_dwork,
			      msecs_to_jiffies(1000));
}
719
/**
 * scif_exit_ack() - Respond to SCIF_EXIT_ACK interrupt message
 * @scifdev: Remote SCIF device node
 * @unused: Interrupt message (not used)
 *
 * Marks the exit handshake complete and wakes any thread sleeping in
 * the exit wait queue.
 */
static __always_inline void
scif_exit_ack(struct scif_dev *scifdev, struct scifmsg *unused)
{
	scifdev->exit = OP_COMPLETED;
	wake_up(&scif_info.exitwq);
}
732
/**
 * scif_node_add() - Respond to SCIF_NODE_ADD interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * When the mgmt node driver has finished initializing a MIC node queue pair it
 * marks the node as online. It then looks for all currently online MIC cards
 * and send a SCIF_NODE_ADD message to identify the ID of the new card for
 * peer to peer initialization
 *
 * The local node allocates its incoming queue and sends its address in the
 * SCIF_NODE_ADD_ACK message back to the mgmt node, the mgmt node "reflects"
 * this message to the new node
 *
 * On any setup failure a SCIF_NODE_ADD_NACK is sent back instead and all
 * partially-acquired resources are released in reverse order.
 */
static __always_inline void
scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_dev *newdev;
	dma_addr_t qp_offset;
	int qp_connect;
	struct scif_hw_dev *sdev;

	dev_dbg(&scifdev->sdev->dev,
		"Scifdev %d:%d received NODE_ADD msg for node %d\n",
		scifdev->node, msg->dst.node, msg->src.node);
	dev_dbg(&scifdev->sdev->dev,
		"Remote address for this node's aperture %llx\n",
		msg->payload[0]);
	/* The new peer is reached through the mgmt node's hardware device */
	newdev = &scif_dev[msg->src.node];
	newdev->node = msg->src.node;
	newdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
	sdev = newdev->sdev;

	if (scif_setup_intr_wq(newdev)) {
		dev_err(&scifdev->sdev->dev,
			"failed to setup interrupts for %d\n", msg->src.node);
		goto interrupt_setup_error;
	}
	/* payload[1] carries the peer's MMIO base address */
	newdev->mmio.va = ioremap_nocache(msg->payload[1], sdev->mmio->len);
	if (!newdev->mmio.va) {
		dev_err(&scifdev->sdev->dev,
			"failed to map mmio for %d\n", msg->src.node);
		/*
		 * NOTE(review): the interrupt workqueue set up above is not
		 * destroyed on this or later error paths here — presumably
		 * cleaned up later via scif_cleanup_scifdev(); confirm.
		 */
		goto mmio_map_error;
	}
	newdev->qpairs = kzalloc(sizeof(*newdev->qpairs), GFP_KERNEL);
	if (!newdev->qpairs)
		goto qp_alloc_error;
	/*
	 * Set the base address of the remote node's memory since it gets
	 * added to qp_offset
	 */
	newdev->base_addr = msg->payload[0];

	qp_connect = scif_setup_qp_connect(newdev->qpairs, &qp_offset,
					   SCIF_NODE_QP_SIZE, newdev);
	if (qp_connect) {
		dev_err(&scifdev->sdev->dev,
			"failed to setup qp_connect %d\n", qp_connect);
		goto qp_connect_error;
	}

	/* Reserve a doorbell and hook the P2P interrupt handler to it */
	newdev->db = sdev->hw_ops->next_db(sdev);
	newdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
						   "SCIF_INTR", newdev,
						   newdev->db);
	if (IS_ERR(newdev->cookie))
		goto qp_connect_error;
	newdev->qpairs->magic = SCIFEP_MAGIC;
	newdev->qpairs->qp_state = SCIF_QP_OFFLINE;

	/* Reuse the incoming message as the ACK, swapping src/dst */
	msg->uop = SCIF_NODE_ADD_ACK;
	msg->dst.node = msg->src.node;
	msg->src.node = scif_info.nodeid;
	msg->payload[0] = qp_offset;
	msg->payload[2] = newdev->db;
	scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
	return;
qp_connect_error:
	kfree(newdev->qpairs);
	newdev->qpairs = NULL;
qp_alloc_error:
	iounmap(newdev->mmio.va);
	newdev->mmio.va = NULL;
mmio_map_error:
interrupt_setup_error:
	dev_err(&scifdev->sdev->dev,
		"node add failed for node %d\n", msg->src.node);
	msg->uop = SCIF_NODE_ADD_NACK;
	msg->dst.node = msg->src.node;
	msg->src.node = scif_info.nodeid;
	scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
}
825
/*
 * scif_poll_qp_state - Delayed-work poller for P2P queue pair bring-up.
 * @work: embedded p2p_dwork of the peer's struct scif_dev
 *
 * Re-schedules itself every SCIF_NODE_QP_TIMEOUT ms until both the local
 * and remote QP report SCIF_QP_ONLINE, then registers the peer device.
 * Gives up after SCIF_NODE_QP_RETRY attempts and tears the peer down.
 */
void scif_poll_qp_state(struct work_struct *work)
{
#define SCIF_NODE_QP_RETRY 100
#define SCIF_NODE_QP_TIMEOUT 100
	struct scif_dev *peerdev = container_of(work, struct scif_dev,
						p2p_dwork.work);
	struct scif_qp *qp = &peerdev->qpairs[0];

	if (qp->qp_state != SCIF_QP_ONLINE ||
	    qp->remote_qp->qp_state != SCIF_QP_ONLINE) {
		if (peerdev->p2p_retry++ == SCIF_NODE_QP_RETRY) {
			dev_err(&peerdev->sdev->dev,
				"Warning: QP check timeout with state %d\n",
				qp->qp_state);
			goto timeout;
		}
		schedule_delayed_work(&peerdev->p2p_dwork,
				      msecs_to_jiffies(SCIF_NODE_QP_TIMEOUT));
		return;
	}
	scif_peer_register_device(peerdev);
	return;
timeout:
	dev_err(&peerdev->sdev->dev,
		"%s %d remote node %d offline, state = 0x%x\n",
		__func__, __LINE__, peerdev->node, qp->qp_state);
	qp->remote_qp->qp_state = SCIF_QP_OFFLINE;
	scif_cleanup_scifdev(peerdev);
}
855
/**
 * scif_node_add_ack() - Respond to SCIF_NODE_ADD_ACK interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * On the mgmt node this simply forwards the ACK from src to dst (with the
 * current maxid in payload[1]). On a card it completes the local end of
 * the P2P queue pair and kicks off QP state polling.
 */
static __always_inline void
scif_node_add_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_dev *peerdev;
	struct scif_qp *qp;
	struct scif_dev *dst_dev = &scif_dev[msg->dst.node];

	dev_dbg(&scifdev->sdev->dev,
		"Scifdev %d received SCIF_NODE_ADD_ACK msg src %d dst %d\n",
		scifdev->node, msg->src.node, msg->dst.node);
	dev_dbg(&scifdev->sdev->dev,
		"payload %llx %llx %llx %llx\n", msg->payload[0],
		msg->payload[1], msg->payload[2], msg->payload[3]);
	if (scif_is_mgmt_node()) {
		/*
		 * the lock serializes with scif_qp_response_ack. The mgmt node
		 * is forwarding the NODE_ADD_ACK message from src to dst we
		 * need to make sure that the dst has already received a
		 * NODE_ADD for src and setup its end of the qp to dst
		 */
		mutex_lock(&scif_info.conflock);
		msg->payload[1] = scif_info.maxid;
		scif_nodeqp_send(dst_dev, msg);
		mutex_unlock(&scif_info.conflock);
		return;
	}
	/* Card side: finish connecting our previously-allocated QP */
	peerdev = &scif_dev[msg->src.node];
	peerdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
	peerdev->node = msg->src.node;

	/* assumes peerdev->qpairs was set up by scif_node_add — confirm */
	qp = &peerdev->qpairs[0];

	/* payload[0] is the remote QP offset sent back in the ACK */
	if ((scif_setup_qp_connect_response(peerdev, &peerdev->qpairs[0],
					    msg->payload[0])))
		goto local_error;
	peerdev->rdb = msg->payload[2];
	qp->remote_qp->qp_state = SCIF_QP_ONLINE;
	/* Poll until both QP ends are online, then register the peer */
	schedule_delayed_work(&peerdev->p2p_dwork, 0);
	return;
local_error:
	scif_cleanup_scifdev(peerdev);
}
907
/**
 * scif_node_add_nack() - Respond to SCIF_NODE_ADD_NACK interrupt message
 * @scifdev: Remote SCIF device node which sent the NACK
 * @msg: Interrupt message
 *
 * SCIF_NODE_ADD failed, so inform the waiting wq. The mgmt node forwards
 * the NACK on to the destination node; cards ignore it.
 */
static __always_inline void
scif_node_add_nack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	if (scif_is_mgmt_node()) {
		struct scif_dev *dst_dev = &scif_dev[msg->dst.node];

		dev_dbg(&scifdev->sdev->dev,
			"SCIF_NODE_ADD_NACK received from %d\n", scifdev->node);
		scif_nodeqp_send(dst_dev, msg);
	}
}
925
/*
 * scif_node_remove: Handle SCIF_NODE_REMOVE message
 * @scifdev: Remote SCIF device node (unused here)
 * @msg: Interrupt message; payload[0] holds the node id to remove
 *
 * Handle node removal: mark the node as owing a REMOVE_ACK and start
 * the removal sequence.
 */
static __always_inline void
scif_node_remove(struct scif_dev *scifdev, struct scifmsg *msg)
{
	/* payload[0] is u64; assumes node ids fit in an int — confirm */
	int node = msg->payload[0];
	struct scif_dev *scdev = &scif_dev[node];

	scdev->node_remove_ack_pending = true;
	scif_handle_remove_node(node);
}
941
/*
 * scif_node_remove_ack: Handle SCIF_NODE_REMOVE_ACK message
 * @scifdev: Remote SCIF device node (unused here)
 * @msg: Interrupt message; payload[0] holds the acking node id
 *
 * The peer has acked a SCIF_NODE_REMOVE message: bump the disconnect
 * response count and wake whoever is waiting on the disconnect wq.
 */
static __always_inline void
scif_node_remove_ack(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_dev *sdev = &scif_dev[msg->payload[0]];

	atomic_inc(&sdev->disconn_rescnt);
	wake_up(&sdev->disconn_wq);
}
956
/**
 * scif_get_node_info_resp() - Respond to SCIF_GET_NODE_INFO interrupt message
 * @scifdev: Remote SCIF device node
 * @msg: Interrupt message
 *
 * Retrieve node info i.e maxid and total from the mgmt node.
 * On the mgmt node the request is answered in place (src/dst swapped,
 * payload[1]/[2] filled in). On a card the reply updates the cached
 * values and completes the requester's completion, whose pointer the
 * requester stashed in payload[3].
 */
static __always_inline void
scif_get_node_info_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	if (scif_is_mgmt_node()) {
		swap(msg->dst.node, msg->src.node);
		mutex_lock(&scif_info.conflock);
		msg->payload[1] = scif_info.maxid;
		msg->payload[2] = scif_info.total;
		mutex_unlock(&scif_info.conflock);
		scif_nodeqp_send(scifdev, msg);
	} else {
		/* payload[3] round-trips the requester's completion pointer */
		struct completion *node_info =
			(struct completion *)msg->payload[3];

		mutex_lock(&scif_info.conflock);
		scif_info.maxid = msg->payload[1];
		scif_info.total = msg->payload[2];
		complete_all(node_info);
		mutex_unlock(&scif_info.conflock);
	}
}
984
985static void
986scif_msg_unknown(struct scif_dev *scifdev, struct scifmsg *msg)
987{
988 /* Bogus Node Qp Message? */
989 dev_err(&scifdev->sdev->dev,
990 "Unknown message 0x%xn scifdev->node 0x%x\n",
991 msg->uop, scifdev->node);
992}
993
/*
 * Dispatch table for node QP messages: index MUST equal the message
 * opcode (SCIF_INIT == 1, ... SCIF_GET_NODE_INFO == SCIF_MAX_MSG), with
 * slot 0 reserved for the unknown-message handler.
 */
static void (*scif_intr_func[SCIF_MAX_MSG + 1])
	    (struct scif_dev *, struct scifmsg *msg) = {
	scif_msg_unknown,	/* Error */
	scif_init,		/* SCIF_INIT */
	scif_exit,		/* SCIF_EXIT */
	scif_exit_ack,		/* SCIF_EXIT_ACK */
	scif_node_add,		/* SCIF_NODE_ADD */
	scif_node_add_ack,	/* SCIF_NODE_ADD_ACK */
	scif_node_add_nack,	/* SCIF_NODE_ADD_NACK */
	scif_node_remove,	/* SCIF_NODE_REMOVE */
	scif_node_remove_ack,	/* SCIF_NODE_REMOVE_ACK */
	scif_cnctreq,		/* SCIF_CNCT_REQ */
	scif_cnctgnt,		/* SCIF_CNCT_GNT */
	scif_cnctgnt_ack,	/* SCIF_CNCT_GNTACK */
	scif_cnctgnt_nack,	/* SCIF_CNCT_GNTNACK */
	scif_cnctrej,		/* SCIF_CNCT_REJ */
	scif_discnct,		/* SCIF_DISCNCT */
	scif_discnt_ack,	/* SCIF_DISCNT_ACK */
	scif_clientsend,	/* SCIF_CLIENT_SENT */
	scif_clientrcvd,	/* SCIF_CLIENT_RCVD */
	scif_get_node_info_resp,/* SCIF_GET_NODE_INFO */
};
1016
1017/**
1018 * scif_nodeqp_msg_handler() - Common handler for node messages
1019 * @scifdev: Remote device to respond to
1020 * @qp: Remote memory pointer
1021 * @msg: The message to be handled.
1022 *
1023 * This routine calls the appropriate routine to handle a Node Qp
1024 * message receipt
1025 */
1026static int scif_max_msg_id = SCIF_MAX_MSG;
1027
1028static void
1029scif_nodeqp_msg_handler(struct scif_dev *scifdev,
1030 struct scif_qp *qp, struct scifmsg *msg)
1031{
1032 scif_display_message(scifdev, msg, "Rcvd");
1033
1034 if (msg->uop > (u32)scif_max_msg_id) {
1035 /* Bogus Node Qp Message? */
1036 dev_err(&scifdev->sdev->dev,
1037 "Unknown message 0x%xn scifdev->node 0x%x\n",
1038 msg->uop, scifdev->node);
1039 return;
1040 }
1041
1042 scif_intr_func[msg->uop](scifdev, msg);
1043}
1044
/**
 * scif_nodeqp_intrhandler() - Interrupt handler for node messages
 * @scifdev: Remote device to respond to
 * @qp: Remote memory pointer
 *
 * This routine is triggered by the interrupt mechanism. It reads
 * messages from the node queue RB and calls the Node QP Message handling
 * routine. Note the read pointer is only advanced AFTER the message is
 * handled, and never after a SCIF_EXIT_ACK since the QP may already be
 * unmapped at that point.
 */
void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp)
{
	struct scifmsg msg;
	int read_size;

	do {
		read_size = scif_rb_get_next(&qp->inbound_q, &msg, sizeof(msg));
		if (!read_size)
			break;
		scif_nodeqp_msg_handler(scifdev, qp, &msg);
		/*
		 * The node queue pair is unmapped so skip the read pointer
		 * update after receipt of a SCIF_EXIT_ACK
		 */
		if (SCIF_EXIT_ACK == msg.uop)
			break;
		scif_rb_update_read_ptr(&qp->inbound_q);
	} while (1);
}
1073
1074/**
1075 * scif_loopb_wq_handler - Loopback Workqueue Handler.
1076 * @work: loop back work
1077 *
1078 * This work queue routine is invoked by the loopback work queue handler.
1079 * It grabs the recv lock, dequeues any available messages from the head
1080 * of the loopback message list, calls the node QP message handler,
1081 * waits for it to return, then frees up this message and dequeues more
1082 * elements of the list if available.
1083 */
1084static void scif_loopb_wq_handler(struct work_struct *unused)
1085{
1086 struct scif_dev *scifdev = scif_info.loopb_dev;
1087 struct scif_qp *qp = scifdev->qpairs;
1088 struct scif_loopb_msg *msg;
1089
1090 do {
1091 msg = NULL;
1092 spin_lock(&qp->recv_lock);
1093 if (!list_empty(&scif_info.loopb_recv_q)) {
1094 msg = list_first_entry(&scif_info.loopb_recv_q,
1095 struct scif_loopb_msg,
1096 list);
1097 list_del(&msg->list);
1098 }
1099 spin_unlock(&qp->recv_lock);
1100
1101 if (msg) {
1102 scif_nodeqp_msg_handler(scifdev, qp, &msg->msg);
1103 kfree(msg);
1104 }
1105 } while (msg);
1106}
1107
/**
 * scif_loopb_msg_handler() - Workqueue handler for loopback messages.
 * @scifdev: SCIF device
 * @qp: Queue pair.
 *
 * This work queue routine is triggered when a loopback message is received.
 *
 * We need special handling for receiving Node Qp messages on a loopback SCIF
 * device via two workqueues for receiving messages.
 *
 * The reason we need the extra workqueue which is not required with *normal*
 * non-loopback SCIF devices is the potential classic deadlock described below:
 *
 * Thread A tries to send a message on a loopback SCIF device and blocks since
 * there is no space in the RB while it has the send_lock held or another
 * lock called lock X for example.
 *
 * Thread B: The Loopback Node QP message receive workqueue receives the message
 * and tries to send a message (eg an ACK) to the loopback SCIF device. It tries
 * to grab the send lock again or lock X and deadlocks with Thread A. The RB
 * cannot be drained any further due to this classic deadlock.
 *
 * In order to avoid deadlocks as mentioned above we have an extra level of
 * indirection achieved by having two workqueues.
 * 1) The first workqueue whose handler is scif_loopb_msg_handler reads
 * messages from the Node QP RB, adds them to a list and queues work for the
 * second workqueue.
 *
 * 2) The second workqueue whose handler is scif_loopb_wq_handler dequeues
 * messages from the list, handles them, frees up the memory and dequeues
 * more elements of the list if possible.
 *
 * Returns -ENOMEM on allocation failure, otherwise the size of the last
 * (short or empty) ring buffer read.
 */
int
scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp)
{
	int read_size;
	struct scif_loopb_msg *msg;

	do {
		msg = kmalloc(sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;
		read_size = scif_rb_get_next(&qp->inbound_q, &msg->msg,
					     sizeof(struct scifmsg));
		/* A short read means the RB is drained; stop */
		if (read_size != sizeof(struct scifmsg)) {
			kfree(msg);
			scif_rb_update_read_ptr(&qp->inbound_q);
			break;
		}
		/* Queue for deferred handling by scif_loopb_wq_handler */
		spin_lock(&qp->recv_lock);
		list_add_tail(&msg->list, &scif_info.loopb_recv_q);
		spin_unlock(&qp->recv_lock);
		queue_work(scif_info.loopb_wq, &scif_info.loopb_work);
		scif_rb_update_read_ptr(&qp->inbound_q);
	} while (read_size == sizeof(struct scifmsg));
	return read_size;
}
1165
/**
 * scif_setup_loopback_qp - One time setup work for Loopback Node Qp.
 * @scifdev: SCIF device
 *
 * Sets up the required loopback workqueues, queue pairs and ring buffers.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto-cleanup chain below.
 */
int scif_setup_loopback_qp(struct scif_dev *scifdev)
{
	int err = 0;
	void *local_q;
	struct scif_qp *qp;
	struct scif_peer_dev *spdev;

	err = scif_setup_intr_wq(scifdev);
	if (err)
		goto exit;
	INIT_LIST_HEAD(&scif_info.loopb_recv_q);
	snprintf(scif_info.loopb_wqname, sizeof(scif_info.loopb_wqname),
		 "SCIF LOOPB %d", scifdev->node);
	/* Ordered wq: loopback messages must be handled in arrival order */
	scif_info.loopb_wq =
		alloc_ordered_workqueue(scif_info.loopb_wqname, 0);
	if (!scif_info.loopb_wq) {
		err = -ENOMEM;
		goto destroy_intr;
	}
	INIT_WORK(&scif_info.loopb_work, scif_loopb_wq_handler);
	/* Allocate Self Qpair */
	scifdev->qpairs = kzalloc(sizeof(*scifdev->qpairs), GFP_KERNEL);
	if (!scifdev->qpairs) {
		err = -ENOMEM;
		goto destroy_loopb_wq;
	}

	qp = scifdev->qpairs;
	qp->magic = SCIFEP_MAGIC;
	spin_lock_init(&qp->send_lock);
	spin_lock_init(&qp->recv_lock);

	local_q = kzalloc(SCIF_NODE_QP_SIZE, GFP_KERNEL);
	if (!local_q) {
		err = -ENOMEM;
		goto free_qpairs;
	}
	/*
	 * For loopback the inbound_q and outbound_q are essentially the same
	 * since the Node sends a message on the loopback interface to the
	 * outbound_q which is then received on the inbound_q.
	 */
	scif_rb_init(&qp->outbound_q,
		     &qp->local_read,
		     &qp->local_write,
		     local_q, get_count_order(SCIF_NODE_QP_SIZE));

	scif_rb_init(&qp->inbound_q,
		     &qp->local_read,
		     &qp->local_write,
		     local_q, get_count_order(SCIF_NODE_QP_SIZE));
	scif_info.nodeid = scifdev->node;
	spdev = scif_peer_register_device(scifdev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		goto free_local_q;
	}
	scif_info.loopb_dev = scifdev;
	return err;
free_local_q:
	kfree(local_q);
free_qpairs:
	kfree(scifdev->qpairs);
destroy_loopb_wq:
	destroy_workqueue(scif_info.loopb_wq);
destroy_intr:
	scif_destroy_intr_wq(scifdev);
exit:
	return err;
}
1242
/**
 * scif_destroy_loopback_qp - One time uninit work for Loopback Node Qp
 * @scifdev: SCIF device
 *
 * Destroys the workqueues and frees up the Ring Buffer and Queue Pair memory.
 * Always returns 0.
 */
int scif_destroy_loopback_qp(struct scif_dev *scifdev)
{
	struct scif_peer_dev *spdev;

	rcu_read_lock();
	spdev = rcu_dereference(scifdev->spdev);
	rcu_read_unlock();
	if (spdev)
		scif_peer_unregister_device(spdev);
	destroy_workqueue(scif_info.loopb_wq);
	scif_destroy_intr_wq(scifdev);
	/*
	 * inbound_q and outbound_q share the same rb_base buffer for
	 * loopback (see scif_setup_loopback_qp), so free it only once.
	 */
	kfree(scifdev->qpairs->outbound_q.rb_base);
	kfree(scifdev->qpairs);
	scifdev->sdev = NULL;
	scif_info.loopb_dev = NULL;
	return 0;
}
1266
/*
 * scif_destroy_p2p - Tear down all peer-to-peer mappings for a node.
 * @scifdev: SCIF device being removed
 *
 * Unmaps and frees the MMIO and aperture scatterlists both in the given
 * node's own p2p list and in every other peer's list where this node
 * appears. Serialized against configuration changes by conflock.
 */
void scif_destroy_p2p(struct scif_dev *scifdev)
{
	struct scif_dev *peer_dev;
	struct scif_p2p_info *p2p;
	struct list_head *pos, *tmp;
	int bd;

	mutex_lock(&scif_info.conflock);
	/* Free P2P mappings in the given node for all its peer nodes */
	list_for_each_safe(pos, tmp, &scifdev->p2p) {
		p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
		dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
			     p2p->sg_nentries[SCIF_PPI_MMIO],
			     DMA_BIDIRECTIONAL);
		dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
			     p2p->sg_nentries[SCIF_PPI_APER],
			     DMA_BIDIRECTIONAL);
		scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
		scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
		list_del(pos);
		kfree(p2p);
	}

	/* Free P2P mapping created in the peer nodes for the given node */
	for (bd = SCIF_MGMT_NODE + 1; bd <= scif_info.maxid; bd++) {
		peer_dev = &scif_dev[bd];
		list_for_each_safe(pos, tmp, &peer_dev->p2p) {
			p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
			if (p2p->ppi_peer_id == scifdev->node) {
				dma_unmap_sg(&peer_dev->sdev->dev,
					     p2p->ppi_sg[SCIF_PPI_MMIO],
					     p2p->sg_nentries[SCIF_PPI_MMIO],
					     DMA_BIDIRECTIONAL);
				dma_unmap_sg(&peer_dev->sdev->dev,
					     p2p->ppi_sg[SCIF_PPI_APER],
					     p2p->sg_nentries[SCIF_PPI_APER],
					     DMA_BIDIRECTIONAL);
				scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
				scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
				list_del(pos);
				kfree(p2p);
			}
		}
	}
	mutex_unlock(&scif_info.conflock);
}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.h b/drivers/misc/mic/scif/scif_nodeqp.h
new file mode 100644
index 000000000000..6c0ed6783479
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_nodeqp.h
@@ -0,0 +1,183 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * BSD LICENSE
21 *
22 * Copyright(c) 2014 Intel Corporation.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name of Intel Corporation nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 *
50 * Intel SCIF driver.
51 *
52 */
53#ifndef SCIF_NODEQP
54#define SCIF_NODEQP
55
56#include "scif_rb.h"
57#include "scif_peer_bus.h"
58
#define SCIF_INIT 1  /* First message sent to the peer node for discovery */
#define SCIF_EXIT 2  /* Last message from the peer informing intent to exit */
#define SCIF_EXIT_ACK 3 /* Response to SCIF_EXIT message */
#define SCIF_NODE_ADD 4  /* Tell Online nodes a new node exits */
#define SCIF_NODE_ADD_ACK 5  /* Confirm to mgmt node sequence is finished */
#define SCIF_NODE_ADD_NACK 6 /* SCIF_NODE_ADD failed */
#define SCIF_NODE_REMOVE 7 /* Request to deactivate a SCIF node */
#define SCIF_NODE_REMOVE_ACK 8 /* Response to a SCIF_NODE_REMOVE message */
#define SCIF_CNCT_REQ 9  /* Phys addr of Request connection to a port */
#define SCIF_CNCT_GNT 10  /* Phys addr of new Grant connection request */
#define SCIF_CNCT_GNTACK 11  /* Ack the grant of a connection request */
#define SCIF_CNCT_GNTNACK 12  /* Nack the grant of a connection request */
#define SCIF_CNCT_REJ 13  /* Error type Reject a connection request */
#define SCIF_DISCNCT 14 /* Notify peer that connection is being terminated */
#define SCIF_DISCNT_ACK 15 /* Notify peer that connection is being terminated */
#define SCIF_CLIENT_SENT 16 /* Notify the peer that data has been written */
#define SCIF_CLIENT_RCVD 17 /* Notify the peer that data has been read */
#define SCIF_GET_NODE_INFO 18 /* Get current node mask from the mgmt node*/
#define SCIF_MAX_MSG SCIF_GET_NODE_INFO
78
/*
 * struct scifmsg - Node QP message format
 *
 * @src: Source information
 * @dst: Destination information
 * @uop: The message opcode (one of the SCIF_* message defines above)
 * @payload: Unique payload format for each message
 *
 * __packed: the layout crosses the wire between nodes, so no padding.
 */
struct scifmsg {
	struct scif_port_id src;
	struct scif_port_id dst;
	u32 uop;
	u64 payload[4];
} __packed;
93
/*
 * struct scif_qp - Node Queue Pair
 *
 * Interesting structure -- a little difficult because we can only
 * write across the PCIe, so any r/w pointer we need to read is
 * local. We only need to read the read pointer on the inbound_q
 * and read the write pointer in the outbound_q
 *
 * @magic: Magic value to ensure the peer sees the QP correctly
 * @outbound_q: The outbound ring buffer for sending messages
 * @inbound_q: The inbound ring buffer for receiving messages
 * @local_write: Local write index (cacheline-aligned to avoid false sharing)
 * @local_read: Local read index (cacheline-aligned to avoid false sharing)
 * @remote_qp: The remote queue pair
 * @local_buf: DMA address of local ring buffer
 * @local_qp: DMA address of the local queue pair data structure
 * @remote_buf: DMA address of remote ring buffer
 * @qp_state: QP state i.e. online or offline used for P2P
 * @send_lock: synchronize access to outbound queue
 * @recv_lock: Synchronize access to inbound queue
 */
struct scif_qp {
	u64 magic;
#define SCIFEP_MAGIC 0x5c1f000000005c1fULL
	struct scif_rb outbound_q;
	struct scif_rb inbound_q;

	u32 local_write __aligned(64);
	u32 local_read __aligned(64);
	struct scif_qp *remote_qp;
	dma_addr_t local_buf;
	dma_addr_t local_qp;
	dma_addr_t remote_buf;
	u32 qp_state;
#define SCIF_QP_OFFLINE 0xdead
#define SCIF_QP_ONLINE 0xc0de
	spinlock_t send_lock;
	spinlock_t recv_lock;
};
133
/*
 * struct scif_loopb_msg - An element in the loopback Node QP message list.
 *
 * @msg - The SCIF node QP message (copied out of the ring buffer)
 * @list - link in scif_info.loopb_recv_q, protected by the QP recv_lock
 */
struct scif_loopb_msg {
	struct scifmsg msg;
	struct list_head list;
};
144
145int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
146int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
147void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp);
148int scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp);
149int scif_setup_qp(struct scif_dev *scifdev);
150int scif_qp_response(phys_addr_t phys, struct scif_dev *dev);
151int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
152 int local_size, struct scif_dev *scifdev);
153int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
154 dma_addr_t phys, int local_size,
155 struct scif_dev *scifdev);
156int scif_setup_qp_connect_response(struct scif_dev *scifdev,
157 struct scif_qp *qp, u64 payload);
158int scif_setup_loopback_qp(struct scif_dev *scifdev);
159int scif_destroy_loopback_qp(struct scif_dev *scifdev);
160void scif_poll_qp_state(struct work_struct *work);
161void scif_qp_response_ack(struct work_struct *work);
162void scif_destroy_p2p(struct scif_dev *scifdev);
163void scif_send_exit(struct scif_dev *scifdev);
164static inline struct device *scif_get_peer_dev(struct scif_dev *scifdev)
165{
166 struct scif_peer_dev *spdev;
167 struct device *spdev_ret;
168
169 rcu_read_lock();
170 spdev = rcu_dereference(scifdev->spdev);
171 if (spdev)
172 spdev_ret = get_device(&spdev->dev);
173 else
174 spdev_ret = ERR_PTR(-ENODEV);
175 rcu_read_unlock();
176 return spdev_ret;
177}
178
/* Drop a peer device reference taken via scif_get_peer_dev(). */
static inline void scif_put_peer_dev(struct device *dev)
{
	put_device(dev);
}
183#endif /* SCIF_NODEQP */
diff --git a/drivers/misc/mic/scif/scif_peer_bus.c b/drivers/misc/mic/scif/scif_peer_bus.c
new file mode 100644
index 000000000000..589ae9ad2501
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_peer_bus.c
@@ -0,0 +1,124 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 */
17#include "scif_main.h"
18#include "../bus/scif_bus.h"
19#include "scif_peer_bus.h"
20
/* Map a generic struct device back to its enclosing scif_peer_dev. */
static inline struct scif_peer_dev *
dev_to_scif_peer(struct device *dev)
{
	return container_of(dev, struct scif_peer_dev, dev);
}
26
/* Map a generic device_driver back to its enclosing scif_peer_driver. */
static inline struct scif_peer_driver *
drv_to_scif_peer(struct device_driver *drv)
{
	return container_of(drv, struct scif_peer_driver, driver);
}
32
/*
 * Bus match: a driver binds when its name shares the device name's
 * first four characters (presumably the "scif" prefix — confirm).
 */
static int scif_peer_dev_match(struct device *dv, struct device_driver *dr)
{
	return !strncmp(dev_name(dv), dr->name, 4);
}
37
/* Bus probe: delegate to the bound scif_peer_driver's probe callback. */
static int scif_peer_dev_probe(struct device *d)
{
	struct scif_peer_dev *dev = dev_to_scif_peer(d);
	struct scif_peer_driver *drv = drv_to_scif_peer(dev->dev.driver);

	return drv->probe(dev);
}
45
/*
 * Bus remove: delegate to the driver's remove callback. The driver
 * remove returns void, so always report success to the core.
 */
static int scif_peer_dev_remove(struct device *d)
{
	struct scif_peer_dev *dev = dev_to_scif_peer(d);
	struct scif_peer_driver *drv = drv_to_scif_peer(dev->dev.driver);

	drv->remove(dev);
	return 0;
}
54
/* The virtual bus on which SCIF peer devices and drivers meet. */
static struct bus_type scif_peer_bus = {
	.name  = "scif_peer_bus",
	.match = scif_peer_dev_match,
	.probe = scif_peer_dev_probe,
	.remove = scif_peer_dev_remove,
};
61
/*
 * Register a SCIF peer driver on the peer bus.
 * Returns 0 on success or a negative errno from driver_register().
 */
int scif_peer_register_driver(struct scif_peer_driver *driver)
{
	driver->driver.bus = &scif_peer_bus;
	return driver_register(&driver->driver);
}
67
/* Unregister a previously registered SCIF peer driver. */
void scif_peer_unregister_driver(struct scif_peer_driver *driver)
{
	driver_unregister(&driver->driver);
}
72
/*
 * Device release callback: runs when the last reference on the peer
 * device is dropped. Cleans up the backing scif_dev state and frees
 * the peer device allocated in scif_peer_register_device().
 */
static void scif_peer_release_dev(struct device *d)
{
	struct scif_peer_dev *sdev = dev_to_scif_peer(d);
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];

	scif_cleanup_scifdev(scifdev);
	kfree(sdev);
}
81
/*
 * scif_peer_register_device - Create and register a peer device for scifdev.
 *
 * Allocates a scif_peer_dev, parents it under the hardware device and
 * registers it on the peer bus (which triggers driver matching/probe).
 * Returns the new device or an ERR_PTR on failure. Once registered, the
 * device is freed via scif_peer_release_dev when its refcount drops.
 */
struct scif_peer_dev *
scif_peer_register_device(struct scif_dev *scifdev)
{
	int ret;
	struct scif_peer_dev *spdev;

	spdev = kzalloc(sizeof(*spdev), GFP_KERNEL);
	if (!spdev)
		return ERR_PTR(-ENOMEM);

	spdev->dev.parent = scifdev->sdev->dev.parent;
	spdev->dev.release = scif_peer_release_dev;
	spdev->dnode = scifdev->node;
	spdev->dev.bus = &scif_peer_bus;

	dev_set_name(&spdev->dev, "scif_peer-dev%u", spdev->dnode);
	/*
	 * device_register() causes the bus infrastructure to look for a
	 * matching driver.
	 */
	ret = device_register(&spdev->dev);
	if (ret)
		goto free_spdev;
	return spdev;
free_spdev:
	kfree(spdev);
	return ERR_PTR(ret);
}
110
/* Unregister a peer device; release callback frees it on last put. */
void scif_peer_unregister_device(struct scif_peer_dev *sdev)
{
	device_unregister(&sdev->dev);
}
115
/* Register the SCIF peer bus with the driver core. */
int scif_peer_bus_init(void)
{
	return bus_register(&scif_peer_bus);
}
120
/* Unregister the SCIF peer bus from the driver core. */
void scif_peer_bus_exit(void)
{
	bus_unregister(&scif_peer_bus);
}
diff --git a/drivers/misc/mic/scif/scif_peer_bus.h b/drivers/misc/mic/scif/scif_peer_bus.h
new file mode 100644
index 000000000000..33f0dbb30152
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_peer_bus.h
@@ -0,0 +1,65 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 */
#ifndef _SCIF_PEER_BUS_H_
#define _SCIF_PEER_BUS_H_

#include <linux/device.h>
#include <linux/mic_common.h>

/*
 * Peer devices show up as PCIe devices for the mgmt node but not the cards.
 * The mgmt node discovers all the cards on the PCIe bus and informs the other
 * cards about their peers. Upon notification of a peer a node adds a peer
 * device to the peer bus to maintain symmetry in the way devices are
 * discovered across all nodes in the SCIF network.
 */
/**
 * scif_peer_dev - representation of a peer SCIF device
 * @dev: underlying device
 * @dnode: The destination node which this device will communicate with.
 */
struct scif_peer_dev {
	struct device dev;
	u8 dnode;
};

/**
 * scif_peer_driver - operations for a scif_peer I/O driver
 * @driver: underlying device driver (populate name and owner).
 * @id_table: the ids serviced by this driver.
 * @probe: the function to call when a device is found. Returns 0 or -errno.
 * @remove: the function to call when a device is removed.
 */
struct scif_peer_driver {
	struct device_driver driver;
	const struct scif_peer_dev_id *id_table;

	int (*probe)(struct scif_peer_dev *dev);
	void (*remove)(struct scif_peer_dev *dev);
};

struct scif_dev;

int scif_peer_register_driver(struct scif_peer_driver *driver);
void scif_peer_unregister_driver(struct scif_peer_driver *driver);

struct scif_peer_dev *scif_peer_register_device(struct scif_dev *sdev);
void scif_peer_unregister_device(struct scif_peer_dev *sdev);

int scif_peer_bus_init(void);
void scif_peer_bus_exit(void);
#endif /* _SCIF_PEER_BUS_H_ */
diff --git a/drivers/misc/mic/scif/scif_ports.c b/drivers/misc/mic/scif/scif_ports.c
new file mode 100644
index 000000000000..594e18d279d8
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_ports.c
@@ -0,0 +1,124 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include <linux/idr.h>
19
20#include "scif_main.h"
21
/* Total port-number space; valid ports are 0 .. SCIF_PORT_COUNT - 1 */
#define SCIF_PORT_COUNT 0x10000 /* Ports available */

/*
 * Global IDR mapping reserved port numbers to their struct scif_port.
 * NOTE(review): not static — presumably referenced from other SCIF files;
 * confirm before narrowing linkage.
 */
struct idr scif_ports;

/*
 * struct scif_port - SCIF port information
 *
 * @ref_cnt - Reference count since there can be multiple endpoints
 * created via scif_accept(..) simultaneously using a port.
 */
struct scif_port {
	int ref_cnt;
};
35
36/**
37 * __scif_get_port - Reserve a specified port # for SCIF and add it
38 * to the global list.
39 * @port : port # to be reserved.
40 *
41 * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
42 * On memory allocation failure, returns -ENOMEM.
43 */
44static int __scif_get_port(int start, int end)
45{
46 int id;
47 struct scif_port *port = kzalloc(sizeof(*port), GFP_ATOMIC);
48
49 if (!port)
50 return -ENOMEM;
51 spin_lock(&scif_info.port_lock);
52 id = idr_alloc(&scif_ports, port, start, end, GFP_ATOMIC);
53 if (id >= 0)
54 port->ref_cnt++;
55 spin_unlock(&scif_info.port_lock);
56 return id;
57}
58
59/**
60 * scif_rsrv_port - Reserve a specified port # for SCIF.
61 * @port : port # to be reserved.
62 *
63 * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
64 * On memory allocation failure, returns -ENOMEM.
65 */
66int scif_rsrv_port(u16 port)
67{
68 return __scif_get_port(port, port + 1);
69}
70
/**
 * scif_get_new_port - Get and reserve any port # for SCIF in the range
 * SCIF_PORT_RSVD + 1 to SCIF_PORT_COUNT - 1.
 *
 * Ports at or below SCIF_PORT_RSVD are left for explicit reservation via
 * scif_rsrv_port().
 *
 * Return: Allocated SCIF port #, or -ENOSPC if no ports available.
 * On memory allocation failure, returns -ENOMEM.
 */
int scif_get_new_port(void)
{
	return __scif_get_port(SCIF_PORT_RSVD + 1, SCIF_PORT_COUNT);
}
82
83/**
84 * scif_get_port - Increment the reference count for a SCIF port
85 * @id : SCIF port
86 *
87 * @return : None
88 */
89void scif_get_port(u16 id)
90{
91 struct scif_port *port;
92
93 if (!id)
94 return;
95 spin_lock(&scif_info.port_lock);
96 port = idr_find(&scif_ports, id);
97 if (port)
98 port->ref_cnt++;
99 spin_unlock(&scif_info.port_lock);
100}
101
102/**
103 * scif_put_port - Release a reserved SCIF port
104 * @id : SCIF port to be released.
105 *
106 * @return : None
107 */
108void scif_put_port(u16 id)
109{
110 struct scif_port *port;
111
112 if (!id)
113 return;
114 spin_lock(&scif_info.port_lock);
115 port = idr_find(&scif_ports, id);
116 if (port) {
117 port->ref_cnt--;
118 if (!port->ref_cnt) {
119 idr_remove(&scif_ports, id);
120 kfree(port);
121 }
122 }
123 spin_unlock(&scif_info.port_lock);
124}
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
new file mode 100644
index 000000000000..637cc4686742
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rb.c
@@ -0,0 +1,249 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * Intel SCIF driver.
16 *
17 */
18#include <linux/circ_buf.h>
19#include <linux/types.h>
20#include <linux/io.h>
21#include <linux/errno.h>
22
23#include "scif_rb.h"
24
25#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
26#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)
27
28/**
29 * scif_rb_init - Initializes the ring buffer
30 * @rb: ring buffer
31 * @read_ptr: A pointer to the read offset
32 * @write_ptr: A pointer to the write offset
33 * @rb_base: A pointer to the base of the ring buffer
34 * @size: The size of the ring buffer in powers of two
35 */
36void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
37 void *rb_base, u8 size)
38{
39 rb->rb_base = rb_base;
40 rb->size = (1 << size);
41 rb->read_ptr = read_ptr;
42 rb->write_ptr = write_ptr;
43 rb->current_read_offset = *read_ptr;
44 rb->current_write_offset = *write_ptr;
45}
46
47/* Copies a message to the ring buffer -- handles the wrap around case */
48static void memcpy_torb(struct scif_rb *rb, void *header,
49 void *msg, u32 size)
50{
51 u32 size1, size2;
52
53 if (header + size >= rb->rb_base + rb->size) {
54 /* Need to call two copies if it wraps around */
55 size1 = (u32)(rb->rb_base + rb->size - header);
56 size2 = size - size1;
57 memcpy_toio((void __iomem __force *)header, msg, size1);
58 memcpy_toio((void __iomem __force *)rb->rb_base,
59 msg + size1, size2);
60 } else {
61 memcpy_toio((void __iomem __force *)header, msg, size);
62 }
63}
64
65/* Copies a message from the ring buffer -- handles the wrap around case */
66static void memcpy_fromrb(struct scif_rb *rb, void *header,
67 void *msg, u32 size)
68{
69 u32 size1, size2;
70
71 if (header + size >= rb->rb_base + rb->size) {
72 /* Need to call two copies if it wraps around */
73 size1 = (u32)(rb->rb_base + rb->size - header);
74 size2 = size - size1;
75 memcpy_fromio(msg, (void __iomem __force *)header, size1);
76 memcpy_fromio(msg + size1,
77 (void __iomem __force *)rb->rb_base, size2);
78 } else {
79 memcpy_fromio(msg, (void __iomem __force *)header, size);
80 }
81}
82
/**
 * scif_rb_space - Query space available for writing to the RB
 * @rb: ring buffer
 *
 * Refreshes the cached read offset from the shared read pointer, which may
 * require a read across PCIe.
 *
 * Return: size available for writing to RB in bytes.
 */
u32 scif_rb_space(struct scif_rb *rb)
{
	rb->current_read_offset = *rb->read_ptr;
	/*
	 * Update from the HW read pointer only once the peer has exposed the
	 * new empty slot. This barrier is paired with the memory barrier in
	 * scif_rb_update_read_ptr()
	 */
	mb();
	return scif_rb_ring_space(rb->current_write_offset,
				  rb->current_read_offset, rb->size);
}
101
102/**
103 * scif_rb_write - Write a message to the RB
104 * @rb: ring buffer
105 * @msg: buffer to send the message. Must be at least size bytes long
106 * @size: the size (in bytes) to be copied to the RB
107 *
108 * This API does not block if there isn't enough space in the RB.
109 * Returns: 0 on success or -ENOMEM on failure
110 */
111int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
112{
113 void *header;
114
115 if (scif_rb_space(rb) < size)
116 return -ENOMEM;
117 header = rb->rb_base + rb->current_write_offset;
118 memcpy_torb(rb, header, msg, size);
119 /*
120 * Wait until scif_rb_commit(). Update the local ring
121 * buffer data, not the shared data until commit.
122 */
123 rb->current_write_offset =
124 (rb->current_write_offset + size) & (rb->size - 1);
125 return 0;
126}
127
/**
 * scif_rb_commit - To submit the message to let the peer fetch it
 * @rb: ring buffer
 *
 * Publishes the cached write offset to the shared write pointer so the
 * peer can observe the newly written message(s).
 */
void scif_rb_commit(struct scif_rb *rb)
{
	/*
	 * We must ensure ordering between the all the data committed
	 * previously before we expose the new message to the peer by
	 * updating the write_ptr. This write barrier is paired with
	 * the read barrier in scif_rb_count(..)
	 */
	wmb();
	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
#endif
}
153
154/**
155 * scif_rb_get - To get next message from the ring buffer
156 * @rb: ring buffer
157 * @size: Number of bytes to be read
158 *
159 * Return: NULL if no bytes to be read from the ring buffer, otherwise the
160 * pointer to the next byte
161 */
162static void *scif_rb_get(struct scif_rb *rb, u32 size)
163{
164 void *header = NULL;
165
166 if (scif_rb_count(rb, size) >= size)
167 header = rb->rb_base + rb->current_read_offset;
168 return header;
169}
170
171/*
172 * scif_rb_get_next - Read from ring buffer.
173 * @rb: ring buffer
174 * @msg: buffer to hold the message. Must be at least size bytes long
175 * @size: Number of bytes to be read
176 *
177 * Return: number of bytes read if available bytes are >= size, otherwise
178 * returns zero.
179 */
180u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
181{
182 void *header = NULL;
183 int read_size = 0;
184
185 header = scif_rb_get(rb, size);
186 if (header) {
187 u32 next_cmd_offset =
188 (rb->current_read_offset + size) & (rb->size - 1);
189
190 read_size = size;
191 rb->current_read_offset = next_cmd_offset;
192 memcpy_fromrb(rb, header, msg, size);
193 }
194 return read_size;
195}
196
/**
 * scif_rb_update_read_ptr - publish the cached read offset to the peer
 * @rb: ring buffer
 *
 * Exposes the space consumed by prior scif_rb_get_next() calls so the
 * peer can reuse it for new messages.
 */
void scif_rb_update_read_ptr(struct scif_rb *rb)
{
	u32 new_offset;

	new_offset = rb->current_read_offset;
	/*
	 * We must ensure ordering between the all the data committed or read
	 * previously before we expose the empty slot to the peer by updating
	 * the read_ptr. This barrier is paired with the memory barrier in
	 * scif_rb_space(..)
	 */
	mb();
	ACCESS_ONCE(*rb->read_ptr) = new_offset;
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si Bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	ACCESS_ONCE(*rb->read_ptr) = new_offset;
#endif
}
225
/**
 * scif_rb_count - number of bytes that can be read
 * @rb: ring buffer
 * @size: Number of bytes expected to be read
 *
 * Only refreshes the cached write offset from the shared write pointer when
 * the cached view holds fewer than @size bytes, avoiding a remote read in
 * the common case.
 *
 * Return: number of bytes that can be read from the RB
 */
u32 scif_rb_count(struct scif_rb *rb, u32 size)
{
	if (scif_rb_ring_cnt(rb->current_write_offset,
			     rb->current_read_offset,
			     rb->size) < size) {
		rb->current_write_offset = *rb->write_ptr;
		/*
		 * Update from the HW write pointer if empty only once the peer
		 * has exposed the new message. This read barrier is paired
		 * with the write barrier in scif_rb_commit(..)
		 */
		smp_rmb();
	}
	return scif_rb_ring_cnt(rb->current_write_offset,
				rb->current_read_offset,
				rb->size);
}
diff --git a/drivers/misc/mic/scif/scif_rb.h b/drivers/misc/mic/scif/scif_rb.h
new file mode 100644
index 000000000000..166dffe3093d
--- /dev/null
+++ b/drivers/misc/mic/scif/scif_rb.h
@@ -0,0 +1,100 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * BSD LICENSE
21 *
22 * Copyright(c) 2014 Intel Corporation.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name of Intel Corporation nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 *
50 * Intel SCIF driver.
51 */
#ifndef SCIF_RB_H
#define SCIF_RB_H
/*
 * This file describes a general purpose, byte based ring buffer. Writers to the
 * ring buffer need to synchronize using a lock. The same is true for readers,
 * although in practice, the ring buffer has a single reader. It is lockless
 * between producer and consumer so it can handle being used across the PCIe
 * bus. The ring buffer ensures that there are no reads across the PCIe bus for
 * performance reasons. Two of these are used to form a single bidirectional
 * queue-pair across PCIe.
 */
/*
 * struct scif_rb - SCIF Ring Buffer
 *
 * @rb_base: The base of the memory used for storing RB messages
 * @read_ptr: Pointer to the read offset, shared with the peer
 * @write_ptr: Pointer to the write offset, shared with the peer
 * @size: Size of the memory in rb_base, in bytes (a power of two)
 * @current_read_offset: Cached read offset for performance
 * @current_write_offset: Cached write offset for performance
 */
struct scif_rb {
	void *rb_base;
	u32 *read_ptr;
	u32 *write_ptr;
	u32 size;
	u32 current_read_offset;
	u32 current_write_offset;
};

/* methods used by both */
void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
		  void *rb_base, u8 size);
/* writer only methods */
/* write a new command, then scif_rb_commit() */
int scif_rb_write(struct scif_rb *rb, void *msg, u32 size);
/* after write(), then scif_rb_commit() */
void scif_rb_commit(struct scif_rb *rb);
/* query space available for writing to a RB. */
u32 scif_rb_space(struct scif_rb *rb);

/* reader only methods */
/* read a new message from the ring buffer of size bytes */
u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size);
/* update the read pointer so that the space can be reused */
void scif_rb_update_read_ptr(struct scif_rb *rb);
/* count the number of bytes that can be read */
u32 scif_rb_count(struct scif_rb *rb, u32 size);
#endif /* SCIF_RB_H */
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index eeaaf5fca105..15c33cc34a80 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -18,23 +18,20 @@
18 * MA 02110-1301, USA. 18 * MA 02110-1301, USA.
19 */ 19 */
20 20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/clk.h> 21#include <linux/clk.h>
24#include <linux/err.h> 22#include <linux/genalloc.h>
25#include <linux/io.h> 23#include <linux/io.h>
26#include <linux/of.h>
27#include <linux/of_address.h>
28#include <linux/list.h>
29#include <linux/list_sort.h> 24#include <linux/list_sort.h>
25#include <linux/of_address.h>
30#include <linux/platform_device.h> 26#include <linux/platform_device.h>
31#include <linux/slab.h> 27#include <linux/slab.h>
32#include <linux/spinlock.h>
33#include <linux/genalloc.h>
34 28
35#define SRAM_GRANULARITY 32 29#define SRAM_GRANULARITY 32
36 30
37struct sram_dev { 31struct sram_dev {
32 struct device *dev;
33 void __iomem *virt_base;
34
38 struct gen_pool *pool; 35 struct gen_pool *pool;
39 struct clk *clk; 36 struct clk *clk;
40}; 37};
@@ -54,62 +51,27 @@ static int sram_reserve_cmp(void *priv, struct list_head *a,
54 return ra->start - rb->start; 51 return ra->start - rb->start;
55} 52}
56 53
57static int sram_probe(struct platform_device *pdev) 54static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
58{ 55{
59 void __iomem *virt_base; 56 struct device_node *np = sram->dev->of_node, *child;
60 struct sram_dev *sram;
61 struct resource *res;
62 struct device_node *np = pdev->dev.of_node, *child;
63 unsigned long size, cur_start, cur_size; 57 unsigned long size, cur_start, cur_size;
64 struct sram_reserve *rblocks, *block; 58 struct sram_reserve *rblocks, *block;
65 struct list_head reserve_list; 59 struct list_head reserve_list;
66 unsigned int nblocks; 60 unsigned int nblocks;
67 int ret; 61 int ret = 0;
68 62
69 INIT_LIST_HEAD(&reserve_list); 63 INIT_LIST_HEAD(&reserve_list);
70 64
71 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
72 if (!res) {
73 dev_err(&pdev->dev, "found no memory resource\n");
74 return -EINVAL;
75 }
76
77 size = resource_size(res); 65 size = resource_size(res);
78 66
79 if (!devm_request_mem_region(&pdev->dev,
80 res->start, size, pdev->name)) {
81 dev_err(&pdev->dev, "could not request region for resource\n");
82 return -EBUSY;
83 }
84
85 virt_base = devm_ioremap_wc(&pdev->dev, res->start, size);
86 if (IS_ERR(virt_base))
87 return PTR_ERR(virt_base);
88
89 sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
90 if (!sram)
91 return -ENOMEM;
92
93 sram->clk = devm_clk_get(&pdev->dev, NULL);
94 if (IS_ERR(sram->clk))
95 sram->clk = NULL;
96 else
97 clk_prepare_enable(sram->clk);
98
99 sram->pool = devm_gen_pool_create(&pdev->dev, ilog2(SRAM_GRANULARITY), -1);
100 if (!sram->pool)
101 return -ENOMEM;
102
103 /* 67 /*
104 * We need an additional block to mark the end of the memory region 68 * We need an additional block to mark the end of the memory region
105 * after the reserved blocks from the dt are processed. 69 * after the reserved blocks from the dt are processed.
106 */ 70 */
107 nblocks = (np) ? of_get_available_child_count(np) + 1 : 1; 71 nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
108 rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL); 72 rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
109 if (!rblocks) { 73 if (!rblocks)
110 ret = -ENOMEM; 74 return -ENOMEM;
111 goto err_alloc;
112 }
113 75
114 block = &rblocks[0]; 76 block = &rblocks[0];
115 for_each_available_child_of_node(np, child) { 77 for_each_available_child_of_node(np, child) {
@@ -117,17 +79,19 @@ static int sram_probe(struct platform_device *pdev)
117 79
118 ret = of_address_to_resource(child, 0, &child_res); 80 ret = of_address_to_resource(child, 0, &child_res);
119 if (ret < 0) { 81 if (ret < 0) {
120 dev_err(&pdev->dev, 82 dev_err(sram->dev,
121 "could not get address for node %s\n", 83 "could not get address for node %s\n",
122 child->full_name); 84 child->full_name);
85 of_node_put(child);
123 goto err_chunks; 86 goto err_chunks;
124 } 87 }
125 88
126 if (child_res.start < res->start || child_res.end > res->end) { 89 if (child_res.start < res->start || child_res.end > res->end) {
127 dev_err(&pdev->dev, 90 dev_err(sram->dev,
128 "reserved block %s outside the sram area\n", 91 "reserved block %s outside the sram area\n",
129 child->full_name); 92 child->full_name);
130 ret = -EINVAL; 93 ret = -EINVAL;
94 of_node_put(child);
131 goto err_chunks; 95 goto err_chunks;
132 } 96 }
133 97
@@ -135,9 +99,8 @@ static int sram_probe(struct platform_device *pdev)
135 block->size = resource_size(&child_res); 99 block->size = resource_size(&child_res);
136 list_add_tail(&block->list, &reserve_list); 100 list_add_tail(&block->list, &reserve_list);
137 101
138 dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n", 102 dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
139 block->start, 103 block->start, block->start + block->size);
140 block->start + block->size);
141 104
142 block++; 105 block++;
143 } 106 }
@@ -154,7 +117,7 @@ static int sram_probe(struct platform_device *pdev)
154 list_for_each_entry(block, &reserve_list, list) { 117 list_for_each_entry(block, &reserve_list, list) {
155 /* can only happen if sections overlap */ 118 /* can only happen if sections overlap */
156 if (block->start < cur_start) { 119 if (block->start < cur_start) {
157 dev_err(&pdev->dev, 120 dev_err(sram->dev,
158 "block at 0x%x starts after current offset 0x%lx\n", 121 "block at 0x%x starts after current offset 0x%lx\n",
159 block->start, cur_start); 122 block->start, cur_start);
160 ret = -EINVAL; 123 ret = -EINVAL;
@@ -174,10 +137,11 @@ static int sram_probe(struct platform_device *pdev)
174 */ 137 */
175 cur_size = block->start - cur_start; 138 cur_size = block->start - cur_start;
176 139
177 dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n", 140 dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
178 cur_start, cur_start + cur_size); 141 cur_start, cur_start + cur_size);
142
179 ret = gen_pool_add_virt(sram->pool, 143 ret = gen_pool_add_virt(sram->pool,
180 (unsigned long)virt_base + cur_start, 144 (unsigned long)sram->virt_base + cur_start,
181 res->start + cur_start, cur_size, -1); 145 res->start + cur_start, cur_size, -1);
182 if (ret < 0) 146 if (ret < 0)
183 goto err_chunks; 147 goto err_chunks;
@@ -186,20 +150,63 @@ static int sram_probe(struct platform_device *pdev)
186 cur_start = block->start + block->size; 150 cur_start = block->start + block->size;
187 } 151 }
188 152
153 err_chunks:
189 kfree(rblocks); 154 kfree(rblocks);
190 155
156 return ret;
157}
158
159static int sram_probe(struct platform_device *pdev)
160{
161 struct sram_dev *sram;
162 struct resource *res;
163 size_t size;
164 int ret;
165
166 sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
167 if (!sram)
168 return -ENOMEM;
169
170 sram->dev = &pdev->dev;
171
172 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
173 if (!res) {
174 dev_err(sram->dev, "found no memory resource\n");
175 return -EINVAL;
176 }
177
178 size = resource_size(res);
179
180 if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
181 dev_err(sram->dev, "could not request region for resource\n");
182 return -EBUSY;
183 }
184
185 sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
186 if (IS_ERR(sram->virt_base))
187 return PTR_ERR(sram->virt_base);
188
189 sram->pool = devm_gen_pool_create(sram->dev,
190 ilog2(SRAM_GRANULARITY), -1);
191 if (!sram->pool)
192 return -ENOMEM;
193
194 ret = sram_reserve_regions(sram, res);
195 if (ret)
196 return ret;
197
198 sram->clk = devm_clk_get(sram->dev, NULL);
199 if (IS_ERR(sram->clk))
200 sram->clk = NULL;
201 else
202 clk_prepare_enable(sram->clk);
203
191 platform_set_drvdata(pdev, sram); 204 platform_set_drvdata(pdev, sram);
192 205
193 dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base); 206 dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
207 gen_pool_size(sram->pool) / 1024, sram->virt_base);
194 208
195 return 0; 209 return 0;
196
197err_chunks:
198 kfree(rblocks);
199err_alloc:
200 if (sram->clk)
201 clk_disable_unprepare(sram->clk);
202 return ret;
203} 210}
204 211
205static int sram_remove(struct platform_device *pdev) 212static int sram_remove(struct platform_device *pdev)
@@ -207,7 +214,7 @@ static int sram_remove(struct platform_device *pdev)
207 struct sram_dev *sram = platform_get_drvdata(pdev); 214 struct sram_dev *sram = platform_get_drvdata(pdev);
208 215
209 if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool)) 216 if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
210 dev_dbg(&pdev->dev, "removed while SRAM allocated\n"); 217 dev_err(sram->dev, "removed while SRAM allocated\n");
211 218
212 if (sram->clk) 219 if (sram->clk)
213 clk_disable_unprepare(sram->clk); 220 clk_disable_unprepare(sram->clk);
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 18e7a03985d4..5027b8ffae43 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -752,9 +752,8 @@ static struct ti_st_plat_data *get_platform_data(struct device *dev)
752 int len; 752 int len;
753 753
754 dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL); 754 dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
755
756 if (!dt_pdata) 755 if (!dt_pdata)
757 pr_err("Can't allocate device_tree platform data\n"); 756 return NULL;
758 757
759 dt_property = of_get_property(np, "dev_name", &len); 758 dt_property = of_get_property(np, "dev_name", &len);
760 if (dt_property) 759 if (dt_property)
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 11c7cbdade66..2b77ccf77f81 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -32,6 +32,51 @@ struct mei_nfc_hdr {
32 u16 data_size; 32 u16 data_size;
33} __packed; 33} __packed;
34 34
35struct mei_nfc_cmd {
36 struct mei_nfc_hdr hdr;
37 u8 sub_command;
38 u8 data[];
39} __packed;
40
41struct mei_nfc_reply {
42 struct mei_nfc_hdr hdr;
43 u8 sub_command;
44 u8 reply_status;
45 u8 data[];
46} __packed;
47
48struct mei_nfc_if_version {
49 u8 radio_version_sw[3];
50 u8 reserved[3];
51 u8 radio_version_hw[3];
52 u8 i2c_addr;
53 u8 fw_ivn;
54 u8 vendor_id;
55 u8 radio_type;
56} __packed;
57
58struct mei_nfc_connect {
59 u8 fw_ivn;
60 u8 vendor_id;
61} __packed;
62
63struct mei_nfc_connect_resp {
64 u8 fw_ivn;
65 u8 vendor_id;
66 u16 me_major;
67 u16 me_minor;
68 u16 me_hotfix;
69 u16 me_build;
70} __packed;
71
72
73#define MEI_NFC_CMD_MAINTENANCE 0x00
74#define MEI_NFC_CMD_HCI_SEND 0x01
75#define MEI_NFC_CMD_HCI_RECV 0x02
76
77#define MEI_NFC_SUBCMD_CONNECT 0x00
78#define MEI_NFC_SUBCMD_IF_VERSION 0x01
79
35#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) 80#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
36 81
37#define MEI_DUMP_SKB_IN(info, skb) \ 82#define MEI_DUMP_SKB_IN(info, skb) \
@@ -45,51 +90,169 @@ do { \
45do { \ 90do { \
46 pr_debug("%s:\n", info); \ 91 pr_debug("%s:\n", info); \
47 print_hex_dump_debug("mei out: ", DUMP_PREFIX_OFFSET, \ 92 print_hex_dump_debug("mei out: ", DUMP_PREFIX_OFFSET, \
48 16, 1, (skb)->data, (skb)->len, false); \ 93 16, 1, (skb)->data, (skb)->len, false); \
49} while (0) 94} while (0)
50 95
51int nfc_mei_phy_enable(void *phy_id) 96#define MEI_DUMP_NFC_HDR(info, _hdr) \
97do { \
98 pr_debug("%s:\n", info); \
99 pr_debug("cmd=%02d status=%d req_id=%d rsvd=%d size=%d\n", \
100 (_hdr)->cmd, (_hdr)->status, (_hdr)->req_id, \
101 (_hdr)->reserved, (_hdr)->data_size); \
102} while (0)
103
104static int mei_nfc_if_version(struct nfc_mei_phy *phy)
52{ 105{
53 int r; 106
54 struct nfc_mei_phy *phy = phy_id; 107 struct mei_nfc_cmd cmd;
108 struct mei_nfc_reply *reply = NULL;
109 struct mei_nfc_if_version *version;
110 size_t if_version_length;
111 int bytes_recv, r;
55 112
56 pr_info("%s\n", __func__); 113 pr_info("%s\n", __func__);
57 114
58 if (phy->powered == 1) 115 memset(&cmd, 0, sizeof(struct mei_nfc_cmd));
59 return 0; 116 cmd.hdr.cmd = MEI_NFC_CMD_MAINTENANCE;
117 cmd.hdr.data_size = 1;
118 cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
60 119
61 r = mei_cl_enable_device(phy->device); 120 MEI_DUMP_NFC_HDR("version", &cmd.hdr);
121 r = mei_cl_send(phy->device, (u8 *)&cmd, sizeof(struct mei_nfc_cmd));
62 if (r < 0) { 122 if (r < 0) {
63 pr_err("Could not enable device\n"); 123 pr_err("Could not send IF version cmd\n");
64 return r; 124 return r;
65 } 125 }
66 126
67 r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy); 127 /* to be sure on the stack we alloc memory */
68 if (r) { 128 if_version_length = sizeof(struct mei_nfc_reply) +
69 pr_err("Event cb registration failed\n"); 129 sizeof(struct mei_nfc_if_version);
70 mei_cl_disable_device(phy->device);
71 phy->powered = 0;
72 130
73 return r; 131 reply = kzalloc(if_version_length, GFP_KERNEL);
132 if (!reply)
133 return -ENOMEM;
134
135 bytes_recv = mei_cl_recv(phy->device, (u8 *)reply, if_version_length);
136 if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
137 pr_err("Could not read IF version\n");
138 r = -EIO;
139 goto err;
74 } 140 }
75 141
76 phy->powered = 1; 142 version = (struct mei_nfc_if_version *)reply->data;
77 143
78 return 0; 144 phy->fw_ivn = version->fw_ivn;
145 phy->vendor_id = version->vendor_id;
146 phy->radio_type = version->radio_type;
147
148err:
149 kfree(reply);
150 return r;
79} 151}
80EXPORT_SYMBOL_GPL(nfc_mei_phy_enable);
81 152
82void nfc_mei_phy_disable(void *phy_id) 153static int mei_nfc_connect(struct nfc_mei_phy *phy)
83{ 154{
84 struct nfc_mei_phy *phy = phy_id; 155 struct mei_nfc_cmd *cmd, *reply;
156 struct mei_nfc_connect *connect;
157 struct mei_nfc_connect_resp *connect_resp;
158 size_t connect_length, connect_resp_length;
159 int bytes_recv, r;
85 160
86 pr_info("%s\n", __func__); 161 pr_info("%s\n", __func__);
87 162
88 mei_cl_disable_device(phy->device); 163 connect_length = sizeof(struct mei_nfc_cmd) +
164 sizeof(struct mei_nfc_connect);
89 165
90 phy->powered = 0; 166 connect_resp_length = sizeof(struct mei_nfc_cmd) +
167 sizeof(struct mei_nfc_connect_resp);
168
169 cmd = kzalloc(connect_length, GFP_KERNEL);
170 if (!cmd)
171 return -ENOMEM;
172 connect = (struct mei_nfc_connect *)cmd->data;
173
174 reply = kzalloc(connect_resp_length, GFP_KERNEL);
175 if (!reply) {
176 kfree(cmd);
177 return -ENOMEM;
178 }
179
180 connect_resp = (struct mei_nfc_connect_resp *)reply->data;
181
182 cmd->hdr.cmd = MEI_NFC_CMD_MAINTENANCE;
183 cmd->hdr.data_size = 3;
184 cmd->sub_command = MEI_NFC_SUBCMD_CONNECT;
185 connect->fw_ivn = phy->fw_ivn;
186 connect->vendor_id = phy->vendor_id;
187
188 MEI_DUMP_NFC_HDR("connect request", &cmd->hdr);
189 r = mei_cl_send(phy->device, (u8 *)cmd, connect_length);
190 if (r < 0) {
191 pr_err("Could not send connect cmd %d\n", r);
192 goto err;
193 }
194
195 bytes_recv = mei_cl_recv(phy->device, (u8 *)reply, connect_resp_length);
196 if (bytes_recv < 0) {
197 r = bytes_recv;
198 pr_err("Could not read connect response %d\n", r);
199 goto err;
200 }
201
202 MEI_DUMP_NFC_HDR("connect reply", &reply->hdr);
203
204 pr_info("IVN 0x%x Vendor ID 0x%x\n",
205 connect_resp->fw_ivn, connect_resp->vendor_id);
206
207 pr_info("ME FW %d.%d.%d.%d\n",
208 connect_resp->me_major, connect_resp->me_minor,
209 connect_resp->me_hotfix, connect_resp->me_build);
210
211 r = 0;
212
213err:
214 kfree(reply);
215 kfree(cmd);
216
217 return r;
218}
219
220static int mei_nfc_send(struct nfc_mei_phy *phy, u8 *buf, size_t length)
221{
222 struct mei_nfc_hdr *hdr;
223 u8 *mei_buf;
224 int err;
225
226 err = -ENOMEM;
227 mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
228 if (!mei_buf)
229 goto out;
230
231 hdr = (struct mei_nfc_hdr *)mei_buf;
232 hdr->cmd = MEI_NFC_CMD_HCI_SEND;
233 hdr->status = 0;
234 hdr->req_id = phy->req_id;
235 hdr->reserved = 0;
236 hdr->data_size = length;
237
238 MEI_DUMP_NFC_HDR("send", hdr);
239
240 memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
241 err = mei_cl_send(phy->device, mei_buf, length + MEI_NFC_HEADER_SIZE);
242 if (err < 0)
243 goto out;
244
245 if (!wait_event_interruptible_timeout(phy->send_wq,
246 phy->recv_req_id == phy->req_id, HZ)) {
247 pr_err("NFC MEI command timeout\n");
248 err = -ETIME;
249 } else {
250 phy->req_id++;
251 }
252out:
253 kfree(mei_buf);
254 return err;
91} 255}
92EXPORT_SYMBOL_GPL(nfc_mei_phy_disable);
93 256
94/* 257/*
95 * Writing a frame must not return the number of written bytes. 258 * Writing a frame must not return the number of written bytes.
@@ -103,14 +266,38 @@ static int nfc_mei_phy_write(void *phy_id, struct sk_buff *skb)
103 266
104 MEI_DUMP_SKB_OUT("mei frame sent", skb); 267 MEI_DUMP_SKB_OUT("mei frame sent", skb);
105 268
106 r = mei_cl_send(phy->device, skb->data, skb->len); 269 r = mei_nfc_send(phy, skb->data, skb->len);
107 if (r > 0) 270 if (r > 0)
108 r = 0; 271 r = 0;
109 272
110 return r; 273 return r;
111} 274}
112 275
113void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context) 276static int mei_nfc_recv(struct nfc_mei_phy *phy, u8 *buf, size_t length)
277{
278 struct mei_nfc_hdr *hdr;
279 int received_length;
280
281 received_length = mei_cl_recv(phy->device, buf, length);
282 if (received_length < 0)
283 return received_length;
284
285 hdr = (struct mei_nfc_hdr *) buf;
286
287 MEI_DUMP_NFC_HDR("receive", hdr);
288 if (hdr->cmd == MEI_NFC_CMD_HCI_SEND) {
289 phy->recv_req_id = hdr->req_id;
290 wake_up(&phy->send_wq);
291
292 return 0;
293 }
294
295 return received_length;
296}
297
298
299static void nfc_mei_event_cb(struct mei_cl_device *device, u32 events,
300 void *context)
114{ 301{
115 struct nfc_mei_phy *phy = context; 302 struct nfc_mei_phy *phy = context;
116 303
@@ -125,7 +312,7 @@ void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context)
125 if (!skb) 312 if (!skb)
126 return; 313 return;
127 314
128 reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ); 315 reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ);
129 if (reply_size < MEI_NFC_HEADER_SIZE) { 316 if (reply_size < MEI_NFC_HEADER_SIZE) {
130 kfree_skb(skb); 317 kfree_skb(skb);
131 return; 318 return;
@@ -139,7 +326,61 @@ void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context)
139 nfc_hci_recv_frame(phy->hdev, skb); 326 nfc_hci_recv_frame(phy->hdev, skb);
140 } 327 }
141} 328}
142EXPORT_SYMBOL_GPL(nfc_mei_event_cb); 329
330static int nfc_mei_phy_enable(void *phy_id)
331{
332 int r;
333 struct nfc_mei_phy *phy = phy_id;
334
335 pr_info("%s\n", __func__);
336
337 if (phy->powered == 1)
338 return 0;
339
340 r = mei_cl_enable_device(phy->device);
341 if (r < 0) {
342 pr_err("Could not enable device %d\n", r);
343 return r;
344 }
345
346 r = mei_nfc_if_version(phy);
347 if (r < 0) {
348 pr_err("Could not enable device %d\n", r);
349 goto err;
350 }
351
352 r = mei_nfc_connect(phy);
353 if (r < 0) {
354 pr_err("Could not connect to device %d\n", r);
355 goto err;
356 }
357
358 r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
359 if (r) {
360 pr_err("Event cb registration failed %d\n", r);
361 goto err;
362 }
363
364 phy->powered = 1;
365
366 return 0;
367
368err:
369 phy->powered = 0;
370 mei_cl_disable_device(phy->device);
371 return r;
372}
373
374static void nfc_mei_phy_disable(void *phy_id)
375{
376 struct nfc_mei_phy *phy = phy_id;
377
378 pr_info("%s\n", __func__);
379
380 mei_cl_disable_device(phy->device);
381
382 phy->powered = 0;
383}
143 384
144struct nfc_phy_ops mei_phy_ops = { 385struct nfc_phy_ops mei_phy_ops = {
145 .write = nfc_mei_phy_write, 386 .write = nfc_mei_phy_write,
@@ -157,6 +398,7 @@ struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device)
157 return NULL; 398 return NULL;
158 399
159 phy->device = device; 400 phy->device = device;
401 init_waitqueue_head(&phy->send_wq);
160 mei_cl_set_drvdata(device, phy); 402 mei_cl_set_drvdata(device, phy);
161 403
162 return phy; 404 return phy;
@@ -165,6 +407,7 @@ EXPORT_SYMBOL_GPL(nfc_mei_phy_alloc);
165 407
166void nfc_mei_phy_free(struct nfc_mei_phy *phy) 408void nfc_mei_phy_free(struct nfc_mei_phy *phy)
167{ 409{
410 mei_cl_disable_device(phy->device);
168 kfree(phy); 411 kfree(phy);
169} 412}
170EXPORT_SYMBOL_GPL(nfc_mei_phy_free); 413EXPORT_SYMBOL_GPL(nfc_mei_phy_free);
diff --git a/drivers/nfc/mei_phy.h b/drivers/nfc/mei_phy.h
index d669900f8278..fbfa3e61738f 100644
--- a/drivers/nfc/mei_phy.h
+++ b/drivers/nfc/mei_phy.h
@@ -3,27 +3,49 @@
3 3
4#include <linux/mei_cl_bus.h> 4#include <linux/mei_cl_bus.h>
5#include <net/nfc/hci.h> 5#include <net/nfc/hci.h>
6#include <linux/uuid.h>
6 7
8#define MEI_NFC_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
9 0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
7#define MEI_NFC_HEADER_SIZE 10 10#define MEI_NFC_HEADER_SIZE 10
8#define MEI_NFC_MAX_HCI_PAYLOAD 300 11#define MEI_NFC_MAX_HCI_PAYLOAD 300
9 12
13/**
14 * struct nfc_mei_phy
15 *
16 * @device: mei device
17 * @hdev: nfc hci device
18
19 * @send_wq: send completion wait queue
20 * @fw_ivn: NFC Interface Version Number
21 * @vendor_id: NFC manufacturer ID
22 * @radio_type: NFC radio type
23 * @reserved: reserved for alignment
24 * @req_id: message counter
25 * @recv_req_id: reception message counter
26 * @powered: the device is in powered state
27 * @hard_fault: < 0 if hardware error occurred
28 * and prevents normal operation.
29 */
10struct nfc_mei_phy { 30struct nfc_mei_phy {
11 struct mei_cl_device *device; 31 struct mei_cl_device *device;
12 struct nfc_hci_dev *hdev; 32 struct nfc_hci_dev *hdev;
13 33
14 int powered; 34 wait_queue_head_t send_wq;
35 u8 fw_ivn;
36 u8 vendor_id;
37 u8 radio_type;
38 u8 reserved;
39
40 u16 req_id;
41 u16 recv_req_id;
15 42
16 int hard_fault; /* 43 int powered;
17 * < 0 if hardware error occured 44 int hard_fault;
18 * and prevents normal operation.
19 */
20}; 45};
21 46
22extern struct nfc_phy_ops mei_phy_ops; 47extern struct nfc_phy_ops mei_phy_ops;
23 48
24int nfc_mei_phy_enable(void *phy_id);
25void nfc_mei_phy_disable(void *phy_id);
26void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context);
27struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device); 49struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *device);
28void nfc_mei_phy_free(struct nfc_mei_phy *phy); 50void nfc_mei_phy_free(struct nfc_mei_phy *phy);
29 51
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 2d1395be64ae..f9f5fc97cdd7 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -67,7 +67,7 @@ static int microread_mei_remove(struct mei_cl_device *device)
67} 67}
68 68
69static struct mei_cl_device_id microread_mei_tbl[] = { 69static struct mei_cl_device_id microread_mei_tbl[] = {
70 { MICROREAD_DRIVER_NAME }, 70 { MICROREAD_DRIVER_NAME, MEI_NFC_UUID},
71 71
72 /* required last entry */ 72 /* required last entry */
73 { } 73 { }
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 330cd4031009..101a37e12efa 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -67,7 +67,7 @@ static int pn544_mei_remove(struct mei_cl_device *device)
67} 67}
68 68
69static struct mei_cl_device_id pn544_mei_tbl[] = { 69static struct mei_cl_device_id pn544_mei_tbl[] = {
70 { PN544_DRIVER_NAME }, 70 { PN544_DRIVER_NAME, MEI_NFC_UUID},
71 71
72 /* required last entry */ 72 /* required last entry */
73 { } 73 { }
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 53d15b30636a..78530d1714dc 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2255,7 +2255,7 @@ out5:
2255 release_region(base+0x3, 5); 2255 release_region(base+0x3, 5);
2256 release_region(base, 3); 2256 release_region(base, 3);
2257out4: 2257out4:
2258 parport_put_port(p); 2258 parport_del_port(p);
2259out3: 2259out3:
2260 kfree(priv); 2260 kfree(priv);
2261out2: 2261out2:
@@ -2294,7 +2294,7 @@ void parport_pc_unregister_port(struct parport *p)
2294 priv->dma_handle); 2294 priv->dma_handle);
2295#endif 2295#endif
2296 kfree(p->private_data); 2296 kfree(p->private_data);
2297 parport_put_port(p); 2297 parport_del_port(p);
2298 kfree(ops); /* hope no-one cached it */ 2298 kfree(ops); /* hope no-one cached it */
2299} 2299}
2300EXPORT_SYMBOL(parport_pc_unregister_port); 2300EXPORT_SYMBOL(parport_pc_unregister_port);
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index 3b470801a04f..c776333a68bc 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -21,6 +21,7 @@
21#include <linux/parport.h> 21#include <linux/parport.h>
22#include <linux/ctype.h> 22#include <linux/ctype.h>
23#include <linux/sysctl.h> 23#include <linux/sysctl.h>
24#include <linux/device.h>
24 25
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26 27
@@ -558,8 +559,18 @@ int parport_device_proc_unregister(struct pardevice *device)
558 559
559static int __init parport_default_proc_register(void) 560static int __init parport_default_proc_register(void)
560{ 561{
562 int ret;
563
561 parport_default_sysctl_table.sysctl_header = 564 parport_default_sysctl_table.sysctl_header =
562 register_sysctl_table(parport_default_sysctl_table.dev_dir); 565 register_sysctl_table(parport_default_sysctl_table.dev_dir);
566 if (!parport_default_sysctl_table.sysctl_header)
567 return -ENOMEM;
568 ret = parport_bus_init();
569 if (ret) {
570 unregister_sysctl_table(parport_default_sysctl_table.
571 sysctl_header);
572 return ret;
573 }
563 return 0; 574 return 0;
564} 575}
565 576
@@ -570,6 +581,7 @@ static void __exit parport_default_proc_unregister(void)
570 sysctl_header); 581 sysctl_header);
571 parport_default_sysctl_table.sysctl_header = NULL; 582 parport_default_sysctl_table.sysctl_header = NULL;
572 } 583 }
584 parport_bus_exit();
573} 585}
574 586
575#else /* no sysctl or no procfs*/ 587#else /* no sysctl or no procfs*/
@@ -596,11 +608,12 @@ int parport_device_proc_unregister(struct pardevice *device)
596 608
597static int __init parport_default_proc_register (void) 609static int __init parport_default_proc_register (void)
598{ 610{
599 return 0; 611 return parport_bus_init();
600} 612}
601 613
602static void __exit parport_default_proc_unregister (void) 614static void __exit parport_default_proc_unregister (void)
603{ 615{
616 parport_bus_exit();
604} 617}
605#endif 618#endif
606 619
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 3fa66244ce32..8067f54ce050 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -29,6 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/kmod.h> 31#include <linux/kmod.h>
32#include <linux/device.h>
32 33
33#include <linux/spinlock.h> 34#include <linux/spinlock.h>
34#include <linux/mutex.h> 35#include <linux/mutex.h>
@@ -100,13 +101,91 @@ static struct parport_operations dead_ops = {
100 .owner = NULL, 101 .owner = NULL,
101}; 102};
102 103
104static struct device_type parport_device_type = {
105 .name = "parport",
106};
107
108static int is_parport(struct device *dev)
109{
110 return dev->type == &parport_device_type;
111}
112
113static int parport_probe(struct device *dev)
114{
115 struct parport_driver *drv;
116
117 if (is_parport(dev))
118 return -ENODEV;
119
120 drv = to_parport_driver(dev->driver);
121 if (!drv->probe) {
122 /* if driver has not defined a custom probe */
123 struct pardevice *par_dev = to_pardevice(dev);
124
125 if (strcmp(par_dev->name, drv->name))
126 return -ENODEV;
127 return 0;
128 }
129 /* if driver defined its own probe */
130 return drv->probe(to_pardevice(dev));
131}
132
133static struct bus_type parport_bus_type = {
134 .name = "parport",
135 .probe = parport_probe,
136};
137
138int parport_bus_init(void)
139{
140 return bus_register(&parport_bus_type);
141}
142
143void parport_bus_exit(void)
144{
145 bus_unregister(&parport_bus_type);
146}
147
148/*
149 * iterates through all the drivers registered with the bus and sends the port
150 * details to the match_port callback of the driver, so that the driver can
151 * know about the new port that just regsitered with the bus and decide if it
152 * wants to use this new port.
153 */
154static int driver_check(struct device_driver *dev_drv, void *_port)
155{
156 struct parport *port = _port;
157 struct parport_driver *drv = to_parport_driver(dev_drv);
158
159 if (drv->match_port)
160 drv->match_port(port);
161 return 0;
162}
163
103/* Call attach(port) for each registered driver. */ 164/* Call attach(port) for each registered driver. */
104static void attach_driver_chain(struct parport *port) 165static void attach_driver_chain(struct parport *port)
105{ 166{
106 /* caller has exclusive registration_lock */ 167 /* caller has exclusive registration_lock */
107 struct parport_driver *drv; 168 struct parport_driver *drv;
169
108 list_for_each_entry(drv, &drivers, list) 170 list_for_each_entry(drv, &drivers, list)
109 drv->attach(port); 171 drv->attach(port);
172
173 /*
174 * call the driver_check function of the drivers registered in
175 * new device model
176 */
177
178 bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
179}
180
181static int driver_detach(struct device_driver *_drv, void *_port)
182{
183 struct parport *port = _port;
184 struct parport_driver *drv = to_parport_driver(_drv);
185
186 if (drv->detach)
187 drv->detach(port);
188 return 0;
110} 189}
111 190
112/* Call detach(port) for each registered driver. */ 191/* Call detach(port) for each registered driver. */
@@ -116,6 +195,13 @@ static void detach_driver_chain(struct parport *port)
116 /* caller has exclusive registration_lock */ 195 /* caller has exclusive registration_lock */
117 list_for_each_entry(drv, &drivers, list) 196 list_for_each_entry(drv, &drivers, list)
118 drv->detach (port); 197 drv->detach (port);
198
199 /*
200 * call the detach function of the drivers registered in
201 * new device model
202 */
203
204 bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
119} 205}
120 206
121/* Ask kmod for some lowlevel drivers. */ 207/* Ask kmod for some lowlevel drivers. */
@@ -126,17 +212,39 @@ static void get_lowlevel_driver (void)
126 request_module ("parport_lowlevel"); 212 request_module ("parport_lowlevel");
127} 213}
128 214
215/*
216 * iterates through all the devices connected to the bus and sends the device
217 * details to the match_port callback of the driver, so that the driver can
218 * know what are all the ports that are connected to the bus and choose the
219 * port to which it wants to register its device.
220 */
221static int port_check(struct device *dev, void *dev_drv)
222{
223 struct parport_driver *drv = dev_drv;
224
225 /* only send ports, do not send other devices connected to bus */
226 if (is_parport(dev))
227 drv->match_port(to_parport_dev(dev));
228 return 0;
229}
230
129/** 231/**
130 * parport_register_driver - register a parallel port device driver 232 * parport_register_driver - register a parallel port device driver
131 * @drv: structure describing the driver 233 * @drv: structure describing the driver
234 * @owner: owner module of drv
235 * @mod_name: module name string
132 * 236 *
133 * This can be called by a parallel port device driver in order 237 * This can be called by a parallel port device driver in order
134 * to receive notifications about ports being found in the 238 * to receive notifications about ports being found in the
135 * system, as well as ports no longer available. 239 * system, as well as ports no longer available.
136 * 240 *
241 * If devmodel is true then the new device model is used
242 * for registration.
243 *
137 * The @drv structure is allocated by the caller and must not be 244 * The @drv structure is allocated by the caller and must not be
138 * deallocated until after calling parport_unregister_driver(). 245 * deallocated until after calling parport_unregister_driver().
139 * 246 *
247 * If using the non device model:
140 * The driver's attach() function may block. The port that 248 * The driver's attach() function may block. The port that
141 * attach() is given will be valid for the duration of the 249 * attach() is given will be valid for the duration of the
142 * callback, but if the driver wants to take a copy of the 250 * callback, but if the driver wants to take a copy of the
@@ -148,21 +256,57 @@ static void get_lowlevel_driver (void)
148 * callback, but if the driver wants to take a copy of the 256 * callback, but if the driver wants to take a copy of the
149 * pointer it must call parport_get_port() to do so. 257 * pointer it must call parport_get_port() to do so.
150 * 258 *
151 * Returns 0 on success. Currently it always succeeds. 259 *
260 * Returns 0 on success. The non device model will always succeeds.
261 * but the new device model can fail and will return the error code.
152 **/ 262 **/
153 263
154int parport_register_driver (struct parport_driver *drv) 264int __parport_register_driver(struct parport_driver *drv, struct module *owner,
265 const char *mod_name)
155{ 266{
156 struct parport *port;
157
158 if (list_empty(&portlist)) 267 if (list_empty(&portlist))
159 get_lowlevel_driver (); 268 get_lowlevel_driver ();
160 269
161 mutex_lock(&registration_lock); 270 if (drv->devmodel) {
162 list_for_each_entry(port, &portlist, list) 271 /* using device model */
163 drv->attach(port); 272 int ret;
164 list_add(&drv->list, &drivers); 273
165 mutex_unlock(&registration_lock); 274 /* initialize common driver fields */
275 drv->driver.name = drv->name;
276 drv->driver.bus = &parport_bus_type;
277 drv->driver.owner = owner;
278 drv->driver.mod_name = mod_name;
279 ret = driver_register(&drv->driver);
280 if (ret)
281 return ret;
282
283 mutex_lock(&registration_lock);
284 if (drv->match_port)
285 bus_for_each_dev(&parport_bus_type, NULL, drv,
286 port_check);
287 mutex_unlock(&registration_lock);
288 } else {
289 struct parport *port;
290
291 drv->devmodel = false;
292
293 mutex_lock(&registration_lock);
294 list_for_each_entry(port, &portlist, list)
295 drv->attach(port);
296 list_add(&drv->list, &drivers);
297 mutex_unlock(&registration_lock);
298 }
299
300 return 0;
301}
302EXPORT_SYMBOL(__parport_register_driver);
303
304static int port_detach(struct device *dev, void *_drv)
305{
306 struct parport_driver *drv = _drv;
307
308 if (is_parport(dev) && drv->detach)
309 drv->detach(to_parport_dev(dev));
166 310
167 return 0; 311 return 0;
168} 312}
@@ -189,15 +333,22 @@ void parport_unregister_driver (struct parport_driver *drv)
189 struct parport *port; 333 struct parport *port;
190 334
191 mutex_lock(&registration_lock); 335 mutex_lock(&registration_lock);
192 list_del_init(&drv->list); 336 if (drv->devmodel) {
193 list_for_each_entry(port, &portlist, list) 337 bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
194 drv->detach(port); 338 driver_unregister(&drv->driver);
339 } else {
340 list_del_init(&drv->list);
341 list_for_each_entry(port, &portlist, list)
342 drv->detach(port);
343 }
195 mutex_unlock(&registration_lock); 344 mutex_unlock(&registration_lock);
196} 345}
197 346
198static void free_port (struct parport *port) 347static void free_port(struct device *dev)
199{ 348{
200 int d; 349 int d;
350 struct parport *port = to_parport_dev(dev);
351
201 spin_lock(&full_list_lock); 352 spin_lock(&full_list_lock);
202 list_del(&port->full_list); 353 list_del(&port->full_list);
203 spin_unlock(&full_list_lock); 354 spin_unlock(&full_list_lock);
@@ -223,25 +374,29 @@ static void free_port (struct parport *port)
223 374
224struct parport *parport_get_port (struct parport *port) 375struct parport *parport_get_port (struct parport *port)
225{ 376{
226 atomic_inc (&port->ref_count); 377 struct device *dev = get_device(&port->bus_dev);
227 return port; 378
379 return to_parport_dev(dev);
380}
381
382void parport_del_port(struct parport *port)
383{
384 device_unregister(&port->bus_dev);
228} 385}
386EXPORT_SYMBOL(parport_del_port);
229 387
230/** 388/**
231 * parport_put_port - decrement a port's reference count 389 * parport_put_port - decrement a port's reference count
232 * @port: the port 390 * @port: the port
233 * 391 *
234 * This should be called once for each call to parport_get_port(), 392 * This should be called once for each call to parport_get_port(),
235 * once the port is no longer needed. 393 * once the port is no longer needed. When the reference count reaches
394 * zero (port is no longer used), free_port is called.
236 **/ 395 **/
237 396
238void parport_put_port (struct parport *port) 397void parport_put_port (struct parport *port)
239{ 398{
240 if (atomic_dec_and_test (&port->ref_count)) 399 put_device(&port->bus_dev);
241 /* Can destroy it now. */
242 free_port (port);
243
244 return;
245} 400}
246 401
247/** 402/**
@@ -281,6 +436,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
281 int num; 436 int num;
282 int device; 437 int device;
283 char *name; 438 char *name;
439 int ret;
284 440
285 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL); 441 tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
286 if (!tmp) { 442 if (!tmp) {
@@ -333,6 +489,10 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
333 */ 489 */
334 sprintf(name, "parport%d", tmp->portnum = tmp->number); 490 sprintf(name, "parport%d", tmp->portnum = tmp->number);
335 tmp->name = name; 491 tmp->name = name;
492 tmp->bus_dev.bus = &parport_bus_type;
493 tmp->bus_dev.release = free_port;
494 dev_set_name(&tmp->bus_dev, name);
495 tmp->bus_dev.type = &parport_device_type;
336 496
337 for (device = 0; device < 5; device++) 497 for (device = 0; device < 5; device++)
338 /* assume the worst */ 498 /* assume the worst */
@@ -340,6 +500,12 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma,
340 500
341 tmp->waithead = tmp->waittail = NULL; 501 tmp->waithead = tmp->waittail = NULL;
342 502
503 ret = device_register(&tmp->bus_dev);
504 if (ret) {
505 put_device(&tmp->bus_dev);
506 return NULL;
507 }
508
343 return tmp; 509 return tmp;
344} 510}
345 511
@@ -542,6 +708,20 @@ parport_register_device(struct parport *port, const char *name,
542 } 708 }
543 } 709 }
544 710
711 if (flags & PARPORT_DEV_EXCL) {
712 if (port->physport->devices) {
713 /*
714 * If a device is already registered and this new
715 * device wants exclusive access, then no need to
716 * continue as we can not grant exclusive access to
717 * this device.
718 */
719 pr_err("%s: cannot grant exclusive access for device %s\n",
720 port->name, name);
721 return NULL;
722 }
723 }
724
545 /* We up our own module reference count, and that of the port 725 /* We up our own module reference count, and that of the port
546 on which a device is to be registered, to ensure that 726 on which a device is to be registered, to ensure that
547 neither of us gets unloaded while we sleep in (e.g.) 727 neither of us gets unloaded while we sleep in (e.g.)
@@ -575,6 +755,7 @@ parport_register_device(struct parport *port, const char *name,
575 tmp->irq_func = irq_func; 755 tmp->irq_func = irq_func;
576 tmp->waiting = 0; 756 tmp->waiting = 0;
577 tmp->timeout = 5 * HZ; 757 tmp->timeout = 5 * HZ;
758 tmp->devmodel = false;
578 759
579 /* Chain this onto the list */ 760 /* Chain this onto the list */
580 tmp->prev = NULL; 761 tmp->prev = NULL;
@@ -630,6 +811,150 @@ parport_register_device(struct parport *port, const char *name,
630 return NULL; 811 return NULL;
631} 812}
632 813
814static void free_pardevice(struct device *dev)
815{
816 struct pardevice *par_dev = to_pardevice(dev);
817
818 kfree(par_dev->name);
819 kfree(par_dev);
820}
821
822struct pardevice *
823parport_register_dev_model(struct parport *port, const char *name,
824 const struct pardev_cb *par_dev_cb, int id)
825{
826 struct pardevice *par_dev;
827 int ret;
828 char *devname;
829
830 if (port->physport->flags & PARPORT_FLAG_EXCL) {
831 /* An exclusive device is registered. */
832 pr_err("%s: no more devices allowed\n", port->name);
833 return NULL;
834 }
835
836 if (par_dev_cb->flags & PARPORT_DEV_LURK) {
837 if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
838 pr_info("%s: refused to register lurking device (%s) without callbacks\n",
839 port->name, name);
840 return NULL;
841 }
842 }
843
844 if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
845 if (port->physport->devices) {
846 /*
847 * If a device is already registered and this new
848 * device wants exclusive access, then no need to
849 * continue as we can not grant exclusive access to
850 * this device.
851 */
852 pr_err("%s: cannot grant exclusive access for device %s\n",
853 port->name, name);
854 return NULL;
855 }
856 }
857
858 if (!try_module_get(port->ops->owner))
859 return NULL;
860
861 parport_get_port(port);
862
863 par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
864 if (!par_dev)
865 goto err_put_port;
866
867 par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
868 if (!par_dev->state)
869 goto err_put_par_dev;
870
871 devname = kstrdup(name, GFP_KERNEL);
872 if (!devname)
873 goto err_free_par_dev;
874
875 par_dev->name = devname;
876 par_dev->port = port;
877 par_dev->daisy = -1;
878 par_dev->preempt = par_dev_cb->preempt;
879 par_dev->wakeup = par_dev_cb->wakeup;
880 par_dev->private = par_dev_cb->private;
881 par_dev->flags = par_dev_cb->flags;
882 par_dev->irq_func = par_dev_cb->irq_func;
883 par_dev->waiting = 0;
884 par_dev->timeout = 5 * HZ;
885
886 par_dev->dev.parent = &port->bus_dev;
887 par_dev->dev.bus = &parport_bus_type;
888 ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
889 if (ret)
890 goto err_free_devname;
891 par_dev->dev.release = free_pardevice;
892 par_dev->devmodel = true;
893 ret = device_register(&par_dev->dev);
894 if (ret)
895 goto err_put_dev;
896
897 /* Chain this onto the list */
898 par_dev->prev = NULL;
899 /*
900 * This function must not run from an irq handler so we don' t need
901 * to clear irq on the local CPU. -arca
902 */
903 spin_lock(&port->physport->pardevice_lock);
904
905 if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
906 if (port->physport->devices) {
907 spin_unlock(&port->physport->pardevice_lock);
908 pr_debug("%s: cannot grant exclusive access for device %s\n",
909 port->name, name);
910 goto err_put_dev;
911 }
912 port->flags |= PARPORT_FLAG_EXCL;
913 }
914
915 par_dev->next = port->physport->devices;
916 wmb(); /*
917 * Make sure that tmp->next is written before it's
918 * added to the list; see comments marked 'no locking
919 * required'
920 */
921 if (port->physport->devices)
922 port->physport->devices->prev = par_dev;
923 port->physport->devices = par_dev;
924 spin_unlock(&port->physport->pardevice_lock);
925
926 init_waitqueue_head(&par_dev->wait_q);
927 par_dev->timeslice = parport_default_timeslice;
928 par_dev->waitnext = NULL;
929 par_dev->waitprev = NULL;
930
931 /*
932 * This has to be run as last thing since init_state may need other
933 * pardevice fields. -arca
934 */
935 port->ops->init_state(par_dev, par_dev->state);
936 port->proc_device = par_dev;
937 parport_device_proc_register(par_dev);
938
939 return par_dev;
940
941err_put_dev:
942 put_device(&par_dev->dev);
943err_free_devname:
944 kfree(devname);
945err_free_par_dev:
946 kfree(par_dev->state);
947err_put_par_dev:
948 if (!par_dev->devmodel)
949 kfree(par_dev);
950err_put_port:
951 parport_put_port(port);
952 module_put(port->ops->owner);
953
954 return NULL;
955}
956EXPORT_SYMBOL(parport_register_dev_model);
957
633/** 958/**
634 * parport_unregister_device - deregister a device on a parallel port 959 * parport_unregister_device - deregister a device on a parallel port
635 * @dev: pointer to structure representing device 960 * @dev: pointer to structure representing device
@@ -691,7 +1016,10 @@ void parport_unregister_device(struct pardevice *dev)
691 spin_unlock_irq(&port->waitlist_lock); 1016 spin_unlock_irq(&port->waitlist_lock);
692 1017
693 kfree(dev->state); 1018 kfree(dev->state);
694 kfree(dev); 1019 if (dev->devmodel)
1020 device_unregister(&dev->dev);
1021 else
1022 kfree(dev);
695 1023
696 module_put(port->ops->owner); 1024 module_put(port->ops->owner);
697 parport_put_port (port); 1025 parport_put_port (port);
@@ -1019,7 +1347,6 @@ EXPORT_SYMBOL(parport_release);
1019EXPORT_SYMBOL(parport_register_port); 1347EXPORT_SYMBOL(parport_register_port);
1020EXPORT_SYMBOL(parport_announce_port); 1348EXPORT_SYMBOL(parport_announce_port);
1021EXPORT_SYMBOL(parport_remove_port); 1349EXPORT_SYMBOL(parport_remove_port);
1022EXPORT_SYMBOL(parport_register_driver);
1023EXPORT_SYMBOL(parport_unregister_driver); 1350EXPORT_SYMBOL(parport_unregister_driver);
1024EXPORT_SYMBOL(parport_register_device); 1351EXPORT_SYMBOL(parport_register_device);
1025EXPORT_SYMBOL(parport_unregister_device); 1352EXPORT_SYMBOL(parport_unregister_device);
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 8007bfda720a..c3b615c94b4b 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -621,8 +621,6 @@ static int pccardd(void *__skt)
621 unsigned int events; 621 unsigned int events;
622 unsigned int sysfs_events; 622 unsigned int sysfs_events;
623 623
624 set_current_state(TASK_INTERRUPTIBLE);
625
626 spin_lock_irqsave(&skt->thread_lock, flags); 624 spin_lock_irqsave(&skt->thread_lock, flags);
627 events = skt->thread_events; 625 events = skt->thread_events;
628 skt->thread_events = 0; 626 skt->thread_events = 0;
@@ -670,11 +668,15 @@ static int pccardd(void *__skt)
670 if (kthread_should_stop()) 668 if (kthread_should_stop())
671 break; 669 break;
672 670
671 set_current_state(TASK_INTERRUPTIBLE);
672
673 schedule(); 673 schedule();
674
675 /* make sure we are running */
676 __set_current_state(TASK_RUNNING);
677
674 try_to_freeze(); 678 try_to_freeze();
675 } 679 }
676 /* make sure we are running before we exit */
677 set_current_state(TASK_RUNNING);
678 680
679 /* shut down socket, if a device is still present */ 681 /* shut down socket, if a device is still present */
680 if (skt->state & SOCKET_PRESENT) { 682 if (skt->state & SOCKET_PRESENT) {
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index d9a09d9637d9..a6558409ba45 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -282,7 +282,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
282 return -EIO; 282 return -EIO;
283 283
284 /* Null reads or writes succeeds */ 284 /* Null reads or writes succeeds */
285 if (unlikely(bufflen) == 0) 285 if (unlikely(bufflen == 0))
286 return 0; 286 return 0;
287 287
288 /* Check the buffer range for access */ 288 /* Check the buffer range for access */
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index c8d99563d245..982580af1d16 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -14,6 +14,7 @@ config SPMI_MSM_PMIC_ARB
14 tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)" 14 tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
15 depends on IRQ_DOMAIN 15 depends on IRQ_DOMAIN
16 depends on ARCH_QCOM || COMPILE_TEST 16 depends on ARCH_QCOM || COMPILE_TEST
17 depends on HAS_IOMEM
17 default ARCH_QCOM 18 default ARCH_QCOM
18 help 19 help
19 If you say yes to this option, support will be included for the 20 If you say yes to this option, support will be included for the
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index ea54fb4ec837..592a12241b37 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -2190,6 +2190,8 @@ static struct notifier_block panel_notifier = {
2190 2190
2191static void panel_attach(struct parport *port) 2191static void panel_attach(struct parport *port)
2192{ 2192{
2193 struct pardev_cb panel_cb;
2194
2193 if (port->number != parport) 2195 if (port->number != parport)
2194 return; 2196 return;
2195 2197
@@ -2199,10 +2201,11 @@ static void panel_attach(struct parport *port)
2199 return; 2201 return;
2200 } 2202 }
2201 2203
2202 pprt = parport_register_device(port, "panel", NULL, NULL, /* pf, kf */ 2204 memset(&panel_cb, 0, sizeof(panel_cb));
2203 NULL, 2205 panel_cb.private = &pprt;
2204 /*PARPORT_DEV_EXCL */ 2206 /* panel_cb.flags = 0 should be PARPORT_DEV_EXCL? */
2205 0, (void *)&pprt); 2207
2208 pprt = parport_register_dev_model(port, "panel", &panel_cb, 0);
2206 if (pprt == NULL) { 2209 if (pprt == NULL) {
2207 pr_err("%s: port->number=%d parport=%d, parport_register_device() failed\n", 2210 pr_err("%s: port->number=%d parport=%d, parport_register_device() failed\n",
2208 __func__, port->number, parport); 2211 __func__, port->number, parport);
@@ -2270,8 +2273,9 @@ static void panel_detach(struct parport *port)
2270 2273
2271static struct parport_driver panel_driver = { 2274static struct parport_driver panel_driver = {
2272 .name = "panel", 2275 .name = "panel",
2273 .attach = panel_attach, 2276 .match_port = panel_attach,
2274 .detach = panel_detach, 2277 .detach = panel_detach,
2278 .devmodel = true,
2275}; 2279};
2276 2280
2277/* init function */ 2281/* init function */
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 8a15c323c030..48fb1d983f6c 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -126,8 +126,8 @@ config UIO_FSL_ELBC_GPCM_NETX5152
126 126
127config UIO_PRUSS 127config UIO_PRUSS
128 tristate "Texas Instruments PRUSS driver" 128 tristate "Texas Instruments PRUSS driver"
129 depends on ARCH_DAVINCI_DA850
130 select GENERIC_ALLOCATOR 129 select GENERIC_ALLOCATOR
130 depends on HAS_IOMEM
131 help 131 help
132 PRUSS driver for OMAPL138/DA850/AM18XX devices 132 PRUSS driver for OMAPL138/DA850/AM18XX devices
133 PRUSS driver requires user space components, examples and user space 133 PRUSS driver requires user space components, examples and user space
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 65bf0676d54a..3257d4220d01 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -879,7 +879,8 @@ void uio_unregister_device(struct uio_info *info)
879 879
880 uio_dev_del_attributes(idev); 880 uio_dev_del_attributes(idev);
881 881
882 free_irq(idev->info->irq, idev); 882 if (info->irq && info->irq != UIO_IRQ_CUSTOM)
883 free_irq(info->irq, idev);
883 884
884 device_destroy(&uio_class, MKDEV(uio_major, idev->minor)); 885 device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
885 886
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 818735bb8c3a..ca9e2fafb0b6 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -24,6 +24,7 @@
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27#include <linux/sizes.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/genalloc.h> 29#include <linux/genalloc.h>
29 30
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index 2b28443d07b9..b40d6a87d694 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -60,10 +60,11 @@ struct tahvo_usb {
60 struct extcon_dev extcon; 60 struct extcon_dev extcon;
61}; 61};
62 62
63static const char *tahvo_cable[] = { 63static const unsigned int tahvo_cable[] = {
64 "USB-HOST", 64 EXTCON_USB,
65 "USB", 65 EXTCON_USB_HOST,
66 NULL, 66
67 EXTCON_NONE,
67}; 68};
68 69
69static ssize_t vbus_state_show(struct device *device, 70static ssize_t vbus_state_show(struct device *device,
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index e76a9b39abb2..a674409edfb3 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -93,6 +93,7 @@ static const struct i2c_device_id ds2482_id[] = {
93 { "ds2482", 0 }, 93 { "ds2482", 0 },
94 { } 94 { }
95}; 95};
96MODULE_DEVICE_TABLE(i2c, ds2482_id);
96 97
97static struct i2c_driver ds2482_driver = { 98static struct i2c_driver ds2482_driver = {
98 .driver = { 99 .driver = {
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 1f11a20a8ab9..2f029e8f4f95 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -59,16 +59,32 @@ MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
59static int w1_strong_pullup = 1; 59static int w1_strong_pullup = 1;
60module_param_named(strong_pullup, w1_strong_pullup, int, 0); 60module_param_named(strong_pullup, w1_strong_pullup, int, 0);
61 61
62struct w1_therm_family_data {
63 uint8_t rom[9];
64 atomic_t refcnt;
65};
66
67/* return the address of the refcnt in the family data */
68#define THERM_REFCNT(family_data) \
69 (&((struct w1_therm_family_data*)family_data)->refcnt)
70
62static int w1_therm_add_slave(struct w1_slave *sl) 71static int w1_therm_add_slave(struct w1_slave *sl)
63{ 72{
64 sl->family_data = kzalloc(9, GFP_KERNEL); 73 sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
74 GFP_KERNEL);
65 if (!sl->family_data) 75 if (!sl->family_data)
66 return -ENOMEM; 76 return -ENOMEM;
77 atomic_set(THERM_REFCNT(sl->family_data), 1);
67 return 0; 78 return 0;
68} 79}
69 80
70static void w1_therm_remove_slave(struct w1_slave *sl) 81static void w1_therm_remove_slave(struct w1_slave *sl)
71{ 82{
83 int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
84 while(refcnt) {
85 msleep(1000);
86 refcnt = atomic_read(THERM_REFCNT(sl->family_data));
87 }
72 kfree(sl->family_data); 88 kfree(sl->family_data);
73 sl->family_data = NULL; 89 sl->family_data = NULL;
74} 90}
@@ -76,13 +92,24 @@ static void w1_therm_remove_slave(struct w1_slave *sl)
76static ssize_t w1_slave_show(struct device *device, 92static ssize_t w1_slave_show(struct device *device,
77 struct device_attribute *attr, char *buf); 93 struct device_attribute *attr, char *buf);
78 94
95static ssize_t w1_seq_show(struct device *device,
96 struct device_attribute *attr, char *buf);
97
79static DEVICE_ATTR_RO(w1_slave); 98static DEVICE_ATTR_RO(w1_slave);
99static DEVICE_ATTR_RO(w1_seq);
80 100
81static struct attribute *w1_therm_attrs[] = { 101static struct attribute *w1_therm_attrs[] = {
82 &dev_attr_w1_slave.attr, 102 &dev_attr_w1_slave.attr,
83 NULL, 103 NULL,
84}; 104};
105
106static struct attribute *w1_ds28ea00_attrs[] = {
107 &dev_attr_w1_slave.attr,
108 &dev_attr_w1_seq.attr,
109 NULL,
110};
85ATTRIBUTE_GROUPS(w1_therm); 111ATTRIBUTE_GROUPS(w1_therm);
112ATTRIBUTE_GROUPS(w1_ds28ea00);
86 113
87static struct w1_family_ops w1_therm_fops = { 114static struct w1_family_ops w1_therm_fops = {
88 .add_slave = w1_therm_add_slave, 115 .add_slave = w1_therm_add_slave,
@@ -90,6 +117,12 @@ static struct w1_family_ops w1_therm_fops = {
90 .groups = w1_therm_groups, 117 .groups = w1_therm_groups,
91}; 118};
92 119
120static struct w1_family_ops w1_ds28ea00_fops = {
121 .add_slave = w1_therm_add_slave,
122 .remove_slave = w1_therm_remove_slave,
123 .groups = w1_ds28ea00_groups,
124};
125
93static struct w1_family w1_therm_family_DS18S20 = { 126static struct w1_family w1_therm_family_DS18S20 = {
94 .fid = W1_THERM_DS18S20, 127 .fid = W1_THERM_DS18S20,
95 .fops = &w1_therm_fops, 128 .fops = &w1_therm_fops,
@@ -107,7 +140,7 @@ static struct w1_family w1_therm_family_DS1822 = {
107 140
108static struct w1_family w1_therm_family_DS28EA00 = { 141static struct w1_family w1_therm_family_DS28EA00 = {
109 .fid = W1_THERM_DS28EA00, 142 .fid = W1_THERM_DS28EA00,
110 .fops = &w1_therm_fops, 143 .fops = &w1_ds28ea00_fops,
111}; 144};
112 145
113static struct w1_family w1_therm_family_DS1825 = { 146static struct w1_family w1_therm_family_DS1825 = {
@@ -194,13 +227,22 @@ static ssize_t w1_slave_show(struct device *device,
194 struct w1_slave *sl = dev_to_w1_slave(device); 227 struct w1_slave *sl = dev_to_w1_slave(device);
195 struct w1_master *dev = sl->master; 228 struct w1_master *dev = sl->master;
196 u8 rom[9], crc, verdict, external_power; 229 u8 rom[9], crc, verdict, external_power;
197 int i, max_trying = 10; 230 int i, ret, max_trying = 10;
198 ssize_t c = PAGE_SIZE; 231 ssize_t c = PAGE_SIZE;
232 u8 *family_data = sl->family_data;
199 233
200 i = mutex_lock_interruptible(&dev->bus_mutex); 234 ret = mutex_lock_interruptible(&dev->bus_mutex);
201 if (i != 0) 235 if (ret != 0)
202 return i; 236 goto post_unlock;
237
238 if(!sl->family_data)
239 {
240 ret = -ENODEV;
241 goto pre_unlock;
242 }
203 243
244 /* prevent the slave from going away in sleep */
245 atomic_inc(THERM_REFCNT(family_data));
204 memset(rom, 0, sizeof(rom)); 246 memset(rom, 0, sizeof(rom));
205 247
206 while (max_trying--) { 248 while (max_trying--) {
@@ -230,17 +272,19 @@ static ssize_t w1_slave_show(struct device *device,
230 mutex_unlock(&dev->bus_mutex); 272 mutex_unlock(&dev->bus_mutex);
231 273
232 sleep_rem = msleep_interruptible(tm); 274 sleep_rem = msleep_interruptible(tm);
233 if (sleep_rem != 0) 275 if (sleep_rem != 0) {
234 return -EINTR; 276 ret = -EINTR;
277 goto post_unlock;
278 }
235 279
236 i = mutex_lock_interruptible(&dev->bus_mutex); 280 ret = mutex_lock_interruptible(&dev->bus_mutex);
237 if (i != 0) 281 if (ret != 0)
238 return i; 282 goto post_unlock;
239 } else if (!w1_strong_pullup) { 283 } else if (!w1_strong_pullup) {
240 sleep_rem = msleep_interruptible(tm); 284 sleep_rem = msleep_interruptible(tm);
241 if (sleep_rem != 0) { 285 if (sleep_rem != 0) {
242 mutex_unlock(&dev->bus_mutex); 286 ret = -EINTR;
243 return -EINTR; 287 goto pre_unlock;
244 } 288 }
245 } 289 }
246 290
@@ -269,19 +313,107 @@ static ssize_t w1_slave_show(struct device *device,
269 c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n", 313 c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
270 crc, (verdict) ? "YES" : "NO"); 314 crc, (verdict) ? "YES" : "NO");
271 if (verdict) 315 if (verdict)
272 memcpy(sl->family_data, rom, sizeof(rom)); 316 memcpy(family_data, rom, sizeof(rom));
273 else 317 else
274 dev_warn(device, "Read failed CRC check\n"); 318 dev_warn(device, "Read failed CRC check\n");
275 319
276 for (i = 0; i < 9; ++i) 320 for (i = 0; i < 9; ++i)
277 c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", 321 c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
278 ((u8 *)sl->family_data)[i]); 322 ((u8 *)family_data)[i]);
279 323
280 c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n", 324 c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
281 w1_convert_temp(rom, sl->family->fid)); 325 w1_convert_temp(rom, sl->family->fid));
326 ret = PAGE_SIZE - c;
327
328pre_unlock:
282 mutex_unlock(&dev->bus_mutex); 329 mutex_unlock(&dev->bus_mutex);
283 330
331post_unlock:
332 atomic_dec(THERM_REFCNT(family_data));
333 return ret;
334}
335
336#define W1_42_CHAIN 0x99
337#define W1_42_CHAIN_OFF 0x3C
338#define W1_42_CHAIN_OFF_INV 0xC3
339#define W1_42_CHAIN_ON 0x5A
340#define W1_42_CHAIN_ON_INV 0xA5
341#define W1_42_CHAIN_DONE 0x96
342#define W1_42_CHAIN_DONE_INV 0x69
343#define W1_42_COND_READ 0x0F
344#define W1_42_SUCCESS_CONFIRM_BYTE 0xAA
345#define W1_42_FINISHED_BYTE 0xFF
346static ssize_t w1_seq_show(struct device *device,
347 struct device_attribute *attr, char *buf)
348{
349 struct w1_slave *sl = dev_to_w1_slave(device);
350 ssize_t c = PAGE_SIZE;
351 int rv;
352 int i;
353 u8 ack;
354 u64 rn;
355 struct w1_reg_num *reg_num;
356 int seq = 0;
357
358 mutex_lock(&sl->master->bus_mutex);
359 /* Place all devices in CHAIN state */
360 if (w1_reset_bus(sl->master))
361 goto error;
362 w1_write_8(sl->master, W1_SKIP_ROM);
363 w1_write_8(sl->master, W1_42_CHAIN);
364 w1_write_8(sl->master, W1_42_CHAIN_ON);
365 w1_write_8(sl->master, W1_42_CHAIN_ON_INV);
366 msleep(sl->master->pullup_duration);
367
368 /* check for acknowledgment */
369 ack = w1_read_8(sl->master);
370 if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
371 goto error;
372
373 /* In case the bus fails to send 0xFF, limit*/
374 for (i = 0; i <= 64; i++) {
375 if (w1_reset_bus(sl->master))
376 goto error;
377
378 w1_write_8(sl->master, W1_42_COND_READ);
379 rv = w1_read_block(sl->master, (u8 *)&rn, 8);
380 reg_num = (struct w1_reg_num *) &rn;
381 if (reg_num->family == W1_42_FINISHED_BYTE)
382 break;
383 if (sl->reg_num.id == reg_num->id)
384 seq = i;
385
386 w1_write_8(sl->master, W1_42_CHAIN);
387 w1_write_8(sl->master, W1_42_CHAIN_DONE);
388 w1_write_8(sl->master, W1_42_CHAIN_DONE_INV);
389 w1_read_block(sl->master, &ack, sizeof(ack));
390
391 /* check for acknowledgment */
392 ack = w1_read_8(sl->master);
393 if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
394 goto error;
395
396 }
397
398 /* Exit from CHAIN state */
399 if (w1_reset_bus(sl->master))
400 goto error;
401 w1_write_8(sl->master, W1_SKIP_ROM);
402 w1_write_8(sl->master, W1_42_CHAIN);
403 w1_write_8(sl->master, W1_42_CHAIN_OFF);
404 w1_write_8(sl->master, W1_42_CHAIN_OFF_INV);
405
406 /* check for acknowledgment */
407 ack = w1_read_8(sl->master);
408 if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
409 goto error;
410 mutex_unlock(&sl->master->bus_mutex);
411
412 c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", seq);
284 return PAGE_SIZE - c; 413 return PAGE_SIZE - c;
414error:
415 mutex_unlock(&sl->master->bus_mutex);
416 return -EIO;
285} 417}
286 418
287static int __init w1_therm_init(void) 419static int __init w1_therm_init(void)
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 181f41cb960b..c9a7ff67d395 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -46,11 +46,15 @@ MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
46MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); 46MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
47 47
48static int w1_timeout = 10; 48static int w1_timeout = 10;
49static int w1_timeout_us = 0;
49int w1_max_slave_count = 64; 50int w1_max_slave_count = 64;
50int w1_max_slave_ttl = 10; 51int w1_max_slave_ttl = 10;
51 52
52module_param_named(timeout, w1_timeout, int, 0); 53module_param_named(timeout, w1_timeout, int, 0);
53MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches"); 54MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
55module_param_named(timeout_us, w1_timeout_us, int, 0);
56MODULE_PARM_DESC(timeout, "time in microseconds between automatic slave"
57 " searches");
54/* A search stops when w1_max_slave_count devices have been found in that 58/* A search stops when w1_max_slave_count devices have been found in that
55 * search. The next search will start over and detect the same set of devices 59 * search. The next search will start over and detect the same set of devices
56 * on a static 1-wire bus. Memory is not allocated based on this number, just 60 * on a static 1-wire bus. Memory is not allocated based on this number, just
@@ -317,6 +321,14 @@ static ssize_t w1_master_attribute_show_timeout(struct device *dev, struct devic
317 return count; 321 return count;
318} 322}
319 323
324static ssize_t w1_master_attribute_show_timeout_us(struct device *dev,
325 struct device_attribute *attr, char *buf)
326{
327 ssize_t count;
328 count = sprintf(buf, "%d\n", w1_timeout_us);
329 return count;
330}
331
320static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev, 332static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev,
321 struct device_attribute *attr, const char *buf, size_t count) 333 struct device_attribute *attr, const char *buf, size_t count)
322{ 334{
@@ -543,6 +555,7 @@ static W1_MASTER_ATTR_RO(slave_count, S_IRUGO);
543static W1_MASTER_ATTR_RW(max_slave_count, S_IRUGO | S_IWUSR | S_IWGRP); 555static W1_MASTER_ATTR_RW(max_slave_count, S_IRUGO | S_IWUSR | S_IWGRP);
544static W1_MASTER_ATTR_RO(attempts, S_IRUGO); 556static W1_MASTER_ATTR_RO(attempts, S_IRUGO);
545static W1_MASTER_ATTR_RO(timeout, S_IRUGO); 557static W1_MASTER_ATTR_RO(timeout, S_IRUGO);
558static W1_MASTER_ATTR_RO(timeout_us, S_IRUGO);
546static W1_MASTER_ATTR_RO(pointer, S_IRUGO); 559static W1_MASTER_ATTR_RO(pointer, S_IRUGO);
547static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP); 560static W1_MASTER_ATTR_RW(search, S_IRUGO | S_IWUSR | S_IWGRP);
548static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP); 561static W1_MASTER_ATTR_RW(pullup, S_IRUGO | S_IWUSR | S_IWGRP);
@@ -556,6 +569,7 @@ static struct attribute *w1_master_default_attrs[] = {
556 &w1_master_attribute_max_slave_count.attr, 569 &w1_master_attribute_max_slave_count.attr,
557 &w1_master_attribute_attempts.attr, 570 &w1_master_attribute_attempts.attr,
558 &w1_master_attribute_timeout.attr, 571 &w1_master_attribute_timeout.attr,
572 &w1_master_attribute_timeout_us.attr,
559 &w1_master_attribute_pointer.attr, 573 &w1_master_attribute_pointer.attr,
560 &w1_master_attribute_search.attr, 574 &w1_master_attribute_search.attr,
561 &w1_master_attribute_pullup.attr, 575 &w1_master_attribute_pullup.attr,
@@ -1108,7 +1122,8 @@ int w1_process(void *data)
1108 /* As long as w1_timeout is only set by a module parameter the sleep 1122 /* As long as w1_timeout is only set by a module parameter the sleep
1109 * time can be calculated in jiffies once. 1123 * time can be calculated in jiffies once.
1110 */ 1124 */
1111 const unsigned long jtime = msecs_to_jiffies(w1_timeout * 1000); 1125 const unsigned long jtime =
1126 usecs_to_jiffies(w1_timeout * 1000000 + w1_timeout_us);
1112 /* remainder if it woke up early */ 1127 /* remainder if it woke up early */
1113 unsigned long jremain = 0; 1128 unsigned long jremain = 0;
1114 1129
diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h
index 555609910acb..7b2000cead43 100644
--- a/include/dt-bindings/mfd/arizona.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -104,4 +104,8 @@
104#define ARIZONA_MICD_TIME_256MS 11 104#define ARIZONA_MICD_TIME_256MS 11
105#define ARIZONA_MICD_TIME_512MS 12 105#define ARIZONA_MICD_TIME_512MS 12
106 106
107#define ARIZONA_ACCDET_MODE_MIC 0
108#define ARIZONA_ACCDET_MODE_HPL 1
109#define ARIZONA_ACCDET_MODE_HPR 2
110
107#endif 111#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 36f49c405dfb..b16d929fa75f 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,6 +1,9 @@
1/* 1/*
2 * External connector (extcon) class driver 2 * External connector (extcon) class driver
3 * 3 *
4 * Copyright (C) 2015 Samsung Electronics
5 * Author: Chanwoo Choi <cw00.choi@samsung.com>
6 *
4 * Copyright (C) 2012 Samsung Electronics 7 * Copyright (C) 2012 Samsung Electronics
5 * Author: Donggeun Kim <dg77.kim@samsung.com> 8 * Author: Donggeun Kim <dg77.kim@samsung.com>
6 * Author: MyungJoo Ham <myungjoo.ham@samsung.com> 9 * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
@@ -27,50 +30,35 @@
27#include <linux/notifier.h> 30#include <linux/notifier.h>
28#include <linux/sysfs.h> 31#include <linux/sysfs.h>
29 32
30#define SUPPORTED_CABLE_MAX 32
31#define CABLE_NAME_MAX 30
32
33/* 33/*
34 * The standard cable name is to help support general notifier 34 * Define the unique id of supported external connectors
35 * and notifiee device drivers to share the common names.
36 * Please use standard cable names unless your notifier device has
37 * a very unique and abnormal cable or
38 * the cable type is supposed to be used with only one unique
39 * pair of notifier/notifiee devices.
40 *
41 * Please add any other "standard" cables used with extcon dev.
42 *
43 * You may add a dot and number to specify version or specification
44 * of the specific cable if it is required. (e.g., "Fast-charger.18"
45 * and "Fast-charger.10" for 1.8A and 1.0A chargers)
46 * However, the notifiee and notifier should be able to handle such
47 * string and if the notifiee can negotiate the protocol or identify,
48 * you don't need such convention. This convention is helpful when
49 * notifier can distinguish but notifiee cannot.
50 */ 35 */
51enum extcon_cable_name { 36#define EXTCON_NONE 0
52 EXTCON_USB = 0, 37
53 EXTCON_USB_HOST, 38#define EXTCON_USB 1 /* USB connector */
54 EXTCON_TA, /* Travel Adaptor */ 39#define EXTCON_USB_HOST 2
55 EXTCON_FAST_CHARGER, 40
56 EXTCON_SLOW_CHARGER, 41#define EXTCON_TA 3 /* Charger connector */
57 EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */ 42#define EXTCON_FAST_CHARGER 4
58 EXTCON_HDMI, 43#define EXTCON_SLOW_CHARGER 5
59 EXTCON_MHL, 44#define EXTCON_CHARGE_DOWNSTREAM 6
60 EXTCON_DVI, 45
61 EXTCON_VGA, 46#define EXTCON_LINE_IN 7 /* Audio/Video connector */
62 EXTCON_DOCK, 47#define EXTCON_LINE_OUT 8
63 EXTCON_LINE_IN, 48#define EXTCON_MICROPHONE 9
64 EXTCON_LINE_OUT, 49#define EXTCON_HEADPHONE 10
65 EXTCON_MIC_IN, 50#define EXTCON_HDMI 11
66 EXTCON_HEADPHONE_OUT, 51#define EXTCON_MHL 12
67 EXTCON_SPDIF_IN, 52#define EXTCON_DVI 13
68 EXTCON_SPDIF_OUT, 53#define EXTCON_VGA 14
69 EXTCON_VIDEO_IN, 54#define EXTCON_SPDIF_IN 15
70 EXTCON_VIDEO_OUT, 55#define EXTCON_SPDIF_OUT 16
71 EXTCON_MECHANICAL, 56#define EXTCON_VIDEO_IN 17
72}; 57#define EXTCON_VIDEO_OUT 18
73extern const char extcon_cable_name[][CABLE_NAME_MAX + 1]; 58
59#define EXTCON_DOCK 19 /* Misc connector */
60#define EXTCON_JIG 20
61#define EXTCON_MECHANICAL 21
74 62
75struct extcon_cable; 63struct extcon_cable;
76 64
@@ -78,7 +66,7 @@ struct extcon_cable;
78 * struct extcon_dev - An extcon device represents one external connector. 66 * struct extcon_dev - An extcon device represents one external connector.
79 * @name: The name of this extcon device. Parent device name is 67 * @name: The name of this extcon device. Parent device name is
80 * used if NULL. 68 * used if NULL.
81 * @supported_cable: Array of supported cable names ending with NULL. 69 * @supported_cable: Array of supported cable names ending with EXTCON_NONE.
82 * If supported_cable is NULL, cable name related APIs 70 * If supported_cable is NULL, cable name related APIs
83 * are disabled. 71 * are disabled.
84 * @mutually_exclusive: Array of mutually exclusive set of cables that cannot 72 * @mutually_exclusive: Array of mutually exclusive set of cables that cannot
@@ -89,16 +77,14 @@ struct extcon_cable;
89 * be attached simulataneously. {0x7, 0} is equivalent to 77 * be attached simulataneously. {0x7, 0} is equivalent to
90 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there 78 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
91 * can be no simultaneous connections. 79 * can be no simultaneous connections.
92 * @print_name: An optional callback to override the method to print the
93 * name of the extcon device.
94 * @print_state: An optional callback to override the method to print the 80 * @print_state: An optional callback to override the method to print the
95 * status of the extcon device. 81 * status of the extcon device.
96 * @dev: Device of this extcon. 82 * @dev: Device of this extcon.
97 * @state: Attach/detach state of this extcon. Do not provide at 83 * @state: Attach/detach state of this extcon. Do not provide at
98 * register-time. 84 * register-time.
99 * @nh: Notifier for the state change events from this extcon 85 * @nh: Notifier for the state change events from this extcon
100 * @entry: To support list of extcon devices so that users can search 86 * @entry: To support list of extcon devices so that users can
101 * for extcon devices based on the extcon name. 87 * search for extcon devices based on the extcon name.
102 * @lock: 88 * @lock:
103 * @max_supported: Internal value to store the number of cables. 89 * @max_supported: Internal value to store the number of cables.
104 * @extcon_dev_type: Device_type struct to provide attribute_groups 90 * @extcon_dev_type: Device_type struct to provide attribute_groups
@@ -113,16 +99,15 @@ struct extcon_cable;
113struct extcon_dev { 99struct extcon_dev {
114 /* Optional user initializing data */ 100 /* Optional user initializing data */
115 const char *name; 101 const char *name;
116 const char **supported_cable; 102 const unsigned int *supported_cable;
117 const u32 *mutually_exclusive; 103 const u32 *mutually_exclusive;
118 104
119 /* Optional callbacks to override class functions */ 105 /* Optional callbacks to override class functions */
120 ssize_t (*print_name)(struct extcon_dev *edev, char *buf);
121 ssize_t (*print_state)(struct extcon_dev *edev, char *buf); 106 ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
122 107
123 /* Internal data. Please do not set. */ 108 /* Internal data. Please do not set. */
124 struct device dev; 109 struct device dev;
125 struct raw_notifier_head nh; 110 struct raw_notifier_head *nh;
126 struct list_head entry; 111 struct list_head entry;
127 int max_supported; 112 int max_supported;
128 spinlock_t lock; /* could be called by irq handler */ 113 spinlock_t lock; /* could be called by irq handler */
@@ -161,8 +146,6 @@ struct extcon_cable {
161/** 146/**
162 * struct extcon_specific_cable_nb - An internal data for 147 * struct extcon_specific_cable_nb - An internal data for
163 * extcon_register_interest(). 148 * extcon_register_interest().
164 * @internal_nb: A notifier block bridging extcon notifier
165 * and cable notifier.
166 * @user_nb: user provided notifier block for events from 149 * @user_nb: user provided notifier block for events from
167 * a specific cable. 150 * a specific cable.
168 * @cable_index: the target cable. 151 * @cable_index: the target cable.
@@ -170,7 +153,6 @@ struct extcon_cable {
170 * @previous_value: the saved previous event value. 153 * @previous_value: the saved previous event value.
171 */ 154 */
172struct extcon_specific_cable_nb { 155struct extcon_specific_cable_nb {
173 struct notifier_block internal_nb;
174 struct notifier_block *user_nb; 156 struct notifier_block *user_nb;
175 int cable_index; 157 int cable_index;
176 struct extcon_dev *edev; 158 struct extcon_dev *edev;
@@ -194,10 +176,10 @@ extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
194/* 176/*
195 * Following APIs control the memory of extcon device. 177 * Following APIs control the memory of extcon device.
196 */ 178 */
197extern struct extcon_dev *extcon_dev_allocate(const char **cables); 179extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
198extern void extcon_dev_free(struct extcon_dev *edev); 180extern void extcon_dev_free(struct extcon_dev *edev);
199extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, 181extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
200 const char **cables); 182 const unsigned int *cable);
201extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); 183extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
202 184
203/* 185/*
@@ -216,13 +198,10 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
216 198
217/* 199/*
218 * get/set_cable_state access each bit of the 32b encoded state value. 200 * get/set_cable_state access each bit of the 32b encoded state value.
219 * They are used to access the status of each cable based on the cable_name 201 * They are used to access the status of each cable based on the cable_name.
220 * or cable_index, which is retrieved by extcon_find_cable_index
221 */ 202 */
222extern int extcon_find_cable_index(struct extcon_dev *sdev, 203extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
223 const char *cable_name); 204extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
224extern int extcon_get_cable_state_(struct extcon_dev *edev, int cable_index);
225extern int extcon_set_cable_state_(struct extcon_dev *edev, int cable_index,
226 bool cable_state); 205 bool cable_state);
227 206
228extern int extcon_get_cable_state(struct extcon_dev *edev, 207extern int extcon_get_cable_state(struct extcon_dev *edev,
@@ -249,16 +228,21 @@ extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
249 * we do not recommend to use this for normal 'notifiee' device drivers who 228 * we do not recommend to use this for normal 'notifiee' device drivers who
250 * want to be notified by a specific external port of the notifier. 229 * want to be notified by a specific external port of the notifier.
251 */ 230 */
252extern int extcon_register_notifier(struct extcon_dev *edev, 231extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
232 struct notifier_block *nb);
233extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
253 struct notifier_block *nb); 234 struct notifier_block *nb);
254extern int extcon_unregister_notifier(struct extcon_dev *edev,
255 struct notifier_block *nb);
256 235
257/* 236/*
258 * Following API get the extcon device from devicetree. 237 * Following API get the extcon device from devicetree.
259 * This function use phandle of devicetree to get extcon device directly. 238 * This function use phandle of devicetree to get extcon device directly.
260 */ 239 */
261extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index); 240extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
241 int index);
242
243/* Following API to get information of extcon device */
244extern const char *extcon_get_edev_name(struct extcon_dev *edev);
245
262#else /* CONFIG_EXTCON */ 246#else /* CONFIG_EXTCON */
263static inline int extcon_dev_register(struct extcon_dev *edev) 247static inline int extcon_dev_register(struct extcon_dev *edev)
264{ 248{
@@ -276,7 +260,7 @@ static inline int devm_extcon_dev_register(struct device *dev,
276static inline void devm_extcon_dev_unregister(struct device *dev, 260static inline void devm_extcon_dev_unregister(struct device *dev,
277 struct extcon_dev *edev) { } 261 struct extcon_dev *edev) { }
278 262
279static inline struct extcon_dev *extcon_dev_allocate(const char **cables) 263static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
280{ 264{
281 return ERR_PTR(-ENOSYS); 265 return ERR_PTR(-ENOSYS);
282} 266}
@@ -284,7 +268,7 @@ static inline struct extcon_dev *extcon_dev_allocate(const char **cables)
284static inline void extcon_dev_free(struct extcon_dev *edev) { } 268static inline void extcon_dev_free(struct extcon_dev *edev) { }
285 269
286static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, 270static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
287 const char **cables) 271 const unsigned int *cable)
288{ 272{
289 return ERR_PTR(-ENOSYS); 273 return ERR_PTR(-ENOSYS);
290} 274}
@@ -307,20 +291,14 @@ static inline int extcon_update_state(struct extcon_dev *edev, u32 mask,
307 return 0; 291 return 0;
308} 292}
309 293
310static inline int extcon_find_cable_index(struct extcon_dev *edev,
311 const char *cable_name)
312{
313 return 0;
314}
315
316static inline int extcon_get_cable_state_(struct extcon_dev *edev, 294static inline int extcon_get_cable_state_(struct extcon_dev *edev,
317 int cable_index) 295 unsigned int id)
318{ 296{
319 return 0; 297 return 0;
320} 298}
321 299
322static inline int extcon_set_cable_state_(struct extcon_dev *edev, 300static inline int extcon_set_cable_state_(struct extcon_dev *edev,
323 int cable_index, bool cable_state) 301 unsigned int id, bool cable_state)
324{ 302{
325 return 0; 303 return 0;
326} 304}
@@ -343,13 +321,15 @@ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
343} 321}
344 322
345static inline int extcon_register_notifier(struct extcon_dev *edev, 323static inline int extcon_register_notifier(struct extcon_dev *edev,
346 struct notifier_block *nb) 324 unsigned int id,
325 struct notifier_block *nb)
347{ 326{
348 return 0; 327 return 0;
349} 328}
350 329
351static inline int extcon_unregister_notifier(struct extcon_dev *edev, 330static inline int extcon_unregister_notifier(struct extcon_dev *edev,
352 struct notifier_block *nb) 331 unsigned int id,
332 struct notifier_block *nb)
353{ 333{
354 return 0; 334 return 0;
355} 335}
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 9ca958c4e94c..53c60806bcfb 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -44,7 +44,7 @@ struct adc_jack_cond {
44 * @consumer_channel: Unique name to identify the channel on the consumer 44 * @consumer_channel: Unique name to identify the channel on the consumer
45 * side. This typically describes the channels used within 45 * side. This typically describes the channels used within
46 * the consumer. E.g. 'battery_voltage' 46 * the consumer. E.g. 'battery_voltage'
47 * @cable_names: array of cable names ending with null. 47 * @cable_names: array of extcon id for supported cables.
48 * @adc_contitions: array of struct adc_jack_cond conditions ending 48 * @adc_contitions: array of struct adc_jack_cond conditions ending
49 * with .state = 0 entry. This describes how to decode 49 * with .state = 0 entry. This describes how to decode
50 * adc values into extcon state. 50 * adc values into extcon state.
@@ -58,8 +58,7 @@ struct adc_jack_pdata {
58 const char *name; 58 const char *name;
59 const char *consumer_channel; 59 const char *consumer_channel;
60 60
61 /* The last entry should be NULL */ 61 const enum extcon *cable_names;
62 const char **cable_names;
63 62
64 /* The last entry's state should be 0 */ 63 /* The last entry's state should be 0 */
65 struct adc_jack_cond *adc_conditions; 64 struct adc_jack_cond *adc_conditions;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 902c37aef67e..30d3a1f79450 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -160,16 +160,18 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
160 * 1 . 1 (Windows 7) 160 * 1 . 1 (Windows 7)
161 * 2 . 4 (Windows 8) 161 * 2 . 4 (Windows 8)
162 * 3 . 0 (Windows 8 R2) 162 * 3 . 0 (Windows 8 R2)
163 * 4 . 0 (Windows 10)
163 */ 164 */
164 165
165#define VERSION_WS2008 ((0 << 16) | (13)) 166#define VERSION_WS2008 ((0 << 16) | (13))
166#define VERSION_WIN7 ((1 << 16) | (1)) 167#define VERSION_WIN7 ((1 << 16) | (1))
167#define VERSION_WIN8 ((2 << 16) | (4)) 168#define VERSION_WIN8 ((2 << 16) | (4))
168#define VERSION_WIN8_1 ((3 << 16) | (0)) 169#define VERSION_WIN8_1 ((3 << 16) | (0))
170#define VERSION_WIN10 ((4 << 16) | (0))
169 171
170#define VERSION_INVAL -1 172#define VERSION_INVAL -1
171 173
172#define VERSION_CURRENT VERSION_WIN8_1 174#define VERSION_CURRENT VERSION_WIN10
173 175
174/* Make maximum size of pipe payload of 16K */ 176/* Make maximum size of pipe payload of 16K */
175#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) 177#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -389,10 +391,7 @@ enum vmbus_channel_message_type {
389 CHANNELMSG_INITIATE_CONTACT = 14, 391 CHANNELMSG_INITIATE_CONTACT = 14,
390 CHANNELMSG_VERSION_RESPONSE = 15, 392 CHANNELMSG_VERSION_RESPONSE = 15,
391 CHANNELMSG_UNLOAD = 16, 393 CHANNELMSG_UNLOAD = 16,
392#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD 394 CHANNELMSG_UNLOAD_RESPONSE = 17,
393 CHANNELMSG_VIEWRANGE_ADD = 17,
394 CHANNELMSG_VIEWRANGE_REMOVE = 18,
395#endif
396 CHANNELMSG_COUNT 395 CHANNELMSG_COUNT
397}; 396};
398 397
@@ -549,21 +548,6 @@ struct vmbus_channel_gpadl_torndown {
549 u32 gpadl; 548 u32 gpadl;
550} __packed; 549} __packed;
551 550
552#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
553struct vmbus_channel_view_range_add {
554 struct vmbus_channel_message_header header;
555 PHYSICAL_ADDRESS viewrange_base;
556 u64 viewrange_length;
557 u32 child_relid;
558} __packed;
559
560struct vmbus_channel_view_range_remove {
561 struct vmbus_channel_message_header header;
562 PHYSICAL_ADDRESS viewrange_base;
563 u32 child_relid;
564} __packed;
565#endif
566
567struct vmbus_channel_relid_released { 551struct vmbus_channel_relid_released {
568 struct vmbus_channel_message_header header; 552 struct vmbus_channel_message_header header;
569 u32 child_relid; 553 u32 child_relid;
@@ -713,6 +697,11 @@ struct vmbus_channel {
713 /* The corresponding CPUID in the guest */ 697 /* The corresponding CPUID in the guest */
714 u32 target_cpu; 698 u32 target_cpu;
715 /* 699 /*
700 * State to manage the CPU affiliation of channels.
701 */
702 struct cpumask alloced_cpus_in_node;
703 int numa_node;
704 /*
716 * Support for sub-channels. For high performance devices, 705 * Support for sub-channels. For high performance devices,
717 * it will be useful to have multiple sub-channels to support 706 * it will be useful to have multiple sub-channels to support
718 * a scalable communication infrastructure with the host. 707 * a scalable communication infrastructure with the host.
@@ -745,6 +734,15 @@ struct vmbus_channel {
745 */ 734 */
746 struct list_head sc_list; 735 struct list_head sc_list;
747 /* 736 /*
737 * Current number of sub-channels.
738 */
739 int num_sc;
740 /*
741 * Number of a sub-channel (position within sc_list) which is supposed
742 * to be used as the next outgoing channel.
743 */
744 int next_oc;
745 /*
748 * The primary channel this sub-channel belongs to. 746 * The primary channel this sub-channel belongs to.
749 * This will be NULL for the primary channel. 747 * This will be NULL for the primary channel.
750 */ 748 */
@@ -758,9 +756,6 @@ struct vmbus_channel {
758 * link up channels based on their CPU affinity. 756 * link up channels based on their CPU affinity.
759 */ 757 */
760 struct list_head percpu_list; 758 struct list_head percpu_list;
761
762 int num_sc;
763 int next_oc;
764}; 759};
765 760
766static inline void set_channel_read_state(struct vmbus_channel *c, bool state) 761static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
@@ -1236,13 +1231,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
1236 struct icmsg_negotiate *, u8 *, int, 1231 struct icmsg_negotiate *, u8 *, int,
1237 int); 1232 int);
1238 1233
1239int hv_kvp_init(struct hv_util_service *);
1240void hv_kvp_deinit(void);
1241void hv_kvp_onchannelcallback(void *);
1242
1243int hv_vss_init(struct hv_util_service *);
1244void hv_vss_deinit(void);
1245void hv_vss_onchannelcallback(void *);
1246void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1234void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1247 1235
1248extern struct resource hyperv_mmio; 1236extern struct resource hyperv_mmio;
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 0819d36a3a74..a16b1f9c1aca 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -7,6 +7,42 @@
7 7
8struct mei_cl_device; 8struct mei_cl_device;
9 9
10typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
11 u32 events, void *context);
12
13/**
14 * struct mei_cl_device - MEI device handle
15 * An mei_cl_device pointer is returned from mei_add_device()
16 * and links MEI bus clients to their actual ME host client pointer.
17 * Drivers for MEI devices will get an mei_cl_device pointer
18 * when being probed and shall use it for doing ME bus I/O.
19 *
20 * @dev: linux driver model device pointer
21 * @me_cl: me client
22 * @cl: mei client
23 * @name: device name
24 * @event_work: async work to execute event callback
25 * @event_cb: Drivers register this callback to get asynchronous ME
26 * events (e.g. Rx buffer pending) notifications.
27 * @event_context: event callback run context
28 * @events: Events bitmask sent to the driver.
29 * @priv_data: client private data
30 */
31struct mei_cl_device {
32 struct device dev;
33
34 struct mei_me_client *me_cl;
35 struct mei_cl *cl;
36 char name[MEI_CL_NAME_SIZE];
37
38 struct work_struct event_work;
39 mei_cl_event_cb_t event_cb;
40 void *event_context;
41 unsigned long events;
42
43 void *priv_data;
44};
45
10struct mei_cl_driver { 46struct mei_cl_driver {
11 struct device_driver driver; 47 struct device_driver driver;
12 const char *name; 48 const char *name;
@@ -28,8 +64,6 @@ void mei_cl_driver_unregister(struct mei_cl_driver *driver);
28ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length); 64ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
29ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); 65ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
30 66
31typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
32 u32 events, void *context);
33int mei_cl_register_event_cb(struct mei_cl_device *device, 67int mei_cl_register_event_cb(struct mei_cl_device *device,
34 mei_cl_event_cb_t read_cb, void *context); 68 mei_cl_event_cb_t read_cb, void *context);
35 69
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index f6722677e6d0..43db4faad143 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -121,6 +121,9 @@ struct arizona_pdata {
121 /** GPIO used for mic isolation with HPDET */ 121 /** GPIO used for mic isolation with HPDET */
122 int hpdet_id_gpio; 122 int hpdet_id_gpio;
123 123
124 /** Channel to use for headphone detection */
125 unsigned int hpdet_channel;
126
124 /** Extra debounce timeout used during initial mic detection (ms) */ 127 /** Extra debounce timeout used during initial mic detection (ms) */
125 int micd_detect_debounce; 128 int micd_detect_debounce;
126 129
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 02f97dc568ac..c2aa853fb412 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -368,4 +368,9 @@ struct axp20x_chrg_pdata {
368 int def_cv; 368 int def_cv;
369}; 369};
370 370
371struct axp288_extcon_pdata {
372 /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
373 struct gpio_desc *gpio_mux_cntl;
374};
375
371#endif /* __LINUX_MFD_AXP20X_H */ 376#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 3bfd56778c29..048c270822f9 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -599,9 +599,22 @@ struct ipack_device_id {
599 599
600#define MEI_CL_MODULE_PREFIX "mei:" 600#define MEI_CL_MODULE_PREFIX "mei:"
601#define MEI_CL_NAME_SIZE 32 601#define MEI_CL_NAME_SIZE 32
602#define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
603#define MEI_CL_UUID_ARGS(_u) \
604 _u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \
605 _u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15]
602 606
607/**
608 * struct mei_cl_device_id - MEI client device identifier
609 * @name: helper name
610 * @uuid: client uuid
611 * @driver_info: information used by the driver.
612 *
613 * identifies mei client device by uuid and name
614 */
603struct mei_cl_device_id { 615struct mei_cl_device_id {
604 char name[MEI_CL_NAME_SIZE]; 616 char name[MEI_CL_NAME_SIZE];
617 uuid_le uuid;
605 kernel_ulong_t driver_info; 618 kernel_ulong_t driver_info;
606}; 619};
607 620
diff --git a/include/linux/parport.h b/include/linux/parport.h
index c22f12547324..58e3c64c6b49 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -13,6 +13,7 @@
13#include <linux/wait.h> 13#include <linux/wait.h>
14#include <linux/irqreturn.h> 14#include <linux/irqreturn.h>
15#include <linux/semaphore.h> 15#include <linux/semaphore.h>
16#include <linux/device.h>
16#include <asm/ptrace.h> 17#include <asm/ptrace.h>
17#include <uapi/linux/parport.h> 18#include <uapi/linux/parport.h>
18 19
@@ -145,6 +146,8 @@ struct pardevice {
145 unsigned int flags; 146 unsigned int flags;
146 struct pardevice *next; 147 struct pardevice *next;
147 struct pardevice *prev; 148 struct pardevice *prev;
149 struct device dev;
150 bool devmodel;
148 struct parport_state *state; /* saved status over preemption */ 151 struct parport_state *state; /* saved status over preemption */
149 wait_queue_head_t wait_q; 152 wait_queue_head_t wait_q;
150 unsigned long int time; 153 unsigned long int time;
@@ -156,6 +159,8 @@ struct pardevice {
156 void * sysctl_table; 159 void * sysctl_table;
157}; 160};
158 161
162#define to_pardevice(n) container_of(n, struct pardevice, dev)
163
159/* IEEE1284 information */ 164/* IEEE1284 information */
160 165
161/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL 166/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL
@@ -195,7 +200,7 @@ struct parport {
195 * This may unfortulately be null if the 200 * This may unfortulately be null if the
196 * port has a legacy driver. 201 * port has a legacy driver.
197 */ 202 */
198 203 struct device bus_dev; /* to link with the bus */
199 struct parport *physport; 204 struct parport *physport;
200 /* If this is a non-default mux 205 /* If this is a non-default mux
201 parport, i.e. we're a clone of a real 206 parport, i.e. we're a clone of a real
@@ -245,15 +250,26 @@ struct parport {
245 struct parport *slaves[3]; 250 struct parport *slaves[3];
246}; 251};
247 252
253#define to_parport_dev(n) container_of(n, struct parport, bus_dev)
254
248#define DEFAULT_SPIN_TIME 500 /* us */ 255#define DEFAULT_SPIN_TIME 500 /* us */
249 256
250struct parport_driver { 257struct parport_driver {
251 const char *name; 258 const char *name;
252 void (*attach) (struct parport *); 259 void (*attach) (struct parport *);
253 void (*detach) (struct parport *); 260 void (*detach) (struct parport *);
261 void (*match_port)(struct parport *);
262 int (*probe)(struct pardevice *);
263 struct device_driver driver;
264 bool devmodel;
254 struct list_head list; 265 struct list_head list;
255}; 266};
256 267
268#define to_parport_driver(n) container_of(n, struct parport_driver, driver)
269
270int parport_bus_init(void);
271void parport_bus_exit(void);
272
257/* parport_register_port registers a new parallel port at the given 273/* parport_register_port registers a new parallel port at the given
258 address (if one does not already exist) and returns a pointer to it. 274 address (if one does not already exist) and returns a pointer to it.
259 This entails claiming the I/O region, IRQ and DMA. NULL is returned 275 This entails claiming the I/O region, IRQ and DMA. NULL is returned
@@ -272,10 +288,20 @@ void parport_announce_port (struct parport *port);
272extern void parport_remove_port(struct parport *port); 288extern void parport_remove_port(struct parport *port);
273 289
274/* Register a new high-level driver. */ 290/* Register a new high-level driver. */
275extern int parport_register_driver (struct parport_driver *); 291
292int __must_check __parport_register_driver(struct parport_driver *,
293 struct module *,
294 const char *mod_name);
295/*
296 * parport_register_driver must be a macro so that KBUILD_MODNAME can
297 * be expanded
298 */
299#define parport_register_driver(driver) \
300 __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
276 301
277/* Unregister a high-level driver. */ 302/* Unregister a high-level driver. */
278extern void parport_unregister_driver (struct parport_driver *); 303extern void parport_unregister_driver (struct parport_driver *);
304void parport_unregister_driver(struct parport_driver *);
279 305
280/* If parport_register_driver doesn't fit your needs, perhaps 306/* If parport_register_driver doesn't fit your needs, perhaps
281 * parport_find_xxx does. */ 307 * parport_find_xxx does. */
@@ -288,6 +314,15 @@ extern irqreturn_t parport_irq_handler(int irq, void *dev_id);
288/* Reference counting for ports. */ 314/* Reference counting for ports. */
289extern struct parport *parport_get_port (struct parport *); 315extern struct parport *parport_get_port (struct parport *);
290extern void parport_put_port (struct parport *); 316extern void parport_put_port (struct parport *);
317void parport_del_port(struct parport *);
318
319struct pardev_cb {
320 int (*preempt)(void *);
321 void (*wakeup)(void *);
322 void *private;
323 void (*irq_func)(void *);
324 unsigned int flags;
325};
291 326
292/* parport_register_device declares that a device is connected to a 327/* parport_register_device declares that a device is connected to a
293 port, and tells the kernel all it needs to know. 328 port, and tells the kernel all it needs to know.
@@ -301,6 +336,10 @@ struct pardevice *parport_register_device(struct parport *port,
301 void (*irq_func)(void *), 336 void (*irq_func)(void *),
302 int flags, void *handle); 337 int flags, void *handle);
303 338
339struct pardevice *
340parport_register_dev_model(struct parport *port, const char *name,
341 const struct pardev_cb *par_dev_cb, int cnt);
342
304/* parport_unregister unlinks a device from the chain. */ 343/* parport_unregister unlinks a device from the chain. */
305extern void parport_unregister_device(struct pardevice *dev); 344extern void parport_unregister_device(struct pardevice *dev);
306 345
diff --git a/include/linux/scif.h b/include/linux/scif.h
new file mode 100644
index 000000000000..44f4f3898bbe
--- /dev/null
+++ b/include/linux/scif.h
@@ -0,0 +1,993 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * BSD LICENSE
21 *
22 * Copyright(c) 2014 Intel Corporation.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name of Intel Corporation nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 *
50 * Intel SCIF driver.
51 *
52 */
53#ifndef __SCIF_H__
54#define __SCIF_H__
55
56#include <linux/types.h>
57#include <linux/poll.h>
58#include <linux/scif_ioctl.h>
59
60#define SCIF_ACCEPT_SYNC 1
61#define SCIF_SEND_BLOCK 1
62#define SCIF_RECV_BLOCK 1
63
64enum {
65 SCIF_PROT_READ = (1 << 0),
66 SCIF_PROT_WRITE = (1 << 1)
67};
68
69enum {
70 SCIF_MAP_FIXED = 0x10,
71 SCIF_MAP_KERNEL = 0x20,
72};
73
74enum {
75 SCIF_FENCE_INIT_SELF = (1 << 0),
76 SCIF_FENCE_INIT_PEER = (1 << 1),
77 SCIF_SIGNAL_LOCAL = (1 << 4),
78 SCIF_SIGNAL_REMOTE = (1 << 5)
79};
80
81enum {
82 SCIF_RMA_USECPU = (1 << 0),
83 SCIF_RMA_USECACHE = (1 << 1),
84 SCIF_RMA_SYNC = (1 << 2),
85 SCIF_RMA_ORDERED = (1 << 3)
86};
87
88/* End of SCIF Admin Reserved Ports */
89#define SCIF_ADMIN_PORT_END 1024
90
91/* End of SCIF Reserved Ports */
92#define SCIF_PORT_RSVD 1088
93
94typedef struct scif_endpt *scif_epd_t;
95
96#define SCIF_OPEN_FAILED ((scif_epd_t)-1)
97#define SCIF_REGISTER_FAILED ((off_t)-1)
98#define SCIF_MMAP_FAILED ((void *)-1)
99
100/**
101 * scif_open() - Create an endpoint
102 *
103 * Return:
104 * Upon successful completion, scif_open() returns an endpoint descriptor to
105 * be used in subsequent SCIF functions calls to refer to that endpoint;
106 * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is
107 * returned and errno is set to indicate the error; in kernel mode a NULL
108 * scif_epd_t is returned.
109 *
110 * Errors:
111 * ENOMEM - Insufficient kernel memory was available
112 */
113scif_epd_t scif_open(void);
114
115/**
116 * scif_bind() - Bind an endpoint to a port
117 * @epd: endpoint descriptor
118 * @pn: port number
119 *
120 * scif_bind() binds endpoint epd to port pn, where pn is a port number on the
121 * local node. If pn is zero, a port number greater than or equal to
122 * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to
123 * exactly one local port. Ports less than 1024 when requested can only be bound
124 * by system (or root) processes or by processes executed by privileged users.
125 *
126 * Return:
127 * Upon successful completion, scif_bind() returns the port number to which epd
128 * is bound; otherwise in user mode -1 is returned and errno is set to
129 * indicate the error; in kernel mode the negative of one of the following
130 * errors is returned.
131 *
132 * Errors:
133 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
134 * EINVAL - the endpoint or the port is already bound
135 * EISCONN - The endpoint is already connected
136 * ENOSPC - No port number available for assignment
137 * EACCES - The port requested is protected and the user is not the superuser
138 */
139int scif_bind(scif_epd_t epd, u16 pn);
140
141/**
142 * scif_listen() - Listen for connections on an endpoint
143 * @epd: endpoint descriptor
144 * @backlog: maximum pending connection requests
145 *
146 * scif_listen() marks the endpoint epd as a listening endpoint - that is, as
147 * an endpoint that will be used to accept incoming connection requests. Once
148 * so marked, the endpoint is said to be in the listening state and may not be
149 * used as the endpoint of a connection.
150 *
151 * The endpoint, epd, must have been bound to a port.
152 *
153 * The backlog argument defines the maximum length to which the queue of
154 * pending connections for epd may grow. If a connection request arrives when
155 * the queue is full, the client may receive an error with an indication that
156 * the connection was refused.
157 *
158 * Return:
159 * Upon successful completion, scif_listen() returns 0; otherwise in user mode
160 * -1 is returned and errno is set to indicate the error; in kernel mode the
161 * negative of one of the following errors is returned.
162 *
163 * Errors:
164 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
165 * EINVAL - the endpoint is not bound to a port
166 * EISCONN - The endpoint is already connected or listening
167 */
168int scif_listen(scif_epd_t epd, int backlog);
169
170/**
171 * scif_connect() - Initiate a connection on a port
172 * @epd: endpoint descriptor
173 * @dst: global id of port to which to connect
174 *
175 * The scif_connect() function requests the connection of endpoint epd to remote
176 * port dst. If the connection is successful, a peer endpoint, bound to dst, is
177 * created on node dst.node. On successful return, the connection is complete.
178 *
179 * If the endpoint epd has not already been bound to a port, scif_connect()
180 * will bind it to an unused local port.
181 *
182 * A connection is terminated when an endpoint of the connection is closed,
183 * either explicitly by scif_close(), or when a process that owns one of the
184 * endpoints of the connection is terminated.
185 *
186 * In user space, scif_connect() supports an asynchronous connection mode
187 * if the application has set the O_NONBLOCK flag on the endpoint via the
188 * fcntl() system call. Setting this flag will result in the calling process
189 * not to wait during scif_connect().
190 *
191 * Return:
192 * Upon successful completion, scif_connect() returns the port ID to which the
193 * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is
194 * set to indicate the error; in kernel mode the negative of one of the
195 * following errors is returned.
196 *
197 * Errors:
198 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
199 * ECONNREFUSED - The destination was not listening for connections or refused
200 * the connection request
201 * EINVAL - dst.port is not a valid port ID
202 * EISCONN - The endpoint is already connected
203 * ENOMEM - No buffer space is available
204 * ENODEV - The destination node does not exist, or the node is lost or existed,
205 * but is not currently in the network since it may have crashed
206 * ENOSPC - No port number available for assignment
207 * EOPNOTSUPP - The endpoint is listening and cannot be connected
208 */
209int scif_connect(scif_epd_t epd, struct scif_port_id *dst);
210
211/**
212 * scif_accept() - Accept a connection on an endpoint
213 * @epd: endpoint descriptor
214 * @peer: global id of port to which connected
215 * @newepd: new connected endpoint descriptor
216 * @flags: flags
217 *
218 * The scif_accept() call extracts the first connection request from the queue
219 * of pending connections for the port on which epd is listening. scif_accept()
220 * creates a new endpoint, bound to the same port as epd, and allocates a new
221 * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new
222 * endpoint is connected to the endpoint through which the connection was
223 * requested. epd is unaffected by this call, and remains in the listening
224 * state.
225 *
226 * On successful return, peer holds the global port identifier (node id and
227 * local port number) of the port which requested the connection.
228 *
229 * A connection is terminated when an endpoint of the connection is closed,
230 * either explicitly by scif_close(), or when a process that owns one of the
231 * endpoints of the connection is terminated.
232 *
233 * The number of connections that can (subsequently) be accepted on epd is only
234 * limited by system resources (memory).
235 *
236 * The flags argument is formed by OR'ing together zero or more of the
237 * following values.
238 * SCIF_ACCEPT_SYNC - block until a connection request is presented. If
239 * SCIF_ACCEPT_SYNC is not in flags, and no pending
240 * connections are present on the queue, scif_accept()
241 * fails with an EAGAIN error
242 *
243 * In user mode, the select() and poll() functions can be used to determine
244 * when there is a connection request. In kernel mode, the scif_poll()
245 * function may be used for this purpose. A readable event will be delivered
246 * when a connection is requested.
247 *
248 * Return:
249 * Upon successful completion, scif_accept() returns 0; otherwise in user mode
250 * -1 is returned and errno is set to indicate the error; in kernel mode the
251 * negative of one of the following errors is returned.
252 *
253 * Errors:
254 * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be
255 * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete
256 * its connection request
257 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
258 * EINTR - Interrupted function
259 * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is
260 * NULL, or newepd is NULL
261 * ENODEV - The requesting node is lost or existed, but is not currently in the
262 * network since it may have crashed
263 * ENOMEM - Not enough space
264 * ENOENT - Secondary part of epd registration failed
265 */
266int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t
267 *newepd, int flags);
268
269/**
270 * scif_close() - Close an endpoint
271 * @epd: endpoint descriptor
272 *
273 * scif_close() closes an endpoint and performs necessary teardown of
274 * facilities associated with that endpoint.
275 *
276 * If epd is a listening endpoint then it will no longer accept connection
277 * requests on the port to which it is bound. Any pending connection requests
278 * are rejected.
279 *
280 * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs
281 * which are in-process through epd or its peer endpoint will complete before
282 * scif_close() returns. Registered windows of the local and peer endpoints are
283 * released as if scif_unregister() was called against each window.
284 *
285 * Closing a SCIF endpoint does not affect local registered memory mapped by
286 * a SCIF endpoint on a remote node. The local memory remains mapped by the peer
287 * SCIF endpoint until explicitly removed by calling munmap(..) by the peer.
288 *
289 * If the peer endpoint's receive queue is not empty at the time that epd is
290 * closed, then the peer endpoint can be passed as the endpoint parameter to
291 * scif_recv() until the receive queue is empty.
292 *
293 * epd is freed and may no longer be accessed.
294 *
295 * Return:
296 * Upon successful completion, scif_close() returns 0; otherwise in user mode
297 * -1 is returned and errno is set to indicate the error; in kernel mode the
298 * negative of one of the following errors is returned.
299 *
300 * Errors:
301 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
302 */
303int scif_close(scif_epd_t epd);
304
305/**
306 * scif_send() - Send a message
307 * @epd: endpoint descriptor
308 * @msg: message buffer address
309 * @len: message length
310 * @flags: blocking mode flags
311 *
312 * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data
313 * are copied from memory starting at address msg. On successful execution the
314 * return value of scif_send() is the number of bytes that were sent, and is
315 * zero if no bytes were sent because len was zero. scif_send() may be called
316 * only when the endpoint is in a connected state.
317 *
318 * If a scif_send() call is non-blocking, then it sends only those bytes which
319 * can be sent without waiting, up to a maximum of len bytes.
320 *
321 * If a scif_send() call is blocking, then it normally returns after sending
322 * all len bytes. If a blocking call is interrupted or the connection is
323 * reset, the call is considered successful if some bytes were sent or len is
324 * zero, otherwise the call is considered unsuccessful.
325 *
326 * In user mode, the select() and poll() functions can be used to determine
327 * when the send queue is not full. In kernel mode, the scif_poll() function
328 * may be used for this purpose.
329 *
330 * It is recommended that scif_send()/scif_recv() only be used for short
331 * control-type message communication between SCIF endpoints. The SCIF RMA
332 * APIs are expected to provide better performance for transfer sizes of
333 * 1024 bytes or longer for the current MIC hardware and software
334 * implementation.
335 *
336 * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK
337 * is passed as the flags argument.
338 *
339 * Return:
340 * Upon successful completion, scif_send() returns the number of bytes sent;
341 * otherwise in user mode -1 is returned and errno is set to indicate the
342 * error; in kernel mode the negative of one of the following errors is
343 * returned.
344 *
345 * Errors:
346 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
347 * ECONNRESET - Connection reset by peer
348 * EFAULT - An invalid address was specified for a parameter
349 * EINVAL - flags is invalid, or len is negative
350 * ENODEV - The remote node is lost or exited, but is not currently in the
351 * network since it may have crashed
352 * ENOMEM - Not enough space
353 * ENOTCONN - The endpoint is not connected
354 */
355int scif_send(scif_epd_t epd, void *msg, int len, int flags);
356
357/**
358 * scif_recv() - Receive a message
359 * @epd: endpoint descriptor
360 * @msg: message buffer address
361 * @len: message buffer length
362 * @flags: blocking mode flags
363 *
364 * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of
365 * data are copied to memory starting at address msg. On successful execution
366 * the return value of scif_recv() is the number of bytes that were received,
367 * and is zero if no bytes were received because len was zero. scif_recv() may
368 * be called only when the endpoint is in a connected state.
369 *
370 * If a scif_recv() call is non-blocking, then it receives only those bytes
371 * which can be received without waiting, up to a maximum of len bytes.
372 *
373 * If a scif_recv() call is blocking, then it normally returns after receiving
374 * all len bytes. If the blocking call was interrupted due to a disconnection,
375 * subsequent calls to scif_recv() will copy all bytes received up to the point
376 * of disconnection.
377 *
378 * In user mode, the select() and poll() functions can be used to determine
379 * when data is available to be received. In kernel mode, the scif_poll()
380 * function may be used for this purpose.
381 *
382 * It is recommended that scif_send()/scif_recv() only be used for short
383 * control-type message communication between SCIF endpoints. The SCIF RMA
384 * APIs are expected to provide better performance for transfer sizes of
385 * 1024 bytes or longer for the current MIC hardware and software
386 * implementation.
387 *
388 * scif_recv() will block until the entire message is received if
389 * SCIF_RECV_BLOCK is passed as the flags argument.
390 *
391 * Return:
392 * Upon successful completion, scif_recv() returns the number of bytes
393 * received; otherwise in user mode -1 is returned and errno is set to
394 * indicate the error; in kernel mode the negative of one of the following
395 * errors is returned.
396 *
397 * Errors:
398 * EAGAIN - The destination node is returning from a low power state
399 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
400 * ECONNRESET - Connection reset by peer
401 * EFAULT - An invalid address was specified for a parameter
402 * EINVAL - flags is invalid, or len is negative
403 * ENODEV - The remote node is lost or exited, but is not currently in the
404 * network since it may have crashed
405 * ENOMEM - Not enough space
406 * ENOTCONN - The endpoint is not connected
407 */
408int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
409
410/**
411 * scif_register() - Mark a memory region for remote access.
412 * @epd: endpoint descriptor
413 * @addr: starting virtual address
414 * @len: length of range
415 * @offset: offset of window
416 * @prot_flags: read/write protection flags
417 * @map_flags: mapping flags
418 *
419 * The scif_register() function opens a window, a range of whole pages of the
420 * registered address space of the endpoint epd, starting at offset po and
421 * continuing for len bytes. The value of po, further described below, is a
422 * function of the parameters offset and len, and the value of map_flags. Each
423 * page of the window represents the physical memory page which backs the
424 * corresponding page of the range of virtual address pages starting at addr
425 * and continuing for len bytes. addr and len are constrained to be multiples
426 * of the page size. A successful scif_register() call returns po.
427 *
428 * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
429 * exactly, and offset is constrained to be a multiple of the page size. The
430 * mapping established by scif_register() will not replace any existing
431 * registration; an error is returned if any page within the range [offset,
432 * offset + len - 1] intersects an existing window.
433 *
434 * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
435 * implementation-defined manner to arrive at po. The po value so chosen will
436 * be an area of the registered address space that the implementation deems
437 * suitable for a mapping of len bytes. An offset value of 0 is interpreted as
438 * granting the implementation complete freedom in selecting po, subject to
439 * constraints described below. A non-zero value of offset is taken to be a
440 * suggestion of an offset near which the mapping should be placed. When the
441 * implementation selects a value for po, it does not replace any extant
442 * window. In all cases, po will be a multiple of the page size.
443 *
444 * The physical pages which are so represented by a window are available for
445 * access in calls to mmap(), scif_readfrom(), scif_writeto(),
446 * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
447 * physical pages represented by the window will not be reused by the memory
448 * subsystem for any other purpose. Note that the same physical page may be
449 * represented by multiple windows.
450 *
451 * Subsequent operations which change the memory pages to which virtual
452 * addresses are mapped (such as mmap(), munmap()) have no effect on
453 * existing window.
454 *
455 * If the process will fork(), it is recommended that the registered
456 * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
457 * problems due to copy-on-write semantics.
458 *
459 * The prot_flags argument is formed by OR'ing together one or more of the
460 * following values.
461 * SCIF_PROT_READ - allow read operations from the window
462 * SCIF_PROT_WRITE - allow write operations to the window
463 *
464 * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a
465 * fixed offset.
466 *
467 * Return:
468 * Upon successful completion, scif_register() returns the offset at which the
469 * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that
470 * is (off_t *)-1) is returned and errno is set to indicate the error; in
471 * kernel mode the negative of one of the following errors is returned.
472 *
473 * Errors:
474 * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range
475 * [offset, offset + len -1] are already registered
476 * EAGAIN - The mapping could not be performed due to lack of resources
477 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
478 * ECONNRESET - Connection reset by peer
479 * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
480 * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is
481 * set in flags, and offset is not a multiple of the page size, or addr is not a
482 * multiple of the page size, or len is not a multiple of the page size, or is
483 * 0, or offset is negative
484 * ENODEV - The remote node is lost or exited, but is not currently in the
485 * network since it may have crashed
486 * ENOMEM - Not enough space
487 * ENOTCONN - The endpoint is not connected
488 */
489off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
490 int prot_flags, int map_flags);
491
492/**
493 * scif_unregister() - Release a memory region previously marked for remote
494 * access.
494 * @epd: endpoint descriptor
495 * @offset: start of range to unregister
496 * @len: length of range to unregister
497 *
498 * The scif_unregister() function closes those previously registered windows
499 * which are entirely within the range [offset, offset + len - 1]. It is an
500 * error to specify a range which intersects only a subrange of a window.
501 *
502 * On a successful return, pages within the window may no longer be specified
503 * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(),
504 * scif_vwriteto(), scif_get_pages(), and scif_fence_signal(). The window,
505 * however, continues to exist until all previous references against it are
506 * removed. A window is referenced if there is a mapping to it created by
507 * mmap(), or if scif_get_pages() was called against the window
508 * (and the pages have not been returned via scif_put_pages()). A window is
509 * also referenced while an RMA, in which some range of the window is a source
510 * or destination, is in progress. Finally a window is referenced while some
511 * offset in that window was specified to scif_fence_signal(), and the RMAs
512 * marked by that call to scif_fence_signal() have not completed. While a
513 * window is in this state, its registered address space pages are not
514 * available for use in a new registered window.
515 *
516 * When all such references to the window have been removed, its references to
517 * all the physical pages which it represents are removed. Similarly, the
518 * registered address space pages of the window become available for
519 * registration in a new window.
520 *
521 * Return:
522 * Upon successful completion, scif_unregister() returns 0; otherwise in user
523 * mode -1 is returned and errno is set to indicate the error; in kernel mode
524 * the negative of one of the following errors is returned. In the event of an
525 * error, no windows are unregistered.
526 *
527 * Errors:
528 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
529 * ECONNRESET - Connection reset by peer
530 * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a
531 * window, or offset is negative
532 * ENODEV - The remote node is lost or exited, but is not currently in the
533 * network since it may have crashed
534 * ENOTCONN - The endpoint is not connected
535 * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the
536 * registered address space of epd
537 */
538int scif_unregister(scif_epd_t epd, off_t offset, size_t len);
539
540/**
541 * scif_readfrom() - Copy from a remote address space
542 * @epd: endpoint descriptor
543 * @loffset: offset in local registered address space to
544 * which to copy
545 * @len: length of range to copy
546 * @roffset: offset in remote registered address space
547 * from which to copy
548 * @rma_flags: transfer mode flags
549 *
550 * scif_readfrom() copies len bytes from the remote registered address space of
551 * the peer of endpoint epd, starting at the offset roffset to the local
552 * registered address space of epd, starting at the offset loffset.
553 *
554 * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
555 * roffset + len - 1] must be within some registered window or windows of the
556 * local and remote nodes. A range may intersect multiple registered windows,
557 * but only if those windows are contiguous in the registered address space.
558 *
559 * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
560 * programmed read/writes. Otherwise the data is copied using DMA. If rma_-
561 * flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after the
562 * transfer is complete. Otherwise, the transfer may be performed asynchron-
563 * ously. The order in which any two asynchronous RMA operations complete
564 * is non-deterministic. The synchronization functions, scif_fence_mark()/
565 * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
566 * the completion of asynchronous RMA operations on the same endpoint.
567 *
568 * The DMA transfer of individual bytes is not guaranteed to complete in
569 * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
570 * cacheline or partial cacheline of the source range will become visible on
571 * the destination node after all other transferred data in the source
572 * range has become visible on the destination node.
573 *
574 * The optimal DMA performance will likely be realized if both
575 * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
576 * performance will likely be realized if loffset and roffset are not
577 * cacheline aligned but are separated by some multiple of 64. The lowest level
578 * of performance is likely if loffset and roffset are not separated by a
579 * multiple of 64.
580 *
581 * The rma_flags argument is formed by ORing together zero or more of the
582 * following values.
583 * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
584 * engine.
585 * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
586 * transfer has completed. Passing this flag results in the
587 * current implementation busy waiting and consuming CPU cycles
588 * while the DMA transfer is in progress for best performance by
589 * avoiding the interrupt latency.
590 * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
591 * the source range becomes visible on the destination node
592 * after all other transferred data in the source range has
593 * become visible on the destination
594 *
595 * Return:
596 * Upon successful completion, scif_readfrom() returns 0; otherwise in user
597 * mode -1 is returned and errno is set to indicate the error; in kernel mode
598 * the negative of one of the following errors is returned.
599 *
600 * Errors:
601 * EACCES - Attempt to write to a read-only range
602 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
603 * ECONNRESET - Connection reset by peer
604 * EINVAL - rma_flags is invalid
605 * ENODEV - The remote node is lost or exited, but is not currently in the
606 * network since it may have crashed
607 * ENOTCONN - The endpoint is not connected
608 * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
609 * address space of epd, or, The range [roffset, roffset + len - 1] is invalid
610 * for the registered address space of the peer of epd, or loffset or roffset
611 * is negative
612 */
613int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, off_t
614 roffset, int rma_flags);
615
616/**
617 * scif_writeto() - Copy to a remote address space
618 * @epd: endpoint descriptor
619 * @loffset: offset in local registered address space
620 * from which to copy
621 * @len: length of range to copy
622 * @roffset: offset in remote registered address space to
623 * which to copy
624 * @rma_flags: transfer mode flags
625 *
626 * scif_writeto() copies len bytes from the local registered address space of
627 * epd, starting at the offset loffset to the remote registered address space
628 * of the peer of endpoint epd, starting at the offset roffset.
629 *
630 * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
631 * roffset + len - 1] must be within some registered window or windows of the
632 * local and remote nodes. A range may intersect multiple registered windows,
633 * but only if those windows are contiguous in the registered address space.
634 *
635 * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
636 * programmed read/writes. Otherwise the data is copied using DMA. If rma_-
637 * flags includes SCIF_RMA_SYNC, then scif_writeto() will return after the
638 * transfer is complete. Otherwise, the transfer may be performed asynchron-
639 * ously. The order in which any two asynchronous RMA operations complete
640 * is non-deterministic. The synchronization functions, scif_fence_mark()/
641 * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
642 * the completion of asynchronous RMA operations on the same endpoint.
643 *
644 * The DMA transfer of individual bytes is not guaranteed to complete in
645 * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
646 * cacheline or partial cacheline of the source range will become visible on
647 * the destination node after all other transferred data in the source
648 * range has become visible on the destination node.
649 *
650 * The optimal DMA performance will likely be realized if both
651 * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
652 * performance will likely be realized if loffset and roffset are not cacheline
653 * aligned but are separated by some multiple of 64. The lowest level of
654 * performance is likely if loffset and roffset are not separated by a multiple
655 * of 64.
656 *
657 * The rma_flags argument is formed by ORing together zero or more of the
658 * following values.
659 * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
660 * engine.
661 * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
662 * transfer has completed. Passing this flag results in the
663 * current implementation busy waiting and consuming CPU cycles
664 * while the DMA transfer is in progress for best performance by
665 * avoiding the interrupt latency.
666 * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
667 * the source range becomes visible on the destination node
668 * after all other transferred data in the source range has
669 * become visible on the destination
670 *
671 * Return:
672 * Upon successful completion, scif_writeto() returns 0; otherwise in user
673 * mode -1 is returned and errno is set to indicate the error; in kernel mode
674 * the negative of one of the following errors is returned.
675 *
676 * Errors:
677 * EACCES - Attempt to write to a read-only range
678 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
679 * ECONNRESET - Connection reset by peer
680 * EINVAL - rma_flags is invalid
681 * ENODEV - The remote node is lost or exited, but is not currently in the
682 * network since it may have crashed
683 * ENOTCONN - The endpoint is not connected
684 * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
685 * address space of epd, or, The range [roffset , roffset + len -1] is invalid
686 * for the registered address space of the peer of epd, or loffset or roffset
687 * is negative
688 */
689int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t
690 roffset, int rma_flags);
691
692/**
693 * scif_vreadfrom() - Copy from a remote address space
694 * @epd: endpoint descriptor
695 * @addr: address to which to copy
696 * @len: length of range to copy
697 * @roffset: offset in remote registered address space
698 * from which to copy
699 * @rma_flags: transfer mode flags
700 *
701 * scif_vreadfrom() copies len bytes from the remote registered address
702 * space of the peer of endpoint epd, starting at the offset roffset, to local
703 * memory, starting at addr.
704 *
705 * The specified range [roffset, roffset + len - 1] must be within some
706 * registered window or windows of the remote nodes. The range may
707 * intersect multiple registered windows, but only if those windows are
708 * contiguous in the registered address space.
709 *
710 * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
711 * programmed read/writes. Otherwise the data is copied using DMA. If rma_-
712 * flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after the
713 * transfer is complete. Otherwise, the transfer may be performed asynchron-
714 * ously. The order in which any two asynchronous RMA operations complete
715 * is non-deterministic. The synchronization functions, scif_fence_mark()/
716 * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
717 * the completion of asynchronous RMA operations on the same endpoint.
718 *
719 * The DMA transfer of individual bytes is not guaranteed to complete in
720 * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
721 * cacheline or partial cacheline of the source range will become visible on
722 * the destination node after all other transferred data in the source
723 * range has become visible on the destination node.
724 *
725 * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
726 * the specified local memory range may remain in a pinned state even after
727 * the specified transfer completes. This may reduce overhead if some or all of
728 * the same virtual address range is referenced in a subsequent call of
729 * scif_vreadfrom() or scif_vwriteto().
730 *
731 * The optimal DMA performance will likely be realized if both
732 * addr and roffset are cacheline aligned (are a multiple of 64). Lower
733 * performance will likely be realized if addr and roffset are not
734 * cacheline aligned but are separated by some multiple of 64. The lowest level
735 * of performance is likely if addr and roffset are not separated by a
736 * multiple of 64.
737 *
738 * The rma_flags argument is formed by ORing together zero or more of the
739 * following values.
740 * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
741 * engine.
742 * SCIF_RMA_USECACHE - enable registration caching
743 * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
744 * transfer has completed. Passing this flag results in the
745 * current implementation busy waiting and consuming CPU cycles
746 * while the DMA transfer is in progress for best performance by
747 * avoiding the interrupt latency.
748 * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
749 * the source range becomes visible on the destination node
750 * after all other transferred data in the source range has
751 * become visible on the destination
752 *
753 * Return:
754 * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user
755 * mode -1 is returned and errno is set to indicate the error; in kernel mode
756 * the negative of one of the following errors is returned.
757 *
758 * Errors:
759 * EACCES - Attempt to write to a read-only range
760 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
761 * ECONNRESET - Connection reset by peer
762 * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
763 * EINVAL - rma_flags is invalid
764 * ENODEV - The remote node is lost or exited, but is not currently in the
765 * network since it may have crashed
766 * ENOTCONN - The endpoint is not connected
767 * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
768 * registered address space of epd
769 */
770int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset,
771 int rma_flags);
772
773/**
774 * scif_vwriteto() - Copy to a remote address space
775 * @epd: endpoint descriptor
776 * @addr: address from which to copy
777 * @len: length of range to copy
778 * @roffset: offset in remote registered address space to
779 * which to copy
780 * @rma_flags: transfer mode flags
781 *
782 * scif_vwriteto() copies len bytes from the local memory, starting at addr, to
783 * the remote registered address space of the peer of endpoint epd, starting at
784 * the offset roffset.
785 *
786 * The specified range [roffset, roffset + len - 1] must be within some
787 * registered window or windows of the remote nodes. The range may intersect
788 * multiple registered windows, but only if those windows are contiguous in the
789 * registered address space.
790 *
791 * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
792 * programmed read/writes. Otherwise the data is copied using DMA. If rma_-
793 * flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after the
794 * transfer is complete. Otherwise, the transfer may be performed asynchron-
795 * ously. The order in which any two asynchronous RMA operations complete
796 * is non-deterministic. The synchronization functions, scif_fence_mark()/
797 * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
798 * the completion of asynchronous RMA operations on the same endpoint.
799 *
800 * The DMA transfer of individual bytes is not guaranteed to complete in
801 * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
802 * cacheline or partial cacheline of the source range will become visible on
803 * the destination node after all other transferred data in the source
804 * range has become visible on the destination node.
805 *
806 * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
807 * the specified local memory range may remain in a pinned state even after
808 * the specified transfer completes. This may reduce overhead if some or all of
809 * the same virtual address range is referenced in a subsequent call of
810 * scif_vreadfrom() or scif_vwriteto().
811 *
812 * The optimal DMA performance will likely be realized if both
813 * addr and offset are cacheline aligned (are a multiple of 64). Lower
814 * performance will likely be realized if addr and offset are not cacheline
815 * aligned but are separated by some multiple of 64. The lowest level of
816 * performance is likely if addr and offset are not separated by a multiple of
817 * 64.
818 *
819 * The rma_flags argument is formed by ORing together zero or more of the
820 * following values.
821 * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
822 * engine.
823 * SCIF_RMA_USECACHE - allow registration caching
824 * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
825 * transfer has completed. Passing this flag results in the
826 * current implementation busy waiting and consuming CPU cycles
827 * while the DMA transfer is in progress for best performance by
828 * avoiding the interrupt latency.
829 * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
830 * the source range becomes visible on the destination node
831 * after all other transferred data in the source range has
832 * become visible on the destination
833 *
834 * Return:
835 * Upon successful completion, scif_vwriteto() returns 0; otherwise in user
836 * mode -1 is returned and errno is set to indicate the error; in kernel mode
837 * the negative of one of the following errors is returned.
838 *
839 * Errors:
840 * EACCES - Attempt to write to a read-only range
841 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
842 * ECONNRESET - Connection reset by peer
843 * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid
844 * EINVAL - rma_flags is invalid
845 * ENODEV - The remote node is lost or exited, but is not currently in the
846 * network since it may have crashed
847 * ENOTCONN - The endpoint is not connected
848 * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
849 * registered address space of epd
850 */
851int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset,
852 int rma_flags);
853
854/**
855 * scif_fence_mark() - Mark previously issued RMAs
856 * @epd: endpoint descriptor
857 * @flags: control flags
858 * @mark: marked value returned as output.
859 *
860 * scif_fence_mark() returns after marking the current set of all uncompleted
861 * RMAs initiated through the endpoint epd or the current set of all
862 * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are
863 * marked with a value returned at mark. The application may subsequently call
864 * scif_fence_wait(), passing the value returned at mark, to await completion
865 * of all RMAs so marked.
866 *
867 * The flags argument has exactly one of the following values.
868 * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
869 * epd are marked
870 * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
871 * of endpoint epd are marked
872 *
873 * Return:
874 * Upon successful completion, scif_fence_mark() returns 0; otherwise in user
875 * mode -1 is returned and errno is set to indicate the error; in kernel mode
876 * the negative of one of the following errors is returned.
877 *
878 * Errors:
879 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
880 * ECONNRESET - Connection reset by peer
881 * EINVAL - flags is invalid
882 * ENODEV - The remote node is lost or exited, but is not currently in the
883 * network since it may have crashed
884 * ENOTCONN - The endpoint is not connected
885 * ENOMEM - Insufficient kernel memory was available
886 */
887int scif_fence_mark(scif_epd_t epd, int flags, int *mark);
888
889/**
890 * scif_fence_wait() - Wait for completion of marked RMAs
891 * @epd: endpoint descriptor
892 * @mark: mark request
893 *
894 * scif_fence_wait() returns after all RMAs marked with mark have completed.
895 * The value passed in mark must have been obtained in a previous call to
896 * scif_fence_mark().
897 *
898 * Return:
899 * Upon successful completion, scif_fence_wait() returns 0; otherwise in user
900 * mode -1 is returned and errno is set to indicate the error; in kernel mode
901 * the negative of one of the following errors is returned.
902 *
903 * Errors:
904 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
905 * ECONNRESET - Connection reset by peer
906 * ENODEV - The remote node is lost or exited, but is not currently in the
907 * network since it may have crashed
908 * ENOTCONN - The endpoint is not connected
909 * ENOMEM - Insufficient kernel memory was available
910 */
911int scif_fence_wait(scif_epd_t epd, int mark);
912
913/**
914 * scif_fence_signal() - Request a memory update on completion of RMAs
915 * @epd: endpoint descriptor
916 * @loff: local offset
917 * @lval: local value to write to loffset
918 * @roff: remote offset
919 * @rval: remote value to write to roffset
920 * @flags: flags
921 *
922 * scif_fence_signal() returns after marking the current set of all uncompleted
923 * RMAs initiated through the endpoint epd or marking the current set of all
924 * uncompleted RMAs initiated through the peer of endpoint epd.
925 *
926 * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the
927 * marked set, lval is written to memory at the address corresponding to offset
928 * loff in the local registered address space of epd. loff must be within a
929 * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion
930 * of the RMAs in the marked set, rval is written to memory at the address
931 * corresponding to offset roff in the remote registered address space of epd.
932 * roff must be within a remote registered window of the peer of epd. Note
933 * that any specified offset must be DWORD (4 byte / 32 bit) aligned.
934 *
935 * The flags argument is formed by OR'ing together the following.
936 * Exactly one of the following values.
937 * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
938 * epd are marked
939 * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
940 * of endpoint epd are marked
941 * One or more of the following values.
942 * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to
943 * memory at the address corresponding to offset loff in the local
944 * registered address space of epd.
945 * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to
946 * memory at the address corresponding to offset roff in the remote
947 * registered address space of epd.
948 *
949 * Return:
950 * Upon successful completion, scif_fence_signal() returns 0; otherwise in
951 * user mode -1 is returned and errno is set to indicate the error; in kernel
952 * mode the negative of one of the following errors is returned.
953 *
954 * Errors:
955 * EBADF, ENOTTY - epd is not a valid endpoint descriptor
956 * ECONNRESET - Connection reset by peer
957 * EINVAL - flags is invalid, or loff or roff are not DWORD aligned
958 * ENODEV - The remote node is lost or exited, but is not currently in the
959 * network since it may have crashed
960 * ENOTCONN - The endpoint is not connected
961 * ENXIO - loff is invalid for the registered address of epd, or roff is invalid
962 * for the registered address space, of the peer of epd
963 */
964int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff,
965 u64 rval, int flags);
966
967/**
968 * scif_get_node_ids() - Return information about online nodes
969 * @nodes: array in which to return online node IDs
970 * @len: number of entries in the nodes array
971 * @self: address to place the node ID of the local node
972 *
973 * scif_get_node_ids() fills in the nodes array with up to len node IDs of the
974 * nodes in the SCIF network. If there is not enough space in nodes, as
975 * indicated by the len parameter, only len node IDs are returned in nodes. The
976 * return value of scif_get_node_ids() is the total number of nodes currently in
977 * the SCIF network. By checking the return value against the len parameter,
978 * the user may determine if enough space for nodes was allocated.
979 *
980 * The node ID of the local node is returned at self.
981 *
982 * Return:
983 * Upon successful completion, scif_get_node_ids() returns the actual number of
984 * online nodes in the SCIF network including 'self'; otherwise in user mode
985 * -1 is returned and errno is set to indicate the error; in kernel mode no
986 * errors are returned.
987 *
988 * Errors:
989 * EFAULT - Bad address
990 */
991int scif_get_node_ids(u16 *nodes, int len, u16 *self);
992
993#endif /* __SCIF_H__ */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 4460e5820b0e..baa7c80da6af 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -352,6 +352,7 @@ header-y += rtc.h
352header-y += rtnetlink.h 352header-y += rtnetlink.h
353header-y += scc.h 353header-y += scc.h
354header-y += sched.h 354header-y += sched.h
355header-y += scif_ioctl.h
355header-y += screen_info.h 356header-y += screen_info.h
356header-y += sctp.h 357header-y += sctp.h
357header-y += sdla.h 358header-y += sdla.h
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index bb1cb73c927a..e4c0a35d6417 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -45,6 +45,11 @@
45 45
46#define VSS_OP_REGISTER 128 46#define VSS_OP_REGISTER 128
47 47
48/*
49 Daemon code with full handshake support.
50 */
51#define VSS_OP_REGISTER1 129
52
48enum hv_vss_op { 53enum hv_vss_op {
49 VSS_OP_CREATE = 0, 54 VSS_OP_CREATE = 0,
50 VSS_OP_DELETE, 55 VSS_OP_DELETE,
@@ -100,7 +105,8 @@ struct hv_vss_msg {
100 */ 105 */
101 106
102#define FCOPY_VERSION_0 0 107#define FCOPY_VERSION_0 0
103#define FCOPY_CURRENT_VERSION FCOPY_VERSION_0 108#define FCOPY_VERSION_1 1
109#define FCOPY_CURRENT_VERSION FCOPY_VERSION_1
104#define W_MAX_PATH 260 110#define W_MAX_PATH 260
105 111
106enum hv_fcopy_op { 112enum hv_fcopy_op {
diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h
index 6eb40244e019..302a2ced373c 100644
--- a/include/uapi/linux/mic_common.h
+++ b/include/uapi/linux/mic_common.h
@@ -80,6 +80,12 @@ struct mic_device_ctrl {
80 * @h2c_config_db: Host to Card Virtio config doorbell set by card 80 * @h2c_config_db: Host to Card Virtio config doorbell set by card
81 * @shutdown_status: Card shutdown status set by card 81 * @shutdown_status: Card shutdown status set by card
82 * @shutdown_card: Set to 1 by the host when a card shutdown is initiated 82 * @shutdown_card: Set to 1 by the host when a card shutdown is initiated
83 * @tot_nodes: Total number of nodes in the SCIF network
84 * @node_id: Unique id of the node
85 * @h2c_scif_db - Host to card SCIF doorbell set by card
86 * @c2h_scif_db - Card to host SCIF doorbell set by host
87 * @scif_host_dma_addr - SCIF host queue pair DMA address
88 * @scif_card_dma_addr - SCIF card queue pair DMA address
83 */ 89 */
84struct mic_bootparam { 90struct mic_bootparam {
85 __le32 magic; 91 __le32 magic;
@@ -88,6 +94,12 @@ struct mic_bootparam {
88 __s8 h2c_config_db; 94 __s8 h2c_config_db;
89 __u8 shutdown_status; 95 __u8 shutdown_status;
90 __u8 shutdown_card; 96 __u8 shutdown_card;
97 __u8 tot_nodes;
98 __u8 node_id;
99 __u8 h2c_scif_db;
100 __u8 c2h_scif_db;
101 __u64 scif_host_dma_addr;
102 __u64 scif_card_dma_addr;
91} __attribute__ ((aligned(8))); 103} __attribute__ ((aligned(8)));
92 104
93/** 105/**
diff --git a/include/uapi/linux/scif_ioctl.h b/include/uapi/linux/scif_ioctl.h
new file mode 100644
index 000000000000..4a94d917cf99
--- /dev/null
+++ b/include/uapi/linux/scif_ioctl.h
@@ -0,0 +1,130 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2014 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * BSD LICENSE
21 *
22 * Copyright(c) 2014 Intel Corporation.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name of Intel Corporation nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 *
50 * Intel SCIF driver.
51 *
52 */
53/*
54 * -----------------------------------------
55 * SCIF IOCTL interface information
56 * -----------------------------------------
57 */
58#ifndef SCIF_IOCTL_H
59#define SCIF_IOCTL_H
60
61#include <linux/types.h>
62
63/**
64 * struct scif_port_id - SCIF port information
65 * @node: node on which port resides
66 * @port: local port number
67 */
68struct scif_port_id {
69 __u16 node;
70 __u16 port;
71};
72
73/**
74 * struct scifioctl_connect - used for SCIF_CONNECT IOCTL
75 * @self: used to read back the assigned port_id
76 * @peer: destination node and port to connect to
77 */
78struct scifioctl_connect {
79 struct scif_port_id self;
80 struct scif_port_id peer;
81};
82
83/**
84 * struct scifioctl_accept - used for SCIF_ACCEPTREQ IOCTL
85 * @flags: flags
86 * @peer: global id of peer endpoint
87 * @endpt: new connected endpoint descriptor
88 */
89struct scifioctl_accept {
90 __s32 flags;
91 struct scif_port_id peer;
92 __u64 endpt;
93};
94
95/**
96 * struct scifioctl_msg - used for SCIF_SEND/SCIF_RECV IOCTL
97 * @msg: message buffer address
98 * @len: message length
99 * @flags: flags
100 * @out_len: number of bytes sent/received
101 */
102struct scifioctl_msg {
103 __u64 msg;
104 __s32 len;
105 __s32 flags;
106 __s32 out_len;
107};
108
109/**
110 * struct scifioctl_node_ids - used for SCIF_GET_NODEIDS IOCTL
111 * @nodes: pointer to an array of node_ids
112 * @self: ID of the current node
113 * @len: length of array
114 */
115struct scifioctl_node_ids {
116 __u64 nodes;
117 __u64 self;
118 __s32 len;
119};
120
121#define SCIF_BIND _IOWR('s', 1, __u64)
122#define SCIF_LISTEN _IOW('s', 2, __s32)
123#define SCIF_CONNECT _IOWR('s', 3, struct scifioctl_connect)
124#define SCIF_ACCEPTREQ _IOWR('s', 4, struct scifioctl_accept)
125#define SCIF_ACCEPTREG _IOWR('s', 5, __u64)
126#define SCIF_SEND _IOWR('s', 6, struct scifioctl_msg)
127#define SCIF_RECV _IOWR('s', 7, struct scifioctl_msg)
128#define SCIF_GET_NODEIDS _IOWR('s', 14, struct scifioctl_node_ids)
129
130#endif /* SCIF_IOCTL_H */
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 26cc6029b280..6d940c72b5fc 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -140,8 +140,12 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
140 /* Error: request to write beyond destination buffer */ 140 /* Error: request to write beyond destination buffer */
141 if (cpy > oend) 141 if (cpy > oend)
142 goto _output_error; 142 goto _output_error;
143#if LZ4_ARCH64
144 if ((ref + COPYLENGTH) > oend)
145#else
143 if ((ref + COPYLENGTH) > oend || 146 if ((ref + COPYLENGTH) > oend ||
144 (op + COPYLENGTH) > oend) 147 (op + COPYLENGTH) > oend)
148#endif
145 goto _output_error; 149 goto _output_error;
146 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); 150 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
147 while (op < cpy) 151 while (op < cpy)
@@ -266,7 +270,13 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
266 if (cpy > oend - COPYLENGTH) { 270 if (cpy > oend - COPYLENGTH) {
267 if (cpy > oend) 271 if (cpy > oend)
268 goto _output_error; /* write outside of buf */ 272 goto _output_error; /* write outside of buf */
269 273#if LZ4_ARCH64
274 if ((ref + COPYLENGTH) > oend)
275#else
276 if ((ref + COPYLENGTH) > oend ||
277 (op + COPYLENGTH) > oend)
278#endif
279 goto _output_error;
270 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); 280 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
271 while (op < cpy) 281 while (op < cpy)
272 *op++ = *ref++; 282 *op++ = *ref++;
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index 74086a583d8d..c89fdcaf06e8 100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -1,4 +1,4 @@
1#!/usr/bin/env python 1#!/usr/bin/env python2
2 2
3"""Find Kconfig symbols that are referenced but not defined.""" 3"""Find Kconfig symbols that are referenced but not defined."""
4 4
@@ -58,6 +58,12 @@ def parse_options():
58 "input format bases on Git log's " 58 "input format bases on Git log's "
59 "\'commmit1..commit2\'.") 59 "\'commmit1..commit2\'.")
60 60
61 parser.add_option('-i', '--ignore', dest='ignore', action='store',
62 default="",
63 help="Ignore files matching this pattern. Note that "
64 "the pattern needs to be a Python regex. To "
65 "ignore defconfigs, specify -i '.*defconfig'.")
66
61 parser.add_option('', '--force', dest='force', action='store_true', 67 parser.add_option('', '--force', dest='force', action='store_true',
62 default=False, 68 default=False,
63 help="Reset current Git tree even when it's dirty.") 69 help="Reset current Git tree even when it's dirty.")
@@ -80,6 +86,12 @@ def parse_options():
80 "'--force' if you\nwant to ignore this warning and " 86 "'--force' if you\nwant to ignore this warning and "
81 "continue.") 87 "continue.")
82 88
89 if opts.ignore:
90 try:
91 re.match(opts.ignore, "this/is/just/a/test.c")
92 except:
93 sys.exit("Please specify a valid Python regex.")
94
83 return opts 95 return opts
84 96
85 97
@@ -105,11 +117,11 @@ def main():
105 117
106 # get undefined items before the commit 118 # get undefined items before the commit
107 execute("git reset --hard %s" % commit_a) 119 execute("git reset --hard %s" % commit_a)
108 undefined_a = check_symbols() 120 undefined_a = check_symbols(opts.ignore)
109 121
110 # get undefined items for the commit 122 # get undefined items for the commit
111 execute("git reset --hard %s" % commit_b) 123 execute("git reset --hard %s" % commit_b)
112 undefined_b = check_symbols() 124 undefined_b = check_symbols(opts.ignore)
113 125
114 # report cases that are present for the commit but not before 126 # report cases that are present for the commit but not before
115 for feature in sorted(undefined_b): 127 for feature in sorted(undefined_b):
@@ -129,7 +141,7 @@ def main():
129 141
130 # default to check the entire tree 142 # default to check the entire tree
131 else: 143 else:
132 undefined = check_symbols() 144 undefined = check_symbols(opts.ignore)
133 for feature in sorted(undefined): 145 for feature in sorted(undefined):
134 files = sorted(undefined.get(feature)) 146 files = sorted(undefined.get(feature))
135 print "%s\t%s" % (feature, ", ".join(files)) 147 print "%s\t%s" % (feature, ", ".join(files))
@@ -160,9 +172,10 @@ def get_head():
160 return stdout.strip('\n') 172 return stdout.strip('\n')
161 173
162 174
163def check_symbols(): 175def check_symbols(ignore):
164 """Find undefined Kconfig symbols and return a dict with the symbol as key 176 """Find undefined Kconfig symbols and return a dict with the symbol as key
165 and a list of referencing files as value.""" 177 and a list of referencing files as value. Files matching %ignore are not
178 checked for undefined symbols."""
166 source_files = [] 179 source_files = []
167 kconfig_files = [] 180 kconfig_files = []
168 defined_features = set() 181 defined_features = set()
@@ -185,10 +198,17 @@ def check_symbols():
185 source_files.append(gitfile) 198 source_files.append(gitfile)
186 199
187 for sfile in source_files: 200 for sfile in source_files:
201 if ignore and re.match(ignore, sfile):
202 # do not check files matching %ignore
203 continue
188 parse_source_file(sfile, referenced_features) 204 parse_source_file(sfile, referenced_features)
189 205
190 for kfile in kconfig_files: 206 for kfile in kconfig_files:
191 parse_kconfig_file(kfile, defined_features, referenced_features) 207 if ignore and re.match(ignore, kfile):
208 # do not collect references for files matching %ignore
209 parse_kconfig_file(kfile, defined_features, dict())
210 else:
211 parse_kconfig_file(kfile, defined_features, referenced_features)
192 212
193 undefined = {} # {feature: [files]} 213 undefined = {} # {feature: [files]}
194 for feature in sorted(referenced_features): 214 for feature in sorted(referenced_features):
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index fce36d0f6898..091f6290a651 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -182,6 +182,7 @@ int main(void)
182 182
183 DEVID(mei_cl_device_id); 183 DEVID(mei_cl_device_id);
184 DEVID_FIELD(mei_cl_device_id, name); 184 DEVID_FIELD(mei_cl_device_id, name);
185 DEVID_FIELD(mei_cl_device_id, uuid);
185 186
186 DEVID(rio_device_id); 187 DEVID(rio_device_id);
187 DEVID_FIELD(rio_device_id, did); 188 DEVID_FIELD(rio_device_id, did);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 78691d51a479..718b2a29bd43 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -34,6 +34,9 @@ typedef Elf64_Addr kernel_ulong_t;
34typedef uint32_t __u32; 34typedef uint32_t __u32;
35typedef uint16_t __u16; 35typedef uint16_t __u16;
36typedef unsigned char __u8; 36typedef unsigned char __u8;
37typedef struct {
38 __u8 b[16];
39} uuid_le;
37 40
38/* Big exception to the "don't include kernel headers into userspace, which 41/* Big exception to the "don't include kernel headers into userspace, which
39 * even potentially has different endianness and word sizes, since 42 * even potentially has different endianness and word sizes, since
@@ -131,6 +134,15 @@ static inline void add_wildcard(char *str)
131 strcat(str + len, "*"); 134 strcat(str + len, "*");
132} 135}
133 136
137static inline void add_uuid(char *str, uuid_le uuid)
138{
139 int len = strlen(str);
140 int i;
141
142 for (i = 0; i < 16; i++)
143 sprintf(str + len + (i << 1), "%02x", uuid.b[i]);
144}
145
134/** 146/**
135 * Check that sizeof(device_id type) are consistent with size of section 147 * Check that sizeof(device_id type) are consistent with size of section
136 * in .o file. If in-consistent then userspace and kernel does not agree 148 * in .o file. If in-consistent then userspace and kernel does not agree
@@ -1160,13 +1172,18 @@ static int do_cpu_entry(const char *filename, void *symval, char *alias)
1160} 1172}
1161ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry); 1173ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
1162 1174
1163/* Looks like: mei:S */ 1175/* Looks like: mei:S:uuid */
1164static int do_mei_entry(const char *filename, void *symval, 1176static int do_mei_entry(const char *filename, void *symval,
1165 char *alias) 1177 char *alias)
1166{ 1178{
1167 DEF_FIELD_ADDR(symval, mei_cl_device_id, name); 1179 DEF_FIELD_ADDR(symval, mei_cl_device_id, name);
1180 DEF_FIELD_ADDR(symval, mei_cl_device_id, uuid);
1181
1182 sprintf(alias, MEI_CL_MODULE_PREFIX);
1183 sprintf(alias + strlen(alias), "%s:", (*name)[0] ? *name : "*");
1184 add_uuid(alias, *uuid);
1168 1185
1169 sprintf(alias, MEI_CL_MODULE_PREFIX "%s", *name); 1186 strcat(alias, ":*");
1170 1187
1171 return 1; 1188 return 1;
1172} 1189}
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index 9445d8f264a4..5480e4e424eb 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -137,6 +137,8 @@ int main(int argc, char *argv[])
137 int version = FCOPY_CURRENT_VERSION; 137 int version = FCOPY_CURRENT_VERSION;
138 char *buffer[4096 * 2]; 138 char *buffer[4096 * 2];
139 struct hv_fcopy_hdr *in_msg; 139 struct hv_fcopy_hdr *in_msg;
140 int in_handshake = 1;
141 __u32 kernel_modver;
140 142
141 static struct option long_options[] = { 143 static struct option long_options[] = {
142 {"help", no_argument, 0, 'h' }, 144 {"help", no_argument, 0, 'h' },
@@ -191,6 +193,19 @@ int main(int argc, char *argv[])
191 syslog(LOG_ERR, "pread failed: %s", strerror(errno)); 193 syslog(LOG_ERR, "pread failed: %s", strerror(errno));
192 exit(EXIT_FAILURE); 194 exit(EXIT_FAILURE);
193 } 195 }
196
197 if (in_handshake) {
198 if (len != sizeof(kernel_modver)) {
199 syslog(LOG_ERR, "invalid version negotiation");
200 exit(EXIT_FAILURE);
201 }
202 kernel_modver = *(__u32 *)buffer;
203 in_handshake = 0;
204 syslog(LOG_INFO, "HV_FCOPY: kernel module version: %d",
205 kernel_modver);
206 continue;
207 }
208
194 in_msg = (struct hv_fcopy_hdr *)buffer; 209 in_msg = (struct hv_fcopy_hdr *)buffer;
195 210
196 switch (in_msg->operation) { 211 switch (in_msg->operation) {
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 408bb076a234..0d9f48ec42bb 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -33,7 +33,6 @@
33#include <ctype.h> 33#include <ctype.h>
34#include <errno.h> 34#include <errno.h>
35#include <arpa/inet.h> 35#include <arpa/inet.h>
36#include <linux/connector.h>
37#include <linux/hyperv.h> 36#include <linux/hyperv.h>
38#include <linux/netlink.h> 37#include <linux/netlink.h>
39#include <ifaddrs.h> 38#include <ifaddrs.h>
@@ -79,7 +78,6 @@ enum {
79 DNS 78 DNS
80}; 79};
81 80
82static struct sockaddr_nl addr;
83static int in_hand_shake = 1; 81static int in_hand_shake = 1;
84 82
85static char *os_name = ""; 83static char *os_name = "";
@@ -1387,34 +1385,6 @@ kvp_get_domain_name(char *buffer, int length)
1387 freeaddrinfo(info); 1385 freeaddrinfo(info);
1388} 1386}
1389 1387
1390static int
1391netlink_send(int fd, struct cn_msg *msg)
1392{
1393 struct nlmsghdr nlh = { .nlmsg_type = NLMSG_DONE };
1394 unsigned int size;
1395 struct msghdr message;
1396 struct iovec iov[2];
1397
1398 size = sizeof(struct cn_msg) + msg->len;
1399
1400 nlh.nlmsg_pid = getpid();
1401 nlh.nlmsg_len = NLMSG_LENGTH(size);
1402
1403 iov[0].iov_base = &nlh;
1404 iov[0].iov_len = sizeof(nlh);
1405
1406 iov[1].iov_base = msg;
1407 iov[1].iov_len = size;
1408
1409 memset(&message, 0, sizeof(message));
1410 message.msg_name = &addr;
1411 message.msg_namelen = sizeof(addr);
1412 message.msg_iov = iov;
1413 message.msg_iovlen = 2;
1414
1415 return sendmsg(fd, &message, 0);
1416}
1417
1418void print_usage(char *argv[]) 1388void print_usage(char *argv[])
1419{ 1389{
1420 fprintf(stderr, "Usage: %s [options]\n" 1390 fprintf(stderr, "Usage: %s [options]\n"
@@ -1425,22 +1395,17 @@ void print_usage(char *argv[])
1425 1395
1426int main(int argc, char *argv[]) 1396int main(int argc, char *argv[])
1427{ 1397{
1428 int fd, len, nl_group; 1398 int kvp_fd, len;
1429 int error; 1399 int error;
1430 struct cn_msg *message;
1431 struct pollfd pfd; 1400 struct pollfd pfd;
1432 struct nlmsghdr *incoming_msg; 1401 char *p;
1433 struct cn_msg *incoming_cn_msg; 1402 struct hv_kvp_msg hv_msg[1];
1434 struct hv_kvp_msg *hv_msg;
1435 char *p;
1436 char *key_value; 1403 char *key_value;
1437 char *key_name; 1404 char *key_name;
1438 int op; 1405 int op;
1439 int pool; 1406 int pool;
1440 char *if_name; 1407 char *if_name;
1441 struct hv_kvp_ipaddr_value *kvp_ip_val; 1408 struct hv_kvp_ipaddr_value *kvp_ip_val;
1442 char *kvp_recv_buffer;
1443 size_t kvp_recv_buffer_len;
1444 int daemonize = 1, long_index = 0, opt; 1409 int daemonize = 1, long_index = 0, opt;
1445 1410
1446 static struct option long_options[] = { 1411 static struct option long_options[] = {
@@ -1468,12 +1433,14 @@ int main(int argc, char *argv[])
1468 openlog("KVP", 0, LOG_USER); 1433 openlog("KVP", 0, LOG_USER);
1469 syslog(LOG_INFO, "KVP starting; pid is:%d", getpid()); 1434 syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
1470 1435
1471 kvp_recv_buffer_len = NLMSG_LENGTH(0) + sizeof(struct cn_msg) + sizeof(struct hv_kvp_msg); 1436 kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR);
1472 kvp_recv_buffer = calloc(1, kvp_recv_buffer_len); 1437
1473 if (!kvp_recv_buffer) { 1438 if (kvp_fd < 0) {
1474 syslog(LOG_ERR, "Failed to allocate netlink buffer"); 1439 syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s",
1440 errno, strerror(errno));
1475 exit(EXIT_FAILURE); 1441 exit(EXIT_FAILURE);
1476 } 1442 }
1443
1477 /* 1444 /*
1478 * Retrieve OS release information. 1445 * Retrieve OS release information.
1479 */ 1446 */
@@ -1489,100 +1456,44 @@ int main(int argc, char *argv[])
1489 exit(EXIT_FAILURE); 1456 exit(EXIT_FAILURE);
1490 } 1457 }
1491 1458
1492 fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
1493 if (fd < 0) {
1494 syslog(LOG_ERR, "netlink socket creation failed; error: %d %s", errno,
1495 strerror(errno));
1496 exit(EXIT_FAILURE);
1497 }
1498 addr.nl_family = AF_NETLINK;
1499 addr.nl_pad = 0;
1500 addr.nl_pid = 0;
1501 addr.nl_groups = 0;
1502
1503
1504 error = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
1505 if (error < 0) {
1506 syslog(LOG_ERR, "bind failed; error: %d %s", errno, strerror(errno));
1507 close(fd);
1508 exit(EXIT_FAILURE);
1509 }
1510 nl_group = CN_KVP_IDX;
1511
1512 if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &nl_group, sizeof(nl_group)) < 0) {
1513 syslog(LOG_ERR, "setsockopt failed; error: %d %s", errno, strerror(errno));
1514 close(fd);
1515 exit(EXIT_FAILURE);
1516 }
1517
1518 /* 1459 /*
1519 * Register ourselves with the kernel. 1460 * Register ourselves with the kernel.
1520 */ 1461 */
1521 message = (struct cn_msg *)kvp_recv_buffer;
1522 message->id.idx = CN_KVP_IDX;
1523 message->id.val = CN_KVP_VAL;
1524
1525 hv_msg = (struct hv_kvp_msg *)message->data;
1526 hv_msg->kvp_hdr.operation = KVP_OP_REGISTER1; 1462 hv_msg->kvp_hdr.operation = KVP_OP_REGISTER1;
1527 message->ack = 0; 1463 len = write(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg));
1528 message->len = sizeof(struct hv_kvp_msg); 1464 if (len != sizeof(struct hv_kvp_msg)) {
1529 1465 syslog(LOG_ERR, "registration to kernel failed; error: %d %s",
1530 len = netlink_send(fd, message); 1466 errno, strerror(errno));
1531 if (len < 0) { 1467 close(kvp_fd);
1532 syslog(LOG_ERR, "netlink_send failed; error: %d %s", errno, strerror(errno));
1533 close(fd);
1534 exit(EXIT_FAILURE); 1468 exit(EXIT_FAILURE);
1535 } 1469 }
1536 1470
1537 pfd.fd = fd; 1471 pfd.fd = kvp_fd;
1538 1472
1539 while (1) { 1473 while (1) {
1540 struct sockaddr *addr_p = (struct sockaddr *) &addr;
1541 socklen_t addr_l = sizeof(addr);
1542 pfd.events = POLLIN; 1474 pfd.events = POLLIN;
1543 pfd.revents = 0; 1475 pfd.revents = 0;
1544 1476
1545 if (poll(&pfd, 1, -1) < 0) { 1477 if (poll(&pfd, 1, -1) < 0) {
1546 syslog(LOG_ERR, "poll failed; error: %d %s", errno, strerror(errno)); 1478 syslog(LOG_ERR, "poll failed; error: %d %s", errno, strerror(errno));
1547 if (errno == EINVAL) { 1479 if (errno == EINVAL) {
1548 close(fd); 1480 close(kvp_fd);
1549 exit(EXIT_FAILURE); 1481 exit(EXIT_FAILURE);
1550 } 1482 }
1551 else 1483 else
1552 continue; 1484 continue;
1553 } 1485 }
1554 1486
1555 len = recvfrom(fd, kvp_recv_buffer, kvp_recv_buffer_len, 0, 1487 len = read(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg));
1556 addr_p, &addr_l);
1557
1558 if (len < 0) {
1559 int saved_errno = errno;
1560 syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
1561 addr.nl_pid, errno, strerror(errno));
1562 1488
1563 if (saved_errno == ENOBUFS) { 1489 if (len != sizeof(struct hv_kvp_msg)) {
1564 syslog(LOG_ERR, "receive error: ignored"); 1490 syslog(LOG_ERR, "read failed; error:%d %s",
1565 continue; 1491 errno, strerror(errno));
1566 }
1567 1492
1568 close(fd); 1493 close(kvp_fd);
1569 return -1; 1494 return EXIT_FAILURE;
1570 } 1495 }
1571 1496
1572 if (addr.nl_pid) {
1573 syslog(LOG_WARNING, "Received packet from untrusted pid:%u",
1574 addr.nl_pid);
1575 continue;
1576 }
1577
1578 incoming_msg = (struct nlmsghdr *)kvp_recv_buffer;
1579
1580 if (incoming_msg->nlmsg_type != NLMSG_DONE)
1581 continue;
1582
1583 incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
1584 hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;
1585
1586 /* 1497 /*
1587 * We will use the KVP header information to pass back 1498 * We will use the KVP header information to pass back
1588 * the error from this daemon. So, first copy the state 1499 * the error from this daemon. So, first copy the state
@@ -1603,7 +1514,7 @@ int main(int argc, char *argv[])
1603 if (lic_version) { 1514 if (lic_version) {
1604 strcpy(lic_version, p); 1515 strcpy(lic_version, p);
1605 syslog(LOG_INFO, "KVP LIC Version: %s", 1516 syslog(LOG_INFO, "KVP LIC Version: %s",
1606 lic_version); 1517 lic_version);
1607 } else { 1518 } else {
1608 syslog(LOG_ERR, "malloc failed"); 1519 syslog(LOG_ERR, "malloc failed");
1609 } 1520 }
@@ -1702,7 +1613,6 @@ int main(int argc, char *argv[])
1702 goto kvp_done; 1613 goto kvp_done;
1703 } 1614 }
1704 1615
1705 hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data;
1706 key_name = (char *)hv_msg->body.kvp_enum_data.data.key; 1616 key_name = (char *)hv_msg->body.kvp_enum_data.data.key;
1707 key_value = (char *)hv_msg->body.kvp_enum_data.data.value; 1617 key_value = (char *)hv_msg->body.kvp_enum_data.data.value;
1708 1618
@@ -1753,31 +1663,17 @@ int main(int argc, char *argv[])
1753 hv_msg->error = HV_S_CONT; 1663 hv_msg->error = HV_S_CONT;
1754 break; 1664 break;
1755 } 1665 }
1756 /*
1757 * Send the value back to the kernel. The response is
1758 * already in the receive buffer. Update the cn_msg header to
1759 * reflect the key value that has been added to the message
1760 */
1761kvp_done:
1762
1763 incoming_cn_msg->id.idx = CN_KVP_IDX;
1764 incoming_cn_msg->id.val = CN_KVP_VAL;
1765 incoming_cn_msg->ack = 0;
1766 incoming_cn_msg->len = sizeof(struct hv_kvp_msg);
1767
1768 len = netlink_send(fd, incoming_cn_msg);
1769 if (len < 0) {
1770 int saved_errno = errno;
1771 syslog(LOG_ERR, "net_link send failed; error: %d %s", errno,
1772 strerror(errno));
1773
1774 if (saved_errno == ENOMEM || saved_errno == ENOBUFS) {
1775 syslog(LOG_ERR, "send error: ignored");
1776 continue;
1777 }
1778 1666
1667 /* Send the value back to the kernel. */
1668kvp_done:
1669 len = write(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg));
1670 if (len != sizeof(struct hv_kvp_msg)) {
1671 syslog(LOG_ERR, "write failed; error: %d %s", errno,
1672 strerror(errno));
1779 exit(EXIT_FAILURE); 1673 exit(EXIT_FAILURE);
1780 } 1674 }
1781 } 1675 }
1782 1676
1677 close(kvp_fd);
1678 exit(0);
1783} 1679}
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 506dd0148828..96234b638249 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -19,7 +19,6 @@
19 19
20 20
21#include <sys/types.h> 21#include <sys/types.h>
22#include <sys/socket.h>
23#include <sys/poll.h> 22#include <sys/poll.h>
24#include <sys/ioctl.h> 23#include <sys/ioctl.h>
25#include <fcntl.h> 24#include <fcntl.h>
@@ -30,21 +29,11 @@
30#include <string.h> 29#include <string.h>
31#include <ctype.h> 30#include <ctype.h>
32#include <errno.h> 31#include <errno.h>
33#include <arpa/inet.h>
34#include <linux/fs.h> 32#include <linux/fs.h>
35#include <linux/connector.h>
36#include <linux/hyperv.h> 33#include <linux/hyperv.h>
37#include <linux/netlink.h>
38#include <syslog.h> 34#include <syslog.h>
39#include <getopt.h> 35#include <getopt.h>
40 36
41static struct sockaddr_nl addr;
42
43#ifndef SOL_NETLINK
44#define SOL_NETLINK 270
45#endif
46
47
48/* Don't use syslog() in the function since that can cause write to disk */ 37/* Don't use syslog() in the function since that can cause write to disk */
49static int vss_do_freeze(char *dir, unsigned int cmd) 38static int vss_do_freeze(char *dir, unsigned int cmd)
50{ 39{
@@ -143,33 +132,6 @@ out:
143 return error; 132 return error;
144} 133}
145 134
146static int netlink_send(int fd, struct cn_msg *msg)
147{
148 struct nlmsghdr nlh = { .nlmsg_type = NLMSG_DONE };
149 unsigned int size;
150 struct msghdr message;
151 struct iovec iov[2];
152
153 size = sizeof(struct cn_msg) + msg->len;
154
155 nlh.nlmsg_pid = getpid();
156 nlh.nlmsg_len = NLMSG_LENGTH(size);
157
158 iov[0].iov_base = &nlh;
159 iov[0].iov_len = sizeof(nlh);
160
161 iov[1].iov_base = msg;
162 iov[1].iov_len = size;
163
164 memset(&message, 0, sizeof(message));
165 message.msg_name = &addr;
166 message.msg_namelen = sizeof(addr);
167 message.msg_iov = iov;
168 message.msg_iovlen = 2;
169
170 return sendmsg(fd, &message, 0);
171}
172
173void print_usage(char *argv[]) 135void print_usage(char *argv[])
174{ 136{
175 fprintf(stderr, "Usage: %s [options]\n" 137 fprintf(stderr, "Usage: %s [options]\n"
@@ -180,17 +142,14 @@ void print_usage(char *argv[])
180 142
181int main(int argc, char *argv[]) 143int main(int argc, char *argv[])
182{ 144{
183 int fd, len, nl_group; 145 int vss_fd, len;
184 int error; 146 int error;
185 struct cn_msg *message;
186 struct pollfd pfd; 147 struct pollfd pfd;
187 struct nlmsghdr *incoming_msg;
188 struct cn_msg *incoming_cn_msg;
189 int op; 148 int op;
190 struct hv_vss_msg *vss_msg; 149 struct hv_vss_msg vss_msg[1];
191 char *vss_recv_buffer;
192 size_t vss_recv_buffer_len;
193 int daemonize = 1, long_index = 0, opt; 150 int daemonize = 1, long_index = 0, opt;
151 int in_handshake = 1;
152 __u32 kernel_modver;
194 153
195 static struct option long_options[] = { 154 static struct option long_options[] = {
196 {"help", no_argument, 0, 'h' }, 155 {"help", no_argument, 0, 'h' },
@@ -217,98 +176,62 @@ int main(int argc, char *argv[])
217 openlog("Hyper-V VSS", 0, LOG_USER); 176 openlog("Hyper-V VSS", 0, LOG_USER);
218 syslog(LOG_INFO, "VSS starting; pid is:%d", getpid()); 177 syslog(LOG_INFO, "VSS starting; pid is:%d", getpid());
219 178
220 vss_recv_buffer_len = NLMSG_LENGTH(0) + sizeof(struct cn_msg) + sizeof(struct hv_vss_msg); 179 vss_fd = open("/dev/vmbus/hv_vss", O_RDWR);
221 vss_recv_buffer = calloc(1, vss_recv_buffer_len); 180 if (vss_fd < 0) {
222 if (!vss_recv_buffer) { 181 syslog(LOG_ERR, "open /dev/vmbus/hv_vss failed; error: %d %s",
223 syslog(LOG_ERR, "Failed to allocate netlink buffers"); 182 errno, strerror(errno));
224 exit(EXIT_FAILURE);
225 }
226
227 fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
228 if (fd < 0) {
229 syslog(LOG_ERR, "netlink socket creation failed; error:%d %s",
230 errno, strerror(errno));
231 exit(EXIT_FAILURE);
232 }
233 addr.nl_family = AF_NETLINK;
234 addr.nl_pad = 0;
235 addr.nl_pid = 0;
236 addr.nl_groups = 0;
237
238
239 error = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
240 if (error < 0) {
241 syslog(LOG_ERR, "bind failed; error:%d %s", errno, strerror(errno));
242 close(fd);
243 exit(EXIT_FAILURE);
244 }
245 nl_group = CN_VSS_IDX;
246 if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &nl_group, sizeof(nl_group)) < 0) {
247 syslog(LOG_ERR, "setsockopt failed; error:%d %s", errno, strerror(errno));
248 close(fd);
249 exit(EXIT_FAILURE); 183 exit(EXIT_FAILURE);
250 } 184 }
251 /* 185 /*
252 * Register ourselves with the kernel. 186 * Register ourselves with the kernel.
253 */ 187 */
254 message = (struct cn_msg *)vss_recv_buffer; 188 vss_msg->vss_hdr.operation = VSS_OP_REGISTER1;
255 message->id.idx = CN_VSS_IDX;
256 message->id.val = CN_VSS_VAL;
257 message->ack = 0;
258 vss_msg = (struct hv_vss_msg *)message->data;
259 vss_msg->vss_hdr.operation = VSS_OP_REGISTER;
260 189
261 message->len = sizeof(struct hv_vss_msg); 190 len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
262
263 len = netlink_send(fd, message);
264 if (len < 0) { 191 if (len < 0) {
265 syslog(LOG_ERR, "netlink_send failed; error:%d %s", errno, strerror(errno)); 192 syslog(LOG_ERR, "registration to kernel failed; error: %d %s",
266 close(fd); 193 errno, strerror(errno));
194 close(vss_fd);
267 exit(EXIT_FAILURE); 195 exit(EXIT_FAILURE);
268 } 196 }
269 197
270 pfd.fd = fd; 198 pfd.fd = vss_fd;
271 199
272 while (1) { 200 while (1) {
273 struct sockaddr *addr_p = (struct sockaddr *) &addr;
274 socklen_t addr_l = sizeof(addr);
275 pfd.events = POLLIN; 201 pfd.events = POLLIN;
276 pfd.revents = 0; 202 pfd.revents = 0;
277 203
278 if (poll(&pfd, 1, -1) < 0) { 204 if (poll(&pfd, 1, -1) < 0) {
279 syslog(LOG_ERR, "poll failed; error:%d %s", errno, strerror(errno)); 205 syslog(LOG_ERR, "poll failed; error:%d %s", errno, strerror(errno));
280 if (errno == EINVAL) { 206 if (errno == EINVAL) {
281 close(fd); 207 close(vss_fd);
282 exit(EXIT_FAILURE); 208 exit(EXIT_FAILURE);
283 } 209 }
284 else 210 else
285 continue; 211 continue;
286 } 212 }
287 213
288 len = recvfrom(fd, vss_recv_buffer, vss_recv_buffer_len, 0, 214 len = read(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
289 addr_p, &addr_l);
290 215
291 if (len < 0) { 216 if (in_handshake) {
292 syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s", 217 if (len != sizeof(kernel_modver)) {
293 addr.nl_pid, errno, strerror(errno)); 218 syslog(LOG_ERR, "invalid version negotiation");
294 close(fd); 219 exit(EXIT_FAILURE);
295 return -1; 220 }
296 } 221 kernel_modver = *(__u32 *)vss_msg;
297 222 in_handshake = 0;
298 if (addr.nl_pid) { 223 syslog(LOG_INFO, "VSS: kernel module version: %d",
299 syslog(LOG_WARNING, 224 kernel_modver);
300 "Received packet from untrusted pid:%u",
301 addr.nl_pid);
302 continue; 225 continue;
303 } 226 }
304 227
305 incoming_msg = (struct nlmsghdr *)vss_recv_buffer; 228 if (len != sizeof(struct hv_vss_msg)) {
306 229 syslog(LOG_ERR, "read failed; error:%d %s",
307 if (incoming_msg->nlmsg_type != NLMSG_DONE) 230 errno, strerror(errno));
308 continue; 231 close(vss_fd);
232 return EXIT_FAILURE;
233 }
309 234
310 incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
311 vss_msg = (struct hv_vss_msg *)incoming_cn_msg->data;
312 op = vss_msg->vss_hdr.operation; 235 op = vss_msg->vss_hdr.operation;
313 error = HV_S_OK; 236 error = HV_S_OK;
314 237
@@ -331,12 +254,14 @@ int main(int argc, char *argv[])
331 syslog(LOG_ERR, "Illegal op:%d\n", op); 254 syslog(LOG_ERR, "Illegal op:%d\n", op);
332 } 255 }
333 vss_msg->error = error; 256 vss_msg->error = error;
334 len = netlink_send(fd, incoming_cn_msg); 257 len = write(vss_fd, &error, sizeof(struct hv_vss_msg));
335 if (len < 0) { 258 if (len != sizeof(struct hv_vss_msg)) {
336 syslog(LOG_ERR, "net_link send failed; error:%d %s", 259 syslog(LOG_ERR, "write failed; error: %d %s", errno,
337 errno, strerror(errno)); 260 strerror(errno));
338 exit(EXIT_FAILURE); 261 exit(EXIT_FAILURE);
339 } 262 }
340 } 263 }
341 264
265 close(vss_fd);
266 exit(0);
342} 267}