aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/uio-howto.tmpl1112
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt4
-rw-r--r--Documentation/devicetree/bindings/misc/idt_89hpesx.txt44
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.txt6
-rw-r--r--Documentation/devicetree/bindings/sram/sram.txt6
-rw-r--r--Documentation/driver-api/index.rst1
-rw-r--r--Documentation/driver-api/uio-howto.rst705
-rw-r--r--Documentation/extcon/intel-int3496.txt22
-rw-r--r--Documentation/fpga/fpga-mgr.txt19
-rw-r--r--MAINTAINERS3
-rw-r--r--arch/arm/mach-davinci/da850.c10
-rw-r--r--arch/arm/mach-davinci/da8xx-dt.c12
-rw-r--r--arch/x86/Kbuild3
-rw-r--r--arch/x86/hyperv/Makefile1
-rw-r--r--arch/x86/hyperv/hv_init.c277
-rw-r--r--arch/x86/include/asm/mshyperv.h151
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h8
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c50
-rw-r--r--arch/x86/platform/goldfish/goldfish.c14
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/android/Kconfig12
-rw-r--r--drivers/android/binder.c1001
-rw-r--r--drivers/auxdisplay/ht16k33.c320
-rw-r--r--drivers/char/Kconfig5
-rw-r--r--drivers/char/apm-emulation.c7
-rw-r--r--drivers/char/ds1302.c1
-rw-r--r--drivers/char/mmtimer.c6
-rw-r--r--drivers/char/xilinx_hwicap/buffer_icap.c4
-rw-r--r--drivers/extcon/Kconfig10
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/devres.c2
-rw-r--r--drivers/extcon/extcon-adc-jack.c2
-rw-r--r--drivers/extcon/extcon-arizona.c20
-rw-r--r--drivers/extcon/extcon-axp288.c110
-rw-r--r--drivers/extcon/extcon-intel-int3496.c179
-rw-r--r--drivers/extcon/extcon-max14577.c6
-rw-r--r--drivers/extcon/extcon-max77693.c12
-rw-r--r--drivers/extcon/extcon-max77843.c24
-rw-r--r--drivers/extcon/extcon-palmas.c21
-rw-r--r--drivers/extcon/extcon-rt8973a.c6
-rw-r--r--drivers/extcon/extcon-sm5502.c6
-rw-r--r--drivers/extcon/extcon-usb-gpio.c7
-rw-r--r--drivers/extcon/extcon.c43
-rw-r--r--drivers/extcon/extcon.h62
-rw-r--r--drivers/fpga/fpga-mgr.c236
-rw-r--r--drivers/fpga/zynq-fpga.c233
-rw-r--r--drivers/fsi/Kconfig12
-rw-r--r--drivers/fsi/Makefile2
-rw-r--r--drivers/fsi/fsi-core.c59
-rw-r--r--drivers/hv/channel.c82
-rw-r--r--drivers/hv/channel_mgmt.c157
-rw-r--r--drivers/hv/connection.c158
-rw-r--r--drivers/hv/hv.c475
-rw-r--r--drivers/hv/hv_balloon.c1
-rw-r--r--drivers/hv/hv_fcopy.c29
-rw-r--r--drivers/hv/hv_kvp.c47
-rw-r--r--drivers/hv/hv_snapshot.c29
-rw-r--r--drivers/hv/hv_util.c283
-rw-r--r--drivers/hv/hyperv_vmbus.h363
-rw-r--r--drivers/hv/ring_buffer.c73
-rw-r--r--drivers/hv/vmbus_drv.c178
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c2
-rw-r--r--drivers/memory/ti-aemif.c8
-rw-r--r--drivers/misc/Kconfig49
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/eeprom/Kconfig10
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c1581
-rw-r--r--drivers/misc/genwqe/card_base.c1
-rw-r--r--drivers/misc/lkdtm_bugs.c7
-rw-r--r--drivers/misc/lkdtm_core.c4
-rw-r--r--drivers/misc/mei/amthif.c45
-rw-r--r--drivers/misc/mei/bus.c63
-rw-r--r--drivers/misc/mei/client.c145
-rw-r--r--drivers/misc/mei/client.h24
-rw-r--r--drivers/misc/mei/hbm.c2
-rw-r--r--drivers/misc/mei/hw-me.c53
-rw-r--r--drivers/misc/mei/hw-txe.c14
-rw-r--r--drivers/misc/mei/hw-txe.h2
-rw-r--r--drivers/misc/mei/init.c22
-rw-r--r--drivers/misc/mei/interrupt.c36
-rw-r--r--drivers/misc/mei/main.c48
-rw-r--r--drivers/misc/mei/mei_dev.h22
-rw-r--r--drivers/misc/mei/pci-me.c50
-rw-r--r--drivers/misc/mei/pci-txe.c69
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c1
-rw-r--r--drivers/misc/panel.c191
-rw-r--r--drivers/misc/sram-exec.c105
-rw-r--r--drivers/misc/sram.c55
-rw-r--r--drivers/misc/sram.h58
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c75
-rw-r--r--drivers/net/hyperv/netvsc.c21
-rw-r--r--drivers/nvmem/core.c45
-rw-r--r--drivers/nvmem/imx-ocotp.c1
-rw-r--r--drivers/platform/goldfish/pdev_bus.c13
-rw-r--r--drivers/uio/uio_hv_generic.c2
-rw-r--r--drivers/vme/vme.c15
-rw-r--r--drivers/w1/masters/ds2490.c141
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/w1/slaves/Kconfig8
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2405.c227
-rw-r--r--drivers/w1/w1.c8
-rw-r--r--drivers/w1/w1.h7
-rw-r--r--drivers/w1/w1_family.c7
-rw-r--r--drivers/w1/w1_family.h8
-rw-r--r--drivers/w1/w1_int.c7
-rw-r--r--drivers/w1/w1_int.h7
-rw-r--r--drivers/w1/w1_io.c8
-rw-r--r--drivers/w1/w1_log.h7
-rw-r--r--drivers/w1/w1_netlink.c7
-rw-r--r--drivers/w1/w1_netlink.h7
-rw-r--r--include/linux/extcon.h71
-rw-r--r--include/linux/extcon/extcon-adc-jack.h2
-rw-r--r--include/linux/fpga/fpga-mgr.h5
-rw-r--r--include/linux/fsi.h50
-rw-r--r--include/linux/hyperv.h128
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/platform_data/ti-aemif.h23
-rw-r--r--include/linux/sram.h27
-rw-r--r--include/linux/vme.h1
-rw-r--r--include/linux/vmw_vmci_defs.h7
-rw-r--r--include/uapi/linux/android/binder.h104
-rw-r--r--init/Kconfig7
-rw-r--r--lib/test_firmware.c92
-rwxr-xr-xscripts/checkkconfigsymbols.py8
-rw-r--r--tools/testing/selftests/firmware/Makefile2
-rwxr-xr-xtools/testing/selftests/firmware/fw_fallback.sh224
-rwxr-xr-xtools/testing/selftests/firmware/fw_userhelper.sh99
134 files changed, 7034 insertions, 3859 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index a6eb7dcd4dd5..5fd8f5effd0c 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -11,7 +11,7 @@ DOCBOOKS := z8530book.xml \
11 writing_usb_driver.xml networking.xml \ 11 writing_usb_driver.xml networking.xml \
12 kernel-api.xml filesystems.xml lsm.xml kgdb.xml \ 12 kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
13 gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \ 13 gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
14 genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ 14 genericirq.xml s390-drivers.xml scsi.xml \
15 sh.xml regulator.xml w1.xml \ 15 sh.xml regulator.xml w1.xml \
16 writing_musb_glue_layer.xml iio.xml 16 writing_musb_glue_layer.xml iio.xml
17 17
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
deleted file mode 100644
index 5210f8a577c6..000000000000
--- a/Documentation/DocBook/uio-howto.tmpl
+++ /dev/null
@@ -1,1112 +0,0 @@
1<?xml version="1.0" encoding="UTF-8"?>
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
3"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" []>
4
5<book id="index">
6<bookinfo>
7<title>The Userspace I/O HOWTO</title>
8
9<author>
10 <firstname>Hans-Jürgen</firstname>
11 <surname>Koch</surname>
12 <authorblurb><para>Linux developer, Linutronix</para></authorblurb>
13 <affiliation>
14 <orgname>
15 <ulink url="http://www.linutronix.de">Linutronix</ulink>
16 </orgname>
17
18 <address>
19 <email>hjk@hansjkoch.de</email>
20 </address>
21 </affiliation>
22</author>
23
24<copyright>
25 <year>2006-2008</year>
26 <holder>Hans-Jürgen Koch.</holder>
27</copyright>
28<copyright>
29 <year>2009</year>
30 <holder>Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)</holder>
31</copyright>
32
33<legalnotice>
34<para>
35This documentation is Free Software licensed under the terms of the
36GPL version 2.
37</para>
38</legalnotice>
39
40<pubdate>2006-12-11</pubdate>
41
42<abstract>
43 <para>This HOWTO describes concept and usage of Linux kernel's
44 Userspace I/O system.</para>
45</abstract>
46
47<revhistory>
48 <revision>
49 <revnumber>0.10</revnumber>
50 <date>2016-10-17</date>
51 <authorinitials>sch</authorinitials>
52 <revremark>Added generic hyperv driver
53 </revremark>
54 </revision>
55 <revision>
56 <revnumber>0.9</revnumber>
57 <date>2009-07-16</date>
58 <authorinitials>mst</authorinitials>
59 <revremark>Added generic pci driver
60 </revremark>
61 </revision>
62 <revision>
63 <revnumber>0.8</revnumber>
64 <date>2008-12-24</date>
65 <authorinitials>hjk</authorinitials>
66 <revremark>Added name attributes in mem and portio sysfs directories.
67 </revremark>
68 </revision>
69 <revision>
70 <revnumber>0.7</revnumber>
71 <date>2008-12-23</date>
72 <authorinitials>hjk</authorinitials>
73 <revremark>Added generic platform drivers and offset attribute.</revremark>
74 </revision>
75 <revision>
76 <revnumber>0.6</revnumber>
77 <date>2008-12-05</date>
78 <authorinitials>hjk</authorinitials>
79 <revremark>Added description of portio sysfs attributes.</revremark>
80 </revision>
81 <revision>
82 <revnumber>0.5</revnumber>
83 <date>2008-05-22</date>
84 <authorinitials>hjk</authorinitials>
85 <revremark>Added description of write() function.</revremark>
86 </revision>
87 <revision>
88 <revnumber>0.4</revnumber>
89 <date>2007-11-26</date>
90 <authorinitials>hjk</authorinitials>
91 <revremark>Removed section about uio_dummy.</revremark>
92 </revision>
93 <revision>
94 <revnumber>0.3</revnumber>
95 <date>2007-04-29</date>
96 <authorinitials>hjk</authorinitials>
97 <revremark>Added section about userspace drivers.</revremark>
98 </revision>
99 <revision>
100 <revnumber>0.2</revnumber>
101 <date>2007-02-13</date>
102 <authorinitials>hjk</authorinitials>
103 <revremark>Update after multiple mappings were added.</revremark>
104 </revision>
105 <revision>
106 <revnumber>0.1</revnumber>
107 <date>2006-12-11</date>
108 <authorinitials>hjk</authorinitials>
109 <revremark>First draft.</revremark>
110 </revision>
111</revhistory>
112</bookinfo>
113
114<chapter id="aboutthisdoc">
115<?dbhtml filename="aboutthis.html"?>
116<title>About this document</title>
117
118<sect1 id="translations">
119<?dbhtml filename="translations.html"?>
120<title>Translations</title>
121
122<para>If you know of any translations for this document, or you are
123interested in translating it, please email me
124<email>hjk@hansjkoch.de</email>.
125</para>
126</sect1>
127
128<sect1 id="preface">
129<title>Preface</title>
130 <para>
131 For many types of devices, creating a Linux kernel driver is
132 overkill. All that is really needed is some way to handle an
133 interrupt and provide access to the memory space of the
134 device. The logic of controlling the device does not
135 necessarily have to be within the kernel, as the device does
136 not need to take advantage of any of other resources that the
137 kernel provides. One such common class of devices that are
138 like this are for industrial I/O cards.
139 </para>
140 <para>
141 To address this situation, the userspace I/O system (UIO) was
142 designed. For typical industrial I/O cards, only a very small
143 kernel module is needed. The main part of the driver will run in
144 user space. This simplifies development and reduces the risk of
145 serious bugs within a kernel module.
146 </para>
147 <para>
148 Please note that UIO is not an universal driver interface. Devices
149 that are already handled well by other kernel subsystems (like
150 networking or serial or USB) are no candidates for an UIO driver.
151 Hardware that is ideally suited for an UIO driver fulfills all of
152 the following:
153 </para>
154<itemizedlist>
155<listitem>
156 <para>The device has memory that can be mapped. The device can be
157 controlled completely by writing to this memory.</para>
158</listitem>
159<listitem>
160 <para>The device usually generates interrupts.</para>
161</listitem>
162<listitem>
163 <para>The device does not fit into one of the standard kernel
164 subsystems.</para>
165</listitem>
166</itemizedlist>
167</sect1>
168
169<sect1 id="thanks">
170<title>Acknowledgments</title>
171 <para>I'd like to thank Thomas Gleixner and Benedikt Spranger of
172 Linutronix, who have not only written most of the UIO code, but also
173 helped greatly writing this HOWTO by giving me all kinds of background
174 information.</para>
175</sect1>
176
177<sect1 id="feedback">
178<title>Feedback</title>
179 <para>Find something wrong with this document? (Or perhaps something
180 right?) I would love to hear from you. Please email me at
181 <email>hjk@hansjkoch.de</email>.</para>
182</sect1>
183</chapter>
184
185<chapter id="about">
186<?dbhtml filename="about.html"?>
187<title>About UIO</title>
188
189<para>If you use UIO for your card's driver, here's what you get:</para>
190
191<itemizedlist>
192<listitem>
193 <para>only one small kernel module to write and maintain.</para>
194</listitem>
195<listitem>
196 <para>develop the main part of your driver in user space,
197 with all the tools and libraries you're used to.</para>
198</listitem>
199<listitem>
200 <para>bugs in your driver won't crash the kernel.</para>
201</listitem>
202<listitem>
203 <para>updates of your driver can take place without recompiling
204 the kernel.</para>
205</listitem>
206</itemizedlist>
207
208<sect1 id="how_uio_works">
209<title>How UIO works</title>
210 <para>
211 Each UIO device is accessed through a device file and several
212 sysfs attribute files. The device file will be called
213 <filename>/dev/uio0</filename> for the first device, and
214 <filename>/dev/uio1</filename>, <filename>/dev/uio2</filename>
215 and so on for subsequent devices.
216 </para>
217
218 <para><filename>/dev/uioX</filename> is used to access the
219 address space of the card. Just use
220 <function>mmap()</function> to access registers or RAM
221 locations of your card.
222 </para>
223
224 <para>
225 Interrupts are handled by reading from
226 <filename>/dev/uioX</filename>. A blocking
227 <function>read()</function> from
228 <filename>/dev/uioX</filename> will return as soon as an
229 interrupt occurs. You can also use
230 <function>select()</function> on
231 <filename>/dev/uioX</filename> to wait for an interrupt. The
232 integer value read from <filename>/dev/uioX</filename>
233 represents the total interrupt count. You can use this number
234 to figure out if you missed some interrupts.
235 </para>
236 <para>
237 For some hardware that has more than one interrupt source internally,
238 but not separate IRQ mask and status registers, there might be
239 situations where userspace cannot determine what the interrupt source
240 was if the kernel handler disables them by writing to the chip's IRQ
241 register. In such a case, the kernel has to disable the IRQ completely
242 to leave the chip's register untouched. Now the userspace part can
243 determine the cause of the interrupt, but it cannot re-enable
244 interrupts. Another cornercase is chips where re-enabling interrupts
245 is a read-modify-write operation to a combined IRQ status/acknowledge
246 register. This would be racy if a new interrupt occurred
247 simultaneously.
248 </para>
249 <para>
250 To address these problems, UIO also implements a write() function. It
251 is normally not used and can be ignored for hardware that has only a
252 single interrupt source or has separate IRQ mask and status registers.
253 If you need it, however, a write to <filename>/dev/uioX</filename>
254 will call the <function>irqcontrol()</function> function implemented
255 by the driver. You have to write a 32-bit value that is usually either
256 0 or 1 to disable or enable interrupts. If a driver does not implement
257 <function>irqcontrol()</function>, <function>write()</function> will
258 return with <varname>-ENOSYS</varname>.
259 </para>
260
261 <para>
262 To handle interrupts properly, your custom kernel module can
263 provide its own interrupt handler. It will automatically be
264 called by the built-in handler.
265 </para>
266
267 <para>
268 For cards that don't generate interrupts but need to be
269 polled, there is the possibility to set up a timer that
270 triggers the interrupt handler at configurable time intervals.
271 This interrupt simulation is done by calling
272 <function>uio_event_notify()</function>
273 from the timer's event handler.
274 </para>
275
276 <para>
277 Each driver provides attributes that are used to read or write
278 variables. These attributes are accessible through sysfs
279 files. A custom kernel driver module can add its own
280 attributes to the device owned by the uio driver, but not added
281 to the UIO device itself at this time. This might change in the
282 future if it would be found to be useful.
283 </para>
284
285 <para>
286 The following standard attributes are provided by the UIO
287 framework:
288 </para>
289<itemizedlist>
290<listitem>
291 <para>
292 <filename>name</filename>: The name of your device. It is
293 recommended to use the name of your kernel module for this.
294 </para>
295</listitem>
296<listitem>
297 <para>
298 <filename>version</filename>: A version string defined by your
299 driver. This allows the user space part of your driver to deal
300 with different versions of the kernel module.
301 </para>
302</listitem>
303<listitem>
304 <para>
305 <filename>event</filename>: The total number of interrupts
306 handled by the driver since the last time the device node was
307 read.
308 </para>
309</listitem>
310</itemizedlist>
311<para>
312 These attributes appear under the
313 <filename>/sys/class/uio/uioX</filename> directory. Please
314 note that this directory might be a symlink, and not a real
315 directory. Any userspace code that accesses it must be able
316 to handle this.
317</para>
318<para>
319 Each UIO device can make one or more memory regions available for
320 memory mapping. This is necessary because some industrial I/O cards
321 require access to more than one PCI memory region in a driver.
322</para>
323<para>
324 Each mapping has its own directory in sysfs, the first mapping
325 appears as <filename>/sys/class/uio/uioX/maps/map0/</filename>.
326 Subsequent mappings create directories <filename>map1/</filename>,
327 <filename>map2/</filename>, and so on. These directories will only
328 appear if the size of the mapping is not 0.
329</para>
330<para>
331 Each <filename>mapX/</filename> directory contains four read-only files
332 that show attributes of the memory:
333</para>
334<itemizedlist>
335<listitem>
336 <para>
337 <filename>name</filename>: A string identifier for this mapping. This
338 is optional, the string can be empty. Drivers can set this to make it
339 easier for userspace to find the correct mapping.
340 </para>
341</listitem>
342<listitem>
343 <para>
344 <filename>addr</filename>: The address of memory that can be mapped.
345 </para>
346</listitem>
347<listitem>
348 <para>
349 <filename>size</filename>: The size, in bytes, of the memory
350 pointed to by addr.
351 </para>
352</listitem>
353<listitem>
354 <para>
355 <filename>offset</filename>: The offset, in bytes, that has to be
356 added to the pointer returned by <function>mmap()</function> to get
357 to the actual device memory. This is important if the device's memory
358 is not page aligned. Remember that pointers returned by
359 <function>mmap()</function> are always page aligned, so it is good
360 style to always add this offset.
361 </para>
362</listitem>
363</itemizedlist>
364
365<para>
366 From userspace, the different mappings are distinguished by adjusting
367 the <varname>offset</varname> parameter of the
368 <function>mmap()</function> call. To map the memory of mapping N, you
369 have to use N times the page size as your offset:
370</para>
371<programlisting format="linespecific">
372offset = N * getpagesize();
373</programlisting>
374
375<para>
376 Sometimes there is hardware with memory-like regions that can not be
377 mapped with the technique described here, but there are still ways to
378 access them from userspace. The most common example are x86 ioports.
379 On x86 systems, userspace can access these ioports using
380 <function>ioperm()</function>, <function>iopl()</function>,
381 <function>inb()</function>, <function>outb()</function>, and similar
382 functions.
383</para>
384<para>
385 Since these ioport regions can not be mapped, they will not appear under
386 <filename>/sys/class/uio/uioX/maps/</filename> like the normal memory
387 described above. Without information about the port regions a hardware
388 has to offer, it becomes difficult for the userspace part of the
389 driver to find out which ports belong to which UIO device.
390</para>
391<para>
392 To address this situation, the new directory
393 <filename>/sys/class/uio/uioX/portio/</filename> was added. It only
394 exists if the driver wants to pass information about one or more port
395 regions to userspace. If that is the case, subdirectories named
396 <filename>port0</filename>, <filename>port1</filename>, and so on,
397 will appear underneath
398 <filename>/sys/class/uio/uioX/portio/</filename>.
399</para>
400<para>
401 Each <filename>portX/</filename> directory contains four read-only
402 files that show name, start, size, and type of the port region:
403</para>
404<itemizedlist>
405<listitem>
406 <para>
407 <filename>name</filename>: A string identifier for this port region.
408 The string is optional and can be empty. Drivers can set it to make it
409 easier for userspace to find a certain port region.
410 </para>
411</listitem>
412<listitem>
413 <para>
414 <filename>start</filename>: The first port of this region.
415 </para>
416</listitem>
417<listitem>
418 <para>
419 <filename>size</filename>: The number of ports in this region.
420 </para>
421</listitem>
422<listitem>
423 <para>
424 <filename>porttype</filename>: A string describing the type of port.
425 </para>
426</listitem>
427</itemizedlist>
428
429
430</sect1>
431</chapter>
432
433<chapter id="custom_kernel_module" xreflabel="Writing your own kernel module">
434<?dbhtml filename="custom_kernel_module.html"?>
435<title>Writing your own kernel module</title>
436 <para>
437 Please have a look at <filename>uio_cif.c</filename> as an
438 example. The following paragraphs explain the different
439 sections of this file.
440 </para>
441
442<sect1 id="uio_info">
443<title>struct uio_info</title>
444 <para>
445 This structure tells the framework the details of your driver,
446 Some of the members are required, others are optional.
447 </para>
448
449<itemizedlist>
450<listitem><para>
451<varname>const char *name</varname>: Required. The name of your driver as
452it will appear in sysfs. I recommend using the name of your module for this.
453</para></listitem>
454
455<listitem><para>
456<varname>const char *version</varname>: Required. This string appears in
457<filename>/sys/class/uio/uioX/version</filename>.
458</para></listitem>
459
460<listitem><para>
461<varname>struct uio_mem mem[ MAX_UIO_MAPS ]</varname>: Required if you
462have memory that can be mapped with <function>mmap()</function>. For each
463mapping you need to fill one of the <varname>uio_mem</varname> structures.
464See the description below for details.
465</para></listitem>
466
467<listitem><para>
468<varname>struct uio_port port[ MAX_UIO_PORTS_REGIONS ]</varname>: Required
469if you want to pass information about ioports to userspace. For each port
470region you need to fill one of the <varname>uio_port</varname> structures.
471See the description below for details.
472</para></listitem>
473
474<listitem><para>
475<varname>long irq</varname>: Required. If your hardware generates an
476interrupt, it's your modules task to determine the irq number during
477initialization. If you don't have a hardware generated interrupt but
478want to trigger the interrupt handler in some other way, set
479<varname>irq</varname> to <varname>UIO_IRQ_CUSTOM</varname>.
480If you had no interrupt at all, you could set
481<varname>irq</varname> to <varname>UIO_IRQ_NONE</varname>, though this
482rarely makes sense.
483</para></listitem>
484
485<listitem><para>
486<varname>unsigned long irq_flags</varname>: Required if you've set
487<varname>irq</varname> to a hardware interrupt number. The flags given
488here will be used in the call to <function>request_irq()</function>.
489</para></listitem>
490
491<listitem><para>
492<varname>int (*mmap)(struct uio_info *info, struct vm_area_struct
493*vma)</varname>: Optional. If you need a special
494<function>mmap()</function> function, you can set it here. If this
495pointer is not NULL, your <function>mmap()</function> will be called
496instead of the built-in one.
497</para></listitem>
498
499<listitem><para>
500<varname>int (*open)(struct uio_info *info, struct inode *inode)
501</varname>: Optional. You might want to have your own
502<function>open()</function>, e.g. to enable interrupts only when your
503device is actually used.
504</para></listitem>
505
506<listitem><para>
507<varname>int (*release)(struct uio_info *info, struct inode *inode)
508</varname>: Optional. If you define your own
509<function>open()</function>, you will probably also want a custom
510<function>release()</function> function.
511</para></listitem>
512
513<listitem><para>
514<varname>int (*irqcontrol)(struct uio_info *info, s32 irq_on)
515</varname>: Optional. If you need to be able to enable or disable
516interrupts from userspace by writing to <filename>/dev/uioX</filename>,
517you can implement this function. The parameter <varname>irq_on</varname>
518will be 0 to disable interrupts and 1 to enable them.
519</para></listitem>
520</itemizedlist>
521
522<para>
523Usually, your device will have one or more memory regions that can be mapped
524to user space. For each region, you have to set up a
525<varname>struct uio_mem</varname> in the <varname>mem[]</varname> array.
526Here's a description of the fields of <varname>struct uio_mem</varname>:
527</para>
528
529<itemizedlist>
530<listitem><para>
531<varname>const char *name</varname>: Optional. Set this to help identify
532the memory region, it will show up in the corresponding sysfs node.
533</para></listitem>
534
535<listitem><para>
536<varname>int memtype</varname>: Required if the mapping is used. Set this to
537<varname>UIO_MEM_PHYS</varname> if you you have physical memory on your
538card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical
539memory (e.g. allocated with <function>kmalloc()</function>). There's also
540<varname>UIO_MEM_VIRTUAL</varname> for virtual memory.
541</para></listitem>
542
543<listitem><para>
544<varname>phys_addr_t addr</varname>: Required if the mapping is used.
545Fill in the address of your memory block. This address is the one that
546appears in sysfs.
547</para></listitem>
548
549<listitem><para>
550<varname>resource_size_t size</varname>: Fill in the size of the
551memory block that <varname>addr</varname> points to. If <varname>size</varname>
552is zero, the mapping is considered unused. Note that you
553<emphasis>must</emphasis> initialize <varname>size</varname> with zero for
554all unused mappings.
555</para></listitem>
556
557<listitem><para>
558<varname>void *internal_addr</varname>: If you have to access this memory
559region from within your kernel module, you will want to map it internally by
560using something like <function>ioremap()</function>. Addresses
561returned by this function cannot be mapped to user space, so you must not
562store it in <varname>addr</varname>. Use <varname>internal_addr</varname>
563instead to remember such an address.
564</para></listitem>
565</itemizedlist>
566
567<para>
568Please do not touch the <varname>map</varname> element of
569<varname>struct uio_mem</varname>! It is used by the UIO framework
570to set up sysfs files for this mapping. Simply leave it alone.
571</para>
572
573<para>
574Sometimes, your device can have one or more port regions which can not be
575mapped to userspace. But if there are other possibilities for userspace to
576access these ports, it makes sense to make information about the ports
577available in sysfs. For each region, you have to set up a
578<varname>struct uio_port</varname> in the <varname>port[]</varname> array.
579Here's a description of the fields of <varname>struct uio_port</varname>:
580</para>
581
582<itemizedlist>
583<listitem><para>
584<varname>char *porttype</varname>: Required. Set this to one of the predefined
585constants. Use <varname>UIO_PORT_X86</varname> for the ioports found in x86
586architectures.
587</para></listitem>
588
589<listitem><para>
590<varname>unsigned long start</varname>: Required if the port region is used.
591Fill in the number of the first port of this region.
592</para></listitem>
593
594<listitem><para>
595<varname>unsigned long size</varname>: Fill in the number of ports in this
596region. If <varname>size</varname> is zero, the region is considered unused.
597Note that you <emphasis>must</emphasis> initialize <varname>size</varname>
598with zero for all unused regions.
599</para></listitem>
600</itemizedlist>
601
602<para>
603Please do not touch the <varname>portio</varname> element of
604<varname>struct uio_port</varname>! It is used internally by the UIO
605framework to set up sysfs files for this region. Simply leave it alone.
606</para>
607
608</sect1>
609
610<sect1 id="adding_irq_handler">
611<title>Adding an interrupt handler</title>
612 <para>
613 What you need to do in your interrupt handler depends on your
614 hardware and on how you want to handle it. You should try to
615 keep the amount of code in your kernel interrupt handler low.
616 If your hardware requires no action that you
617 <emphasis>have</emphasis> to perform after each interrupt,
618 then your handler can be empty.</para> <para>If, on the other
619 hand, your hardware <emphasis>needs</emphasis> some action to
620 be performed after each interrupt, then you
621 <emphasis>must</emphasis> do it in your kernel module. Note
622 that you cannot rely on the userspace part of your driver. Your
623 userspace program can terminate at any time, possibly leaving
624 your hardware in a state where proper interrupt handling is
625 still required.
626 </para>
627
628 <para>
629 There might also be applications where you want to read data
630 from your hardware at each interrupt and buffer it in a piece
631 of kernel memory you've allocated for that purpose. With this
632 technique you could avoid loss of data if your userspace
633 program misses an interrupt.
634 </para>
635
636 <para>
637 A note on shared interrupts: Your driver should support
638 interrupt sharing whenever this is possible. It is possible if
639 and only if your driver can detect whether your hardware has
640 triggered the interrupt or not. This is usually done by looking
641 at an interrupt status register. If your driver sees that the
642 IRQ bit is actually set, it will perform its actions, and the
643 handler returns IRQ_HANDLED. If the driver detects that it was
644 not your hardware that caused the interrupt, it will do nothing
645 and return IRQ_NONE, allowing the kernel to call the next
646 possible interrupt handler.
647 </para>
648
649 <para>
650 If you decide not to support shared interrupts, your card
651 won't work in computers with no free interrupts. As this
652 frequently happens on the PC platform, you can save yourself a
653 lot of trouble by supporting interrupt sharing.
654 </para>
655</sect1>
656
657<sect1 id="using_uio_pdrv">
658<title>Using uio_pdrv for platform devices</title>
659 <para>
660 In many cases, UIO drivers for platform devices can be handled in a
661 generic way. In the same place where you define your
662 <varname>struct platform_device</varname>, you simply also implement
663 your interrupt handler and fill your
664 <varname>struct uio_info</varname>. A pointer to this
665 <varname>struct uio_info</varname> is then used as
666 <varname>platform_data</varname> for your platform device.
667 </para>
668 <para>
669 You also need to set up an array of <varname>struct resource</varname>
670 containing addresses and sizes of your memory mappings. This
671 information is passed to the driver using the
672 <varname>.resource</varname> and <varname>.num_resources</varname>
673 elements of <varname>struct platform_device</varname>.
674 </para>
675 <para>
676 You now have to set the <varname>.name</varname> element of
677 <varname>struct platform_device</varname> to
678 <varname>"uio_pdrv"</varname> to use the generic UIO platform device
679 driver. This driver will fill the <varname>mem[]</varname> array
680 according to the resources given, and register the device.
681 </para>
682 <para>
683 The advantage of this approach is that you only have to edit a file
684 you need to edit anyway. You do not have to create an extra driver.
685 </para>
686</sect1>
687
688<sect1 id="using_uio_pdrv_genirq">
689<title>Using uio_pdrv_genirq for platform devices</title>
690 <para>
691 Especially in embedded devices, you frequently find chips where the
692 irq pin is tied to its own dedicated interrupt line. In such cases,
693 where you can be really sure the interrupt is not shared, we can take
694 the concept of <varname>uio_pdrv</varname> one step further and use a
695 generic interrupt handler. That's what
696 <varname>uio_pdrv_genirq</varname> does.
697 </para>
698 <para>
699 The setup for this driver is the same as described above for
700 <varname>uio_pdrv</varname>, except that you do not implement an
701 interrupt handler. The <varname>.handler</varname> element of
702 <varname>struct uio_info</varname> must remain
703 <varname>NULL</varname>. The <varname>.irq_flags</varname> element
704 must not contain <varname>IRQF_SHARED</varname>.
705 </para>
706 <para>
707 You will set the <varname>.name</varname> element of
708 <varname>struct platform_device</varname> to
709 <varname>"uio_pdrv_genirq"</varname> to use this driver.
710 </para>
711 <para>
712 The generic interrupt handler of <varname>uio_pdrv_genirq</varname>
713 will simply disable the interrupt line using
714 <function>disable_irq_nosync()</function>. After doing its work,
715 userspace can reenable the interrupt by writing 0x00000001 to the UIO
716 device file. The driver already implements an
717 <function>irq_control()</function> to make this possible, you must not
718 implement your own.
719 </para>
720 <para>
721 Using <varname>uio_pdrv_genirq</varname> not only saves a few lines of
722 interrupt handler code. You also do not need to know anything about
723 the chip's internal registers to create the kernel part of the driver.
724 All you need to know is the irq number of the pin the chip is
725 connected to.
726 </para>
727</sect1>
728
729<sect1 id="using-uio_dmem_genirq">
730<title>Using uio_dmem_genirq for platform devices</title>
731 <para>
732 In addition to statically allocated memory ranges, there may also be
733 a desire to use dynamically allocated regions in a user space driver.
734 In particular, being able to access memory made available through the
735 dma-mapping API may be particularly useful. The
736 <varname>uio_dmem_genirq</varname> driver provides a way to accomplish
737 this.
738 </para>
739 <para>
740 This driver is used in a similar manner to the
741 <varname>"uio_pdrv_genirq"</varname> driver with respect to interrupt
742 configuration and handling.
743 </para>
744 <para>
745 Set the <varname>.name</varname> element of
746 <varname>struct platform_device</varname> to
747 <varname>"uio_dmem_genirq"</varname> to use this driver.
748 </para>
749 <para>
750 When using this driver, fill in the <varname>.platform_data</varname>
751 element of <varname>struct platform_device</varname>, which is of type
752 <varname>struct uio_dmem_genirq_pdata</varname> and which contains the
753 following elements:
754 </para>
755 <itemizedlist>
756 <listitem><para><varname>struct uio_info uioinfo</varname>: The same
757 structure used as the <varname>uio_pdrv_genirq</varname> platform
758 data</para></listitem>
759 <listitem><para><varname>unsigned int *dynamic_region_sizes</varname>:
760 Pointer to list of sizes of dynamic memory regions to be mapped into
761 user space.
762 </para></listitem>
763 <listitem><para><varname>unsigned int num_dynamic_regions</varname>:
764 Number of elements in <varname>dynamic_region_sizes</varname> array.
765 </para></listitem>
766 </itemizedlist>
767 <para>
768 The dynamic regions defined in the platform data will be appended to
769 the <varname> mem[] </varname> array after the platform device
770 resources, which implies that the total number of static and dynamic
771 memory regions cannot exceed <varname>MAX_UIO_MAPS</varname>.
772 </para>
773 <para>
774 The dynamic memory regions will be allocated when the UIO device file,
775 <varname>/dev/uioX</varname> is opened.
776 Similar to static memory resources, the memory region information for
777 dynamic regions is then visible via sysfs at
778 <varname>/sys/class/uio/uioX/maps/mapY/*</varname>.
779 The dynamic memory regions will be freed when the UIO device file is
780 closed. When no processes are holding the device file open, the address
781 returned to userspace is ~0.
782 </para>
783</sect1>
784
785</chapter>
786
787<chapter id="userspace_driver" xreflabel="Writing a driver in user space">
788<?dbhtml filename="userspace_driver.html"?>
789<title>Writing a driver in userspace</title>
790 <para>
791 Once you have a working kernel module for your hardware, you can
792 write the userspace part of your driver. You don't need any special
793 libraries, your driver can be written in any reasonable language,
794 you can use floating point numbers and so on. In short, you can
795 use all the tools and libraries you'd normally use for writing a
796 userspace application.
797 </para>
798
799<sect1 id="getting_uio_information">
800<title>Getting information about your UIO device</title>
801 <para>
802 Information about all UIO devices is available in sysfs. The
803 first thing you should do in your driver is check
804 <varname>name</varname> and <varname>version</varname> to
805 make sure you're talking to the right device and that its kernel
806 driver has the version you expect.
807 </para>
808 <para>
809 You should also make sure that the memory mapping you need
810 exists and has the size you expect.
811 </para>
812 <para>
813 There is a tool called <varname>lsuio</varname> that lists
814 UIO devices and their attributes. It is available here:
815 </para>
816 <para>
817 <ulink url="http://www.osadl.org/projects/downloads/UIO/user/">
818 http://www.osadl.org/projects/downloads/UIO/user/</ulink>
819 </para>
820 <para>
821 With <varname>lsuio</varname> you can quickly check if your
822 kernel module is loaded and which attributes it exports.
823 Have a look at the manpage for details.
824 </para>
825 <para>
826 The source code of <varname>lsuio</varname> can serve as an
827 example for getting information about an UIO device.
828 The file <filename>uio_helper.c</filename> contains a lot of
829 functions you could use in your userspace driver code.
830 </para>
831</sect1>
832
833<sect1 id="mmap_device_memory">
834<title>mmap() device memory</title>
835 <para>
836 After you made sure you've got the right device with the
837 memory mappings you need, all you have to do is to call
838 <function>mmap()</function> to map the device's memory
839 to userspace.
840 </para>
841 <para>
842 The parameter <varname>offset</varname> of the
843 <function>mmap()</function> call has a special meaning
844 for UIO devices: It is used to select which mapping of
845 your device you want to map. To map the memory of
846 mapping N, you have to use N times the page size as
847 your offset:
848 </para>
849<programlisting format="linespecific">
850 offset = N * getpagesize();
851</programlisting>
852 <para>
853 N starts from zero, so if you've got only one memory
854 range to map, set <varname>offset = 0</varname>.
855 A drawback of this technique is that memory is always
856 mapped beginning with its start address.
857 </para>
858</sect1>
859
860<sect1 id="wait_for_interrupts">
861<title>Waiting for interrupts</title>
862 <para>
863 After you successfully mapped your device's memory, you
864 can access it like an ordinary array. Usually, you will
865 perform some initialization. After that, your hardware
866 starts working and will generate an interrupt as soon
867 as it's finished, has some data available, or needs your
868 attention because an error occurred.
869 </para>
870 <para>
871 <filename>/dev/uioX</filename> is a read-only file. A
872 <function>read()</function> will always block until an
873 interrupt occurs. There is only one legal value for the
874 <varname>count</varname> parameter of
875 <function>read()</function>, and that is the size of a
876 signed 32 bit integer (4). Any other value for
877 <varname>count</varname> causes <function>read()</function>
878 to fail. The signed 32 bit integer read is the interrupt
879 count of your device. If the value is one more than the value
880 you read the last time, everything is OK. If the difference
881 is greater than one, you missed interrupts.
882 </para>
883 <para>
884 You can also use <function>select()</function> on
885 <filename>/dev/uioX</filename>.
886 </para>
887</sect1>
888
889</chapter>
890
891<chapter id="uio_pci_generic" xreflabel="Using Generic driver for PCI cards">
892<?dbhtml filename="uio_pci_generic.html"?>
893<title>Generic PCI UIO driver</title>
894 <para>
895 The generic driver is a kernel module named uio_pci_generic.
896 It can work with any device compliant to PCI 2.3 (circa 2002) and
897 any compliant PCI Express device. Using this, you only need to
898 write the userspace driver, removing the need to write
899 a hardware-specific kernel module.
900 </para>
901
902<sect1 id="uio_pci_generic_binding">
903<title>Making the driver recognize the device</title>
904 <para>
905Since the driver does not declare any device ids, it will not get loaded
906automatically and will not automatically bind to any devices; you must load it
907and allocate an id to the driver yourself. For example:
908 <programlisting>
909 modprobe uio_pci_generic
910 echo &quot;8086 10f5&quot; &gt; /sys/bus/pci/drivers/uio_pci_generic/new_id
911 </programlisting>
912 </para>
913 <para>
914If there already is a hardware specific kernel driver for your device, the
915generic driver still won't bind to it, in this case if you want to use the
916generic driver (why would you?) you'll have to manually unbind the hardware
917specific driver and bind the generic driver, like this:
918 <programlisting>
919 echo -n 0000:00:19.0 &gt; /sys/bus/pci/drivers/e1000e/unbind
920 echo -n 0000:00:19.0 &gt; /sys/bus/pci/drivers/uio_pci_generic/bind
921 </programlisting>
922 </para>
923 <para>
924You can verify that the device has been bound to the driver
925by looking for it in sysfs, for example like the following:
926 <programlisting>
927 ls -l /sys/bus/pci/devices/0000:00:19.0/driver
928 </programlisting>
929Which if successful should print
930 <programlisting>
931 .../0000:00:19.0/driver -&gt; ../../../bus/pci/drivers/uio_pci_generic
932 </programlisting>
933Note that the generic driver will not bind to old PCI 2.2 devices.
934If binding the device failed, run the following command:
935 <programlisting>
936 dmesg
937 </programlisting>
938and look in the output for failure reasons.
939 </para>
940</sect1>
941
942<sect1 id="uio_pci_generic_internals">
943<title>Things to know about uio_pci_generic</title>
944 <para>
945Interrupts are handled using the Interrupt Disable bit in the PCI command
946register and Interrupt Status bit in the PCI status register. All devices
947compliant to PCI 2.3 (circa 2002) and all compliant PCI Express devices should
948support these bits. uio_pci_generic detects this support, and won't bind to
949devices which do not support the Interrupt Disable Bit in the command register.
950 </para>
951 <para>
952On each interrupt, uio_pci_generic sets the Interrupt Disable bit.
953This prevents the device from generating further interrupts
954until the bit is cleared. The userspace driver should clear this
955bit before blocking and waiting for more interrupts.
956 </para>
957</sect1>
958<sect1 id="uio_pci_generic_userspace">
959<title>Writing userspace driver using uio_pci_generic</title>
960 <para>
961Userspace driver can use pci sysfs interface, or the
962libpci library that wraps it, to talk to the device and to
963re-enable interrupts by writing to the command register.
964 </para>
965</sect1>
966<sect1 id="uio_pci_generic_example">
967<title>Example code using uio_pci_generic</title>
968 <para>
969Here is some sample userspace driver code using uio_pci_generic:
970<programlisting>
971#include &lt;stdlib.h&gt;
972#include &lt;stdio.h&gt;
973#include &lt;unistd.h&gt;
974#include &lt;sys/types.h&gt;
975#include &lt;sys/stat.h&gt;
976#include &lt;fcntl.h&gt;
977#include &lt;errno.h&gt;
978
979int main()
980{
981 int uiofd;
982 int configfd;
983 int err;
984 int i;
985 unsigned icount;
986 unsigned char command_high;
987
988 uiofd = open(&quot;/dev/uio0&quot;, O_RDONLY);
989 if (uiofd &lt; 0) {
990 perror(&quot;uio open:&quot;);
991 return errno;
992 }
993 configfd = open(&quot;/sys/class/uio/uio0/device/config&quot;, O_RDWR);
994 if (configfd &lt; 0) {
995 perror(&quot;config open:&quot;);
996 return errno;
997 }
998
999 /* Read and cache command value */
1000 err = pread(configfd, &amp;command_high, 1, 5);
1001 if (err != 1) {
1002 perror(&quot;command config read:&quot;);
1003 return errno;
1004 }
1005 command_high &amp;= ~0x4;
1006
1007 for(i = 0;; ++i) {
1008 /* Print out a message, for debugging. */
1009 if (i == 0)
1010 fprintf(stderr, &quot;Started uio test driver.\n&quot;);
1011 else
1012			fprintf(stderr, &quot;Interrupts: %u\n&quot;, icount);
1013
1014 /****************************************/
1015 /* Here we got an interrupt from the
1016 device. Do something to it. */
1017 /****************************************/
1018
1019 /* Re-enable interrupts. */
1020 err = pwrite(configfd, &amp;command_high, 1, 5);
1021 if (err != 1) {
1022 perror(&quot;config write:&quot;);
1023 break;
1024 }
1025
1026 /* Wait for next interrupt. */
1027 err = read(uiofd, &amp;icount, 4);
1028 if (err != 4) {
1029 perror(&quot;uio read:&quot;);
1030 break;
1031 }
1032
1033 }
1034 return errno;
1035}
1036
1037</programlisting>
1038 </para>
1039</sect1>
1040
1041</chapter>
1042
1043<chapter id="uio_hv_generic" xreflabel="Using Generic driver for Hyper-V VMBUS">
1044<?dbhtml filename="uio_hv_generic.html"?>
1045<title>Generic Hyper-V UIO driver</title>
1046 <para>
1047 The generic driver is a kernel module named uio_hv_generic.
1048 It supports devices on the Hyper-V VMBus similar to uio_pci_generic
1049 on PCI bus.
1050 </para>
1051
1052<sect1 id="uio_hv_generic_binding">
1053<title>Making the driver recognize the device</title>
1054 <para>
1055Since the driver does not declare any device GUID's, it will not get loaded
1056automatically and will not automatically bind to any devices; you must load it
1057and allocate an id to the driver yourself. For example, to use the network device
1058GUID:
1059 <programlisting>
1060 modprobe uio_hv_generic
1061 echo &quot;f8615163-df3e-46c5-913f-f2d2f965ed0e&quot; &gt; /sys/bus/vmbus/drivers/uio_hv_generic/new_id
1062 </programlisting>
1063 </para>
1064 <para>
1065If there already is a hardware specific kernel driver for the device, the
1066generic driver still won't bind to it, in this case if you want to use the
1067generic driver (why would you?) you'll have to manually unbind the hardware
1068specific driver and bind the generic driver, like this:
1069 <programlisting>
1070 echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 &gt; /sys/bus/vmbus/drivers/hv_netvsc/unbind
1071 echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 &gt; /sys/bus/vmbus/drivers/uio_hv_generic/bind
1072 </programlisting>
1073 </para>
1074 <para>
1075You can verify that the device has been bound to the driver
1076by looking for it in sysfs, for example like the following:
1077 <programlisting>
1078 ls -l /sys/bus/vmbus/devices/vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver
1079 </programlisting>
1080Which if successful should print
1081 <programlisting>
1082 .../vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver -&gt; ../../../bus/vmbus/drivers/uio_hv_generic
1083 </programlisting>
1084 </para>
1085</sect1>
1086
1087<sect1 id="uio_hv_generic_internals">
1088<title>Things to know about uio_hv_generic</title>
1089 <para>
1090On each interrupt, uio_hv_generic sets the Interrupt Disable bit.
1091This prevents the device from generating further interrupts
1092until the bit is cleared. The userspace driver should clear this
1093bit before blocking and waiting for more interrupts.
1094 </para>
1095</sect1>
1096</chapter>
1097
1098<appendix id="app1">
1099<title>Further information</title>
1100<itemizedlist>
1101 <listitem><para>
1102 <ulink url="http://www.osadl.org">
1103 OSADL homepage.</ulink>
1104 </para></listitem>
1105 <listitem><para>
1106 <ulink url="http://www.linutronix.de">
1107 Linutronix homepage.</ulink>
1108 </para></listitem>
1109</itemizedlist>
1110</appendix>
1111
1112</book>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f2e745844d5b..608ba95d9461 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1195,6 +1195,10 @@
1195 When zero, profiling data is discarded and associated 1195 When zero, profiling data is discarded and associated
1196 debugfs files are removed at module unload time. 1196 debugfs files are removed at module unload time.
1197 1197
1198 goldfish [X86] Enable the goldfish android emulator platform.
1199 Don't use this when you are not running on the
1200 android emulator
1201
1198 gpt [EFI] Forces disk with valid GPT signature but 1202 gpt [EFI] Forces disk with valid GPT signature but
1199 invalid Protective MBR to be treated as GPT. If the 1203 invalid Protective MBR to be treated as GPT. If the
1200 primary GPT is corrupted, it enables the backup/alternate 1204 primary GPT is corrupted, it enables the backup/alternate
diff --git a/Documentation/devicetree/bindings/misc/idt_89hpesx.txt b/Documentation/devicetree/bindings/misc/idt_89hpesx.txt
new file mode 100644
index 000000000000..b9093b79ab7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/idt_89hpesx.txt
@@ -0,0 +1,44 @@
1EEPROM / CSR SMBus-slave interface of IDT 89HPESx devices
2
3Required properties:
4 - compatible : should be "<manufacturer>,<type>"
5 Basically there is only one manufacturer: idt, but some
6 compatible devices may be produced in future. Following devices
7 are supported: 89hpes8nt2, 89hpes12nt3, 89hpes24nt6ag2,
8 89hpes32nt8ag2, 89hpes32nt8bg2, 89hpes12nt12g2, 89hpes16nt16g2,
9 89hpes24nt24g2, 89hpes32nt24ag2, 89hpes32nt24bg2;
10 89hpes12n3, 89hpes12n3a, 89hpes24n3, 89hpes24n3a;
11 89hpes32h8, 89hpes32h8g2, 89hpes48h12, 89hpes48h12g2,
12 89hpes48h12ag2, 89hpes16h16, 89hpes22h16, 89hpes22h16g2,
13 89hpes34h16, 89hpes34h16g2, 89hpes64h16, 89hpes64h16g2,
14 89hpes64h16ag2;
15 89hpes12t3g2, 89hpes24t3g2, 89hpes16t4, 89hpes4t4g2,
16 89hpes10t4g2, 89hpes16t4g2, 89hpes16t4ag2, 89hpes5t5,
17 89hpes6t5, 89hpes8t5, 89hpes8t5a, 89hpes24t6, 89hpes6t6g2,
18 89hpes24t6g2, 89hpes16t7, 89hpes32t8, 89hpes32t8g2,
19 89hpes48t12, 89hpes48t12g2.
20 - reg : I2C address of the IDT 89HPESx device.
21
22Optionally there can be EEPROM-compatible subnode:
23 - compatible: There are five EEPROM devices supported: 24c32, 24c64, 24c128,
24 24c256 and 24c512 differed by size.
25 - reg: Custom address of EEPROM device (If not specified IDT 89HPESx
26 (optional) device will try to communicate with EEPROM sited by default
27 address - 0x50)
28 - read-only : Parameterless property disables writes to the EEPROM
29 (optional)
30
31Example:
32 idt@60 {
33 compatible = "idt,89hpes32nt8ag2";
34 reg = <0x74>;
35 #address-cells = <1>;
36 #size-cells = <0>;
37
38 eeprom@50 {
39 compatible = "onsemi,24c64";
40 reg = <0x50>;
41 read-only;
42 };
43 };
44
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 383d5889e95a..966a72ecc6bd 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -1,13 +1,15 @@
1Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings 1Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
2 2
3This binding represents the on-chip eFuse OTP controller found on 3This binding represents the on-chip eFuse OTP controller found on
4i.MX6Q/D, i.MX6DL/S, i.MX6SL, and i.MX6SX SoCs. 4i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX and i.MX6UL SoCs.
5 5
6Required properties: 6Required properties:
7- compatible: should be one of 7- compatible: should be one of
8 "fsl,imx6q-ocotp" (i.MX6Q/D/DL/S), 8 "fsl,imx6q-ocotp" (i.MX6Q/D/DL/S),
9 "fsl,imx6sl-ocotp" (i.MX6SL), or 9 "fsl,imx6sl-ocotp" (i.MX6SL), or
10 "fsl,imx6sx-ocotp" (i.MX6SX), followed by "syscon". 10 "fsl,imx6sx-ocotp" (i.MX6SX),
11 "fsl,imx6ul-ocotp" (i.MX6UL),
12 followed by "syscon".
11- reg: Should contain the register base and length. 13- reg: Should contain the register base and length.
12- clocks: Should contain a phandle pointing to the gated peripheral clock. 14- clocks: Should contain a phandle pointing to the gated peripheral clock.
13 15
diff --git a/Documentation/devicetree/bindings/sram/sram.txt b/Documentation/devicetree/bindings/sram/sram.txt
index 068c2c03c38f..267da4410aef 100644
--- a/Documentation/devicetree/bindings/sram/sram.txt
+++ b/Documentation/devicetree/bindings/sram/sram.txt
@@ -42,6 +42,12 @@ Optional properties in the area nodes:
42 and in use by another device or devices 42 and in use by another device or devices
43- export : indicates that the reserved SRAM area may be accessed outside 43- export : indicates that the reserved SRAM area may be accessed outside
44 of the kernel, e.g. by bootloader or userspace 44 of the kernel, e.g. by bootloader or userspace
45- protect-exec : Same as 'pool' above but with the additional
46 constraint that code will be run from the region and
47 that the memory is maintained as read-only, executable
48 during code execution. NOTE: This region must be page
49 aligned on start and end in order to properly allow
50 manipulation of the page attributes.
45- label : the name for the reserved partition, if omitted, the label 51- label : the name for the reserved partition, if omitted, the label
46 is taken from the node name excluding the unit address. 52 is taken from the node name excluding the unit address.
47 53
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 5475a2807e7a..c5a1cd0a4ae7 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -30,6 +30,7 @@ available subsections can be seen below.
30 miscellaneous 30 miscellaneous
31 vme 31 vme
32 80211/index 32 80211/index
33 uio-howto
33 34
34.. only:: subproject and html 35.. only:: subproject and html
35 36
diff --git a/Documentation/driver-api/uio-howto.rst b/Documentation/driver-api/uio-howto.rst
new file mode 100644
index 000000000000..f73d660b2956
--- /dev/null
+++ b/Documentation/driver-api/uio-howto.rst
@@ -0,0 +1,705 @@
1=======================
2The Userspace I/O HOWTO
3=======================
4
5:Author: Hans-Jürgen Koch Linux developer, Linutronix
6:Date: 2006-12-11
7
8About this document
9===================
10
11Translations
12------------
13
14If you know of any translations for this document, or you are interested
15in translating it, please email me hjk@hansjkoch.de.
16
17Preface
18-------
19
20For many types of devices, creating a Linux kernel driver is overkill.
21All that is really needed is some way to handle an interrupt and provide
22access to the memory space of the device. The logic of controlling the
23device does not necessarily have to be within the kernel, as the device
24does not need to take advantage of any of other resources that the
25kernel provides. One such common class of devices that are like this are
26for industrial I/O cards.
27
28To address this situation, the userspace I/O system (UIO) was designed.
29For typical industrial I/O cards, only a very small kernel module is
30needed. The main part of the driver will run in user space. This
31simplifies development and reduces the risk of serious bugs within a
32kernel module.
33
34Please note that UIO is not a universal driver interface. Devices that
35are already handled well by other kernel subsystems (like networking or
36serial or USB) are no candidates for an UIO driver. Hardware that is
37ideally suited for an UIO driver fulfills all of the following:
38
39- The device has memory that can be mapped. The device can be
40 controlled completely by writing to this memory.
41
42- The device usually generates interrupts.
43
44- The device does not fit into one of the standard kernel subsystems.
45
46Acknowledgments
47---------------
48
49I'd like to thank Thomas Gleixner and Benedikt Spranger of Linutronix,
50who have not only written most of the UIO code, but also helped greatly
51writing this HOWTO by giving me all kinds of background information.
52
53Feedback
54--------
55
56Find something wrong with this document? (Or perhaps something right?) I
57would love to hear from you. Please email me at hjk@hansjkoch.de.
58
59About UIO
60=========
61
62If you use UIO for your card's driver, here's what you get:
63
64- only one small kernel module to write and maintain.
65
66- develop the main part of your driver in user space, with all the
67 tools and libraries you're used to.
68
69- bugs in your driver won't crash the kernel.
70
71- updates of your driver can take place without recompiling the kernel.
72
73How UIO works
74-------------
75
76Each UIO device is accessed through a device file and several sysfs
77attribute files. The device file will be called ``/dev/uio0`` for the
78first device, and ``/dev/uio1``, ``/dev/uio2`` and so on for subsequent
79devices.
80
81``/dev/uioX`` is used to access the address space of the card. Just use
82:c:func:`mmap()` to access registers or RAM locations of your card.
83
84Interrupts are handled by reading from ``/dev/uioX``. A blocking
85:c:func:`read()` from ``/dev/uioX`` will return as soon as an
86interrupt occurs. You can also use :c:func:`select()` on
87``/dev/uioX`` to wait for an interrupt. The integer value read from
88``/dev/uioX`` represents the total interrupt count. You can use this
89number to figure out if you missed some interrupts.
90
91For some hardware that has more than one interrupt source internally,
92but not separate IRQ mask and status registers, there might be
93situations where userspace cannot determine what the interrupt source
94was if the kernel handler disables them by writing to the chip's IRQ
95register. In such a case, the kernel has to disable the IRQ completely
96to leave the chip's register untouched. Now the userspace part can
97determine the cause of the interrupt, but it cannot re-enable
98interrupts. Another cornercase is chips where re-enabling interrupts is
99a read-modify-write operation to a combined IRQ status/acknowledge
100register. This would be racy if a new interrupt occurred simultaneously.
101
102To address these problems, UIO also implements a write() function. It is
103normally not used and can be ignored for hardware that has only a single
104interrupt source or has separate IRQ mask and status registers. If you
105need it, however, a write to ``/dev/uioX`` will call the
106:c:func:`irqcontrol()` function implemented by the driver. You have
107to write a 32-bit value that is usually either 0 or 1 to disable or
108enable interrupts. If a driver does not implement
109:c:func:`irqcontrol()`, :c:func:`write()` will return with
110``-ENOSYS``.
111
112To handle interrupts properly, your custom kernel module can provide its
113own interrupt handler. It will automatically be called by the built-in
114handler.
115
116For cards that don't generate interrupts but need to be polled, there is
117the possibility to set up a timer that triggers the interrupt handler at
118configurable time intervals. This interrupt simulation is done by
119calling :c:func:`uio_event_notify()` from the timer's event
120handler.
121
122Each driver provides attributes that are used to read or write
123variables. These attributes are accessible through sysfs files. A custom
124kernel driver module can add its own attributes to the device owned by
125the uio driver, but not added to the UIO device itself at this time.
126This might change in the future if it would be found to be useful.
127
128The following standard attributes are provided by the UIO framework:
129
130- ``name``: The name of your device. It is recommended to use the name
131 of your kernel module for this.
132
133- ``version``: A version string defined by your driver. This allows the
134 user space part of your driver to deal with different versions of the
135 kernel module.
136
137- ``event``: The total number of interrupts handled by the driver since
138 the last time the device node was read.
139
140These attributes appear under the ``/sys/class/uio/uioX`` directory.
141Please note that this directory might be a symlink, and not a real
142directory. Any userspace code that accesses it must be able to handle
143this.
144
145Each UIO device can make one or more memory regions available for memory
146mapping. This is necessary because some industrial I/O cards require
147access to more than one PCI memory region in a driver.
148
149Each mapping has its own directory in sysfs, the first mapping appears
150as ``/sys/class/uio/uioX/maps/map0/``. Subsequent mappings create
151directories ``map1/``, ``map2/``, and so on. These directories will only
152appear if the size of the mapping is not 0.
153
154Each ``mapX/`` directory contains four read-only files that show
155attributes of the memory:
156
157- ``name``: A string identifier for this mapping. This is optional, the
158 string can be empty. Drivers can set this to make it easier for
159 userspace to find the correct mapping.
160
161- ``addr``: The address of memory that can be mapped.
162
163- ``size``: The size, in bytes, of the memory pointed to by addr.
164
165- ``offset``: The offset, in bytes, that has to be added to the pointer
166 returned by :c:func:`mmap()` to get to the actual device memory.
167 This is important if the device's memory is not page aligned.
168 Remember that pointers returned by :c:func:`mmap()` are always
169 page aligned, so it is good style to always add this offset.
170
171From userspace, the different mappings are distinguished by adjusting
172the ``offset`` parameter of the :c:func:`mmap()` call. To map the
173memory of mapping N, you have to use N times the page size as your
174offset::
175
176 offset = N * getpagesize();
177
178Sometimes there is hardware with memory-like regions that can not be
179mapped with the technique described here, but there are still ways to
180access them from userspace. The most common example are x86 ioports. On
181x86 systems, userspace can access these ioports using
182:c:func:`ioperm()`, :c:func:`iopl()`, :c:func:`inb()`,
183:c:func:`outb()`, and similar functions.
184
185Since these ioport regions can not be mapped, they will not appear under
186``/sys/class/uio/uioX/maps/`` like the normal memory described above.
187Without information about the port regions a hardware has to offer, it
188becomes difficult for the userspace part of the driver to find out which
189ports belong to which UIO device.
190
191To address this situation, the new directory
192``/sys/class/uio/uioX/portio/`` was added. It only exists if the driver
193wants to pass information about one or more port regions to userspace.
194If that is the case, subdirectories named ``port0``, ``port1``, and so
195on, will appear underneath ``/sys/class/uio/uioX/portio/``.
196
197Each ``portX/`` directory contains four read-only files that show name,
198start, size, and type of the port region:
199
200- ``name``: A string identifier for this port region. The string is
201 optional and can be empty. Drivers can set it to make it easier for
202 userspace to find a certain port region.
203
204- ``start``: The first port of this region.
205
206- ``size``: The number of ports in this region.
207
208- ``porttype``: A string describing the type of port.
209
210Writing your own kernel module
211==============================
212
213Please have a look at ``uio_cif.c`` as an example. The following
214paragraphs explain the different sections of this file.
215
216struct uio_info
217---------------
218
219This structure tells the framework the details of your driver. Some of
220the members are required, others are optional.
221
222- ``const char *name``: Required. The name of your driver as it will
223 appear in sysfs. I recommend using the name of your module for this.
224
225- ``const char *version``: Required. This string appears in
226 ``/sys/class/uio/uioX/version``.
227
228- ``struct uio_mem mem[ MAX_UIO_MAPS ]``: Required if you have memory
229 that can be mapped with :c:func:`mmap()`. For each mapping you
230 need to fill one of the ``uio_mem`` structures. See the description
231 below for details.
232
233- ``struct uio_port port[ MAX_UIO_PORTS_REGIONS ]``: Required if you
234 want to pass information about ioports to userspace. For each port
235 region you need to fill one of the ``uio_port`` structures. See the
236 description below for details.
237
238- ``long irq``: Required. If your hardware generates an interrupt, it's
239 your module's task to determine the irq number during initialization.
240 If you don't have a hardware generated interrupt but want to trigger
241 the interrupt handler in some other way, set ``irq`` to
242 ``UIO_IRQ_CUSTOM``. If you had no interrupt at all, you could set
243 ``irq`` to ``UIO_IRQ_NONE``, though this rarely makes sense.
244
245- ``unsigned long irq_flags``: Required if you've set ``irq`` to a
246 hardware interrupt number. The flags given here will be used in the
247 call to :c:func:`request_irq()`.
248
249- ``int (*mmap)(struct uio_info *info, struct vm_area_struct *vma)``:
250 Optional. If you need a special :c:func:`mmap()`
251 function, you can set it here. If this pointer is not NULL, your
252 :c:func:`mmap()` will be called instead of the built-in one.
253
254- ``int (*open)(struct uio_info *info, struct inode *inode)``:
255 Optional. You might want to have your own :c:func:`open()`,
256 e.g. to enable interrupts only when your device is actually used.
257
258- ``int (*release)(struct uio_info *info, struct inode *inode)``:
259 Optional. If you define your own :c:func:`open()`, you will
260 probably also want a custom :c:func:`release()` function.
261
262- ``int (*irqcontrol)(struct uio_info *info, s32 irq_on)``:
263 Optional. If you need to be able to enable or disable interrupts
264 from userspace by writing to ``/dev/uioX``, you can implement this
265 function. The parameter ``irq_on`` will be 0 to disable interrupts
266 and 1 to enable them.
267
268Usually, your device will have one or more memory regions that can be
269mapped to user space. For each region, you have to set up a
270``struct uio_mem`` in the ``mem[]`` array. Here's a description of the
271fields of ``struct uio_mem``:
272
273- ``const char *name``: Optional. Set this to help identify the memory
274 region, it will show up in the corresponding sysfs node.
275
276- ``int memtype``: Required if the mapping is used. Set this to
277 ``UIO_MEM_PHYS`` if you have physical memory on your card to be
278 mapped. Use ``UIO_MEM_LOGICAL`` for logical memory (e.g. allocated
279 with :c:func:`kmalloc()`). There's also ``UIO_MEM_VIRTUAL`` for
280 virtual memory.
281
282- ``phys_addr_t addr``: Required if the mapping is used. Fill in the
283 address of your memory block. This address is the one that appears in
284 sysfs.
285
286- ``resource_size_t size``: Fill in the size of the memory block that
287 ``addr`` points to. If ``size`` is zero, the mapping is considered
288 unused. Note that you *must* initialize ``size`` with zero for all
289 unused mappings.
290
291- ``void *internal_addr``: If you have to access this memory region
292 from within your kernel module, you will want to map it internally by
293 using something like :c:func:`ioremap()`. Addresses returned by
294 this function cannot be mapped to user space, so you must not store
295 it in ``addr``. Use ``internal_addr`` instead to remember such an
296 address.
297
298Please do not touch the ``map`` element of ``struct uio_mem``! It is
299used by the UIO framework to set up sysfs files for this mapping. Simply
300leave it alone.
301
302Sometimes, your device can have one or more port regions which can not
303be mapped to userspace. But if there are other possibilities for
304userspace to access these ports, it makes sense to make information
305about the ports available in sysfs. For each region, you have to set up
306a ``struct uio_port`` in the ``port[]`` array. Here's a description of
307the fields of ``struct uio_port``:
308
309- ``char *porttype``: Required. Set this to one of the predefined
310 constants. Use ``UIO_PORT_X86`` for the ioports found in x86
311 architectures.
312
313- ``unsigned long start``: Required if the port region is used. Fill in
314 the number of the first port of this region.
315
316- ``unsigned long size``: Fill in the number of ports in this region.
317 If ``size`` is zero, the region is considered unused. Note that you
318 *must* initialize ``size`` with zero for all unused regions.
319
320Please do not touch the ``portio`` element of ``struct uio_port``! It is
321used internally by the UIO framework to set up sysfs files for this
322region. Simply leave it alone.
323
324Adding an interrupt handler
325---------------------------
326
327What you need to do in your interrupt handler depends on your hardware
328and on how you want to handle it. You should try to keep the amount of
329code in your kernel interrupt handler low. If your hardware requires no
330action that you *have* to perform after each interrupt, then your
331handler can be empty.
332
333If, on the other hand, your hardware *needs* some action to be performed
334after each interrupt, then you *must* do it in your kernel module. Note
335that you cannot rely on the userspace part of your driver. Your
336userspace program can terminate at any time, possibly leaving your
337hardware in a state where proper interrupt handling is still required.
338
339There might also be applications where you want to read data from your
340hardware at each interrupt and buffer it in a piece of kernel memory
341you've allocated for that purpose. With this technique you could avoid
342loss of data if your userspace program misses an interrupt.
343
344A note on shared interrupts: Your driver should support interrupt
345sharing whenever this is possible. It is possible if and only if your
346driver can detect whether your hardware has triggered the interrupt or
347not. This is usually done by looking at an interrupt status register. If
348your driver sees that the IRQ bit is actually set, it will perform its
349actions, and the handler returns IRQ_HANDLED. If the driver detects
350that it was not your hardware that caused the interrupt, it will do
351nothing and return IRQ_NONE, allowing the kernel to call the next
352possible interrupt handler.
353
354If you decide not to support shared interrupts, your card won't work in
355computers with no free interrupts. As this frequently happens on the PC
356platform, you can save yourself a lot of trouble by supporting interrupt
357sharing.
358
359Using uio_pdrv for platform devices
360-----------------------------------
361
362In many cases, UIO drivers for platform devices can be handled in a
363generic way. In the same place where you define your
364``struct platform_device``, you simply also implement your interrupt
365handler and fill your ``struct uio_info``. A pointer to this
366``struct uio_info`` is then used as ``platform_data`` for your platform
367device.
368
369You also need to set up an array of ``struct resource`` containing
370addresses and sizes of your memory mappings. This information is passed
371to the driver using the ``.resource`` and ``.num_resources`` elements of
372``struct platform_device``.
373
374You now have to set the ``.name`` element of ``struct platform_device``
375to ``"uio_pdrv"`` to use the generic UIO platform device driver. This
376driver will fill the ``mem[]`` array according to the resources given,
377and register the device.
378
379The advantage of this approach is that you only have to edit a file you
380need to edit anyway. You do not have to create an extra driver.
381
382Using uio_pdrv_genirq for platform devices
383------------------------------------------
384
385Especially in embedded devices, you frequently find chips where the irq
386pin is tied to its own dedicated interrupt line. In such cases, where
387you can be really sure the interrupt is not shared, we can take the
388concept of ``uio_pdrv`` one step further and use a generic interrupt
389handler. That's what ``uio_pdrv_genirq`` does.
390
391The setup for this driver is the same as described above for
392``uio_pdrv``, except that you do not implement an interrupt handler. The
393``.handler`` element of ``struct uio_info`` must remain ``NULL``. The
394``.irq_flags`` element must not contain ``IRQF_SHARED``.
395
396You will set the ``.name`` element of ``struct platform_device`` to
397``"uio_pdrv_genirq"`` to use this driver.
398
399The generic interrupt handler of ``uio_pdrv_genirq`` will simply disable
400the interrupt line using :c:func:`disable_irq_nosync()`. After
401doing its work, userspace can reenable the interrupt by writing
4020x00000001 to the UIO device file. The driver already implements an
403:c:func:`irq_control()` to make this possible, you must not
404implement your own.
405
406Using ``uio_pdrv_genirq`` not only saves a few lines of interrupt
407handler code. You also do not need to know anything about the chip's
408internal registers to create the kernel part of the driver. All you need
409to know is the irq number of the pin the chip is connected to.
410
411Using uio_dmem_genirq for platform devices
412------------------------------------------
413
414In addition to statically allocated memory ranges, there may also be a
415desire to use dynamically allocated regions in a user space driver. In
416particular, being able to access memory made available through the
417dma-mapping API may be especially useful. The ``uio_dmem_genirq``
418driver provides a way to accomplish this.
419
420This driver is used in a similar manner to the ``"uio_pdrv_genirq"``
421driver with respect to interrupt configuration and handling.
422
423Set the ``.name`` element of ``struct platform_device`` to
424``"uio_dmem_genirq"`` to use this driver.
425
426When using this driver, fill in the ``.platform_data`` element of
427``struct platform_device``, which is of type
428``struct uio_dmem_genirq_pdata`` and which contains the following
429elements:
430
431- ``struct uio_info uioinfo``: The same structure used as the
432 ``uio_pdrv_genirq`` platform data
433
434- ``unsigned int *dynamic_region_sizes``: Pointer to list of sizes of
435 dynamic memory regions to be mapped into user space.
436
437- ``unsigned int num_dynamic_regions``: Number of elements in
438 ``dynamic_region_sizes`` array.
439
440The dynamic regions defined in the platform data will be appended to the
441``mem[]`` array after the platform device resources, which implies
442that the total number of static and dynamic memory regions cannot exceed
443``MAX_UIO_MAPS``.
444
445The dynamic memory regions will be allocated when the UIO device file,
446``/dev/uioX`` is opened. Similar to static memory resources, the memory
447region information for dynamic regions is then visible via sysfs at
448``/sys/class/uio/uioX/maps/mapY/*``. The dynamic memory regions will be
449freed when the UIO device file is closed. When no processes are holding
450the device file open, the address returned to userspace is ~0.
451
452Writing a driver in userspace
453=============================
454
455Once you have a working kernel module for your hardware, you can write
456the userspace part of your driver. You don't need any special libraries,
457your driver can be written in any reasonable language, you can use
458floating point numbers and so on. In short, you can use all the tools
459and libraries you'd normally use for writing a userspace application.
460
461Getting information about your UIO device
462-----------------------------------------
463
464Information about all UIO devices is available in sysfs. The first thing
465you should do in your driver is check ``name`` and ``version`` to make
466sure you're talking to the right device and that its kernel driver has the
467version you expect.
468
469You should also make sure that the memory mapping you need exists and
470has the size you expect.
471
472There is a tool called ``lsuio`` that lists UIO devices and their
473attributes. It is available here:
474
475http://www.osadl.org/projects/downloads/UIO/user/
476
477With ``lsuio`` you can quickly check if your kernel module is loaded and
478which attributes it exports. Have a look at the manpage for details.
479
480The source code of ``lsuio`` can serve as an example for getting
481information about a UIO device. The file ``uio_helper.c`` contains a
482lot of functions you could use in your userspace driver code.
483
484mmap() device memory
485--------------------
486
487After you made sure you've got the right device with the memory mappings
488you need, all you have to do is to call :c:func:`mmap()` to map the
489device's memory to userspace.
490
491The parameter ``offset`` of the :c:func:`mmap()` call has a special
492meaning for UIO devices: It is used to select which mapping of your
493device you want to map. To map the memory of mapping N, you have to use
494N times the page size as your offset::
495
496 offset = N * getpagesize();
497
498N starts from zero, so if you've got only one memory range to map, set
499``offset = 0``. A drawback of this technique is that memory is always
500mapped beginning with its start address.
501
502Waiting for interrupts
503----------------------
504
505After you successfully mapped your device's memory, you can access it
506like an ordinary array. Usually, you will perform some initialization.
507After that, your hardware starts working and will generate an interrupt
508as soon as it's finished, has some data available, or needs your
509attention because an error occurred.
510
511``/dev/uioX`` is a read-only file. A :c:func:`read()` will always
512block until an interrupt occurs. There is only one legal value for the
513``count`` parameter of :c:func:`read()`, and that is the size of a
514signed 32 bit integer (4). Any other value for ``count`` causes
515:c:func:`read()` to fail. The signed 32 bit integer read is the
516interrupt count of your device. If the value is one more than the value
517you read the last time, everything is OK. If the difference is greater
518than one, you missed interrupts.
519
520You can also use :c:func:`select()` on ``/dev/uioX``.
521
522Generic PCI UIO driver
523======================
524
525The generic driver is a kernel module named uio_pci_generic. It can
526work with any device compliant to PCI 2.3 (circa 2002) and any compliant
527PCI Express device. Using this, you only need to write the userspace
528driver, removing the need to write a hardware-specific kernel module.
529
530Making the driver recognize the device
531--------------------------------------
532
533Since the driver does not declare any device ids, it will not get loaded
534automatically and will not automatically bind to any devices. You must
535load it and allocate an id to the driver yourself. For example::
536
537 modprobe uio_pci_generic
538 echo "8086 10f5" > /sys/bus/pci/drivers/uio_pci_generic/new_id
539
540If there already is a hardware specific kernel driver for your device,
541the generic driver still won't bind to it. In this case, if you want to
542use the generic driver (why would you?) you'll have to manually unbind
543the hardware specific driver and bind the generic driver, like this::
544
545 echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
546 echo -n 0000:00:19.0 > /sys/bus/pci/drivers/uio_pci_generic/bind
547
548You can verify that the device has been bound to the driver by looking
549for it in sysfs, for example like the following::
550
551 ls -l /sys/bus/pci/devices/0000:00:19.0/driver
552
553Which if successful should print::
554
555 .../0000:00:19.0/driver -> ../../../bus/pci/drivers/uio_pci_generic
556
557Note that the generic driver will not bind to old PCI 2.2 devices. If
558binding the device failed, run the following command::
559
560 dmesg
561
562and look in the output for failure reasons.
563
564Things to know about uio_pci_generic
565------------------------------------
566
567Interrupts are handled using the Interrupt Disable bit in the PCI
568command register and Interrupt Status bit in the PCI status register.
569All devices compliant to PCI 2.3 (circa 2002) and all compliant PCI
570Express devices should support these bits. uio_pci_generic detects
571this support, and won't bind to devices which do not support the
572Interrupt Disable Bit in the command register.
573
574On each interrupt, uio_pci_generic sets the Interrupt Disable bit.
575This prevents the device from generating further interrupts until the
576bit is cleared. The userspace driver should clear this bit before
577blocking and waiting for more interrupts.
578
579Writing userspace driver using uio_pci_generic
580------------------------------------------------
581
582Userspace driver can use pci sysfs interface, or the libpci library that
583wraps it, to talk to the device and to re-enable interrupts by writing
584to the command register.
585
586Example code using uio_pci_generic
587----------------------------------
588
589Here is some sample userspace driver code using uio_pci_generic::
590
591 #include <stdlib.h>
592 #include <stdio.h>
593 #include <unistd.h>
594 #include <sys/types.h>
595 #include <sys/stat.h>
596 #include <fcntl.h>
597 #include <errno.h>
598
599 int main()
600 {
601 int uiofd;
602 int configfd;
603 int err;
604 int i;
605 unsigned icount;
606 unsigned char command_high;
607
608 uiofd = open("/dev/uio0", O_RDONLY);
609 if (uiofd < 0) {
610 perror("uio open:");
611 return errno;
612 }
613 configfd = open("/sys/class/uio/uio0/device/config", O_RDWR);
614 if (configfd < 0) {
615 perror("config open:");
616 return errno;
617 }
618
619 /* Read and cache command value */
620 err = pread(configfd, &command_high, 1, 5);
621 if (err != 1) {
622 perror("command config read:");
623 return errno;
624 }
625 command_high &= ~0x4;
626
627 for(i = 0;; ++i) {
628 /* Print out a message, for debugging. */
629 if (i == 0)
630 fprintf(stderr, "Started uio test driver.\n");
631 else
632 fprintf(stderr, "Interrupts: %d\n", icount);
633
634 /****************************************/
635 /* Here we got an interrupt from the
636 device. Do something to it. */
637 /****************************************/
638
639 /* Re-enable interrupts. */
640 err = pwrite(configfd, &command_high, 1, 5);
641 if (err != 1) {
642 perror("config write:");
643 break;
644 }
645
646 /* Wait for next interrupt. */
647 err = read(uiofd, &icount, 4);
648 if (err != 4) {
649 perror("uio read:");
650 break;
651 }
652
653 }
654 return errno;
655 }
656
657Generic Hyper-V UIO driver
658==========================
659
660The generic driver is a kernel module named uio_hv_generic. It
661supports devices on the Hyper-V VMBus similar to uio_pci_generic on
662PCI bus.
663
664Making the driver recognize the device
665--------------------------------------
666
667Since the driver does not declare any device GUIDs, it will not get
668loaded automatically and will not automatically bind to any devices. You
669must load it and allocate an id to the driver yourself. For example, to use
670the network device GUID::
671
672 modprobe uio_hv_generic
673 echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
674
675If there already is a hardware specific kernel driver for the device,
676the generic driver still won't bind to it. In this case, if you want to
677use the generic driver (why would you?) you'll have to manually unbind
678the hardware specific driver and bind the generic driver, like this::
679
680 echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 > /sys/bus/vmbus/drivers/hv_netvsc/unbind
681 echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 > /sys/bus/vmbus/drivers/uio_hv_generic/bind
682
683You can verify that the device has been bound to the driver by looking
684for it in sysfs, for example like the following::
685
686 ls -l /sys/bus/vmbus/devices/vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver
687
688Which if successful should print::
689
690 .../vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver -> ../../../bus/vmbus/drivers/uio_hv_generic
691
692Things to know about uio_hv_generic
693-----------------------------------
694
695On each interrupt, uio_hv_generic sets the Interrupt Disable bit. This
696prevents the device from generating further interrupts until the bit is
697cleared. The userspace driver should clear this bit before blocking and
698waiting for more interrupts.
699
700Further information
701===================
702
703- `OSADL homepage. <http://www.osadl.org>`_
704
705- `Linutronix homepage. <http://www.linutronix.de>`_
diff --git a/Documentation/extcon/intel-int3496.txt b/Documentation/extcon/intel-int3496.txt
new file mode 100644
index 000000000000..af0b366c25b7
--- /dev/null
+++ b/Documentation/extcon/intel-int3496.txt
@@ -0,0 +1,22 @@
1Intel INT3496 ACPI device extcon driver documentation
2-----------------------------------------------------
3
4The Intel INT3496 ACPI device extcon driver is a driver for ACPI
5devices with an acpi-id of INT3496, such as found for example on
6Intel Baytrail and Cherrytrail tablets.
7
8This ACPI device describes how the OS can read the id-pin of the devices'
9USB-otg port, as well as how it optionally can enable Vbus output on the
10otg port and how it can optionally control the muxing of the data pins
11between an USB host and an USB peripheral controller.
12
13The ACPI devices exposes this functionality by returning an array with up
14to 3 gpio descriptors from its ACPI _CRS (Current Resource Settings) call:
15
16Index 0: The input gpio for the id-pin, this is always present and valid
17Index 1: The output gpio for enabling Vbus output from the device to the otg
18 port, write 1 to enable the Vbus output (this gpio descriptor may
19 be absent or invalid)
20Index 2: The output gpio for muxing of the data pins between the USB host and
21 the USB peripheral controller, write 1 to mux to the peripheral
22 controller
diff --git a/Documentation/fpga/fpga-mgr.txt b/Documentation/fpga/fpga-mgr.txt
index 86ee5078fd03..78f197fadfd1 100644
--- a/Documentation/fpga/fpga-mgr.txt
+++ b/Documentation/fpga/fpga-mgr.txt
@@ -22,7 +22,16 @@ To program the FPGA from a file or from a buffer:
22 struct fpga_image_info *info, 22 struct fpga_image_info *info,
23 const char *buf, size_t count); 23 const char *buf, size_t count);
24 24
25Load the FPGA from an image which exists as a buffer in memory. 25Load the FPGA from an image which exists as a contiguous buffer in
26memory. Allocating contiguous kernel memory for the buffer should be avoided,
27users are encouraged to use the _sg interface instead of this.
28
29 int fpga_mgr_buf_load_sg(struct fpga_manager *mgr,
30 struct fpga_image_info *info,
31 struct sg_table *sgt);
32
33Load the FPGA from an image in non-contiguous memory. Callers can
34construct a sg_table using alloc_page backed memory.
26 35
27 int fpga_mgr_firmware_load(struct fpga_manager *mgr, 36 int fpga_mgr_firmware_load(struct fpga_manager *mgr,
28 struct fpga_image_info *info, 37 struct fpga_image_info *info,
@@ -166,7 +175,7 @@ success or negative error codes otherwise.
166 175
167The programming sequence is: 176The programming sequence is:
168 1. .write_init 177 1. .write_init
169 2. .write (may be called once or multiple times) 178 2. .write or .write_sg (may be called once or multiple times)
170 3. .write_complete 179 3. .write_complete
171 180
172The .write_init function will prepare the FPGA to receive the image data. The 181The .write_init function will prepare the FPGA to receive the image data. The
@@ -176,7 +185,11 @@ buffer up at least this much before starting.
176 185
177The .write function writes a buffer to the FPGA. The buffer may be contain the 186The .write function writes a buffer to the FPGA. The buffer may be contain the
178whole FPGA image or may be a smaller chunk of an FPGA image. In the latter 187whole FPGA image or may be a smaller chunk of an FPGA image. In the latter
179case, this function is called multiple times for successive chunks. 188case, this function is called multiple times for successive chunks. This interface
189is suitable for drivers which use PIO.
190
191The .write_sg version behaves the same as .write except the input is a sg_table
192scatter list. This interface is suitable for drivers which use DMA.
180 193
181The .write_complete function is called after all the image has been written 194The .write_complete function is called after all the image has been written
182to put the FPGA into operating mode. 195to put the FPGA into operating mode.
diff --git a/MAINTAINERS b/MAINTAINERS
index d6e91e96f4e5..427c97e429bf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5993,6 +5993,7 @@ S: Maintained
5993F: arch/x86/include/asm/mshyperv.h 5993F: arch/x86/include/asm/mshyperv.h
5994F: arch/x86/include/uapi/asm/hyperv.h 5994F: arch/x86/include/uapi/asm/hyperv.h
5995F: arch/x86/kernel/cpu/mshyperv.c 5995F: arch/x86/kernel/cpu/mshyperv.c
5996F: arch/x86/hyperv
5996F: drivers/hid/hid-hyperv.c 5997F: drivers/hid/hid-hyperv.c
5997F: drivers/hv/ 5998F: drivers/hv/
5998F: drivers/input/serio/hyperv-keyboard.c 5999F: drivers/input/serio/hyperv-keyboard.c
@@ -13071,7 +13072,7 @@ USERSPACE I/O (UIO)
13071M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 13072M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
13072S: Maintained 13073S: Maintained
13073T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git 13074T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
13074F: Documentation/DocBook/uio-howto.tmpl 13075F: Documentation/driver-api/uio-howto.rst
13075F: drivers/uio/ 13076F: drivers/uio/
13076F: include/linux/uio*.h 13077F: include/linux/uio*.h
13077 13078
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 1d873d15b545..9780829f8a05 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -557,15 +557,7 @@ static struct clk_lookup da850_clks[] = {
557 CLK("da830-mmc.0", NULL, &mmcsd0_clk), 557 CLK("da830-mmc.0", NULL, &mmcsd0_clk),
558 CLK("da830-mmc.1", NULL, &mmcsd1_clk), 558 CLK("da830-mmc.1", NULL, &mmcsd1_clk),
559 CLK("ti-aemif", NULL, &aemif_clk), 559 CLK("ti-aemif", NULL, &aemif_clk),
560 /* 560 CLK("davinci-nand.0", "aemif", &aemif_nand_clk),
561 * The only user of this clock is davinci_nand and it get's it through
562 * con_id. The nand node itself is created from within the aemif
563 * driver to guarantee that it's probed after the aemif timing
564 * parameters are configured. of_dev_auxdata is not accessible from
565 * the aemif driver and can't be passed to of_platform_populate(). For
566 * that reason we're leaving the dev_id here as NULL.
567 */
568 CLK(NULL, "aemif", &aemif_nand_clk),
569 CLK("ohci-da8xx", "usb11", &usb11_clk), 561 CLK("ohci-da8xx", "usb11", &usb11_clk),
570 CLK("musb-da8xx", "usb20", &usb20_clk), 562 CLK("musb-da8xx", "usb20", &usb20_clk),
571 CLK("spi_davinci.0", NULL, &spi0_clk), 563 CLK("spi_davinci.0", NULL, &spi0_clk),
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index 9ee44da6eb7b..06205fe4c120 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -11,6 +11,7 @@
11#include <linux/of_irq.h> 11#include <linux/of_irq.h>
12#include <linux/of_platform.h> 12#include <linux/of_platform.h>
13#include <linux/irqdomain.h> 13#include <linux/irqdomain.h>
14#include <linux/platform_data/ti-aemif.h>
14 15
15#include <asm/mach/arch.h> 16#include <asm/mach/arch.h>
16 17
@@ -18,6 +19,15 @@
18#include "cp_intc.h" 19#include "cp_intc.h"
19#include <mach/da8xx.h> 20#include <mach/da8xx.h>
20 21
22static struct of_dev_auxdata da850_aemif_auxdata_lookup[] = {
23 OF_DEV_AUXDATA("ti,davinci-nand", 0x62000000, "davinci-nand.0", NULL),
24 {}
25};
26
27static struct aemif_platform_data aemif_data = {
28 .dev_lookup = da850_aemif_auxdata_lookup,
29};
30
21static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = { 31static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
22 OF_DEV_AUXDATA("ti,davinci-i2c", 0x01c22000, "i2c_davinci.1", NULL), 32 OF_DEV_AUXDATA("ti,davinci-i2c", 0x01c22000, "i2c_davinci.1", NULL),
23 OF_DEV_AUXDATA("ti,davinci-i2c", 0x01e28000, "i2c_davinci.2", NULL), 33 OF_DEV_AUXDATA("ti,davinci-i2c", 0x01e28000, "i2c_davinci.2", NULL),
@@ -37,7 +47,7 @@ static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
37 OF_DEV_AUXDATA("ti,davinci-dm6467-emac", 0x01e20000, "davinci_emac.1", 47 OF_DEV_AUXDATA("ti,davinci-dm6467-emac", 0x01e20000, "davinci_emac.1",
38 NULL), 48 NULL),
39 OF_DEV_AUXDATA("ti,da830-mcasp-audio", 0x01d00000, "davinci-mcasp.0", NULL), 49 OF_DEV_AUXDATA("ti,da830-mcasp-audio", 0x01d00000, "davinci-mcasp.0", NULL),
40 OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", NULL), 50 OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", &aemif_data),
41 OF_DEV_AUXDATA("ti,da850-tilcdc", 0x01e13000, "da8xx_lcdc.0", NULL), 51 OF_DEV_AUXDATA("ti,da850-tilcdc", 0x01e13000, "da8xx_lcdc.0", NULL),
42 OF_DEV_AUXDATA("ti,da830-ohci", 0x01e25000, "ohci-da8xx", NULL), 52 OF_DEV_AUXDATA("ti,da830-ohci", 0x01e25000, "ohci-da8xx", NULL),
43 OF_DEV_AUXDATA("ti,da830-musb", 0x01e00000, "musb-da8xx", NULL), 53 OF_DEV_AUXDATA("ti,da830-musb", 0x01e00000, "musb-da8xx", NULL),
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index eb3abf8ac44e..586b786b3edf 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -7,6 +7,9 @@ obj-$(CONFIG_KVM) += kvm/
7# Xen paravirtualization support 7# Xen paravirtualization support
8obj-$(CONFIG_XEN) += xen/ 8obj-$(CONFIG_XEN) += xen/
9 9
10# Hyper-V paravirtualization support
11obj-$(CONFIG_HYPERVISOR_GUEST) += hyperv/
12
10# lguest paravirtualization support 13# lguest paravirtualization support
11obj-$(CONFIG_LGUEST_GUEST) += lguest/ 14obj-$(CONFIG_LGUEST_GUEST) += lguest/
12 15
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
new file mode 100644
index 000000000000..171ae09864d7
--- /dev/null
+++ b/arch/x86/hyperv/Makefile
@@ -0,0 +1 @@
obj-y := hv_init.o
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
new file mode 100644
index 000000000000..db64baf0e500
--- /dev/null
+++ b/arch/x86/hyperv/hv_init.c
@@ -0,0 +1,277 @@
1/*
2 * X86 specific Hyper-V initialization code.
3 *
4 * Copyright (C) 2016, Microsoft, Inc.
5 *
6 * Author : K. Y. Srinivasan <kys@microsoft.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
15 * NON INFRINGEMENT. See the GNU General Public License for more
16 * details.
17 *
18 */
19
20#include <linux/types.h>
21#include <asm/hypervisor.h>
22#include <asm/hyperv.h>
23#include <asm/mshyperv.h>
24#include <linux/version.h>
25#include <linux/vmalloc.h>
26#include <linux/mm.h>
27#include <linux/clockchips.h>
28
29
30#ifdef CONFIG_X86_64
31
32static struct ms_hyperv_tsc_page *tsc_pg;
33
34static u64 read_hv_clock_tsc(struct clocksource *arg)
35{
36 u64 current_tick;
37
38 if (tsc_pg->tsc_sequence != 0) {
39 /*
40 * Use the tsc page to compute the value.
41 */
42
43 while (1) {
44 u64 tmp;
45 u32 sequence = tsc_pg->tsc_sequence;
46 u64 cur_tsc;
47 u64 scale = tsc_pg->tsc_scale;
48 s64 offset = tsc_pg->tsc_offset;
49
50 rdtscll(cur_tsc);
51 /* current_tick = ((cur_tsc *scale) >> 64) + offset */
52 asm("mulq %3"
53 : "=d" (current_tick), "=a" (tmp)
54 : "a" (cur_tsc), "r" (scale));
55
56 current_tick += offset;
57 if (tsc_pg->tsc_sequence == sequence)
58 return current_tick;
59
60 if (tsc_pg->tsc_sequence != 0)
61 continue;
62 /*
63 * Fallback using MSR method.
64 */
65 break;
66 }
67 }
68 rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
69 return current_tick;
70}
71
72static struct clocksource hyperv_cs_tsc = {
73 .name = "hyperv_clocksource_tsc_page",
74 .rating = 400,
75 .read = read_hv_clock_tsc,
76 .mask = CLOCKSOURCE_MASK(64),
77 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
78};
79#endif
80
81static u64 read_hv_clock_msr(struct clocksource *arg)
82{
83 u64 current_tick;
84 /*
85 * Read the partition counter to get the current tick count. This count
86 * is set to 0 when the partition is created and is incremented in
87 * 100 nanosecond units.
88 */
89 rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
90 return current_tick;
91}
92
93static struct clocksource hyperv_cs_msr = {
94 .name = "hyperv_clocksource_msr",
95 .rating = 400,
96 .read = read_hv_clock_msr,
97 .mask = CLOCKSOURCE_MASK(64),
98 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
99};
100
101static void *hypercall_pg;
102struct clocksource *hyperv_cs;
103EXPORT_SYMBOL_GPL(hyperv_cs);
104
105/*
106 * This function is to be invoked early in the boot sequence after the
107 * hypervisor has been detected.
108 *
109 * 1. Setup the hypercall page.
110 * 2. Register Hyper-V specific clocksource.
111 */
112void hyperv_init(void)
113{
114 u64 guest_id;
115 union hv_x64_msr_hypercall_contents hypercall_msr;
116
117 if (x86_hyper != &x86_hyper_ms_hyperv)
118 return;
119
120 /*
121 * Setup the hypercall page and enable hypercalls.
122 * 1. Register the guest ID
123 * 2. Enable the hypercall and register the hypercall page
124 */
125 guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
126 wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
127
128 hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
129 if (hypercall_pg == NULL) {
130 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
131 return;
132 }
133
134 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
135 hypercall_msr.enable = 1;
136 hypercall_msr.guest_physical_address = vmalloc_to_pfn(hypercall_pg);
137 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
138
139 /*
140 * Register Hyper-V specific clocksource.
141 */
142#ifdef CONFIG_X86_64
143 if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
144 union hv_x64_msr_hypercall_contents tsc_msr;
145
146 tsc_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
147 if (!tsc_pg)
148 goto register_msr_cs;
149
150 hyperv_cs = &hyperv_cs_tsc;
151
152 rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
153
154 tsc_msr.enable = 1;
155 tsc_msr.guest_physical_address = vmalloc_to_pfn(tsc_pg);
156
157 wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
158 clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
159 return;
160 }
161#endif
162 /*
163 * For 32 bit guests just use the MSR based mechanism for reading
164 * the partition counter.
165 */
166
167register_msr_cs:
168 hyperv_cs = &hyperv_cs_msr;
169 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
170 clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
171}
172
173/*
174 * This routine is called before kexec/kdump, it does the required cleanup.
175 */
176void hyperv_cleanup(void)
177{
178 union hv_x64_msr_hypercall_contents hypercall_msr;
179
180 /* Reset our OS id */
181 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
182
183 /* Reset the hypercall page */
184 hypercall_msr.as_uint64 = 0;
185 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
186
187 /* Reset the TSC page */
188 hypercall_msr.as_uint64 = 0;
189 wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
190}
191EXPORT_SYMBOL_GPL(hyperv_cleanup);
192
193/*
194 * hv_do_hypercall- Invoke the specified hypercall
195 */
196u64 hv_do_hypercall(u64 control, void *input, void *output)
197{
198 u64 input_address = (input) ? virt_to_phys(input) : 0;
199 u64 output_address = (output) ? virt_to_phys(output) : 0;
200#ifdef CONFIG_X86_64
201 u64 hv_status = 0;
202
203 if (!hypercall_pg)
204 return (u64)ULLONG_MAX;
205
206 __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
207 __asm__ __volatile__("call *%3" : "=a" (hv_status) :
208 "c" (control), "d" (input_address),
209 "m" (hypercall_pg));
210
211 return hv_status;
212
213#else
214
215 u32 control_hi = control >> 32;
216 u32 control_lo = control & 0xFFFFFFFF;
217 u32 hv_status_hi = 1;
218 u32 hv_status_lo = 1;
219 u32 input_address_hi = input_address >> 32;
220 u32 input_address_lo = input_address & 0xFFFFFFFF;
221 u32 output_address_hi = output_address >> 32;
222 u32 output_address_lo = output_address & 0xFFFFFFFF;
223
224 if (!hypercall_pg)
225 return (u64)ULLONG_MAX;
226
227 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
228 "=a"(hv_status_lo) : "d" (control_hi),
229 "a" (control_lo), "b" (input_address_hi),
230 "c" (input_address_lo), "D"(output_address_hi),
231 "S"(output_address_lo), "m" (hypercall_pg));
232
233 return hv_status_lo | ((u64)hv_status_hi << 32);
234#endif /* !x86_64 */
235}
236EXPORT_SYMBOL_GPL(hv_do_hypercall);
237
238void hyperv_report_panic(struct pt_regs *regs)
239{
240 static bool panic_reported;
241
242 /*
243 * We prefer to report panic on 'die' chain as we have proper
244 * registers to report, but if we miss it (e.g. on BUG()) we need
245 * to report it on 'panic'.
246 */
247 if (panic_reported)
248 return;
249 panic_reported = true;
250
251 wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
252 wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
253 wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
254 wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
255 wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);
256
257 /*
258 * Let Hyper-V know there is crash data available
259 */
260 wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
261}
262EXPORT_SYMBOL_GPL(hyperv_report_panic);
263
264bool hv_is_hypercall_page_setup(void)
265{
266 union hv_x64_msr_hypercall_contents hypercall_msr;
267
268 /* Check if the hypercall page is setup */
269 hypercall_msr.as_uint64 = 0;
270 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
271
272 if (!hypercall_msr.enable)
273 return false;
274
275 return true;
276}
277EXPORT_SYMBOL_GPL(hv_is_hypercall_page_setup);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index aaf59b7da98a..7c9c895432a9 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -3,8 +3,28 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/interrupt.h> 5#include <linux/interrupt.h>
6#include <linux/clocksource.h>
6#include <asm/hyperv.h> 7#include <asm/hyperv.h>
7 8
9/*
10 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
11 * is set by CPUID(HVCPUID_VERSION_FEATURES).
12 */
13enum hv_cpuid_function {
14 HVCPUID_VERSION_FEATURES = 0x00000001,
15 HVCPUID_VENDOR_MAXFUNCTION = 0x40000000,
16 HVCPUID_INTERFACE = 0x40000001,
17
18 /*
19 * The remaining functions depend on the value of
20 * HVCPUID_INTERFACE
21 */
22 HVCPUID_VERSION = 0x40000002,
23 HVCPUID_FEATURES = 0x40000003,
24 HVCPUID_ENLIGHTENMENT_INFO = 0x40000004,
25 HVCPUID_IMPLEMENTATION_LIMITS = 0x40000005,
26};
27
8struct ms_hyperv_info { 28struct ms_hyperv_info {
9 u32 features; 29 u32 features;
10 u32 misc_features; 30 u32 misc_features;
@@ -13,6 +33,128 @@ struct ms_hyperv_info {
13 33
14extern struct ms_hyperv_info ms_hyperv; 34extern struct ms_hyperv_info ms_hyperv;
15 35
36/*
37 * Declare the MSR used to setup pages used to communicate with the hypervisor.
38 */
39union hv_x64_msr_hypercall_contents {
40 u64 as_uint64;
41 struct {
42 u64 enable:1;
43 u64 reserved:11;
44 u64 guest_physical_address:52;
45 };
46};
47
48/*
49 * TSC page layout.
50 */
51
52struct ms_hyperv_tsc_page {
53 volatile u32 tsc_sequence;
54 u32 reserved1;
55 volatile u64 tsc_scale;
56 volatile s64 tsc_offset;
57 u64 reserved2[509];
58};
59
60/*
61 * The guest OS needs to register the guest ID with the hypervisor.
62 * The guest ID is a 64 bit entity and the structure of this ID is
63 * specified in the Hyper-V specification:
64 *
65 * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
66 *
67 * While the current guideline does not specify how Linux guest ID(s)
68 * need to be generated, our plan is to publish the guidelines for
69 * Linux and other guest operating systems that currently are hosted
70 * on Hyper-V. The implementation here conforms to this yet
71 * unpublished guidelines.
72 *
73 *
74 * Bit(s)
75 * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
76 * 62:56 - Os Type; Linux is 0x100
77 * 55:48 - Distro specific identification
78 * 47:16 - Linux kernel version number
79 * 15:0 - Distro specific identification
80 *
81 *
82 */
83
84#define HV_LINUX_VENDOR_ID 0x8100
85
86/*
87 * Generate the guest ID based on the guideline described above.
88 */
89
90static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
91 __u64 d_info2)
92{
93 __u64 guest_id = 0;
94
95 guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
96 guest_id |= (d_info1 << 48);
97 guest_id |= (kernel_version << 16);
98 guest_id |= d_info2;
99
100 return guest_id;
101}
102
103
104/* Free the message slot and signal end-of-message if required */
105static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
106{
107 /*
108 * On crash we're reading some other CPU's message page and we need
109 * to be careful: this other CPU may already had cleared the header
110 * and the host may already had delivered some other message there.
111 * In case we blindly write msg->header.message_type we're going
112 * to lose it. We can still lose a message of the same type but
113 * we count on the fact that there can only be one
114 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
115 * on crash.
116 */
117 if (cmpxchg(&msg->header.message_type, old_msg_type,
118 HVMSG_NONE) != old_msg_type)
119 return;
120
121 /*
122 * Make sure the write to MessageType (ie set to
123 * HVMSG_NONE) happens before we read the
124 * MessagePending and EOMing. Otherwise, the EOMing
125 * will not deliver any more messages since there is
126 * no empty slot
127 */
128 mb();
129
130 if (msg->header.message_flags.msg_pending) {
131 /*
132 * This will cause message queue rescan to
133 * possibly deliver another msg from the
134 * hypervisor
135 */
136 wrmsrl(HV_X64_MSR_EOM, 0);
137 }
138}
139
140#define hv_get_current_tick(tick) rdmsrl(HV_X64_MSR_TIME_REF_COUNT, tick)
141#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
142#define hv_init_timer_config(config, val) wrmsrl(config, val)
143
144#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
145#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)
146
147#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
148#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)
149
150#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
151#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)
152
153#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)
154
155#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val)
156#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)
157
16void hyperv_callback_vector(void); 158void hyperv_callback_vector(void);
17#ifdef CONFIG_TRACING 159#ifdef CONFIG_TRACING
18#define trace_hyperv_callback_vector hyperv_callback_vector 160#define trace_hyperv_callback_vector hyperv_callback_vector
@@ -25,4 +167,13 @@ void hv_setup_kexec_handler(void (*handler)(void));
25void hv_remove_kexec_handler(void); 167void hv_remove_kexec_handler(void);
26void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); 168void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
27void hv_remove_crash_handler(void); 169void hv_remove_crash_handler(void);
170
171#if IS_ENABLED(CONFIG_HYPERV)
172extern struct clocksource *hyperv_cs;
173
174void hyperv_init(void);
175void hyperv_report_panic(struct pt_regs *regs);
176bool hv_is_hypercall_page_setup(void);
177void hyperv_cleanup(void);
178#endif
28#endif 179#endif
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 9b1a91834ac8..3a20ccf787b8 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -73,6 +73,9 @@
73 */ 73 */
74#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8) 74#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8)
75 75
76/* Crash MSR available */
77#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
78
76/* 79/*
77 * Feature identification: EBX indicates which flags were specified at 80 * Feature identification: EBX indicates which flags were specified at
78 * partition creation. The format is the same as the partition creation 81 * partition creation. The format is the same as the partition creation
@@ -144,6 +147,11 @@
144 */ 147 */
145#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5) 148#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
146 149
150/*
151 * Crash notification flag.
152 */
153#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
154
147/* MSR used to identify the guest OS. */ 155/* MSR used to identify the guest OS. */
148#define HV_X64_MSR_GUEST_OS_ID 0x40000000 156#define HV_X64_MSR_GUEST_OS_ID 0x40000000
149 157
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 65e20c97e04b..b5375b9497b3 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -133,26 +133,6 @@ static uint32_t __init ms_hyperv_platform(void)
133 return 0; 133 return 0;
134} 134}
135 135
136static u64 read_hv_clock(struct clocksource *arg)
137{
138 u64 current_tick;
139 /*
140 * Read the partition counter to get the current tick count. This count
141 * is set to 0 when the partition is created and is incremented in
142 * 100 nanosecond units.
143 */
144 rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
145 return current_tick;
146}
147
148static struct clocksource hyperv_cs = {
149 .name = "hyperv_clocksource",
150 .rating = 400, /* use this when running on Hyperv*/
151 .read = read_hv_clock,
152 .mask = CLOCKSOURCE_MASK(64),
153 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
154};
155
156static unsigned char hv_get_nmi_reason(void) 136static unsigned char hv_get_nmi_reason(void)
157{ 137{
158 return 0; 138 return 0;
@@ -180,6 +160,11 @@ static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
180 160
181static void __init ms_hyperv_init_platform(void) 161static void __init ms_hyperv_init_platform(void)
182{ 162{
163 int hv_host_info_eax;
164 int hv_host_info_ebx;
165 int hv_host_info_ecx;
166 int hv_host_info_edx;
167
183 /* 168 /*
184 * Extract the features and hints 169 * Extract the features and hints
185 */ 170 */
@@ -190,6 +175,21 @@ static void __init ms_hyperv_init_platform(void)
190 pr_info("HyperV: features 0x%x, hints 0x%x\n", 175 pr_info("HyperV: features 0x%x, hints 0x%x\n",
191 ms_hyperv.features, ms_hyperv.hints); 176 ms_hyperv.features, ms_hyperv.hints);
192 177
178 /*
179 * Extract host information.
180 */
181 if (cpuid_eax(HVCPUID_VENDOR_MAXFUNCTION) >= HVCPUID_VERSION) {
182 hv_host_info_eax = cpuid_eax(HVCPUID_VERSION);
183 hv_host_info_ebx = cpuid_ebx(HVCPUID_VERSION);
184 hv_host_info_ecx = cpuid_ecx(HVCPUID_VERSION);
185 hv_host_info_edx = cpuid_edx(HVCPUID_VERSION);
186
187 pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d\n",
188 hv_host_info_eax, hv_host_info_ebx >> 16,
189 hv_host_info_ebx & 0xFFFF, hv_host_info_ecx,
190 hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF);
191 }
192
193#ifdef CONFIG_X86_LOCAL_APIC 193#ifdef CONFIG_X86_LOCAL_APIC
194 if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) { 194 if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
195 /* 195 /*
@@ -208,9 +208,6 @@ static void __init ms_hyperv_init_platform(void)
208 "hv_nmi_unknown"); 208 "hv_nmi_unknown");
209#endif 209#endif
210 210
211 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
212 clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
213
214#ifdef CONFIG_X86_IO_APIC 211#ifdef CONFIG_X86_IO_APIC
215 no_timer_check = 1; 212 no_timer_check = 1;
216#endif 213#endif
@@ -227,6 +224,13 @@ static void __init ms_hyperv_init_platform(void)
227 */ 224 */
228 if (efi_enabled(EFI_BOOT)) 225 if (efi_enabled(EFI_BOOT))
229 x86_platform.get_nmi_reason = hv_get_nmi_reason; 226 x86_platform.get_nmi_reason = hv_get_nmi_reason;
227
228#if IS_ENABLED(CONFIG_HYPERV)
229 /*
230 * Setup the hook to get control post apic initialization.
231 */
232 x86_platform.apic_post_init = hyperv_init;
233#endif
230} 234}
231 235
232const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { 236const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c
index 1693107a518e..0d17c0aafeb1 100644
--- a/arch/x86/platform/goldfish/goldfish.c
+++ b/arch/x86/platform/goldfish/goldfish.c
@@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = {
42 } 42 }
43}; 43};
44 44
45static bool goldfish_enable __initdata;
46
47static int __init goldfish_setup(char *str)
48{
49 goldfish_enable = true;
50 return 0;
51}
52__setup("goldfish", goldfish_setup);
53
45static int __init goldfish_init(void) 54static int __init goldfish_init(void)
46{ 55{
56 if (!goldfish_enable)
57 return -ENODEV;
58
47 platform_device_register_simple("goldfish_pdev_bus", -1, 59 platform_device_register_simple("goldfish_pdev_bus", -1,
48 goldfish_pdev_bus_resources, 2); 60 goldfish_pdev_bus_resources, 2);
49 return 0; 61 return 0;
50} 62}
51device_initcall(goldfish_init); 63device_initcall(goldfish_init);
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e1e2066cecdb..117ca14ccf85 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -202,4 +202,6 @@ source "drivers/hwtracing/intel_th/Kconfig"
202 202
203source "drivers/fpga/Kconfig" 203source "drivers/fpga/Kconfig"
204 204
205source "drivers/fsi/Kconfig"
206
205endmenu 207endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 060026a02f59..67ce51d62015 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -173,3 +173,4 @@ obj-$(CONFIG_STM) += hwtracing/stm/
173obj-$(CONFIG_ANDROID) += android/ 173obj-$(CONFIG_ANDROID) += android/
174obj-$(CONFIG_NVMEM) += nvmem/ 174obj-$(CONFIG_NVMEM) += nvmem/
175obj-$(CONFIG_FPGA) += fpga/ 175obj-$(CONFIG_FPGA) += fpga/
176obj-$(CONFIG_FSI) += fsi/
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index bdfc6c6f4f5a..a82fc022d34b 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -19,6 +19,18 @@ config ANDROID_BINDER_IPC
19 Android process, using Binder to identify, invoke and pass arguments 19 Android process, using Binder to identify, invoke and pass arguments
20 between said processes. 20 between said processes.
21 21
22config ANDROID_BINDER_DEVICES
23 string "Android Binder devices"
24 depends on ANDROID_BINDER_IPC
25 default "binder"
26 ---help---
27 Default value for the binder.devices parameter.
28
29 The binder.devices parameter is a comma-separated list of strings
30 that specifies the names of the binder device nodes that will be
31 created. Each binder device has its own context manager, and is
32 therefore logically separated from the other devices.
33
22config ANDROID_BINDER_IPC_32BIT 34config ANDROID_BINDER_IPC_32BIT
23 bool 35 bool
24 depends on !64BIT && ANDROID_BINDER_IPC 36 depends on !64BIT && ANDROID_BINDER_IPC
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 3c71b982bf2a..9451b762fa1c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -50,14 +50,13 @@ static DEFINE_MUTEX(binder_main_lock);
50static DEFINE_MUTEX(binder_deferred_lock); 50static DEFINE_MUTEX(binder_deferred_lock);
51static DEFINE_MUTEX(binder_mmap_lock); 51static DEFINE_MUTEX(binder_mmap_lock);
52 52
53static HLIST_HEAD(binder_devices);
53static HLIST_HEAD(binder_procs); 54static HLIST_HEAD(binder_procs);
54static HLIST_HEAD(binder_deferred_list); 55static HLIST_HEAD(binder_deferred_list);
55static HLIST_HEAD(binder_dead_nodes); 56static HLIST_HEAD(binder_dead_nodes);
56 57
57static struct dentry *binder_debugfs_dir_entry_root; 58static struct dentry *binder_debugfs_dir_entry_root;
58static struct dentry *binder_debugfs_dir_entry_proc; 59static struct dentry *binder_debugfs_dir_entry_proc;
59static struct binder_node *binder_context_mgr_node;
60static kuid_t binder_context_mgr_uid = INVALID_UID;
61static int binder_last_id; 60static int binder_last_id;
62 61
63#define BINDER_DEBUG_ENTRY(name) \ 62#define BINDER_DEBUG_ENTRY(name) \
@@ -115,6 +114,9 @@ module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
115static bool binder_debug_no_lock; 114static bool binder_debug_no_lock;
116module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); 115module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
117 116
117static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
118module_param_named(devices, binder_devices_param, charp, 0444);
119
118static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); 120static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
119static int binder_stop_on_user_error; 121static int binder_stop_on_user_error;
120 122
@@ -145,6 +147,17 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
145 binder_stop_on_user_error = 2; \ 147 binder_stop_on_user_error = 2; \
146 } while (0) 148 } while (0)
147 149
150#define to_flat_binder_object(hdr) \
151 container_of(hdr, struct flat_binder_object, hdr)
152
153#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
154
155#define to_binder_buffer_object(hdr) \
156 container_of(hdr, struct binder_buffer_object, hdr)
157
158#define to_binder_fd_array_object(hdr) \
159 container_of(hdr, struct binder_fd_array_object, hdr)
160
148enum binder_stat_types { 161enum binder_stat_types {
149 BINDER_STAT_PROC, 162 BINDER_STAT_PROC,
150 BINDER_STAT_THREAD, 163 BINDER_STAT_THREAD,
@@ -158,7 +171,7 @@ enum binder_stat_types {
158 171
159struct binder_stats { 172struct binder_stats {
160 int br[_IOC_NR(BR_FAILED_REPLY) + 1]; 173 int br[_IOC_NR(BR_FAILED_REPLY) + 1];
161 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; 174 int bc[_IOC_NR(BC_REPLY_SG) + 1];
162 int obj_created[BINDER_STAT_COUNT]; 175 int obj_created[BINDER_STAT_COUNT];
163 int obj_deleted[BINDER_STAT_COUNT]; 176 int obj_deleted[BINDER_STAT_COUNT];
164}; 177};
@@ -186,6 +199,7 @@ struct binder_transaction_log_entry {
186 int to_node; 199 int to_node;
187 int data_size; 200 int data_size;
188 int offsets_size; 201 int offsets_size;
202 const char *context_name;
189}; 203};
190struct binder_transaction_log { 204struct binder_transaction_log {
191 int next; 205 int next;
@@ -210,6 +224,18 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
210 return e; 224 return e;
211} 225}
212 226
227struct binder_context {
228 struct binder_node *binder_context_mgr_node;
229 kuid_t binder_context_mgr_uid;
230 const char *name;
231};
232
233struct binder_device {
234 struct hlist_node hlist;
235 struct miscdevice miscdev;
236 struct binder_context context;
237};
238
213struct binder_work { 239struct binder_work {
214 struct list_head entry; 240 struct list_head entry;
215 enum { 241 enum {
@@ -282,6 +308,7 @@ struct binder_buffer {
282 struct binder_node *target_node; 308 struct binder_node *target_node;
283 size_t data_size; 309 size_t data_size;
284 size_t offsets_size; 310 size_t offsets_size;
311 size_t extra_buffers_size;
285 uint8_t data[0]; 312 uint8_t data[0];
286}; 313};
287 314
@@ -325,6 +352,7 @@ struct binder_proc {
325 int ready_threads; 352 int ready_threads;
326 long default_priority; 353 long default_priority;
327 struct dentry *debugfs_entry; 354 struct dentry *debugfs_entry;
355 struct binder_context *context;
328}; 356};
329 357
330enum { 358enum {
@@ -648,7 +676,9 @@ err_no_vma:
648 676
649static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, 677static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
650 size_t data_size, 678 size_t data_size,
651 size_t offsets_size, int is_async) 679 size_t offsets_size,
680 size_t extra_buffers_size,
681 int is_async)
652{ 682{
653 struct rb_node *n = proc->free_buffers.rb_node; 683 struct rb_node *n = proc->free_buffers.rb_node;
654 struct binder_buffer *buffer; 684 struct binder_buffer *buffer;
@@ -656,7 +686,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
656 struct rb_node *best_fit = NULL; 686 struct rb_node *best_fit = NULL;
657 void *has_page_addr; 687 void *has_page_addr;
658 void *end_page_addr; 688 void *end_page_addr;
659 size_t size; 689 size_t size, data_offsets_size;
660 690
661 if (proc->vma == NULL) { 691 if (proc->vma == NULL) {
662 pr_err("%d: binder_alloc_buf, no vma\n", 692 pr_err("%d: binder_alloc_buf, no vma\n",
@@ -664,15 +694,20 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
664 return NULL; 694 return NULL;
665 } 695 }
666 696
667 size = ALIGN(data_size, sizeof(void *)) + 697 data_offsets_size = ALIGN(data_size, sizeof(void *)) +
668 ALIGN(offsets_size, sizeof(void *)); 698 ALIGN(offsets_size, sizeof(void *));
669 699
670 if (size < data_size || size < offsets_size) { 700 if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
671 binder_user_error("%d: got transaction with invalid size %zd-%zd\n", 701 binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
672 proc->pid, data_size, offsets_size); 702 proc->pid, data_size, offsets_size);
673 return NULL; 703 return NULL;
674 } 704 }
675 705 size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
706 if (size < data_offsets_size || size < extra_buffers_size) {
707 binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
708 proc->pid, extra_buffers_size);
709 return NULL;
710 }
676 if (is_async && 711 if (is_async &&
677 proc->free_async_space < size + sizeof(struct binder_buffer)) { 712 proc->free_async_space < size + sizeof(struct binder_buffer)) {
678 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 713 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -741,6 +776,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
741 proc->pid, size, buffer); 776 proc->pid, size, buffer);
742 buffer->data_size = data_size; 777 buffer->data_size = data_size;
743 buffer->offsets_size = offsets_size; 778 buffer->offsets_size = offsets_size;
779 buffer->extra_buffers_size = extra_buffers_size;
744 buffer->async_transaction = is_async; 780 buffer->async_transaction = is_async;
745 if (is_async) { 781 if (is_async) {
746 proc->free_async_space -= size + sizeof(struct binder_buffer); 782 proc->free_async_space -= size + sizeof(struct binder_buffer);
@@ -815,7 +851,8 @@ static void binder_free_buf(struct binder_proc *proc,
815 buffer_size = binder_buffer_size(proc, buffer); 851 buffer_size = binder_buffer_size(proc, buffer);
816 852
817 size = ALIGN(buffer->data_size, sizeof(void *)) + 853 size = ALIGN(buffer->data_size, sizeof(void *)) +
818 ALIGN(buffer->offsets_size, sizeof(void *)); 854 ALIGN(buffer->offsets_size, sizeof(void *)) +
855 ALIGN(buffer->extra_buffers_size, sizeof(void *));
819 856
820 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 857 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
821 "%d: binder_free_buf %p size %zd buffer_size %zd\n", 858 "%d: binder_free_buf %p size %zd buffer_size %zd\n",
@@ -929,8 +966,9 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
929 if (internal) { 966 if (internal) {
930 if (target_list == NULL && 967 if (target_list == NULL &&
931 node->internal_strong_refs == 0 && 968 node->internal_strong_refs == 0 &&
932 !(node == binder_context_mgr_node && 969 !(node->proc &&
933 node->has_strong_ref)) { 970 node == node->proc->context->binder_context_mgr_node &&
971 node->has_strong_ref)) {
934 pr_err("invalid inc strong node for %d\n", 972 pr_err("invalid inc strong node for %d\n",
935 node->debug_id); 973 node->debug_id);
936 return -EINVAL; 974 return -EINVAL;
@@ -1031,6 +1069,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1031 struct rb_node **p = &proc->refs_by_node.rb_node; 1069 struct rb_node **p = &proc->refs_by_node.rb_node;
1032 struct rb_node *parent = NULL; 1070 struct rb_node *parent = NULL;
1033 struct binder_ref *ref, *new_ref; 1071 struct binder_ref *ref, *new_ref;
1072 struct binder_context *context = proc->context;
1034 1073
1035 while (*p) { 1074 while (*p) {
1036 parent = *p; 1075 parent = *p;
@@ -1053,7 +1092,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1053 rb_link_node(&new_ref->rb_node_node, parent, p); 1092 rb_link_node(&new_ref->rb_node_node, parent, p);
1054 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); 1093 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1055 1094
1056 new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1; 1095 new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1057 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 1096 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1058 ref = rb_entry(n, struct binder_ref, rb_node_desc); 1097 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1059 if (ref->desc > new_ref->desc) 1098 if (ref->desc > new_ref->desc)
@@ -1240,11 +1279,158 @@ static void binder_send_failed_reply(struct binder_transaction *t,
1240 } 1279 }
1241} 1280}
1242 1281
1282/**
1283 * binder_validate_object() - checks for a valid metadata object in a buffer.
1284 * @buffer: binder_buffer that we're parsing.
1285 * @offset: offset in the buffer at which to validate an object.
1286 *
1287 * Return: If there's a valid metadata object at @offset in @buffer, the
1288 * size of that object. Otherwise, it returns zero.
1289 */
1290static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1291{
1292 /* Check if we can read a header first */
1293 struct binder_object_header *hdr;
1294 size_t object_size = 0;
1295
1296 if (offset > buffer->data_size - sizeof(*hdr) ||
1297 buffer->data_size < sizeof(*hdr) ||
1298 !IS_ALIGNED(offset, sizeof(u32)))
1299 return 0;
1300
1301 /* Ok, now see if we can read a complete object. */
1302 hdr = (struct binder_object_header *)(buffer->data + offset);
1303 switch (hdr->type) {
1304 case BINDER_TYPE_BINDER:
1305 case BINDER_TYPE_WEAK_BINDER:
1306 case BINDER_TYPE_HANDLE:
1307 case BINDER_TYPE_WEAK_HANDLE:
1308 object_size = sizeof(struct flat_binder_object);
1309 break;
1310 case BINDER_TYPE_FD:
1311 object_size = sizeof(struct binder_fd_object);
1312 break;
1313 case BINDER_TYPE_PTR:
1314 object_size = sizeof(struct binder_buffer_object);
1315 break;
1316 case BINDER_TYPE_FDA:
1317 object_size = sizeof(struct binder_fd_array_object);
1318 break;
1319 default:
1320 return 0;
1321 }
1322 if (offset <= buffer->data_size - object_size &&
1323 buffer->data_size >= object_size)
1324 return object_size;
1325 else
1326 return 0;
1327}
1328
1329/**
1330 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1331 * @b: binder_buffer containing the object
1332 * @index: index in offset array at which the binder_buffer_object is
1333 * located
1334 * @start: points to the start of the offset array
1335 * @num_valid: the number of valid offsets in the offset array
1336 *
1337 * Return: If @index is within the valid range of the offset array
1338 * described by @start and @num_valid, and if there's a valid
1339 * binder_buffer_object at the offset found in index @index
1340 * of the offset array, that object is returned. Otherwise,
1341 * %NULL is returned.
1342 * Note that the offset found in index @index itself is not
1343 * verified; this function assumes that @num_valid elements
1344 * from @start were previously verified to have valid offsets.
1345 */
1346static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
1347 binder_size_t index,
1348 binder_size_t *start,
1349 binder_size_t num_valid)
1350{
1351 struct binder_buffer_object *buffer_obj;
1352 binder_size_t *offp;
1353
1354 if (index >= num_valid)
1355 return NULL;
1356
1357 offp = start + index;
1358 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1359 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1360 return NULL;
1361
1362 return buffer_obj;
1363}
1364
1365/**
1366 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1367 * @b: transaction buffer
1368 * @objects_start start of objects buffer
1369 * @buffer: binder_buffer_object in which to fix up
1370 * @offset: start offset in @buffer to fix up
1371 * @last_obj: last binder_buffer_object that we fixed up in
1372 * @last_min_offset: minimum fixup offset in @last_obj
1373 *
1374 * Return: %true if a fixup in buffer @buffer at offset @offset is
1375 * allowed.
1376 *
1377 * For safety reasons, we only allow fixups inside a buffer to happen
1378 * at increasing offsets; additionally, we only allow fixup on the last
1379 * buffer object that was verified, or one of its parents.
1380 *
1381 * Example of what is allowed:
1382 *
1383 * A
1384 * B (parent = A, offset = 0)
1385 * C (parent = A, offset = 16)
1386 * D (parent = C, offset = 0)
1387 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1388 *
1389 * Examples of what is not allowed:
1390 *
1391 * Decreasing offsets within the same parent:
1392 * A
1393 * C (parent = A, offset = 16)
1394 * B (parent = A, offset = 0) // decreasing offset within A
1395 *
1396 * Referring to a parent that wasn't the last object or any of its parents:
1397 * A
1398 * B (parent = A, offset = 0)
1399 * C (parent = A, offset = 0)
1400 * C (parent = A, offset = 16)
1401 * D (parent = B, offset = 0) // B is not A or any of A's parents
1402 */
1403static bool binder_validate_fixup(struct binder_buffer *b,
1404 binder_size_t *objects_start,
1405 struct binder_buffer_object *buffer,
1406 binder_size_t fixup_offset,
1407 struct binder_buffer_object *last_obj,
1408 binder_size_t last_min_offset)
1409{
1410 if (!last_obj) {
1411 /* Nothing to fix up in */
1412 return false;
1413 }
1414
1415 while (last_obj != buffer) {
1416 /*
1417 * Safe to retrieve the parent of last_obj, since it
1418 * was already previously verified by the driver.
1419 */
1420 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1421 return false;
1422 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1423 last_obj = (struct binder_buffer_object *)
1424 (b->data + *(objects_start + last_obj->parent));
1425 }
1426 return (fixup_offset >= last_min_offset);
1427}
1428
1243static void binder_transaction_buffer_release(struct binder_proc *proc, 1429static void binder_transaction_buffer_release(struct binder_proc *proc,
1244 struct binder_buffer *buffer, 1430 struct binder_buffer *buffer,
1245 binder_size_t *failed_at) 1431 binder_size_t *failed_at)
1246{ 1432{
1247 binder_size_t *offp, *off_end; 1433 binder_size_t *offp, *off_start, *off_end;
1248 int debug_id = buffer->debug_id; 1434 int debug_id = buffer->debug_id;
1249 1435
1250 binder_debug(BINDER_DEBUG_TRANSACTION, 1436 binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1255,28 +1441,30 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
1255 if (buffer->target_node) 1441 if (buffer->target_node)
1256 binder_dec_node(buffer->target_node, 1, 0); 1442 binder_dec_node(buffer->target_node, 1, 0);
1257 1443
1258 offp = (binder_size_t *)(buffer->data + 1444 off_start = (binder_size_t *)(buffer->data +
1259 ALIGN(buffer->data_size, sizeof(void *))); 1445 ALIGN(buffer->data_size, sizeof(void *)));
1260 if (failed_at) 1446 if (failed_at)
1261 off_end = failed_at; 1447 off_end = failed_at;
1262 else 1448 else
1263 off_end = (void *)offp + buffer->offsets_size; 1449 off_end = (void *)off_start + buffer->offsets_size;
1264 for (; offp < off_end; offp++) { 1450 for (offp = off_start; offp < off_end; offp++) {
1265 struct flat_binder_object *fp; 1451 struct binder_object_header *hdr;
1452 size_t object_size = binder_validate_object(buffer, *offp);
1266 1453
1267 if (*offp > buffer->data_size - sizeof(*fp) || 1454 if (object_size == 0) {
1268 buffer->data_size < sizeof(*fp) || 1455 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1269 !IS_ALIGNED(*offp, sizeof(u32))) {
1270 pr_err("transaction release %d bad offset %lld, size %zd\n",
1271 debug_id, (u64)*offp, buffer->data_size); 1456 debug_id, (u64)*offp, buffer->data_size);
1272 continue; 1457 continue;
1273 } 1458 }
1274 fp = (struct flat_binder_object *)(buffer->data + *offp); 1459 hdr = (struct binder_object_header *)(buffer->data + *offp);
1275 switch (fp->type) { 1460 switch (hdr->type) {
1276 case BINDER_TYPE_BINDER: 1461 case BINDER_TYPE_BINDER:
1277 case BINDER_TYPE_WEAK_BINDER: { 1462 case BINDER_TYPE_WEAK_BINDER: {
1278 struct binder_node *node = binder_get_node(proc, fp->binder); 1463 struct flat_binder_object *fp;
1464 struct binder_node *node;
1279 1465
1466 fp = to_flat_binder_object(hdr);
1467 node = binder_get_node(proc, fp->binder);
1280 if (node == NULL) { 1468 if (node == NULL) {
1281 pr_err("transaction release %d bad node %016llx\n", 1469 pr_err("transaction release %d bad node %016llx\n",
1282 debug_id, (u64)fp->binder); 1470 debug_id, (u64)fp->binder);
@@ -1285,15 +1473,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
1285 binder_debug(BINDER_DEBUG_TRANSACTION, 1473 binder_debug(BINDER_DEBUG_TRANSACTION,
1286 " node %d u%016llx\n", 1474 " node %d u%016llx\n",
1287 node->debug_id, (u64)node->ptr); 1475 node->debug_id, (u64)node->ptr);
1288 binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); 1476 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1477 0);
1289 } break; 1478 } break;
1290 case BINDER_TYPE_HANDLE: 1479 case BINDER_TYPE_HANDLE:
1291 case BINDER_TYPE_WEAK_HANDLE: { 1480 case BINDER_TYPE_WEAK_HANDLE: {
1481 struct flat_binder_object *fp;
1292 struct binder_ref *ref; 1482 struct binder_ref *ref;
1293 1483
1484 fp = to_flat_binder_object(hdr);
1294 ref = binder_get_ref(proc, fp->handle, 1485 ref = binder_get_ref(proc, fp->handle,
1295 fp->type == BINDER_TYPE_HANDLE); 1486 hdr->type == BINDER_TYPE_HANDLE);
1296
1297 if (ref == NULL) { 1487 if (ref == NULL) {
1298 pr_err("transaction release %d bad handle %d\n", 1488 pr_err("transaction release %d bad handle %d\n",
1299 debug_id, fp->handle); 1489 debug_id, fp->handle);
@@ -1302,32 +1492,348 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
1302 binder_debug(BINDER_DEBUG_TRANSACTION, 1492 binder_debug(BINDER_DEBUG_TRANSACTION,
1303 " ref %d desc %d (node %d)\n", 1493 " ref %d desc %d (node %d)\n",
1304 ref->debug_id, ref->desc, ref->node->debug_id); 1494 ref->debug_id, ref->desc, ref->node->debug_id);
1305 binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); 1495 binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
1306 } break; 1496 } break;
1307 1497
1308 case BINDER_TYPE_FD: 1498 case BINDER_TYPE_FD: {
1499 struct binder_fd_object *fp = to_binder_fd_object(hdr);
1500
1309 binder_debug(BINDER_DEBUG_TRANSACTION, 1501 binder_debug(BINDER_DEBUG_TRANSACTION,
1310 " fd %d\n", fp->handle); 1502 " fd %d\n", fp->fd);
1311 if (failed_at) 1503 if (failed_at)
1312 task_close_fd(proc, fp->handle); 1504 task_close_fd(proc, fp->fd);
1505 } break;
1506 case BINDER_TYPE_PTR:
1507 /*
1508 * Nothing to do here, this will get cleaned up when the
1509 * transaction buffer gets freed
1510 */
1313 break; 1511 break;
1314 1512 case BINDER_TYPE_FDA: {
1513 struct binder_fd_array_object *fda;
1514 struct binder_buffer_object *parent;
1515 uintptr_t parent_buffer;
1516 u32 *fd_array;
1517 size_t fd_index;
1518 binder_size_t fd_buf_size;
1519
1520 fda = to_binder_fd_array_object(hdr);
1521 parent = binder_validate_ptr(buffer, fda->parent,
1522 off_start,
1523 offp - off_start);
1524 if (!parent) {
1525 pr_err("transaction release %d bad parent offset",
1526 debug_id);
1527 continue;
1528 }
1529 /*
1530 * Since the parent was already fixed up, convert it
1531 * back to kernel address space to access it
1532 */
1533 parent_buffer = parent->buffer -
1534 proc->user_buffer_offset;
1535
1536 fd_buf_size = sizeof(u32) * fda->num_fds;
1537 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1538 pr_err("transaction release %d invalid number of fds (%lld)\n",
1539 debug_id, (u64)fda->num_fds);
1540 continue;
1541 }
1542 if (fd_buf_size > parent->length ||
1543 fda->parent_offset > parent->length - fd_buf_size) {
1544 /* No space for all file descriptors here. */
1545 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1546 debug_id, (u64)fda->num_fds);
1547 continue;
1548 }
1549 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1550 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1551 task_close_fd(proc, fd_array[fd_index]);
1552 } break;
1315 default: 1553 default:
1316 pr_err("transaction release %d bad object type %x\n", 1554 pr_err("transaction release %d bad object type %x\n",
1317 debug_id, fp->type); 1555 debug_id, hdr->type);
1318 break; 1556 break;
1319 } 1557 }
1320 } 1558 }
1321} 1559}
1322 1560
/**
 * binder_translate_binder() - translate a local binder object for the target
 * @fp:		flat_binder_object (BINDER_TYPE_BINDER/WEAK_BINDER) to rewrite
 * @t:		transaction the object travels in (supplies @t->to_proc)
 * @thread:	sending thread (supplies the sending proc)
 *
 * Looks up (or creates) the binder_node for the sender's object, obtains a
 * ref on it in the target process, and rewrites @fp in place from a
 * (WEAK_)BINDER into the corresponding (WEAK_)HANDLE for the target.
 *
 * Return: 0 on success; -ENOMEM if a new node can't be allocated; -EINVAL
 * on cookie mismatch or ref failure; -EPERM if the security hook denies
 * the transfer.
 */
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		/* First time this object is sent: create its node lazily. */
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	/* The same binder pointer must always come with the same cookie. */
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	ref = binder_get_ref_for_node(target_proc, node);
	if (!ref)
		return -EINVAL;

	/* Rewrite the object in place: the target sees a handle, not a ptr. */
	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = ref->desc;
	fp->cookie = 0;
	/* Strong ref for BINDER, weak for WEAK_BINDER (now HANDLE types). */
	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

	trace_binder_transaction_node_to_ref(t, node, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     ref->debug_id, ref->desc);

	return 0;
}
1610
/**
 * binder_translate_handle() - translate a handle object for the target
 * @fp:		flat_binder_object (BINDER_TYPE_HANDLE/WEAK_HANDLE) to rewrite
 * @t:		transaction the object travels in (supplies @t->to_proc)
 * @thread:	sending thread (supplies the sending proc)
 *
 * If the handle's node lives in the target process, the object is rewritten
 * back into a (WEAK_)BINDER pointing at that node; otherwise a ref is taken
 * in the target and the object becomes a handle valid there.
 *
 * Return: 0 on success; -EINVAL for an invalid handle or ref failure;
 * -EPERM if the security hook denies the transfer.
 */
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	/* Strong lookup for HANDLE, weak for WEAK_HANDLE. */
	ref = binder_get_ref(proc, fp->handle,
			     fp->hdr.type == BINDER_TYPE_HANDLE);
	if (!ref) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	if (ref->node->proc == target_proc) {
		/*
		 * The node lives in the target: hand back the raw binder
		 * pointer/cookie instead of a handle.
		 */
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = ref->node->ptr;
		fp->cookie = ref->node->cookie;
		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     ref->debug_id, ref->desc, ref->node->debug_id,
			     (u64)ref->node->ptr);
	} else {
		struct binder_ref *new_ref;

		/* Node lives elsewhere: mint a ref valid in the target. */
		new_ref = binder_get_ref_for_node(target_proc, ref->node);
		if (!new_ref)
			return -EINVAL;

		fp->binder = 0;
		fp->handle = new_ref->desc;
		fp->cookie = 0;
		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
			       NULL);
		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     ref->debug_id, ref->desc, new_ref->debug_id,
			     new_ref->desc, ref->node->debug_id);
	}
	return 0;
}
1663
/**
 * binder_translate_fd() - duplicate a sender fd into the target process
 * @fd:		file descriptor number in the sending process
 * @t:		transaction the fd travels in (supplies @t->to_proc)
 * @thread:	sending thread
 * @in_reply_to: for replies, the original transaction (its TF_ACCEPT_FDS
 *		flag gates whether fds are allowed); NULL for transactions,
 *		in which case the target node's accept_fds gates it
 *
 * Return: the new fd number in the target process (>= 0) on success;
 * -EPERM if fds are not accepted or the security hook denies the file
 * transfer; -EBADF if @fd is invalid; -ENOMEM if no target fd is free.
 */
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	/* Replies consult the original transaction; transactions the node. */
	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	/* Pin the file so it can't go away while we install it. */
	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	/* Install consumes the file reference taken by fget() above. */
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
1721
1722static int binder_translate_fd_array(struct binder_fd_array_object *fda,
1723 struct binder_buffer_object *parent,
1724 struct binder_transaction *t,
1725 struct binder_thread *thread,
1726 struct binder_transaction *in_reply_to)
1727{
1728 binder_size_t fdi, fd_buf_size, num_installed_fds;
1729 int target_fd;
1730 uintptr_t parent_buffer;
1731 u32 *fd_array;
1732 struct binder_proc *proc = thread->proc;
1733 struct binder_proc *target_proc = t->to_proc;
1734
1735 fd_buf_size = sizeof(u32) * fda->num_fds;
1736 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1737 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
1738 proc->pid, thread->pid, (u64)fda->num_fds);
1739 return -EINVAL;
1740 }
1741 if (fd_buf_size > parent->length ||
1742 fda->parent_offset > parent->length - fd_buf_size) {
1743 /* No space for all file descriptors here. */
1744 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
1745 proc->pid, thread->pid, (u64)fda->num_fds);
1746 return -EINVAL;
1747 }
1748 /*
1749 * Since the parent was already fixed up, convert it
1750 * back to the kernel address space to access it
1751 */
1752 parent_buffer = parent->buffer - target_proc->user_buffer_offset;
1753 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1754 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
1755 binder_user_error("%d:%d parent offset not aligned correctly.\n",
1756 proc->pid, thread->pid);
1757 return -EINVAL;
1758 }
1759 for (fdi = 0; fdi < fda->num_fds; fdi++) {
1760 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
1761 in_reply_to);
1762 if (target_fd < 0)
1763 goto err_translate_fd_failed;
1764 fd_array[fdi] = target_fd;
1765 }
1766 return 0;
1767
1768err_translate_fd_failed:
1769 /*
1770 * Failed to allocate fd or security error, free fds
1771 * installed so far.
1772 */
1773 num_installed_fds = fdi;
1774 for (fdi = 0; fdi < num_installed_fds; fdi++)
1775 task_close_fd(target_proc, fd_array[fdi]);
1776 return target_fd;
1777}
1778
/**
 * binder_fixup_parent() - write a child buffer's pointer into its parent
 * @t:			transaction (supplies the target buffer and proc)
 * @thread:		sending thread (for error reporting)
 * @bp:			buffer object whose new address must be patched in
 * @off_start:		start of the (already partially verified) offset array
 * @num_valid:		number of valid entries in the offset array
 * @last_fixup_obj:	last buffer object a fixup was applied in
 * @last_fixup_min_off:	minimum allowed fixup offset in @last_fixup_obj
 *
 * If @bp declares a parent (BINDER_BUFFER_FLAG_HAS_PARENT), validates the
 * parent and the fixup ordering, then stores @bp->buffer at
 * parent_offset inside the parent's (kernel-mapped) buffer.
 *
 * Return: 0 on success (including the no-parent case); -EINVAL if the
 * parent, fixup order, or offset is invalid.
 */
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	/* Objects without a parent need no fixup. */
	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	/* Fixups must target the last verified object (or an ancestor),
	 * at non-decreasing offsets.
	 */
	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	/* length check first so the subtraction below cannot wrap. */
	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	/*
	 * The parent was already fixed up to a target-user-space address;
	 * convert it back to a kernel address to write through it.
	 */
	parent_buffer = (u8 *)(parent->buffer -
			       target_proc->user_buffer_offset);
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
1825
1323static void binder_transaction(struct binder_proc *proc, 1826static void binder_transaction(struct binder_proc *proc,
1324 struct binder_thread *thread, 1827 struct binder_thread *thread,
1325 struct binder_transaction_data *tr, int reply) 1828 struct binder_transaction_data *tr, int reply,
1829 binder_size_t extra_buffers_size)
1326{ 1830{
1831 int ret;
1327 struct binder_transaction *t; 1832 struct binder_transaction *t;
1328 struct binder_work *tcomplete; 1833 struct binder_work *tcomplete;
1329 binder_size_t *offp, *off_end; 1834 binder_size_t *offp, *off_end, *off_start;
1330 binder_size_t off_min; 1835 binder_size_t off_min;
1836 u8 *sg_bufp, *sg_buf_end;
1331 struct binder_proc *target_proc; 1837 struct binder_proc *target_proc;
1332 struct binder_thread *target_thread = NULL; 1838 struct binder_thread *target_thread = NULL;
1333 struct binder_node *target_node = NULL; 1839 struct binder_node *target_node = NULL;
@@ -1336,6 +1842,9 @@ static void binder_transaction(struct binder_proc *proc,
1336 struct binder_transaction *in_reply_to = NULL; 1842 struct binder_transaction *in_reply_to = NULL;
1337 struct binder_transaction_log_entry *e; 1843 struct binder_transaction_log_entry *e;
1338 uint32_t return_error; 1844 uint32_t return_error;
1845 struct binder_buffer_object *last_fixup_obj = NULL;
1846 binder_size_t last_fixup_min_off = 0;
1847 struct binder_context *context = proc->context;
1339 1848
1340 e = binder_transaction_log_add(&binder_transaction_log); 1849 e = binder_transaction_log_add(&binder_transaction_log);
1341 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 1850 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
@@ -1344,6 +1853,7 @@ static void binder_transaction(struct binder_proc *proc,
1344 e->target_handle = tr->target.handle; 1853 e->target_handle = tr->target.handle;
1345 e->data_size = tr->data_size; 1854 e->data_size = tr->data_size;
1346 e->offsets_size = tr->offsets_size; 1855 e->offsets_size = tr->offsets_size;
1856 e->context_name = proc->context->name;
1347 1857
1348 if (reply) { 1858 if (reply) {
1349 in_reply_to = thread->transaction_stack; 1859 in_reply_to = thread->transaction_stack;
@@ -1396,7 +1906,7 @@ static void binder_transaction(struct binder_proc *proc,
1396 } 1906 }
1397 target_node = ref->node; 1907 target_node = ref->node;
1398 } else { 1908 } else {
1399 target_node = binder_context_mgr_node; 1909 target_node = context->binder_context_mgr_node;
1400 if (target_node == NULL) { 1910 if (target_node == NULL) {
1401 return_error = BR_DEAD_REPLY; 1911 return_error = BR_DEAD_REPLY;
1402 goto err_no_context_mgr_node; 1912 goto err_no_context_mgr_node;
@@ -1463,20 +1973,22 @@ static void binder_transaction(struct binder_proc *proc,
1463 1973
1464 if (reply) 1974 if (reply)
1465 binder_debug(BINDER_DEBUG_TRANSACTION, 1975 binder_debug(BINDER_DEBUG_TRANSACTION,
1466 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n", 1976 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
1467 proc->pid, thread->pid, t->debug_id, 1977 proc->pid, thread->pid, t->debug_id,
1468 target_proc->pid, target_thread->pid, 1978 target_proc->pid, target_thread->pid,
1469 (u64)tr->data.ptr.buffer, 1979 (u64)tr->data.ptr.buffer,
1470 (u64)tr->data.ptr.offsets, 1980 (u64)tr->data.ptr.offsets,
1471 (u64)tr->data_size, (u64)tr->offsets_size); 1981 (u64)tr->data_size, (u64)tr->offsets_size,
1982 (u64)extra_buffers_size);
1472 else 1983 else
1473 binder_debug(BINDER_DEBUG_TRANSACTION, 1984 binder_debug(BINDER_DEBUG_TRANSACTION,
1474 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n", 1985 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
1475 proc->pid, thread->pid, t->debug_id, 1986 proc->pid, thread->pid, t->debug_id,
1476 target_proc->pid, target_node->debug_id, 1987 target_proc->pid, target_node->debug_id,
1477 (u64)tr->data.ptr.buffer, 1988 (u64)tr->data.ptr.buffer,
1478 (u64)tr->data.ptr.offsets, 1989 (u64)tr->data.ptr.offsets,
1479 (u64)tr->data_size, (u64)tr->offsets_size); 1990 (u64)tr->data_size, (u64)tr->offsets_size,
1991 (u64)extra_buffers_size);
1480 1992
1481 if (!reply && !(tr->flags & TF_ONE_WAY)) 1993 if (!reply && !(tr->flags & TF_ONE_WAY))
1482 t->from = thread; 1994 t->from = thread;
@@ -1492,7 +2004,8 @@ static void binder_transaction(struct binder_proc *proc,
1492 trace_binder_transaction(reply, t, target_node); 2004 trace_binder_transaction(reply, t, target_node);
1493 2005
1494 t->buffer = binder_alloc_buf(target_proc, tr->data_size, 2006 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1495 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); 2007 tr->offsets_size, extra_buffers_size,
2008 !reply && (t->flags & TF_ONE_WAY));
1496 if (t->buffer == NULL) { 2009 if (t->buffer == NULL) {
1497 return_error = BR_FAILED_REPLY; 2010 return_error = BR_FAILED_REPLY;
1498 goto err_binder_alloc_buf_failed; 2011 goto err_binder_alloc_buf_failed;
@@ -1505,8 +2018,9 @@ static void binder_transaction(struct binder_proc *proc,
1505 if (target_node) 2018 if (target_node)
1506 binder_inc_node(target_node, 1, 0, NULL); 2019 binder_inc_node(target_node, 1, 0, NULL);
1507 2020
1508 offp = (binder_size_t *)(t->buffer->data + 2021 off_start = (binder_size_t *)(t->buffer->data +
1509 ALIGN(tr->data_size, sizeof(void *))); 2022 ALIGN(tr->data_size, sizeof(void *)));
2023 offp = off_start;
1510 2024
1511 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) 2025 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1512 tr->data.ptr.buffer, tr->data_size)) { 2026 tr->data.ptr.buffer, tr->data_size)) {
@@ -1528,177 +2042,138 @@ static void binder_transaction(struct binder_proc *proc,
1528 return_error = BR_FAILED_REPLY; 2042 return_error = BR_FAILED_REPLY;
1529 goto err_bad_offset; 2043 goto err_bad_offset;
1530 } 2044 }
1531 off_end = (void *)offp + tr->offsets_size; 2045 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2046 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2047 proc->pid, thread->pid,
2048 (u64)extra_buffers_size);
2049 return_error = BR_FAILED_REPLY;
2050 goto err_bad_offset;
2051 }
2052 off_end = (void *)off_start + tr->offsets_size;
2053 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2054 sg_buf_end = sg_bufp + extra_buffers_size;
1532 off_min = 0; 2055 off_min = 0;
1533 for (; offp < off_end; offp++) { 2056 for (; offp < off_end; offp++) {
1534 struct flat_binder_object *fp; 2057 struct binder_object_header *hdr;
2058 size_t object_size = binder_validate_object(t->buffer, *offp);
1535 2059
1536 if (*offp > t->buffer->data_size - sizeof(*fp) || 2060 if (object_size == 0 || *offp < off_min) {
1537 *offp < off_min || 2061 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
1538 t->buffer->data_size < sizeof(*fp) ||
1539 !IS_ALIGNED(*offp, sizeof(u32))) {
1540 binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
1541 proc->pid, thread->pid, (u64)*offp, 2062 proc->pid, thread->pid, (u64)*offp,
1542 (u64)off_min, 2063 (u64)off_min,
1543 (u64)(t->buffer->data_size - 2064 (u64)t->buffer->data_size);
1544 sizeof(*fp)));
1545 return_error = BR_FAILED_REPLY; 2065 return_error = BR_FAILED_REPLY;
1546 goto err_bad_offset; 2066 goto err_bad_offset;
1547 } 2067 }
1548 fp = (struct flat_binder_object *)(t->buffer->data + *offp); 2068
1549 off_min = *offp + sizeof(struct flat_binder_object); 2069 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
1550 switch (fp->type) { 2070 off_min = *offp + object_size;
2071 switch (hdr->type) {
1551 case BINDER_TYPE_BINDER: 2072 case BINDER_TYPE_BINDER:
1552 case BINDER_TYPE_WEAK_BINDER: { 2073 case BINDER_TYPE_WEAK_BINDER: {
1553 struct binder_ref *ref; 2074 struct flat_binder_object *fp;
1554 struct binder_node *node = binder_get_node(proc, fp->binder);
1555 2075
1556 if (node == NULL) { 2076 fp = to_flat_binder_object(hdr);
1557 node = binder_new_node(proc, fp->binder, fp->cookie); 2077 ret = binder_translate_binder(fp, t, thread);
1558 if (node == NULL) { 2078 if (ret < 0) {
1559 return_error = BR_FAILED_REPLY;
1560 goto err_binder_new_node_failed;
1561 }
1562 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1563 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1564 }
1565 if (fp->cookie != node->cookie) {
1566 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1567 proc->pid, thread->pid,
1568 (u64)fp->binder, node->debug_id,
1569 (u64)fp->cookie, (u64)node->cookie);
1570 return_error = BR_FAILED_REPLY;
1571 goto err_binder_get_ref_for_node_failed;
1572 }
1573 if (security_binder_transfer_binder(proc->tsk,
1574 target_proc->tsk)) {
1575 return_error = BR_FAILED_REPLY; 2079 return_error = BR_FAILED_REPLY;
1576 goto err_binder_get_ref_for_node_failed; 2080 goto err_translate_failed;
1577 } 2081 }
1578 ref = binder_get_ref_for_node(target_proc, node);
1579 if (ref == NULL) {
1580 return_error = BR_FAILED_REPLY;
1581 goto err_binder_get_ref_for_node_failed;
1582 }
1583 if (fp->type == BINDER_TYPE_BINDER)
1584 fp->type = BINDER_TYPE_HANDLE;
1585 else
1586 fp->type = BINDER_TYPE_WEAK_HANDLE;
1587 fp->binder = 0;
1588 fp->handle = ref->desc;
1589 fp->cookie = 0;
1590 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1591 &thread->todo);
1592
1593 trace_binder_transaction_node_to_ref(t, node, ref);
1594 binder_debug(BINDER_DEBUG_TRANSACTION,
1595 " node %d u%016llx -> ref %d desc %d\n",
1596 node->debug_id, (u64)node->ptr,
1597 ref->debug_id, ref->desc);
1598 } break; 2082 } break;
1599 case BINDER_TYPE_HANDLE: 2083 case BINDER_TYPE_HANDLE:
1600 case BINDER_TYPE_WEAK_HANDLE: { 2084 case BINDER_TYPE_WEAK_HANDLE: {
1601 struct binder_ref *ref; 2085 struct flat_binder_object *fp;
1602 2086
1603 ref = binder_get_ref(proc, fp->handle, 2087 fp = to_flat_binder_object(hdr);
1604 fp->type == BINDER_TYPE_HANDLE); 2088 ret = binder_translate_handle(fp, t, thread);
2089 if (ret < 0) {
2090 return_error = BR_FAILED_REPLY;
2091 goto err_translate_failed;
2092 }
2093 } break;
1605 2094
1606 if (ref == NULL) { 2095 case BINDER_TYPE_FD: {
1607 binder_user_error("%d:%d got transaction with invalid handle, %d\n", 2096 struct binder_fd_object *fp = to_binder_fd_object(hdr);
1608 proc->pid, 2097 int target_fd = binder_translate_fd(fp->fd, t, thread,
1609 thread->pid, fp->handle); 2098 in_reply_to);
2099
2100 if (target_fd < 0) {
1610 return_error = BR_FAILED_REPLY; 2101 return_error = BR_FAILED_REPLY;
1611 goto err_binder_get_ref_failed; 2102 goto err_translate_failed;
1612 } 2103 }
1613 if (security_binder_transfer_binder(proc->tsk, 2104 fp->pad_binder = 0;
1614 target_proc->tsk)) { 2105 fp->fd = target_fd;
2106 } break;
2107 case BINDER_TYPE_FDA: {
2108 struct binder_fd_array_object *fda =
2109 to_binder_fd_array_object(hdr);
2110 struct binder_buffer_object *parent =
2111 binder_validate_ptr(t->buffer, fda->parent,
2112 off_start,
2113 offp - off_start);
2114 if (!parent) {
2115 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2116 proc->pid, thread->pid);
1615 return_error = BR_FAILED_REPLY; 2117 return_error = BR_FAILED_REPLY;
1616 goto err_binder_get_ref_failed; 2118 goto err_bad_parent;
1617 } 2119 }
1618 if (ref->node->proc == target_proc) { 2120 if (!binder_validate_fixup(t->buffer, off_start,
1619 if (fp->type == BINDER_TYPE_HANDLE) 2121 parent, fda->parent_offset,
1620 fp->type = BINDER_TYPE_BINDER; 2122 last_fixup_obj,
1621 else 2123 last_fixup_min_off)) {
1622 fp->type = BINDER_TYPE_WEAK_BINDER; 2124 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1623 fp->binder = ref->node->ptr; 2125 proc->pid, thread->pid);
1624 fp->cookie = ref->node->cookie; 2126 return_error = BR_FAILED_REPLY;
1625 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); 2127 goto err_bad_parent;
1626 trace_binder_transaction_ref_to_node(t, ref);
1627 binder_debug(BINDER_DEBUG_TRANSACTION,
1628 " ref %d desc %d -> node %d u%016llx\n",
1629 ref->debug_id, ref->desc, ref->node->debug_id,
1630 (u64)ref->node->ptr);
1631 } else {
1632 struct binder_ref *new_ref;
1633
1634 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1635 if (new_ref == NULL) {
1636 return_error = BR_FAILED_REPLY;
1637 goto err_binder_get_ref_for_node_failed;
1638 }
1639 fp->binder = 0;
1640 fp->handle = new_ref->desc;
1641 fp->cookie = 0;
1642 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1643 trace_binder_transaction_ref_to_ref(t, ref,
1644 new_ref);
1645 binder_debug(BINDER_DEBUG_TRANSACTION,
1646 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1647 ref->debug_id, ref->desc, new_ref->debug_id,
1648 new_ref->desc, ref->node->debug_id);
1649 } 2128 }
1650 } break; 2129 ret = binder_translate_fd_array(fda, parent, t, thread,
1651 2130 in_reply_to);
1652 case BINDER_TYPE_FD: { 2131 if (ret < 0) {
1653 int target_fd;
1654 struct file *file;
1655
1656 if (reply) {
1657 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1658 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
1659 proc->pid, thread->pid, fp->handle);
1660 return_error = BR_FAILED_REPLY;
1661 goto err_fd_not_allowed;
1662 }
1663 } else if (!target_node->accept_fds) {
1664 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
1665 proc->pid, thread->pid, fp->handle);
1666 return_error = BR_FAILED_REPLY; 2132 return_error = BR_FAILED_REPLY;
1667 goto err_fd_not_allowed; 2133 goto err_translate_failed;
1668 } 2134 }
1669 2135 last_fixup_obj = parent;
1670 file = fget(fp->handle); 2136 last_fixup_min_off =
1671 if (file == NULL) { 2137 fda->parent_offset + sizeof(u32) * fda->num_fds;
1672 binder_user_error("%d:%d got transaction with invalid fd, %d\n", 2138 } break;
1673 proc->pid, thread->pid, fp->handle); 2139 case BINDER_TYPE_PTR: {
2140 struct binder_buffer_object *bp =
2141 to_binder_buffer_object(hdr);
2142 size_t buf_left = sg_buf_end - sg_bufp;
2143
2144 if (bp->length > buf_left) {
2145 binder_user_error("%d:%d got transaction with too large buffer\n",
2146 proc->pid, thread->pid);
1674 return_error = BR_FAILED_REPLY; 2147 return_error = BR_FAILED_REPLY;
1675 goto err_fget_failed; 2148 goto err_bad_offset;
1676 } 2149 }
1677 if (security_binder_transfer_file(proc->tsk, 2150 if (copy_from_user(sg_bufp,
1678 target_proc->tsk, 2151 (const void __user *)(uintptr_t)
1679 file) < 0) { 2152 bp->buffer, bp->length)) {
1680 fput(file); 2153 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2154 proc->pid, thread->pid);
1681 return_error = BR_FAILED_REPLY; 2155 return_error = BR_FAILED_REPLY;
1682 goto err_get_unused_fd_failed; 2156 goto err_copy_data_failed;
1683 } 2157 }
1684 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 2158 /* Fixup buffer pointer to target proc address space */
1685 if (target_fd < 0) { 2159 bp->buffer = (uintptr_t)sg_bufp +
1686 fput(file); 2160 target_proc->user_buffer_offset;
2161 sg_bufp += ALIGN(bp->length, sizeof(u64));
2162
2163 ret = binder_fixup_parent(t, thread, bp, off_start,
2164 offp - off_start,
2165 last_fixup_obj,
2166 last_fixup_min_off);
2167 if (ret < 0) {
1687 return_error = BR_FAILED_REPLY; 2168 return_error = BR_FAILED_REPLY;
1688 goto err_get_unused_fd_failed; 2169 goto err_translate_failed;
1689 } 2170 }
1690 task_fd_install(target_proc, target_fd, file); 2171 last_fixup_obj = bp;
1691 trace_binder_transaction_fd(t, fp->handle, target_fd); 2172 last_fixup_min_off = 0;
1692 binder_debug(BINDER_DEBUG_TRANSACTION,
1693 " fd %d -> %d\n", fp->handle, target_fd);
1694 /* TODO: fput? */
1695 fp->binder = 0;
1696 fp->handle = target_fd;
1697 } break; 2173 } break;
1698
1699 default: 2174 default:
1700 binder_user_error("%d:%d got transaction with invalid object type, %x\n", 2175 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1701 proc->pid, thread->pid, fp->type); 2176 proc->pid, thread->pid, hdr->type);
1702 return_error = BR_FAILED_REPLY; 2177 return_error = BR_FAILED_REPLY;
1703 goto err_bad_object_type; 2178 goto err_bad_object_type;
1704 } 2179 }
@@ -1728,14 +2203,10 @@ static void binder_transaction(struct binder_proc *proc,
1728 wake_up_interruptible(target_wait); 2203 wake_up_interruptible(target_wait);
1729 return; 2204 return;
1730 2205
1731err_get_unused_fd_failed: 2206err_translate_failed:
1732err_fget_failed:
1733err_fd_not_allowed:
1734err_binder_get_ref_for_node_failed:
1735err_binder_get_ref_failed:
1736err_binder_new_node_failed:
1737err_bad_object_type: 2207err_bad_object_type:
1738err_bad_offset: 2208err_bad_offset:
2209err_bad_parent:
1739err_copy_data_failed: 2210err_copy_data_failed:
1740 trace_binder_transaction_failed_buffer_release(t->buffer); 2211 trace_binder_transaction_failed_buffer_release(t->buffer);
1741 binder_transaction_buffer_release(target_proc, t->buffer, offp); 2212 binder_transaction_buffer_release(target_proc, t->buffer, offp);
@@ -1779,6 +2250,7 @@ static int binder_thread_write(struct binder_proc *proc,
1779 binder_size_t *consumed) 2250 binder_size_t *consumed)
1780{ 2251{
1781 uint32_t cmd; 2252 uint32_t cmd;
2253 struct binder_context *context = proc->context;
1782 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 2254 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1783 void __user *ptr = buffer + *consumed; 2255 void __user *ptr = buffer + *consumed;
1784 void __user *end = buffer + size; 2256 void __user *end = buffer + size;
@@ -1805,10 +2277,10 @@ static int binder_thread_write(struct binder_proc *proc,
1805 if (get_user(target, (uint32_t __user *)ptr)) 2277 if (get_user(target, (uint32_t __user *)ptr))
1806 return -EFAULT; 2278 return -EFAULT;
1807 ptr += sizeof(uint32_t); 2279 ptr += sizeof(uint32_t);
1808 if (target == 0 && binder_context_mgr_node && 2280 if (target == 0 && context->binder_context_mgr_node &&
1809 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 2281 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1810 ref = binder_get_ref_for_node(proc, 2282 ref = binder_get_ref_for_node(proc,
1811 binder_context_mgr_node); 2283 context->binder_context_mgr_node);
1812 if (ref->desc != target) { 2284 if (ref->desc != target) {
1813 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", 2285 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1814 proc->pid, thread->pid, 2286 proc->pid, thread->pid,
@@ -1953,6 +2425,17 @@ static int binder_thread_write(struct binder_proc *proc,
1953 break; 2425 break;
1954 } 2426 }
1955 2427
2428 case BC_TRANSACTION_SG:
2429 case BC_REPLY_SG: {
2430 struct binder_transaction_data_sg tr;
2431
2432 if (copy_from_user(&tr, ptr, sizeof(tr)))
2433 return -EFAULT;
2434 ptr += sizeof(tr);
2435 binder_transaction(proc, thread, &tr.transaction_data,
2436 cmd == BC_REPLY_SG, tr.buffers_size);
2437 break;
2438 }
1956 case BC_TRANSACTION: 2439 case BC_TRANSACTION:
1957 case BC_REPLY: { 2440 case BC_REPLY: {
1958 struct binder_transaction_data tr; 2441 struct binder_transaction_data tr;
@@ -1960,7 +2443,8 @@ static int binder_thread_write(struct binder_proc *proc,
1960 if (copy_from_user(&tr, ptr, sizeof(tr))) 2443 if (copy_from_user(&tr, ptr, sizeof(tr)))
1961 return -EFAULT; 2444 return -EFAULT;
1962 ptr += sizeof(tr); 2445 ptr += sizeof(tr);
1963 binder_transaction(proc, thread, &tr, cmd == BC_REPLY); 2446 binder_transaction(proc, thread, &tr,
2447 cmd == BC_REPLY, 0);
1964 break; 2448 break;
1965 } 2449 }
1966 2450
@@ -2714,9 +3198,11 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
2714{ 3198{
2715 int ret = 0; 3199 int ret = 0;
2716 struct binder_proc *proc = filp->private_data; 3200 struct binder_proc *proc = filp->private_data;
3201 struct binder_context *context = proc->context;
3202
2717 kuid_t curr_euid = current_euid(); 3203 kuid_t curr_euid = current_euid();
2718 3204
2719 if (binder_context_mgr_node != NULL) { 3205 if (context->binder_context_mgr_node) {
2720 pr_err("BINDER_SET_CONTEXT_MGR already set\n"); 3206 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2721 ret = -EBUSY; 3207 ret = -EBUSY;
2722 goto out; 3208 goto out;
@@ -2724,27 +3210,27 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
2724 ret = security_binder_set_context_mgr(proc->tsk); 3210 ret = security_binder_set_context_mgr(proc->tsk);
2725 if (ret < 0) 3211 if (ret < 0)
2726 goto out; 3212 goto out;
2727 if (uid_valid(binder_context_mgr_uid)) { 3213 if (uid_valid(context->binder_context_mgr_uid)) {
2728 if (!uid_eq(binder_context_mgr_uid, curr_euid)) { 3214 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
2729 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", 3215 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2730 from_kuid(&init_user_ns, curr_euid), 3216 from_kuid(&init_user_ns, curr_euid),
2731 from_kuid(&init_user_ns, 3217 from_kuid(&init_user_ns,
2732 binder_context_mgr_uid)); 3218 context->binder_context_mgr_uid));
2733 ret = -EPERM; 3219 ret = -EPERM;
2734 goto out; 3220 goto out;
2735 } 3221 }
2736 } else { 3222 } else {
2737 binder_context_mgr_uid = curr_euid; 3223 context->binder_context_mgr_uid = curr_euid;
2738 } 3224 }
2739 binder_context_mgr_node = binder_new_node(proc, 0, 0); 3225 context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
2740 if (binder_context_mgr_node == NULL) { 3226 if (!context->binder_context_mgr_node) {
2741 ret = -ENOMEM; 3227 ret = -ENOMEM;
2742 goto out; 3228 goto out;
2743 } 3229 }
2744 binder_context_mgr_node->local_weak_refs++; 3230 context->binder_context_mgr_node->local_weak_refs++;
2745 binder_context_mgr_node->local_strong_refs++; 3231 context->binder_context_mgr_node->local_strong_refs++;
2746 binder_context_mgr_node->has_strong_ref = 1; 3232 context->binder_context_mgr_node->has_strong_ref = 1;
2747 binder_context_mgr_node->has_weak_ref = 1; 3233 context->binder_context_mgr_node->has_weak_ref = 1;
2748out: 3234out:
2749 return ret; 3235 return ret;
2750} 3236}
@@ -2969,6 +3455,7 @@ err_bad_arg:
2969static int binder_open(struct inode *nodp, struct file *filp) 3455static int binder_open(struct inode *nodp, struct file *filp)
2970{ 3456{
2971 struct binder_proc *proc; 3457 struct binder_proc *proc;
3458 struct binder_device *binder_dev;
2972 3459
2973 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", 3460 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2974 current->group_leader->pid, current->pid); 3461 current->group_leader->pid, current->pid);
@@ -2982,6 +3469,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
2982 INIT_LIST_HEAD(&proc->todo); 3469 INIT_LIST_HEAD(&proc->todo);
2983 init_waitqueue_head(&proc->wait); 3470 init_waitqueue_head(&proc->wait);
2984 proc->default_priority = task_nice(current); 3471 proc->default_priority = task_nice(current);
3472 binder_dev = container_of(filp->private_data, struct binder_device,
3473 miscdev);
3474 proc->context = &binder_dev->context;
2985 3475
2986 binder_lock(__func__); 3476 binder_lock(__func__);
2987 3477
@@ -2997,8 +3487,17 @@ static int binder_open(struct inode *nodp, struct file *filp)
2997 char strbuf[11]; 3487 char strbuf[11];
2998 3488
2999 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 3489 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3490 /*
3491 * proc debug entries are shared between contexts, so
3492 * this will fail if the process tries to open the driver
3493 * again with a different context. The priting code will
3494 * anyway print all contexts that a given PID has, so this
3495 * is not a problem.
3496 */
3000 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, 3497 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3001 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); 3498 binder_debugfs_dir_entry_proc,
3499 (void *)(unsigned long)proc->pid,
3500 &binder_proc_fops);
3002 } 3501 }
3003 3502
3004 return 0; 3503 return 0;
@@ -3091,6 +3590,7 @@ static int binder_node_release(struct binder_node *node, int refs)
3091static void binder_deferred_release(struct binder_proc *proc) 3590static void binder_deferred_release(struct binder_proc *proc)
3092{ 3591{
3093 struct binder_transaction *t; 3592 struct binder_transaction *t;
3593 struct binder_context *context = proc->context;
3094 struct rb_node *n; 3594 struct rb_node *n;
3095 int threads, nodes, incoming_refs, outgoing_refs, buffers, 3595 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3096 active_transactions, page_count; 3596 active_transactions, page_count;
@@ -3100,11 +3600,12 @@ static void binder_deferred_release(struct binder_proc *proc)
3100 3600
3101 hlist_del(&proc->proc_node); 3601 hlist_del(&proc->proc_node);
3102 3602
3103 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { 3603 if (context->binder_context_mgr_node &&
3604 context->binder_context_mgr_node->proc == proc) {
3104 binder_debug(BINDER_DEBUG_DEAD_BINDER, 3605 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3105 "%s: %d context_mgr_node gone\n", 3606 "%s: %d context_mgr_node gone\n",
3106 __func__, proc->pid); 3607 __func__, proc->pid);
3107 binder_context_mgr_node = NULL; 3608 context->binder_context_mgr_node = NULL;
3108 } 3609 }
3109 3610
3110 threads = 0; 3611 threads = 0;
@@ -3391,6 +3892,7 @@ static void print_binder_proc(struct seq_file *m,
3391 size_t header_pos; 3892 size_t header_pos;
3392 3893
3393 seq_printf(m, "proc %d\n", proc->pid); 3894 seq_printf(m, "proc %d\n", proc->pid);
3895 seq_printf(m, "context %s\n", proc->context->name);
3394 header_pos = m->count; 3896 header_pos = m->count;
3395 3897
3396 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3898 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
@@ -3460,7 +3962,9 @@ static const char * const binder_command_strings[] = {
3460 "BC_EXIT_LOOPER", 3962 "BC_EXIT_LOOPER",
3461 "BC_REQUEST_DEATH_NOTIFICATION", 3963 "BC_REQUEST_DEATH_NOTIFICATION",
3462 "BC_CLEAR_DEATH_NOTIFICATION", 3964 "BC_CLEAR_DEATH_NOTIFICATION",
3463 "BC_DEAD_BINDER_DONE" 3965 "BC_DEAD_BINDER_DONE",
3966 "BC_TRANSACTION_SG",
3967 "BC_REPLY_SG",
3464}; 3968};
3465 3969
3466static const char * const binder_objstat_strings[] = { 3970static const char * const binder_objstat_strings[] = {
@@ -3515,6 +4019,7 @@ static void print_binder_proc_stats(struct seq_file *m,
3515 int count, strong, weak; 4019 int count, strong, weak;
3516 4020
3517 seq_printf(m, "proc %d\n", proc->pid); 4021 seq_printf(m, "proc %d\n", proc->pid);
4022 seq_printf(m, "context %s\n", proc->context->name);
3518 count = 0; 4023 count = 0;
3519 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 4024 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3520 count++; 4025 count++;
@@ -3622,23 +4127,18 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
3622static int binder_proc_show(struct seq_file *m, void *unused) 4127static int binder_proc_show(struct seq_file *m, void *unused)
3623{ 4128{
3624 struct binder_proc *itr; 4129 struct binder_proc *itr;
3625 struct binder_proc *proc = m->private; 4130 int pid = (unsigned long)m->private;
3626 int do_lock = !binder_debug_no_lock; 4131 int do_lock = !binder_debug_no_lock;
3627 bool valid_proc = false;
3628 4132
3629 if (do_lock) 4133 if (do_lock)
3630 binder_lock(__func__); 4134 binder_lock(__func__);
3631 4135
3632 hlist_for_each_entry(itr, &binder_procs, proc_node) { 4136 hlist_for_each_entry(itr, &binder_procs, proc_node) {
3633 if (itr == proc) { 4137 if (itr->pid == pid) {
3634 valid_proc = true; 4138 seq_puts(m, "binder proc state:\n");
3635 break; 4139 print_binder_proc(m, itr, 1);
3636 } 4140 }
3637 } 4141 }
3638 if (valid_proc) {
3639 seq_puts(m, "binder proc state:\n");
3640 print_binder_proc(m, proc, 1);
3641 }
3642 if (do_lock) 4142 if (do_lock)
3643 binder_unlock(__func__); 4143 binder_unlock(__func__);
3644 return 0; 4144 return 0;
@@ -3648,11 +4148,11 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
3648 struct binder_transaction_log_entry *e) 4148 struct binder_transaction_log_entry *e)
3649{ 4149{
3650 seq_printf(m, 4150 seq_printf(m,
3651 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", 4151 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
3652 e->debug_id, (e->call_type == 2) ? "reply" : 4152 e->debug_id, (e->call_type == 2) ? "reply" :
3653 ((e->call_type == 1) ? "async" : "call "), e->from_proc, 4153 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3654 e->from_thread, e->to_proc, e->to_thread, e->to_node, 4154 e->from_thread, e->to_proc, e->to_thread, e->context_name,
3655 e->target_handle, e->data_size, e->offsets_size); 4155 e->to_node, e->target_handle, e->data_size, e->offsets_size);
3656} 4156}
3657 4157
3658static int binder_transaction_log_show(struct seq_file *m, void *unused) 4158static int binder_transaction_log_show(struct seq_file *m, void *unused)
@@ -3680,26 +4180,50 @@ static const struct file_operations binder_fops = {
3680 .release = binder_release, 4180 .release = binder_release,
3681}; 4181};
3682 4182
3683static struct miscdevice binder_miscdev = {
3684 .minor = MISC_DYNAMIC_MINOR,
3685 .name = "binder",
3686 .fops = &binder_fops
3687};
3688
3689BINDER_DEBUG_ENTRY(state); 4183BINDER_DEBUG_ENTRY(state);
3690BINDER_DEBUG_ENTRY(stats); 4184BINDER_DEBUG_ENTRY(stats);
3691BINDER_DEBUG_ENTRY(transactions); 4185BINDER_DEBUG_ENTRY(transactions);
3692BINDER_DEBUG_ENTRY(transaction_log); 4186BINDER_DEBUG_ENTRY(transaction_log);
3693 4187
4188static int __init init_binder_device(const char *name)
4189{
4190 int ret;
4191 struct binder_device *binder_device;
4192
4193 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
4194 if (!binder_device)
4195 return -ENOMEM;
4196
4197 binder_device->miscdev.fops = &binder_fops;
4198 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
4199 binder_device->miscdev.name = name;
4200
4201 binder_device->context.binder_context_mgr_uid = INVALID_UID;
4202 binder_device->context.name = name;
4203
4204 ret = misc_register(&binder_device->miscdev);
4205 if (ret < 0) {
4206 kfree(binder_device);
4207 return ret;
4208 }
4209
4210 hlist_add_head(&binder_device->hlist, &binder_devices);
4211
4212 return ret;
4213}
4214
3694static int __init binder_init(void) 4215static int __init binder_init(void)
3695{ 4216{
3696 int ret; 4217 int ret;
4218 char *device_name, *device_names;
4219 struct binder_device *device;
4220 struct hlist_node *tmp;
3697 4221
3698 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 4222 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3699 if (binder_debugfs_dir_entry_root) 4223 if (binder_debugfs_dir_entry_root)
3700 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 4224 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3701 binder_debugfs_dir_entry_root); 4225 binder_debugfs_dir_entry_root);
3702 ret = misc_register(&binder_miscdev); 4226
3703 if (binder_debugfs_dir_entry_root) { 4227 if (binder_debugfs_dir_entry_root) {
3704 debugfs_create_file("state", 4228 debugfs_create_file("state",
3705 S_IRUGO, 4229 S_IRUGO,
@@ -3727,6 +4251,35 @@ static int __init binder_init(void)
3727 &binder_transaction_log_failed, 4251 &binder_transaction_log_failed,
3728 &binder_transaction_log_fops); 4252 &binder_transaction_log_fops);
3729 } 4253 }
4254
4255 /*
4256 * Copy the module_parameter string, because we don't want to
4257 * tokenize it in-place.
4258 */
4259 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4260 if (!device_names) {
4261 ret = -ENOMEM;
4262 goto err_alloc_device_names_failed;
4263 }
4264 strcpy(device_names, binder_devices_param);
4265
4266 while ((device_name = strsep(&device_names, ","))) {
4267 ret = init_binder_device(device_name);
4268 if (ret)
4269 goto err_init_binder_device_failed;
4270 }
4271
4272 return ret;
4273
4274err_init_binder_device_failed:
4275 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4276 misc_deregister(&device->miscdev);
4277 hlist_del(&device->hlist);
4278 kfree(device);
4279 }
4280err_alloc_device_names_failed:
4281 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
4282
3730 return ret; 4283 return ret;
3731} 4284}
3732 4285
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index eeb323f56c07..f66b45b235b0 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -56,14 +56,16 @@
56#define HT16K33_FB_SIZE (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW) 56#define HT16K33_FB_SIZE (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW)
57 57
58struct ht16k33_keypad { 58struct ht16k33_keypad {
59 struct i2c_client *client;
59 struct input_dev *dev; 60 struct input_dev *dev;
60 spinlock_t lock;
61 struct delayed_work work;
62 uint32_t cols; 61 uint32_t cols;
63 uint32_t rows; 62 uint32_t rows;
64 uint32_t row_shift; 63 uint32_t row_shift;
65 uint32_t debounce_ms; 64 uint32_t debounce_ms;
66 uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS]; 65 uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
66
67 wait_queue_head_t wait;
68 bool stopped;
67}; 69};
68 70
69struct ht16k33_fbdev { 71struct ht16k33_fbdev {
@@ -78,7 +80,6 @@ struct ht16k33_priv {
78 struct i2c_client *client; 80 struct i2c_client *client;
79 struct ht16k33_keypad keypad; 81 struct ht16k33_keypad keypad;
80 struct ht16k33_fbdev fbdev; 82 struct ht16k33_fbdev fbdev;
81 struct workqueue_struct *workqueue;
82}; 83};
83 84
84static struct fb_fix_screeninfo ht16k33_fb_fix = { 85static struct fb_fix_screeninfo ht16k33_fb_fix = {
@@ -124,16 +125,8 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
124{ 125{
125 struct ht16k33_fbdev *fbdev = &priv->fbdev; 126 struct ht16k33_fbdev *fbdev = &priv->fbdev;
126 127
127 queue_delayed_work(priv->workqueue, &fbdev->work, 128 schedule_delayed_work(&fbdev->work,
128 msecs_to_jiffies(HZ / fbdev->refresh_rate)); 129 msecs_to_jiffies(HZ / fbdev->refresh_rate));
129}
130
131static void ht16k33_keypad_queue(struct ht16k33_priv *priv)
132{
133 struct ht16k33_keypad *keypad = &priv->keypad;
134
135 queue_delayed_work(priv->workqueue, &keypad->work,
136 msecs_to_jiffies(keypad->debounce_ms));
137} 130}
138 131
139/* 132/*
@@ -182,32 +175,6 @@ requeue:
182 ht16k33_fb_queue(priv); 175 ht16k33_fb_queue(priv);
183} 176}
184 177
185static int ht16k33_keypad_start(struct input_dev *dev)
186{
187 struct ht16k33_priv *priv = input_get_drvdata(dev);
188 struct ht16k33_keypad *keypad = &priv->keypad;
189
190 /*
191 * Schedule an immediate key scan to capture current key state;
192 * columns will be activated and IRQs be enabled after the scan.
193 */
194 queue_delayed_work(priv->workqueue, &keypad->work, 0);
195 return 0;
196}
197
198static void ht16k33_keypad_stop(struct input_dev *dev)
199{
200 struct ht16k33_priv *priv = input_get_drvdata(dev);
201 struct ht16k33_keypad *keypad = &priv->keypad;
202
203 cancel_delayed_work(&keypad->work);
204 /*
205 * ht16k33_keypad_scan() will leave IRQs enabled;
206 * we should disable them now.
207 */
208 disable_irq_nosync(priv->client->irq);
209}
210
211static int ht16k33_initialize(struct ht16k33_priv *priv) 178static int ht16k33_initialize(struct ht16k33_priv *priv)
212{ 179{
213 uint8_t byte; 180 uint8_t byte;
@@ -233,61 +200,6 @@ static int ht16k33_initialize(struct ht16k33_priv *priv)
233 return i2c_smbus_write_byte(priv->client, byte); 200 return i2c_smbus_write_byte(priv->client, byte);
234} 201}
235 202
236/*
237 * This gets the keys from keypad and reports it to input subsystem
238 */
239static void ht16k33_keypad_scan(struct work_struct *work)
240{
241 struct ht16k33_keypad *keypad =
242 container_of(work, struct ht16k33_keypad, work.work);
243 struct ht16k33_priv *priv =
244 container_of(keypad, struct ht16k33_priv, keypad);
245 const unsigned short *keycodes = keypad->dev->keycode;
246 uint16_t bits_changed, new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
247 uint8_t data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
248 int row, col, code;
249 bool reschedule = false;
250
251 if (i2c_smbus_read_i2c_block_data(priv->client, 0x40, 6, data) != 6) {
252 dev_err(&priv->client->dev, "Failed to read key data\n");
253 goto end;
254 }
255
256 for (col = 0; col < keypad->cols; col++) {
257 new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
258 if (new_state[col])
259 reschedule = true;
260 bits_changed = keypad->last_key_state[col] ^ new_state[col];
261
262 while (bits_changed) {
263 row = ffs(bits_changed) - 1;
264 code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
265 input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
266 input_report_key(keypad->dev, keycodes[code],
267 new_state[col] & BIT(row));
268 bits_changed &= ~BIT(row);
269 }
270 }
271 input_sync(keypad->dev);
272 memcpy(keypad->last_key_state, new_state, sizeof(new_state));
273
274end:
275 if (reschedule)
276 ht16k33_keypad_queue(priv);
277 else
278 enable_irq(priv->client->irq);
279}
280
281static irqreturn_t ht16k33_irq_thread(int irq, void *dev)
282{
283 struct ht16k33_priv *priv = dev;
284
285 disable_irq_nosync(priv->client->irq);
286 ht16k33_keypad_queue(priv);
287
288 return IRQ_HANDLED;
289}
290
291static int ht16k33_bl_update_status(struct backlight_device *bl) 203static int ht16k33_bl_update_status(struct backlight_device *bl)
292{ 204{
293 int brightness = bl->props.brightness; 205 int brightness = bl->props.brightness;
@@ -334,15 +246,152 @@ static struct fb_ops ht16k33_fb_ops = {
334 .fb_mmap = ht16k33_mmap, 246 .fb_mmap = ht16k33_mmap,
335}; 247};
336 248
249/*
250 * This gets the keys from keypad and reports it to input subsystem.
251 * Returns true if a key is pressed.
252 */
253static bool ht16k33_keypad_scan(struct ht16k33_keypad *keypad)
254{
255 const unsigned short *keycodes = keypad->dev->keycode;
256 u16 new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
257 u8 data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
258 unsigned long bits_changed;
259 int row, col, code;
260 bool pressed = false;
261
262 if (i2c_smbus_read_i2c_block_data(keypad->client, 0x40, 6, data) != 6) {
263 dev_err(&keypad->client->dev, "Failed to read key data\n");
264 return false;
265 }
266
267 for (col = 0; col < keypad->cols; col++) {
268 new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
269 if (new_state[col])
270 pressed = true;
271 bits_changed = keypad->last_key_state[col] ^ new_state[col];
272
273 for_each_set_bit(row, &bits_changed, BITS_PER_LONG) {
274 code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
275 input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
276 input_report_key(keypad->dev, keycodes[code],
277 new_state[col] & BIT(row));
278 }
279 }
280 input_sync(keypad->dev);
281 memcpy(keypad->last_key_state, new_state, sizeof(new_state));
282
283 return pressed;
284}
285
286static irqreturn_t ht16k33_keypad_irq_thread(int irq, void *dev)
287{
288 struct ht16k33_keypad *keypad = dev;
289
290 do {
291 wait_event_timeout(keypad->wait, keypad->stopped,
292 msecs_to_jiffies(keypad->debounce_ms));
293 if (keypad->stopped)
294 break;
295 } while (ht16k33_keypad_scan(keypad));
296
297 return IRQ_HANDLED;
298}
299
300static int ht16k33_keypad_start(struct input_dev *dev)
301{
302 struct ht16k33_keypad *keypad = input_get_drvdata(dev);
303
304 keypad->stopped = false;
305 mb();
306 enable_irq(keypad->client->irq);
307
308 return 0;
309}
310
311static void ht16k33_keypad_stop(struct input_dev *dev)
312{
313 struct ht16k33_keypad *keypad = input_get_drvdata(dev);
314
315 keypad->stopped = true;
316 mb();
317 wake_up(&keypad->wait);
318 disable_irq(keypad->client->irq);
319}
320
321static int ht16k33_keypad_probe(struct i2c_client *client,
322 struct ht16k33_keypad *keypad)
323{
324 struct device_node *node = client->dev.of_node;
325 u32 rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
326 u32 cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
327 int err;
328
329 keypad->client = client;
330 init_waitqueue_head(&keypad->wait);
331
332 keypad->dev = devm_input_allocate_device(&client->dev);
333 if (!keypad->dev)
334 return -ENOMEM;
335
336 input_set_drvdata(keypad->dev, keypad);
337
338 keypad->dev->name = DRIVER_NAME"-keypad";
339 keypad->dev->id.bustype = BUS_I2C;
340 keypad->dev->open = ht16k33_keypad_start;
341 keypad->dev->close = ht16k33_keypad_stop;
342
343 if (!of_get_property(node, "linux,no-autorepeat", NULL))
344 __set_bit(EV_REP, keypad->dev->evbit);
345
346 err = of_property_read_u32(node, "debounce-delay-ms",
347 &keypad->debounce_ms);
348 if (err) {
349 dev_err(&client->dev, "key debounce delay not specified\n");
350 return err;
351 }
352
353 err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
354 if (err)
355 return err;
356
357 keypad->rows = rows;
358 keypad->cols = cols;
359 keypad->row_shift = get_count_order(cols);
360
361 err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
362 keypad->dev);
363 if (err) {
364 dev_err(&client->dev, "failed to build keymap\n");
365 return err;
366 }
367
368 err = devm_request_threaded_irq(&client->dev, client->irq,
369 NULL, ht16k33_keypad_irq_thread,
370 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
371 DRIVER_NAME, keypad);
372 if (err) {
373 dev_err(&client->dev, "irq request failed %d, error %d\n",
374 client->irq, err);
375 return err;
376 }
377
378 ht16k33_keypad_stop(keypad->dev);
379
380 err = input_register_device(keypad->dev);
381 if (err)
382 return err;
383
384 return 0;
385}
386
337static int ht16k33_probe(struct i2c_client *client, 387static int ht16k33_probe(struct i2c_client *client,
338 const struct i2c_device_id *id) 388 const struct i2c_device_id *id)
339{ 389{
340 int err; 390 int err;
341 uint32_t rows, cols, dft_brightness; 391 uint32_t dft_brightness;
342 struct backlight_device *bl; 392 struct backlight_device *bl;
343 struct backlight_properties bl_props; 393 struct backlight_properties bl_props;
344 struct ht16k33_priv *priv; 394 struct ht16k33_priv *priv;
345 struct ht16k33_keypad *keypad;
346 struct ht16k33_fbdev *fbdev; 395 struct ht16k33_fbdev *fbdev;
347 struct device_node *node = client->dev.of_node; 396 struct device_node *node = client->dev.of_node;
348 397
@@ -363,23 +412,16 @@ static int ht16k33_probe(struct i2c_client *client,
363 priv->client = client; 412 priv->client = client;
364 i2c_set_clientdata(client, priv); 413 i2c_set_clientdata(client, priv);
365 fbdev = &priv->fbdev; 414 fbdev = &priv->fbdev;
366 keypad = &priv->keypad;
367
368 priv->workqueue = create_singlethread_workqueue(DRIVER_NAME "-wq");
369 if (priv->workqueue == NULL)
370 return -ENOMEM;
371 415
372 err = ht16k33_initialize(priv); 416 err = ht16k33_initialize(priv);
373 if (err) 417 if (err)
374 goto err_destroy_wq; 418 return err;
375 419
376 /* Framebuffer (2 bytes per column) */ 420 /* Framebuffer (2 bytes per column) */
377 BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE); 421 BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
378 fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL); 422 fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
379 if (!fbdev->buffer) { 423 if (!fbdev->buffer)
380 err = -ENOMEM; 424 return -ENOMEM;
381 goto err_free_fbdev;
382 }
383 425
384 fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL); 426 fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL);
385 if (!fbdev->cache) { 427 if (!fbdev->cache) {
@@ -415,59 +457,7 @@ static int ht16k33_probe(struct i2c_client *client,
415 if (err) 457 if (err)
416 goto err_fbdev_info; 458 goto err_fbdev_info;
417 459
418 /* Keypad */ 460 err = ht16k33_keypad_probe(client, &priv->keypad);
419 keypad->dev = devm_input_allocate_device(&client->dev);
420 if (!keypad->dev) {
421 err = -ENOMEM;
422 goto err_fbdev_unregister;
423 }
424
425 keypad->dev->name = DRIVER_NAME"-keypad";
426 keypad->dev->id.bustype = BUS_I2C;
427 keypad->dev->open = ht16k33_keypad_start;
428 keypad->dev->close = ht16k33_keypad_stop;
429
430 if (!of_get_property(node, "linux,no-autorepeat", NULL))
431 __set_bit(EV_REP, keypad->dev->evbit);
432
433 err = of_property_read_u32(node, "debounce-delay-ms",
434 &keypad->debounce_ms);
435 if (err) {
436 dev_err(&client->dev, "key debounce delay not specified\n");
437 goto err_fbdev_unregister;
438 }
439
440 err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
441 ht16k33_irq_thread,
442 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
443 DRIVER_NAME, priv);
444 if (err) {
445 dev_err(&client->dev, "irq request failed %d, error %d\n",
446 client->irq, err);
447 goto err_fbdev_unregister;
448 }
449
450 disable_irq_nosync(client->irq);
451 rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
452 cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
453 err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
454 if (err)
455 goto err_fbdev_unregister;
456
457 err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
458 keypad->dev);
459 if (err) {
460 dev_err(&client->dev, "failed to build keymap\n");
461 goto err_fbdev_unregister;
462 }
463
464 input_set_drvdata(keypad->dev, priv);
465 keypad->rows = rows;
466 keypad->cols = cols;
467 keypad->row_shift = get_count_order(cols);
468 INIT_DELAYED_WORK(&keypad->work, ht16k33_keypad_scan);
469
470 err = input_register_device(keypad->dev);
471 if (err) 461 if (err)
472 goto err_fbdev_unregister; 462 goto err_fbdev_unregister;
473 463
@@ -482,7 +472,7 @@ static int ht16k33_probe(struct i2c_client *client,
482 if (IS_ERR(bl)) { 472 if (IS_ERR(bl)) {
483 dev_err(&client->dev, "failed to register backlight\n"); 473 dev_err(&client->dev, "failed to register backlight\n");
484 err = PTR_ERR(bl); 474 err = PTR_ERR(bl);
485 goto err_keypad_unregister; 475 goto err_fbdev_unregister;
486 } 476 }
487 477
488 err = of_property_read_u32(node, "default-brightness-level", 478 err = of_property_read_u32(node, "default-brightness-level",
@@ -502,18 +492,12 @@ static int ht16k33_probe(struct i2c_client *client,
502 ht16k33_fb_queue(priv); 492 ht16k33_fb_queue(priv);
503 return 0; 493 return 0;
504 494
505err_keypad_unregister:
506 input_unregister_device(keypad->dev);
507err_fbdev_unregister: 495err_fbdev_unregister:
508 unregister_framebuffer(fbdev->info); 496 unregister_framebuffer(fbdev->info);
509err_fbdev_info: 497err_fbdev_info:
510 framebuffer_release(fbdev->info); 498 framebuffer_release(fbdev->info);
511err_fbdev_buffer: 499err_fbdev_buffer:
512 free_page((unsigned long) fbdev->buffer); 500 free_page((unsigned long) fbdev->buffer);
513err_free_fbdev:
514 kfree(fbdev);
515err_destroy_wq:
516 destroy_workqueue(priv->workqueue);
517 501
518 return err; 502 return err;
519} 503}
@@ -521,17 +505,13 @@ err_destroy_wq:
521static int ht16k33_remove(struct i2c_client *client) 505static int ht16k33_remove(struct i2c_client *client)
522{ 506{
523 struct ht16k33_priv *priv = i2c_get_clientdata(client); 507 struct ht16k33_priv *priv = i2c_get_clientdata(client);
524 struct ht16k33_keypad *keypad = &priv->keypad;
525 struct ht16k33_fbdev *fbdev = &priv->fbdev; 508 struct ht16k33_fbdev *fbdev = &priv->fbdev;
526 509
527 ht16k33_keypad_stop(keypad->dev);
528
529 cancel_delayed_work(&fbdev->work); 510 cancel_delayed_work(&fbdev->work);
530 unregister_framebuffer(fbdev->info); 511 unregister_framebuffer(fbdev->info);
531 framebuffer_release(fbdev->info); 512 framebuffer_release(fbdev->info);
532 free_page((unsigned long) fbdev->buffer); 513 free_page((unsigned long) fbdev->buffer);
533 514
534 destroy_workqueue(priv->workqueue);
535 return 0; 515 return 0;
536} 516}
537 517
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fde005ef9d36..4ee2a10207d0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -571,9 +571,12 @@ config TELCLOCK
571 controlling the behavior of this hardware. 571 controlling the behavior of this hardware.
572 572
573config DEVPORT 573config DEVPORT
574 bool 574 bool "/dev/port character device"
575 depends on ISA || PCI 575 depends on ISA || PCI
576 default y 576 default y
577 help
578 Say Y here if you want to support the /dev/port device. The /dev/port
579 device is similar to /dev/mem, but for I/O ports.
577 580
578source "drivers/s390/char/Kconfig" 581source "drivers/s390/char/Kconfig"
579 582
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index dd9dfa15e9d1..1dfb9f8de171 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -31,13 +31,6 @@
31#include <linux/kthread.h> 31#include <linux/kthread.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33 33
34
35/*
36 * The apm_bios device is one of the misc char devices.
37 * This is its minor number.
38 */
39#define APM_MINOR_DEV 134
40
41/* 34/*
42 * One option can be changed at boot time as follows: 35 * One option can be changed at boot time as follows:
43 * apm=on/off enable/disable APM 36 * apm=on/off enable/disable APM
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
index 7d34b203718a..c614a56e68cc 100644
--- a/drivers/char/ds1302.c
+++ b/drivers/char/ds1302.c
@@ -17,7 +17,6 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/miscdevice.h>
21#include <linux/delay.h> 20#include <linux/delay.h>
22#include <linux/bcd.h> 21#include <linux/bcd.h>
23#include <linux/mutex.h> 22#include <linux/mutex.h>
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index f786b18ac500..b708c85dc9c1 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -463,9 +463,9 @@ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
463} 463}
464 464
465static struct miscdevice mmtimer_miscdev = { 465static struct miscdevice mmtimer_miscdev = {
466 SGI_MMTIMER, 466 .minor = SGI_MMTIMER,
467 MMTIMER_NAME, 467 .name = MMTIMER_NAME,
468 &mmtimer_fops 468 .fops = &mmtimer_fops
469}; 469};
470 470
471static struct timespec sgi_clock_offset; 471static struct timespec sgi_clock_offset;
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
index 53c3882e4981..35981cae1afa 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.c
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -269,7 +269,6 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
269{ 269{
270 int status; 270 int status;
271 s32 buffer_count = 0; 271 s32 buffer_count = 0;
272 s32 num_writes = 0;
273 bool dirty = false; 272 bool dirty = false;
274 u32 i; 273 u32 i;
275 void __iomem *base_address = drvdata->base_address; 274 void __iomem *base_address = drvdata->base_address;
@@ -298,7 +297,6 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
298 } 297 }
299 298
300 buffer_count = 0; 299 buffer_count = 0;
301 num_writes++;
302 dirty = false; 300 dirty = false;
303 } 301 }
304 302
@@ -328,7 +326,6 @@ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
328{ 326{
329 int status; 327 int status;
330 s32 buffer_count = 0; 328 s32 buffer_count = 0;
331 s32 read_count = 0;
332 u32 i; 329 u32 i;
333 void __iomem *base_address = drvdata->base_address; 330 void __iomem *base_address = drvdata->base_address;
334 331
@@ -353,7 +350,6 @@ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
353 } 350 }
354 351
355 buffer_count = 0; 352 buffer_count = 0;
356 read_count++;
357 } 353 }
358 354
359 /* Copy data from bram */ 355 /* Copy data from bram */
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 04788d92ea52..96bbae579c0b 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -42,6 +42,16 @@ config EXTCON_GPIO
42 Say Y here to enable GPIO based extcon support. Note that GPIO 42 Say Y here to enable GPIO based extcon support. Note that GPIO
43 extcon supports single state per extcon instance. 43 extcon supports single state per extcon instance.
44 44
45config EXTCON_INTEL_INT3496
46 tristate "Intel INT3496 ACPI device extcon driver"
47 depends on GPIOLIB && ACPI
48 help
49 Say Y here to enable extcon support for USB OTG ports controlled by
50 an Intel INT3496 ACPI device.
51
52 This ACPI device is typically found on Intel Baytrail or Cherrytrail
53 based tablets, or other Baytrail / Cherrytrail devices.
54
45config EXTCON_MAX14577 55config EXTCON_MAX14577
46 tristate "Maxim MAX14577/77836 EXTCON Support" 56 tristate "Maxim MAX14577/77836 EXTCON Support"
47 depends on MFD_MAX14577 57 depends on MFD_MAX14577
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 31a0a999c4fb..237ac3f953c2 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
8obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o 8obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
9obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o 9obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
10obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o 10obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
11obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
11obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o 12obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
12obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o 13obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
13obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o 14obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
index e686acd1c459..b40eb1805927 100644
--- a/drivers/extcon/devres.c
+++ b/drivers/extcon/devres.c
@@ -14,7 +14,7 @@
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 */ 15 */
16 16
17#include <linux/extcon.h> 17#include "extcon.h"
18 18
19static int devm_extcon_dev_match(struct device *dev, void *res, void *data) 19static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
20{ 20{
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index bc538708c753..6f6537ab0a79 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -67,7 +67,7 @@ static void adc_jack_handler(struct work_struct *work)
67 67
68 ret = iio_read_channel_raw(data->chan, &adc_val); 68 ret = iio_read_channel_raw(data->chan, &adc_val);
69 if (ret < 0) { 69 if (ret < 0) {
70 dev_err(&data->edev->dev, "read channel() error: %d\n", ret); 70 dev_err(data->dev, "read channel() error: %d\n", ret);
71 return; 71 return;
72 } 72 }
73 73
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index d836d4ce5ee4..ed78b7c26627 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -236,12 +236,8 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
236 236
237 mode %= info->micd_num_modes; 237 mode %= info->micd_num_modes;
238 238
239 if (arizona->pdata.micd_pol_gpio > 0) 239 gpiod_set_value_cansleep(info->micd_pol_gpio,
240 gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio, 240 info->micd_modes[mode].gpio);
241 info->micd_modes[mode].gpio);
242 else
243 gpiod_set_value_cansleep(info->micd_pol_gpio,
244 info->micd_modes[mode].gpio);
245 241
246 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1, 242 regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
247 ARIZONA_MICD_BIAS_SRC_MASK, 243 ARIZONA_MICD_BIAS_SRC_MASK,
@@ -1412,21 +1408,21 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1412 regmap_update_bits(arizona->regmap, ARIZONA_GP_SWITCH_1, 1408 regmap_update_bits(arizona->regmap, ARIZONA_GP_SWITCH_1,
1413 ARIZONA_SW1_MODE_MASK, arizona->pdata.gpsw); 1409 ARIZONA_SW1_MODE_MASK, arizona->pdata.gpsw);
1414 1410
1415 if (arizona->pdata.micd_pol_gpio > 0) { 1411 if (pdata->micd_pol_gpio > 0) {
1416 if (info->micd_modes[0].gpio) 1412 if (info->micd_modes[0].gpio)
1417 mode = GPIOF_OUT_INIT_HIGH; 1413 mode = GPIOF_OUT_INIT_HIGH;
1418 else 1414 else
1419 mode = GPIOF_OUT_INIT_LOW; 1415 mode = GPIOF_OUT_INIT_LOW;
1420 1416
1421 ret = devm_gpio_request_one(&pdev->dev, 1417 ret = devm_gpio_request_one(&pdev->dev, pdata->micd_pol_gpio,
1422 arizona->pdata.micd_pol_gpio, 1418 mode, "MICD polarity");
1423 mode,
1424 "MICD polarity");
1425 if (ret != 0) { 1419 if (ret != 0) {
1426 dev_err(arizona->dev, "Failed to request GPIO%d: %d\n", 1420 dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
1427 arizona->pdata.micd_pol_gpio, ret); 1421 pdata->micd_pol_gpio, ret);
1428 goto err_register; 1422 goto err_register;
1429 } 1423 }
1424
1425 info->micd_pol_gpio = gpio_to_desc(pdata->micd_pol_gpio);
1430 } else { 1426 } else {
1431 if (info->micd_modes[0].gpio) 1427 if (info->micd_modes[0].gpio)
1432 mode = GPIOD_OUT_HIGH; 1428 mode = GPIOD_OUT_HIGH;
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 42f41e808292..f4fd03e58e37 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -21,7 +21,6 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/property.h> 23#include <linux/property.h>
24#include <linux/usb/phy.h>
25#include <linux/notifier.h> 24#include <linux/notifier.h>
26#include <linux/extcon.h> 25#include <linux/extcon.h>
27#include <linux/regmap.h> 26#include <linux/regmap.h>
@@ -71,12 +70,6 @@
71#define DET_STAT_CDP 2 70#define DET_STAT_CDP 2
72#define DET_STAT_DCP 3 71#define DET_STAT_DCP 3
73 72
74/* IRQ enable-1 register */
75#define PWRSRC_IRQ_CFG_MASK (BIT(4)|BIT(3)|BIT(2))
76
77/* IRQ enable-6 register */
78#define BC12_IRQ_CFG_MASK BIT(1)
79
80enum axp288_extcon_reg { 73enum axp288_extcon_reg {
81 AXP288_PS_STAT_REG = 0x00, 74 AXP288_PS_STAT_REG = 0x00,
82 AXP288_PS_BOOT_REASON_REG = 0x02, 75 AXP288_PS_BOOT_REASON_REG = 0x02,
@@ -84,8 +77,6 @@ enum axp288_extcon_reg {
84 AXP288_BC_VBUS_CNTL_REG = 0x2d, 77 AXP288_BC_VBUS_CNTL_REG = 0x2d,
85 AXP288_BC_USB_STAT_REG = 0x2e, 78 AXP288_BC_USB_STAT_REG = 0x2e,
86 AXP288_BC_DET_STAT_REG = 0x2f, 79 AXP288_BC_DET_STAT_REG = 0x2f,
87 AXP288_PWRSRC_IRQ_CFG_REG = 0x40,
88 AXP288_BC12_IRQ_CFG_REG = 0x45,
89}; 80};
90 81
91enum axp288_mux_select { 82enum axp288_mux_select {
@@ -105,6 +96,7 @@ static const unsigned int axp288_extcon_cables[] = {
105 EXTCON_CHG_USB_SDP, 96 EXTCON_CHG_USB_SDP,
106 EXTCON_CHG_USB_CDP, 97 EXTCON_CHG_USB_CDP,
107 EXTCON_CHG_USB_DCP, 98 EXTCON_CHG_USB_DCP,
99 EXTCON_USB,
108 EXTCON_NONE, 100 EXTCON_NONE,
109}; 101};
110 102
@@ -112,11 +104,11 @@ struct axp288_extcon_info {
112 struct device *dev; 104 struct device *dev;
113 struct regmap *regmap; 105 struct regmap *regmap;
114 struct regmap_irq_chip_data *regmap_irqc; 106 struct regmap_irq_chip_data *regmap_irqc;
115 struct axp288_extcon_pdata *pdata; 107 struct gpio_desc *gpio_mux_cntl;
116 int irq[EXTCON_IRQ_END]; 108 int irq[EXTCON_IRQ_END];
117 struct extcon_dev *edev; 109 struct extcon_dev *edev;
118 struct notifier_block extcon_nb; 110 struct notifier_block extcon_nb;
119 struct usb_phy *otg; 111 unsigned int previous_cable;
120}; 112};
121 113
122/* Power up/down reason string array */ 114/* Power up/down reason string array */
@@ -156,10 +148,9 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
156 148
157static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info) 149static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
158{ 150{
159 static bool notify_otg, notify_charger;
160 static unsigned int cable;
161 int ret, stat, cfg, pwr_stat; 151 int ret, stat, cfg, pwr_stat;
162 u8 chrg_type; 152 u8 chrg_type;
153 unsigned int cable = info->previous_cable;
163 bool vbus_attach = false; 154 bool vbus_attach = false;
164 155
165 ret = regmap_read(info->regmap, AXP288_PS_STAT_REG, &pwr_stat); 156 ret = regmap_read(info->regmap, AXP288_PS_STAT_REG, &pwr_stat);
@@ -168,9 +159,9 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
168 return ret; 159 return ret;
169 } 160 }
170 161
171 vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT); 162 vbus_attach = (pwr_stat & PS_STAT_VBUS_VALID);
172 if (!vbus_attach) 163 if (!vbus_attach)
173 goto notify_otg; 164 goto no_vbus;
174 165
175 /* Check charger detection completion status */ 166 /* Check charger detection completion status */
176 ret = regmap_read(info->regmap, AXP288_BC_GLOBAL_REG, &cfg); 167 ret = regmap_read(info->regmap, AXP288_BC_GLOBAL_REG, &cfg);
@@ -190,19 +181,14 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
190 switch (chrg_type) { 181 switch (chrg_type) {
191 case DET_STAT_SDP: 182 case DET_STAT_SDP:
192 dev_dbg(info->dev, "sdp cable is connected\n"); 183 dev_dbg(info->dev, "sdp cable is connected\n");
193 notify_otg = true;
194 notify_charger = true;
195 cable = EXTCON_CHG_USB_SDP; 184 cable = EXTCON_CHG_USB_SDP;
196 break; 185 break;
197 case DET_STAT_CDP: 186 case DET_STAT_CDP:
198 dev_dbg(info->dev, "cdp cable is connected\n"); 187 dev_dbg(info->dev, "cdp cable is connected\n");
199 notify_otg = true;
200 notify_charger = true;
201 cable = EXTCON_CHG_USB_CDP; 188 cable = EXTCON_CHG_USB_CDP;
202 break; 189 break;
203 case DET_STAT_DCP: 190 case DET_STAT_DCP:
204 dev_dbg(info->dev, "dcp cable is connected\n"); 191 dev_dbg(info->dev, "dcp cable is connected\n");
205 notify_charger = true;
206 cable = EXTCON_CHG_USB_DCP; 192 cable = EXTCON_CHG_USB_DCP;
207 break; 193 break;
208 default: 194 default:
@@ -210,27 +196,28 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
210 "disconnect or unknown or ID event\n"); 196 "disconnect or unknown or ID event\n");
211 } 197 }
212 198
213notify_otg: 199no_vbus:
214 if (notify_otg) { 200 /*
215 /* 201 * If VBUS is absent Connect D+/D- lines to PMIC for BC
216 * If VBUS is absent Connect D+/D- lines to PMIC for BC 202 * detection. Else connect them to SOC for USB communication.
217 * detection. Else connect them to SOC for USB communication. 203 */
218 */ 204 if (info->gpio_mux_cntl)
219 if (info->pdata->gpio_mux_cntl) 205 gpiod_set_value(info->gpio_mux_cntl,
220 gpiod_set_value(info->pdata->gpio_mux_cntl, 206 vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
221 vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC 207 : EXTCON_GPIO_MUX_SEL_PMIC);
222 : EXTCON_GPIO_MUX_SEL_PMIC); 208
223 209 extcon_set_state_sync(info->edev, info->previous_cable, false);
224 atomic_notifier_call_chain(&info->otg->notifier, 210 if (info->previous_cable == EXTCON_CHG_USB_SDP)
225 vbus_attach ? USB_EVENT_VBUS : USB_EVENT_NONE, NULL); 211 extcon_set_state_sync(info->edev, EXTCON_USB, false);
226 } 212
227 213 if (vbus_attach) {
228 if (notify_charger)
229 extcon_set_state_sync(info->edev, cable, vbus_attach); 214 extcon_set_state_sync(info->edev, cable, vbus_attach);
215 if (cable == EXTCON_CHG_USB_SDP)
216 extcon_set_state_sync(info->edev, EXTCON_USB,
217 vbus_attach);
230 218
231 /* Clear the flags on disconnect event */ 219 info->previous_cable = cable;
232 if (!vbus_attach) 220 }
233 notify_otg = notify_charger = false;
234 221
235 return 0; 222 return 0;
236 223
@@ -253,15 +240,10 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
253 return IRQ_HANDLED; 240 return IRQ_HANDLED;
254} 241}
255 242
256static void axp288_extcon_enable_irq(struct axp288_extcon_info *info) 243static void axp288_extcon_enable(struct axp288_extcon_info *info)
257{ 244{
258 /* Unmask VBUS interrupt */
259 regmap_write(info->regmap, AXP288_PWRSRC_IRQ_CFG_REG,
260 PWRSRC_IRQ_CFG_MASK);
261 regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG, 245 regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
262 BC_GLOBAL_RUN, 0); 246 BC_GLOBAL_RUN, 0);
263 /* Unmask the BC1.2 complete interrupts */
264 regmap_write(info->regmap, AXP288_BC12_IRQ_CFG_REG, BC12_IRQ_CFG_MASK);
265 /* Enable the charger detection logic */ 247 /* Enable the charger detection logic */
266 regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG, 248 regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
267 BC_GLOBAL_RUN, BC_GLOBAL_RUN); 249 BC_GLOBAL_RUN, BC_GLOBAL_RUN);
@@ -271,6 +253,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
271{ 253{
272 struct axp288_extcon_info *info; 254 struct axp288_extcon_info *info;
273 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent); 255 struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
256 struct axp288_extcon_pdata *pdata = pdev->dev.platform_data;
274 int ret, i, pirq, gpio; 257 int ret, i, pirq, gpio;
275 258
276 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 259 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -280,15 +263,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
280 info->dev = &pdev->dev; 263 info->dev = &pdev->dev;
281 info->regmap = axp20x->regmap; 264 info->regmap = axp20x->regmap;
282 info->regmap_irqc = axp20x->regmap_irqc; 265 info->regmap_irqc = axp20x->regmap_irqc;
283 info->pdata = pdev->dev.platform_data; 266 info->previous_cable = EXTCON_NONE;
284 267 if (pdata)
285 if (!info->pdata) { 268 info->gpio_mux_cntl = pdata->gpio_mux_cntl;
286 /* Try ACPI provided pdata via device properties */ 269
287 if (!device_property_present(&pdev->dev,
288 "axp288_extcon_data\n"))
289 dev_err(&pdev->dev, "failed to get platform data\n");
290 return -ENODEV;
291 }
292 platform_set_drvdata(pdev, info); 270 platform_set_drvdata(pdev, info);
293 271
294 axp288_extcon_log_rsi(info); 272 axp288_extcon_log_rsi(info);
@@ -308,23 +286,16 @@ static int axp288_extcon_probe(struct platform_device *pdev)
308 return ret; 286 return ret;
309 } 287 }
310 288
311 /* Get otg transceiver phy */
312 info->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
313 if (IS_ERR(info->otg)) {
314 dev_err(&pdev->dev, "failed to get otg transceiver\n");
315 return PTR_ERR(info->otg);
316 }
317
318 /* Set up gpio control for USB Mux */ 289 /* Set up gpio control for USB Mux */
319 if (info->pdata->gpio_mux_cntl) { 290 if (info->gpio_mux_cntl) {
320 gpio = desc_to_gpio(info->pdata->gpio_mux_cntl); 291 gpio = desc_to_gpio(info->gpio_mux_cntl);
321 ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX"); 292 ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
322 if (ret < 0) { 293 if (ret < 0) {
323 dev_err(&pdev->dev, 294 dev_err(&pdev->dev,
324 "failed to request the gpio=%d\n", gpio); 295 "failed to request the gpio=%d\n", gpio);
325 return ret; 296 return ret;
326 } 297 }
327 gpiod_direction_output(info->pdata->gpio_mux_cntl, 298 gpiod_direction_output(info->gpio_mux_cntl,
328 EXTCON_GPIO_MUX_SEL_PMIC); 299 EXTCON_GPIO_MUX_SEL_PMIC);
329 } 300 }
330 301
@@ -349,14 +320,21 @@ static int axp288_extcon_probe(struct platform_device *pdev)
349 } 320 }
350 } 321 }
351 322
352 /* Enable interrupts */ 323 /* Start charger cable type detection */
353 axp288_extcon_enable_irq(info); 324 axp288_extcon_enable(info);
354 325
355 return 0; 326 return 0;
356} 327}
357 328
329static const struct platform_device_id axp288_extcon_table[] = {
330 { .name = "axp288_extcon" },
331 {},
332};
333MODULE_DEVICE_TABLE(platform, axp288_extcon_table);
334
358static struct platform_driver axp288_extcon_driver = { 335static struct platform_driver axp288_extcon_driver = {
359 .probe = axp288_extcon_probe, 336 .probe = axp288_extcon_probe,
337 .id_table = axp288_extcon_table,
360 .driver = { 338 .driver = {
361 .name = "axp288_extcon", 339 .name = "axp288_extcon",
362 }, 340 },
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
new file mode 100644
index 000000000000..a3131b036de6
--- /dev/null
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -0,0 +1,179 @@
1/*
2 * Intel INT3496 ACPI device extcon driver
3 *
4 * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
5 *
6 * Based on android x86 kernel code which is:
7 *
8 * Copyright (c) 2014, Intel Corporation.
9 * Author: David Cohen <david.a.cohen@linux.intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/acpi.h>
22#include <linux/extcon.h>
23#include <linux/gpio.h>
24#include <linux/interrupt.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27
28#define INT3496_GPIO_USB_ID 0
29#define INT3496_GPIO_VBUS_EN 1
30#define INT3496_GPIO_USB_MUX 2
31#define DEBOUNCE_TIME msecs_to_jiffies(50)
32
33struct int3496_data {
34 struct device *dev;
35 struct extcon_dev *edev;
36 struct delayed_work work;
37 struct gpio_desc *gpio_usb_id;
38 struct gpio_desc *gpio_vbus_en;
39 struct gpio_desc *gpio_usb_mux;
40 int usb_id_irq;
41};
42
43static const unsigned int int3496_cable[] = {
44 EXTCON_USB_HOST,
45 EXTCON_NONE,
46};
47
48static void int3496_do_usb_id(struct work_struct *work)
49{
50 struct int3496_data *data =
51 container_of(work, struct int3496_data, work.work);
52 int id = gpiod_get_value_cansleep(data->gpio_usb_id);
53
54 /* id == 1: PERIPHERAL, id == 0: HOST */
55 dev_dbg(data->dev, "Connected %s cable\n", id ? "PERIPHERAL" : "HOST");
56
57 /*
58 * Peripheral: set USB mux to peripheral and disable VBUS
59 * Host: set USB mux to host and enable VBUS
60 */
61 if (!IS_ERR(data->gpio_usb_mux))
62 gpiod_direction_output(data->gpio_usb_mux, id);
63
64 if (!IS_ERR(data->gpio_vbus_en))
65 gpiod_direction_output(data->gpio_vbus_en, !id);
66
67 extcon_set_state_sync(data->edev, EXTCON_USB_HOST, !id);
68}
69
70static irqreturn_t int3496_thread_isr(int irq, void *priv)
71{
72 struct int3496_data *data = priv;
73
74 /* Let the pin settle before processing it */
75 mod_delayed_work(system_wq, &data->work, DEBOUNCE_TIME);
76
77 return IRQ_HANDLED;
78}
79
80static int int3496_probe(struct platform_device *pdev)
81{
82 struct device *dev = &pdev->dev;
83 struct int3496_data *data;
84 int ret;
85
86 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
87 if (!data)
88 return -ENOMEM;
89
90 data->dev = dev;
91 INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
92
93 data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
94 INT3496_GPIO_USB_ID,
95 GPIOD_IN);
96 if (IS_ERR(data->gpio_usb_id)) {
97 ret = PTR_ERR(data->gpio_usb_id);
98 dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
99 return ret;
100 }
101
102 data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
103 if (data->usb_id_irq <= 0) {
104 dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
105 return -EINVAL;
106 }
107
108 data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
109 INT3496_GPIO_VBUS_EN,
110 GPIOD_ASIS);
111 if (IS_ERR(data->gpio_vbus_en))
112 dev_info(dev, "can't request VBUS EN GPIO\n");
113
114 data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
115 INT3496_GPIO_USB_MUX,
116 GPIOD_ASIS);
117 if (IS_ERR(data->gpio_usb_mux))
118 dev_info(dev, "can't request USB MUX GPIO\n");
119
120 /* register extcon device */
121 data->edev = devm_extcon_dev_allocate(dev, int3496_cable);
122 if (IS_ERR(data->edev))
123 return -ENOMEM;
124
125 ret = devm_extcon_dev_register(dev, data->edev);
126 if (ret < 0) {
127 dev_err(dev, "can't register extcon device: %d\n", ret);
128 return ret;
129 }
130
131 ret = devm_request_threaded_irq(dev, data->usb_id_irq,
132 NULL, int3496_thread_isr,
133 IRQF_SHARED | IRQF_ONESHOT |
134 IRQF_TRIGGER_RISING |
135 IRQF_TRIGGER_FALLING,
136 dev_name(dev), data);
137 if (ret < 0) {
138 dev_err(dev, "can't request IRQ for USB ID GPIO: %d\n", ret);
139 return ret;
140 }
141
142 /* queue initial processing of id-pin */
143 queue_delayed_work(system_wq, &data->work, 0);
144
145 platform_set_drvdata(pdev, data);
146
147 return 0;
148}
149
150static int int3496_remove(struct platform_device *pdev)
151{
152 struct int3496_data *data = platform_get_drvdata(pdev);
153
154 devm_free_irq(&pdev->dev, data->usb_id_irq, data);
155 cancel_delayed_work_sync(&data->work);
156
157 return 0;
158}
159
160static struct acpi_device_id int3496_acpi_match[] = {
161 { "INT3496" },
162 { }
163};
164MODULE_DEVICE_TABLE(acpi, int3496_acpi_match);
165
166static struct platform_driver int3496_driver = {
167 .driver = {
168 .name = "intel-int3496",
169 .acpi_match_table = int3496_acpi_match,
170 },
171 .probe = int3496_probe,
172 .remove = int3496_remove,
173};
174
175module_platform_driver(int3496_driver);
176
177MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
178MODULE_DESCRIPTION("Intel INT3496 ACPI device extcon driver");
179MODULE_LICENSE("GPL");
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 12e26c4e7763..f6414b7fa5bc 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -531,8 +531,10 @@ static int max14577_parse_irq(struct max14577_muic_info *info, int irq_type)
531 case MAX14577_IRQ_INT1_ADC: 531 case MAX14577_IRQ_INT1_ADC:
532 case MAX14577_IRQ_INT1_ADCLOW: 532 case MAX14577_IRQ_INT1_ADCLOW:
533 case MAX14577_IRQ_INT1_ADCERR: 533 case MAX14577_IRQ_INT1_ADCERR:
534 /* Handle all of accessory except for 534 /*
535 type of charger accessory */ 535 * Handle all of accessory except for
536 * type of charger accessory.
537 */
536 info->irq_adc = true; 538 info->irq_adc = true;
537 return 1; 539 return 1;
538 case MAX14577_IRQ_INT2_CHGTYP: 540 case MAX14577_IRQ_INT2_CHGTYP:
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 68dbcb814b2f..62163468f205 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -188,8 +188,10 @@ enum max77693_muic_acc_type {
188 MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE, 188 MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE,
189 MAX77693_MUIC_ADC_OPEN, 189 MAX77693_MUIC_ADC_OPEN,
190 190
191 /* The below accessories have same ADC value so ADCLow and 191 /*
192 ADC1K bit is used to separate specific accessory */ 192 * The below accessories have same ADC value so ADCLow and
193 * ADC1K bit is used to separate specific accessory.
194 */
193 /* ADC|VBVolot|ADCLow|ADC1K| */ 195 /* ADC|VBVolot|ADCLow|ADC1K| */
194 MAX77693_MUIC_GND_USB_HOST = 0x100, /* 0x0| 0| 0| 0| */ 196 MAX77693_MUIC_GND_USB_HOST = 0x100, /* 0x0| 0| 0| 0| */
195 MAX77693_MUIC_GND_USB_HOST_VB = 0x104, /* 0x0| 1| 0| 0| */ 197 MAX77693_MUIC_GND_USB_HOST_VB = 0x104, /* 0x0| 1| 0| 0| */
@@ -970,8 +972,10 @@ static void max77693_muic_irq_work(struct work_struct *work)
970 case MAX77693_MUIC_IRQ_INT1_ADC_LOW: 972 case MAX77693_MUIC_IRQ_INT1_ADC_LOW:
971 case MAX77693_MUIC_IRQ_INT1_ADC_ERR: 973 case MAX77693_MUIC_IRQ_INT1_ADC_ERR:
972 case MAX77693_MUIC_IRQ_INT1_ADC1K: 974 case MAX77693_MUIC_IRQ_INT1_ADC1K:
973 /* Handle all of accessory except for 975 /*
974 type of charger accessory */ 976 * Handle all of accessory except for
977 * type of charger accessory.
978 */
975 ret = max77693_muic_adc_handler(info); 979 ret = max77693_muic_adc_handler(info);
976 break; 980 break;
977 case MAX77693_MUIC_IRQ_INT2_CHGTYP: 981 case MAX77693_MUIC_IRQ_INT2_CHGTYP:
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 5d11fdf36e94..6e722d552cf1 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -97,8 +97,10 @@ enum max77843_muic_accessory_type {
97 MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1, 97 MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1,
98 MAX77843_MUIC_ADC_OPEN, 98 MAX77843_MUIC_ADC_OPEN,
99 99
100 /* The blow accessories should check 100 /*
101 not only ADC value but also ADC1K and VBVolt value. */ 101 * The below accessories should check
102 * not only ADC value but also ADC1K and VBVolt value.
103 */
102 /* Offset|ADC1K|VBVolt| */ 104 /* Offset|ADC1K|VBVolt| */
103 MAX77843_MUIC_GND_USB_HOST = 0x100, /* 0x1| 0| 0| */ 105 MAX77843_MUIC_GND_USB_HOST = 0x100, /* 0x1| 0| 0| */
104 MAX77843_MUIC_GND_USB_HOST_VB = 0x101, /* 0x1| 0| 1| */ 106 MAX77843_MUIC_GND_USB_HOST_VB = 0x101, /* 0x1| 0| 1| */
@@ -265,16 +267,20 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
265 /* Check GROUND accessory with charger cable */ 267 /* Check GROUND accessory with charger cable */
266 if (adc == MAX77843_MUIC_ADC_GROUND) { 268 if (adc == MAX77843_MUIC_ADC_GROUND) {
267 if (chg_type == MAX77843_MUIC_CHG_NONE) { 269 if (chg_type == MAX77843_MUIC_CHG_NONE) {
268 /* The following state when charger cable is 270 /*
271 * The following state when charger cable is
269 * disconnected but the GROUND accessory still 272 * disconnected but the GROUND accessory still
270 * connected */ 273 * connected.
274 */
271 *attached = false; 275 *attached = false;
272 cable_type = info->prev_chg_type; 276 cable_type = info->prev_chg_type;
273 info->prev_chg_type = MAX77843_MUIC_CHG_NONE; 277 info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
274 } else { 278 } else {
275 279
276 /* The following state when charger cable is 280 /*
277 * connected on the GROUND accessory */ 281 * The following state when charger cable is
282 * connected on the GROUND accessory.
283 */
278 *attached = true; 284 *attached = true;
279 cable_type = MAX77843_MUIC_CHG_GND; 285 cable_type = MAX77843_MUIC_CHG_GND;
280 info->prev_chg_type = MAX77843_MUIC_CHG_GND; 286 info->prev_chg_type = MAX77843_MUIC_CHG_GND;
@@ -299,11 +305,13 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
299 } else { 305 } else {
300 *attached = true; 306 *attached = true;
301 307
302 /* Offset|ADC1K|VBVolt| 308 /*
309 * Offset|ADC1K|VBVolt|
303 * 0x1| 0| 0| USB-HOST 310 * 0x1| 0| 0| USB-HOST
304 * 0x1| 0| 1| USB-HOST with VB 311 * 0x1| 0| 1| USB-HOST with VB
305 * 0x1| 1| 0| MHL 312 * 0x1| 1| 0| MHL
306 * 0x1| 1| 1| MHL with VB */ 313 * 0x1| 1| 1| MHL with VB
314 */
307 /* Get ADC1K register bit */ 315 /* Get ADC1K register bit */
308 gnd_type = (info->status[MAX77843_MUIC_STATUS1] & 316 gnd_type = (info->status[MAX77843_MUIC_STATUS1] &
309 MAX77843_MUIC_STATUS1_ADC1K_MASK); 317 MAX77843_MUIC_STATUS1_ADC1K_MASK);
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 634ba70782de..ca904e8b3235 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -62,7 +62,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
62 if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) { 62 if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
63 palmas_usb->linkstat = PALMAS_USB_STATE_VBUS; 63 palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
64 extcon_set_state_sync(edev, EXTCON_USB, true); 64 extcon_set_state_sync(edev, EXTCON_USB, true);
65 dev_info(palmas_usb->dev, "USB cable is attached\n"); 65 dev_dbg(palmas_usb->dev, "USB cable is attached\n");
66 } else { 66 } else {
67 dev_dbg(palmas_usb->dev, 67 dev_dbg(palmas_usb->dev,
68 "Spurious connect event detected\n"); 68 "Spurious connect event detected\n");
@@ -71,7 +71,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
71 if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) { 71 if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
72 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; 72 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
73 extcon_set_state_sync(edev, EXTCON_USB, false); 73 extcon_set_state_sync(edev, EXTCON_USB, false);
74 dev_info(palmas_usb->dev, "USB cable is detached\n"); 74 dev_dbg(palmas_usb->dev, "USB cable is detached\n");
75 } else { 75 } else {
76 dev_dbg(palmas_usb->dev, 76 dev_dbg(palmas_usb->dev,
77 "Spurious disconnect event detected\n"); 77 "Spurious disconnect event detected\n");
@@ -99,7 +99,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
99 PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND); 99 PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
100 palmas_usb->linkstat = PALMAS_USB_STATE_ID; 100 palmas_usb->linkstat = PALMAS_USB_STATE_ID;
101 extcon_set_state_sync(edev, EXTCON_USB_HOST, true); 101 extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
102 dev_info(palmas_usb->dev, "USB-HOST cable is attached\n"); 102 dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
103 } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) && 103 } else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
104 (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) { 104 (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
105 palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE, 105 palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
@@ -107,17 +107,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
107 PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT); 107 PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
108 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; 108 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
109 extcon_set_state_sync(edev, EXTCON_USB_HOST, false); 109 extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
110 dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); 110 dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
111 } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) && 111 } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
112 (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) { 112 (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
113 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT; 113 palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
114 extcon_set_state_sync(edev, EXTCON_USB_HOST, false); 114 extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
115 dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); 115 dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
116 } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) && 116 } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
117 (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) { 117 (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
118 palmas_usb->linkstat = PALMAS_USB_STATE_ID; 118 palmas_usb->linkstat = PALMAS_USB_STATE_ID;
119 extcon_set_state_sync(edev, EXTCON_USB_HOST, true); 119 extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
120 dev_info(palmas_usb->dev, " USB-HOST cable is attached\n"); 120 dev_dbg(palmas_usb->dev, " USB-HOST cable is attached\n");
121 } 121 }
122 122
123 return IRQ_HANDLED; 123 return IRQ_HANDLED;
@@ -138,10 +138,10 @@ static void palmas_gpio_id_detect(struct work_struct *work)
138 138
139 if (id) { 139 if (id) {
140 extcon_set_state_sync(edev, EXTCON_USB_HOST, false); 140 extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
141 dev_info(palmas_usb->dev, "USB-HOST cable is detached\n"); 141 dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
142 } else { 142 } else {
143 extcon_set_state_sync(edev, EXTCON_USB_HOST, true); 143 extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
144 dev_info(palmas_usb->dev, "USB-HOST cable is attached\n"); 144 dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
145 } 145 }
146} 146}
147 147
@@ -190,6 +190,11 @@ static int palmas_usb_probe(struct platform_device *pdev)
190 struct palmas_usb *palmas_usb; 190 struct palmas_usb *palmas_usb;
191 int status; 191 int status;
192 192
193 if (!palmas) {
194 dev_err(&pdev->dev, "failed to get valid parent\n");
195 return -EINVAL;
196 }
197
193 palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL); 198 palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL);
194 if (!palmas_usb) 199 if (!palmas_usb)
195 return -ENOMEM; 200 return -ENOMEM;
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 174c388739ea..3e882aa107e8 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -142,8 +142,10 @@ enum rt8973a_muic_acc_type {
142 RT8973A_MUIC_ADC_UNKNOWN_ACC_5, 142 RT8973A_MUIC_ADC_UNKNOWN_ACC_5,
143 RT8973A_MUIC_ADC_OPEN = 0x1f, 143 RT8973A_MUIC_ADC_OPEN = 0x1f,
144 144
145 /* The below accessories has same ADC value (0x1f). 145 /*
146 So, Device type1 is used to separate specific accessory. */ 146 * The below accessories has same ADC value (0x1f).
147 * So, Device type1 is used to separate specific accessory.
148 */
147 /* |---------|--ADC| */ 149 /* |---------|--ADC| */
148 /* | [7:5]|[4:0]| */ 150 /* | [7:5]|[4:0]| */
149 RT8973A_MUIC_ADC_USB = 0x3f, /* | 001|11111| */ 151 RT8973A_MUIC_ADC_USB = 0x3f, /* | 001|11111| */
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index b22325688503..106ef0297b53 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -135,8 +135,10 @@ enum sm5502_muic_acc_type {
135 SM5502_MUIC_ADC_AUDIO_TYPE1, 135 SM5502_MUIC_ADC_AUDIO_TYPE1,
136 SM5502_MUIC_ADC_OPEN = 0x1f, 136 SM5502_MUIC_ADC_OPEN = 0x1f,
137 137
138 /* The below accessories have same ADC value (0x1f or 0x1e). 138 /*
139 So, Device type1 is used to separate specific accessory. */ 139 * The below accessories have same ADC value (0x1f or 0x1e).
140 * So, Device type1 is used to separate specific accessory.
141 */
140 /* |---------|--ADC| */ 142 /* |---------|--ADC| */
141 /* | [7:5]|[4:0]| */ 143 /* | [7:5]|[4:0]| */
142 SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e, /* | 001|11110| */ 144 SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e, /* | 001|11110| */
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index d589c5feff3d..a5e1882b4ca6 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/acpi.h> 29#include <linux/acpi.h>
30#include <linux/pinctrl/consumer.h>
30 31
31#define USB_GPIO_DEBOUNCE_MS 20 /* ms */ 32#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
32 33
@@ -245,6 +246,9 @@ static int usb_extcon_suspend(struct device *dev)
245 if (info->vbus_gpiod) 246 if (info->vbus_gpiod)
246 disable_irq(info->vbus_irq); 247 disable_irq(info->vbus_irq);
247 248
249 if (!device_may_wakeup(dev))
250 pinctrl_pm_select_sleep_state(dev);
251
248 return ret; 252 return ret;
249} 253}
250 254
@@ -253,6 +257,9 @@ static int usb_extcon_resume(struct device *dev)
253 struct usb_extcon_info *info = dev_get_drvdata(dev); 257 struct usb_extcon_info *info = dev_get_drvdata(dev);
254 int ret = 0; 258 int ret = 0;
255 259
260 if (!device_may_wakeup(dev))
261 pinctrl_pm_select_default_state(dev);
262
256 if (device_may_wakeup(dev)) { 263 if (device_may_wakeup(dev)) {
257 if (info->id_gpiod) { 264 if (info->id_gpiod) {
258 ret = disable_irq_wake(info->id_irq); 265 ret = disable_irq_wake(info->id_irq);
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 7c1e3a7b14e0..09ac5e70c2f3 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -30,11 +30,12 @@
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/err.h> 32#include <linux/err.h>
33#include <linux/extcon.h>
34#include <linux/of.h> 33#include <linux/of.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
36#include <linux/sysfs.h> 35#include <linux/sysfs.h>
37 36
37#include "extcon.h"
38
38#define SUPPORTED_CABLE_MAX 32 39#define SUPPORTED_CABLE_MAX 32
39#define CABLE_NAME_MAX 30 40#define CABLE_NAME_MAX 30
40 41
@@ -59,7 +60,7 @@ struct __extcon_info {
59 [EXTCON_USB_HOST] = { 60 [EXTCON_USB_HOST] = {
60 .type = EXTCON_TYPE_USB, 61 .type = EXTCON_TYPE_USB,
61 .id = EXTCON_USB_HOST, 62 .id = EXTCON_USB_HOST,
62 .name = "USB_HOST", 63 .name = "USB-HOST",
63 }, 64 },
64 65
65 /* Charging external connector */ 66 /* Charging external connector */
@@ -98,6 +99,11 @@ struct __extcon_info {
98 .id = EXTCON_CHG_WPT, 99 .id = EXTCON_CHG_WPT,
99 .name = "WPT", 100 .name = "WPT",
100 }, 101 },
102 [EXTCON_CHG_USB_PD] = {
103 .type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
104 .id = EXTCON_CHG_USB_PD,
105 .name = "PD",
106 },
101 107
102 /* Jack external connector */ 108 /* Jack external connector */
103 [EXTCON_JACK_MICROPHONE] = { 109 [EXTCON_JACK_MICROPHONE] = {
@@ -906,35 +912,16 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
906 unsigned long flags; 912 unsigned long flags;
907 int ret, idx = -EINVAL; 913 int ret, idx = -EINVAL;
908 914
909 if (!nb) 915 if (!edev || !nb)
910 return -EINVAL; 916 return -EINVAL;
911 917
912 if (edev) { 918 idx = find_cable_index_by_id(edev, id);
913 idx = find_cable_index_by_id(edev, id); 919 if (idx < 0)
914 if (idx < 0) 920 return idx;
915 return idx;
916
917 spin_lock_irqsave(&edev->lock, flags);
918 ret = raw_notifier_chain_register(&edev->nh[idx], nb);
919 spin_unlock_irqrestore(&edev->lock, flags);
920 } else {
921 struct extcon_dev *extd;
922
923 mutex_lock(&extcon_dev_list_lock);
924 list_for_each_entry(extd, &extcon_dev_list, entry) {
925 idx = find_cable_index_by_id(extd, id);
926 if (idx >= 0)
927 break;
928 }
929 mutex_unlock(&extcon_dev_list_lock);
930 921
931 if (idx >= 0) { 922 spin_lock_irqsave(&edev->lock, flags);
932 edev = extd; 923 ret = raw_notifier_chain_register(&edev->nh[idx], nb);
933 return extcon_register_notifier(extd, id, nb); 924 spin_unlock_irqrestore(&edev->lock, flags);
934 } else {
935 ret = -ENODEV;
936 }
937 }
938 925
939 return ret; 926 return ret;
940} 927}
diff --git a/drivers/extcon/extcon.h b/drivers/extcon/extcon.h
new file mode 100644
index 000000000000..993ddccafe11
--- /dev/null
+++ b/drivers/extcon/extcon.h
@@ -0,0 +1,62 @@
1#ifndef __LINUX_EXTCON_INTERNAL_H__
2#define __LINUX_EXTCON_INTERNAL_H__
3
4#include <linux/extcon.h>
5
6/**
7 * struct extcon_dev - An extcon device represents one external connector.
8 * @name: The name of this extcon device. Parent device name is
9 * used if NULL.
10 * @supported_cable: Array of supported cable names ending with EXTCON_NONE.
11 * If supported_cable is NULL, cable name related APIs
12 * are disabled.
13 * @mutually_exclusive: Array of mutually exclusive set of cables that cannot
14 * be attached simultaneously. The array should be
15 * ending with NULL or be NULL (no mutually exclusive
16 * cables). For example, if it is { 0x7, 0x30, 0}, then,
17 * {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
18 * be attached simulataneously. {0x7, 0} is equivalent to
19 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
20 * can be no simultaneous connections.
21 * @dev: Device of this extcon.
22 * @state: Attach/detach state of this extcon. Do not provide at
23 * register-time.
24 * @nh: Notifier for the state change events from this extcon
25 * @entry: To support list of extcon devices so that users can
26 * search for extcon devices based on the extcon name.
27 * @lock:
28 * @max_supported: Internal value to store the number of cables.
29 * @extcon_dev_type: Device_type struct to provide attribute_groups
30 * customized for each extcon device.
31 * @cables: Sysfs subdirectories. Each represents one cable.
32 *
33 * In most cases, users only need to provide "User initializing data" of
34 * this struct when registering an extcon. In some exceptional cases,
35 * optional callbacks may be needed. However, the values in "internal data"
36 * are overwritten by register function.
37 */
38struct extcon_dev {
39 /* Optional user initializing data */
40 const char *name;
41 const unsigned int *supported_cable;
42 const u32 *mutually_exclusive;
43
44 /* Internal data. Please do not set. */
45 struct device dev;
46 struct raw_notifier_head *nh;
47 struct list_head entry;
48 int max_supported;
49 spinlock_t lock; /* could be called by irq handler */
50 u32 state;
51
52 /* /sys/class/extcon/.../cable.n/... */
53 struct device_type extcon_dev_type;
54 struct extcon_cable *cables;
55
56 /* /sys/class/extcon/.../mutually_exclusive/... */
57 struct attribute_group attr_g_muex;
58 struct attribute **attrs_muex;
59 struct device_attribute *d_attrs_muex;
60};
61
62#endif /* __LINUX_EXTCON_INTERNAL_H__ */
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index f0a69d3e60a5..86d2cb203533 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -25,16 +25,106 @@
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/scatterlist.h>
29#include <linux/highmem.h>
28 30
29static DEFINE_IDA(fpga_mgr_ida); 31static DEFINE_IDA(fpga_mgr_ida);
30static struct class *fpga_mgr_class; 32static struct class *fpga_mgr_class;
31 33
34/*
35 * Call the low level driver's write_init function. This will do the
36 * device-specific things to get the FPGA into the state where it is ready to
37 * receive an FPGA image. The low level driver only gets to see the first
38 * initial_header_size bytes in the buffer.
39 */
40static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
41 struct fpga_image_info *info,
42 const char *buf, size_t count)
43{
44 int ret;
45
46 mgr->state = FPGA_MGR_STATE_WRITE_INIT;
47 if (!mgr->mops->initial_header_size)
48 ret = mgr->mops->write_init(mgr, info, NULL, 0);
49 else
50 ret = mgr->mops->write_init(
51 mgr, info, buf, min(mgr->mops->initial_header_size, count));
52
53 if (ret) {
54 dev_err(&mgr->dev, "Error preparing FPGA for writing\n");
55 mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
56 return ret;
57 }
58
59 return 0;
60}
61
62static int fpga_mgr_write_init_sg(struct fpga_manager *mgr,
63 struct fpga_image_info *info,
64 struct sg_table *sgt)
65{
66 struct sg_mapping_iter miter;
67 size_t len;
68 char *buf;
69 int ret;
70
71 if (!mgr->mops->initial_header_size)
72 return fpga_mgr_write_init_buf(mgr, info, NULL, 0);
73
74 /*
75 * First try to use miter to map the first fragment to access the
76 * header, this is the typical path.
77 */
78 sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
79 if (sg_miter_next(&miter) &&
80 miter.length >= mgr->mops->initial_header_size) {
81 ret = fpga_mgr_write_init_buf(mgr, info, miter.addr,
82 miter.length);
83 sg_miter_stop(&miter);
84 return ret;
85 }
86 sg_miter_stop(&miter);
87
88 /* Otherwise copy the fragments into temporary memory. */
89 buf = kmalloc(mgr->mops->initial_header_size, GFP_KERNEL);
90 if (!buf)
91 return -ENOMEM;
92
93 len = sg_copy_to_buffer(sgt->sgl, sgt->nents, buf,
94 mgr->mops->initial_header_size);
95 ret = fpga_mgr_write_init_buf(mgr, info, buf, len);
96
97 kfree(buf);
98
99 return ret;
100}
101
102/*
103 * After all the FPGA image has been written, do the device specific steps to
104 * finish and set the FPGA into operating mode.
105 */
106static int fpga_mgr_write_complete(struct fpga_manager *mgr,
107 struct fpga_image_info *info)
108{
109 int ret;
110
111 mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
112 ret = mgr->mops->write_complete(mgr, info);
113 if (ret) {
114 dev_err(&mgr->dev, "Error after writing image data to FPGA\n");
115 mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
116 return ret;
117 }
118 mgr->state = FPGA_MGR_STATE_OPERATING;
119
120 return 0;
121}
122
32/** 123/**
33 * fpga_mgr_buf_load - load fpga from image in buffer 124 * fpga_mgr_buf_load_sg - load fpga from image in buffer from a scatter list
34 * @mgr: fpga manager 125 * @mgr: fpga manager
35 * @info: fpga image specific information 126 * @info: fpga image specific information
36 * @buf: buffer contain fpga image 127 * @sgt: scatterlist table
37 * @count: byte count of buf
38 * 128 *
39 * Step the low level fpga manager through the device-specific steps of getting 129 * Step the low level fpga manager through the device-specific steps of getting
40 * an FPGA ready to be configured, writing the image to it, then doing whatever 130 * an FPGA ready to be configured, writing the image to it, then doing whatever
@@ -42,54 +132,139 @@ static struct class *fpga_mgr_class;
42 * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is 132 * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is
43 * not an error code. 133 * not an error code.
44 * 134 *
135 * This is the preferred entry point for FPGA programming, it does not require
136 * any contiguous kernel memory.
137 *
45 * Return: 0 on success, negative error code otherwise. 138 * Return: 0 on success, negative error code otherwise.
46 */ 139 */
47int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, 140int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
48 const char *buf, size_t count) 141 struct sg_table *sgt)
49{ 142{
50 struct device *dev = &mgr->dev;
51 int ret; 143 int ret;
52 144
53 /* 145 ret = fpga_mgr_write_init_sg(mgr, info, sgt);
54 * Call the low level driver's write_init function. This will do the 146 if (ret)
55 * device-specific things to get the FPGA into the state where it is 147 return ret;
56 * ready to receive an FPGA image. The low level driver only gets to 148
57 * see the first initial_header_size bytes in the buffer. 149 /* Write the FPGA image to the FPGA. */
58 */ 150 mgr->state = FPGA_MGR_STATE_WRITE;
59 mgr->state = FPGA_MGR_STATE_WRITE_INIT; 151 if (mgr->mops->write_sg) {
60 ret = mgr->mops->write_init(mgr, info, buf, 152 ret = mgr->mops->write_sg(mgr, sgt);
61 min(mgr->mops->initial_header_size, count)); 153 } else {
154 struct sg_mapping_iter miter;
155
156 sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
157 while (sg_miter_next(&miter)) {
158 ret = mgr->mops->write(mgr, miter.addr, miter.length);
159 if (ret)
160 break;
161 }
162 sg_miter_stop(&miter);
163 }
164
62 if (ret) { 165 if (ret) {
63 dev_err(dev, "Error preparing FPGA for writing\n"); 166 dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
64 mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR; 167 mgr->state = FPGA_MGR_STATE_WRITE_ERR;
65 return ret; 168 return ret;
66 } 169 }
67 170
171 return fpga_mgr_write_complete(mgr, info);
172}
173EXPORT_SYMBOL_GPL(fpga_mgr_buf_load_sg);
174
175static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
176 struct fpga_image_info *info,
177 const char *buf, size_t count)
178{
179 int ret;
180
181 ret = fpga_mgr_write_init_buf(mgr, info, buf, count);
182 if (ret)
183 return ret;
184
68 /* 185 /*
69 * Write the FPGA image to the FPGA. 186 * Write the FPGA image to the FPGA.
70 */ 187 */
71 mgr->state = FPGA_MGR_STATE_WRITE; 188 mgr->state = FPGA_MGR_STATE_WRITE;
72 ret = mgr->mops->write(mgr, buf, count); 189 ret = mgr->mops->write(mgr, buf, count);
73 if (ret) { 190 if (ret) {
74 dev_err(dev, "Error while writing image data to FPGA\n"); 191 dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
75 mgr->state = FPGA_MGR_STATE_WRITE_ERR; 192 mgr->state = FPGA_MGR_STATE_WRITE_ERR;
76 return ret; 193 return ret;
77 } 194 }
78 195
196 return fpga_mgr_write_complete(mgr, info);
197}
198
199/**
200 * fpga_mgr_buf_load - load fpga from image in buffer
201 * @mgr: fpga manager
202 * @flags: flags setting fpga confuration modes
203 * @buf: buffer contain fpga image
204 * @count: byte count of buf
205 *
206 * Step the low level fpga manager through the device-specific steps of getting
207 * an FPGA ready to be configured, writing the image to it, then doing whatever
208 * post-configuration steps necessary. This code assumes the caller got the
209 * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
210 *
211 * Return: 0 on success, negative error code otherwise.
212 */
213int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
214 const char *buf, size_t count)
215{
216 struct page **pages;
217 struct sg_table sgt;
218 const void *p;
219 int nr_pages;
220 int index;
221 int rc;
222
79 /* 223 /*
80 * After all the FPGA image has been written, do the device specific 224 * This is just a fast path if the caller has already created a
81 * steps to finish and set the FPGA into operating mode. 225 * contiguous kernel buffer and the driver doesn't require SG, non-SG
226 * drivers will still work on the slow path.
82 */ 227 */
83 mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE; 228 if (mgr->mops->write)
84 ret = mgr->mops->write_complete(mgr, info); 229 return fpga_mgr_buf_load_mapped(mgr, info, buf, count);
85 if (ret) { 230
86 dev_err(dev, "Error after writing image data to FPGA\n"); 231 /*
87 mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR; 232 * Convert the linear kernel pointer into a sg_table of pages for use
88 return ret; 233 * by the driver.
234 */
235 nr_pages = DIV_ROUND_UP((unsigned long)buf + count, PAGE_SIZE) -
236 (unsigned long)buf / PAGE_SIZE;
237 pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
238 if (!pages)
239 return -ENOMEM;
240
241 p = buf - offset_in_page(buf);
242 for (index = 0; index < nr_pages; index++) {
243 if (is_vmalloc_addr(p))
244 pages[index] = vmalloc_to_page(p);
245 else
246 pages[index] = kmap_to_page((void *)p);
247 if (!pages[index]) {
248 kfree(pages);
249 return -EFAULT;
250 }
251 p += PAGE_SIZE;
89 } 252 }
90 mgr->state = FPGA_MGR_STATE_OPERATING;
91 253
92 return 0; 254 /*
255 * The temporary pages list is used to code share the merging algorithm
256 * in sg_alloc_table_from_pages
257 */
258 rc = sg_alloc_table_from_pages(&sgt, pages, index, offset_in_page(buf),
259 count, GFP_KERNEL);
260 kfree(pages);
261 if (rc)
262 return rc;
263
264 rc = fpga_mgr_buf_load_sg(mgr, info, &sgt);
265 sg_free_table(&sgt);
266
267 return rc;
93} 268}
94EXPORT_SYMBOL_GPL(fpga_mgr_buf_load); 269EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
95 270
@@ -291,8 +466,9 @@ int fpga_mgr_register(struct device *dev, const char *name,
291 struct fpga_manager *mgr; 466 struct fpga_manager *mgr;
292 int id, ret; 467 int id, ret;
293 468
294 if (!mops || !mops->write_init || !mops->write || 469 if (!mops || !mops->write_complete || !mops->state ||
295 !mops->write_complete || !mops->state) { 470 !mops->write_init || (!mops->write && !mops->write_sg) ||
471 (mops->write && mops->write_sg)) {
296 dev_err(dev, "Attempt to register without fpga_manager_ops\n"); 472 dev_err(dev, "Attempt to register without fpga_manager_ops\n");
297 return -EINVAL; 473 return -EINVAL;
298 } 474 }
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 1812bf7614e1..34cb98139442 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -30,6 +30,7 @@
30#include <linux/pm.h> 30#include <linux/pm.h>
31#include <linux/regmap.h> 31#include <linux/regmap.h>
32#include <linux/string.h> 32#include <linux/string.h>
33#include <linux/scatterlist.h>
33 34
34/* Offsets into SLCR regmap */ 35/* Offsets into SLCR regmap */
35 36
@@ -80,6 +81,7 @@
80 81
81/* FPGA init status */ 82/* FPGA init status */
82#define STATUS_DMA_Q_F BIT(31) 83#define STATUS_DMA_Q_F BIT(31)
84#define STATUS_DMA_Q_E BIT(30)
83#define STATUS_PCFG_INIT_MASK BIT(4) 85#define STATUS_PCFG_INIT_MASK BIT(4)
84 86
85/* Interrupt Status/Mask Register Bit definitions */ 87/* Interrupt Status/Mask Register Bit definitions */
@@ -89,7 +91,7 @@
89#define IXR_D_P_DONE_MASK BIT(12) 91#define IXR_D_P_DONE_MASK BIT(12)
90 /* FPGA programmed */ 92 /* FPGA programmed */
91#define IXR_PCFG_DONE_MASK BIT(2) 93#define IXR_PCFG_DONE_MASK BIT(2)
92#define IXR_ERROR_FLAGS_MASK 0x00F0F860 94#define IXR_ERROR_FLAGS_MASK 0x00F0C860
93#define IXR_ALL_MASK 0xF8F7F87F 95#define IXR_ALL_MASK 0xF8F7F87F
94 96
95/* Miscellaneous constant values */ 97/* Miscellaneous constant values */
@@ -98,12 +100,16 @@
98#define DMA_INVALID_ADDRESS GENMASK(31, 0) 100#define DMA_INVALID_ADDRESS GENMASK(31, 0)
99/* Used to unlock the dev */ 101/* Used to unlock the dev */
100#define UNLOCK_MASK 0x757bdf0d 102#define UNLOCK_MASK 0x757bdf0d
101/* Timeout for DMA to complete */
102#define DMA_DONE_TIMEOUT msecs_to_jiffies(1000)
103/* Timeout for polling reset bits */ 103/* Timeout for polling reset bits */
104#define INIT_POLL_TIMEOUT 2500000 104#define INIT_POLL_TIMEOUT 2500000
105/* Delay for polling reset bits */ 105/* Delay for polling reset bits */
106#define INIT_POLL_DELAY 20 106#define INIT_POLL_DELAY 20
107/* Signal this is the last DMA transfer, wait for the AXI and PCAP before
108 * interrupting
109 */
110#define DMA_SRC_LAST_TRANSFER 1
111/* Timeout for DMA completion */
112#define DMA_TIMEOUT_MS 5000
107 113
108/* Masks for controlling stuff in SLCR */ 114/* Masks for controlling stuff in SLCR */
109/* Disable all Level shifters */ 115/* Disable all Level shifters */
@@ -124,6 +130,11 @@ struct zynq_fpga_priv {
124 void __iomem *io_base; 130 void __iomem *io_base;
125 struct regmap *slcr; 131 struct regmap *slcr;
126 132
133 spinlock_t dma_lock;
134 unsigned int dma_elm;
135 unsigned int dma_nelms;
136 struct scatterlist *cur_sg;
137
127 struct completion dma_done; 138 struct completion dma_done;
128}; 139};
129 140
@@ -143,37 +154,104 @@ static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
143 readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \ 154 readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
144 timeout_us) 155 timeout_us)
145 156
146static void zynq_fpga_mask_irqs(struct zynq_fpga_priv *priv) 157/* Cause the specified irq mask bits to generate IRQs */
158static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
147{ 159{
148 u32 intr_mask; 160 zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
149
150 intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET);
151 zynq_fpga_write(priv, INT_MASK_OFFSET,
152 intr_mask | IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
153} 161}
154 162
155static void zynq_fpga_unmask_irqs(struct zynq_fpga_priv *priv) 163/* Must be called with dma_lock held */
164static void zynq_step_dma(struct zynq_fpga_priv *priv)
156{ 165{
157 u32 intr_mask; 166 u32 addr;
167 u32 len;
168 bool first;
169
170 first = priv->dma_elm == 0;
171 while (priv->cur_sg) {
172 /* Feed the DMA queue until it is full. */
173 if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
174 break;
175
176 addr = sg_dma_address(priv->cur_sg);
177 len = sg_dma_len(priv->cur_sg);
178 if (priv->dma_elm + 1 == priv->dma_nelms) {
179 /* The last transfer waits for the PCAP to finish too,
180 * notice this also changes the irq_mask to ignore
181 * IXR_DMA_DONE_MASK which ensures we do not trigger
182 * the completion too early.
183 */
184 addr |= DMA_SRC_LAST_TRANSFER;
185 priv->cur_sg = NULL;
186 } else {
187 priv->cur_sg = sg_next(priv->cur_sg);
188 priv->dma_elm++;
189 }
158 190
159 intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET); 191 zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
160 zynq_fpga_write(priv, INT_MASK_OFFSET, 192 zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
161 intr_mask 193 zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
162 & ~(IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK)); 194 zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
195 }
196
197 /* Once the first transfer is queued we can turn on the ISR, future
198 * calls to zynq_step_dma will happen from the ISR context. The
199 * dma_lock spinlock guarentees this handover is done coherently, the
200 * ISR enable is put at the end to avoid another CPU spinning in the
201 * ISR on this lock.
202 */
203 if (first && priv->cur_sg) {
204 zynq_fpga_set_irq(priv,
205 IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
206 } else if (!priv->cur_sg) {
207 /* The last transfer changes to DMA & PCAP mode since we do
208 * not want to continue until everything has been flushed into
209 * the PCAP.
210 */
211 zynq_fpga_set_irq(priv,
212 IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
213 }
163} 214}
164 215
165static irqreturn_t zynq_fpga_isr(int irq, void *data) 216static irqreturn_t zynq_fpga_isr(int irq, void *data)
166{ 217{
167 struct zynq_fpga_priv *priv = data; 218 struct zynq_fpga_priv *priv = data;
219 u32 intr_status;
168 220
169 /* disable DMA and error IRQs */ 221 /* If anything other than DMA completion is reported stop and hand
170 zynq_fpga_mask_irqs(priv); 222 * control back to zynq_fpga_ops_write, something went wrong,
223 * otherwise progress the DMA.
224 */
225 spin_lock(&priv->dma_lock);
226 intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
227 if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
228 (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
229 zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
230 zynq_step_dma(priv);
231 spin_unlock(&priv->dma_lock);
232 return IRQ_HANDLED;
233 }
234 spin_unlock(&priv->dma_lock);
171 235
236 zynq_fpga_set_irq(priv, 0);
172 complete(&priv->dma_done); 237 complete(&priv->dma_done);
173 238
174 return IRQ_HANDLED; 239 return IRQ_HANDLED;
175} 240}
176 241
242/* Sanity check the proposed bitstream. It must start with the sync word in
243 * the correct byte order, and be dword aligned. The input is a Xilinx .bin
244 * file with every 32 bit quantity swapped.
245 */
246static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
247{
248 for (; count >= 4; buf += 4, count -= 4)
249 if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
250 buf[3] == 0xaa)
251 return true;
252 return false;
253}
254
177static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, 255static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
178 struct fpga_image_info *info, 256 struct fpga_image_info *info,
179 const char *buf, size_t count) 257 const char *buf, size_t count)
@@ -190,6 +268,13 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
190 268
191 /* don't globally reset PL if we're doing partial reconfig */ 269 /* don't globally reset PL if we're doing partial reconfig */
192 if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) { 270 if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
271 if (!zynq_fpga_has_sync(buf, count)) {
272 dev_err(&mgr->dev,
273 "Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
274 err = -EINVAL;
275 goto out_err;
276 }
277
193 /* assert AXI interface resets */ 278 /* assert AXI interface resets */
194 regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET, 279 regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
195 FPGA_RST_ALL_MASK); 280 FPGA_RST_ALL_MASK);
@@ -259,10 +344,11 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
259 zynq_fpga_write(priv, CTRL_OFFSET, 344 zynq_fpga_write(priv, CTRL_OFFSET,
260 (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl)); 345 (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl));
261 346
262 /* check that we have room in the command queue */ 347 /* We expect that the command queue is empty right now. */
263 status = zynq_fpga_read(priv, STATUS_OFFSET); 348 status = zynq_fpga_read(priv, STATUS_OFFSET);
264 if (status & STATUS_DMA_Q_F) { 349 if ((status & STATUS_DMA_Q_F) ||
265 dev_err(&mgr->dev, "DMA command queue full\n"); 350 (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
351 dev_err(&mgr->dev, "DMA command queue not right\n");
266 err = -EBUSY; 352 err = -EBUSY;
267 goto out_err; 353 goto out_err;
268 } 354 }
@@ -281,26 +367,36 @@ out_err:
281 return err; 367 return err;
282} 368}
283 369
284static int zynq_fpga_ops_write(struct fpga_manager *mgr, 370static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
285 const char *buf, size_t count)
286{ 371{
287 struct zynq_fpga_priv *priv; 372 struct zynq_fpga_priv *priv;
373 const char *why;
288 int err; 374 int err;
289 char *kbuf;
290 size_t in_count;
291 dma_addr_t dma_addr;
292 u32 transfer_length;
293 u32 intr_status; 375 u32 intr_status;
376 unsigned long timeout;
377 unsigned long flags;
378 struct scatterlist *sg;
379 int i;
294 380
295 in_count = count;
296 priv = mgr->priv; 381 priv = mgr->priv;
297 382
298 kbuf = 383 /* The hardware can only DMA multiples of 4 bytes, and it requires the
299 dma_alloc_coherent(mgr->dev.parent, count, &dma_addr, GFP_KERNEL); 384 * starting addresses to be aligned to 64 bits (UG585 pg 212).
300 if (!kbuf) 385 */
301 return -ENOMEM; 386 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
387 if ((sg->offset % 8) || (sg->length % 4)) {
388 dev_err(&mgr->dev,
389 "Invalid bitstream, chunks must be aligned\n");
390 return -EINVAL;
391 }
392 }
302 393
303 memcpy(kbuf, buf, count); 394 priv->dma_nelms =
395 dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
396 if (priv->dma_nelms == 0) {
397 dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
398 return -ENOMEM;
399 }
304 400
305 /* enable clock */ 401 /* enable clock */
306 err = clk_enable(priv->clk); 402 err = clk_enable(priv->clk);
@@ -308,38 +404,67 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
308 goto out_free; 404 goto out_free;
309 405
310 zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK); 406 zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
311
312 reinit_completion(&priv->dma_done); 407 reinit_completion(&priv->dma_done);
313 408
314 /* enable DMA and error IRQs */ 409 /* zynq_step_dma will turn on interrupts */
315 zynq_fpga_unmask_irqs(priv); 410 spin_lock_irqsave(&priv->dma_lock, flags);
411 priv->dma_elm = 0;
412 priv->cur_sg = sgt->sgl;
413 zynq_step_dma(priv);
414 spin_unlock_irqrestore(&priv->dma_lock, flags);
316 415
317 /* the +1 in the src addr is used to hold off on DMA_DONE IRQ 416 timeout = wait_for_completion_timeout(&priv->dma_done,
318 * until both AXI and PCAP are done ... 417 msecs_to_jiffies(DMA_TIMEOUT_MS));
319 */
320 zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, (u32)(dma_addr) + 1);
321 zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, (u32)DMA_INVALID_ADDRESS);
322 418
323 /* convert #bytes to #words */ 419 spin_lock_irqsave(&priv->dma_lock, flags);
324 transfer_length = (count + 3) / 4; 420 zynq_fpga_set_irq(priv, 0);
421 priv->cur_sg = NULL;
422 spin_unlock_irqrestore(&priv->dma_lock, flags);
325 423
326 zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, transfer_length); 424 intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
327 zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0); 425 zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
328 426
329 wait_for_completion(&priv->dma_done); 427 /* There doesn't seem to be a way to force cancel any DMA, so if
428 * something went wrong we are relying on the hardware to have halted
429 * the DMA before we get here, if there was we could use
430 * wait_for_completion_interruptible too.
431 */
330 432
331 intr_status = zynq_fpga_read(priv, INT_STS_OFFSET); 433 if (intr_status & IXR_ERROR_FLAGS_MASK) {
332 zynq_fpga_write(priv, INT_STS_OFFSET, intr_status); 434 why = "DMA reported error";
435 err = -EIO;
436 goto out_report;
437 }
333 438
334 if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) { 439 if (priv->cur_sg ||
335 dev_err(&mgr->dev, "Error configuring FPGA\n"); 440 !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
336 err = -EFAULT; 441 if (timeout == 0)
442 why = "DMA timed out";
443 else
444 why = "DMA did not complete";
445 err = -EIO;
446 goto out_report;
337 } 447 }
338 448
449 err = 0;
450 goto out_clk;
451
452out_report:
453 dev_err(&mgr->dev,
454 "%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
455 why,
456 intr_status,
457 zynq_fpga_read(priv, CTRL_OFFSET),
458 zynq_fpga_read(priv, LOCK_OFFSET),
459 zynq_fpga_read(priv, INT_MASK_OFFSET),
460 zynq_fpga_read(priv, STATUS_OFFSET),
461 zynq_fpga_read(priv, MCTRL_OFFSET));
462
463out_clk:
339 clk_disable(priv->clk); 464 clk_disable(priv->clk);
340 465
341out_free: 466out_free:
342 dma_free_coherent(mgr->dev.parent, count, kbuf, dma_addr); 467 dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
343 return err; 468 return err;
344} 469}
345 470
@@ -400,9 +525,10 @@ static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
400} 525}
401 526
402static const struct fpga_manager_ops zynq_fpga_ops = { 527static const struct fpga_manager_ops zynq_fpga_ops = {
528 .initial_header_size = 128,
403 .state = zynq_fpga_ops_state, 529 .state = zynq_fpga_ops_state,
404 .write_init = zynq_fpga_ops_write_init, 530 .write_init = zynq_fpga_ops_write_init,
405 .write = zynq_fpga_ops_write, 531 .write_sg = zynq_fpga_ops_write,
406 .write_complete = zynq_fpga_ops_write_complete, 532 .write_complete = zynq_fpga_ops_write_complete,
407}; 533};
408 534
@@ -416,6 +542,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
416 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 542 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
417 if (!priv) 543 if (!priv)
418 return -ENOMEM; 544 return -ENOMEM;
545 spin_lock_init(&priv->dma_lock);
419 546
420 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 547 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
421 priv->io_base = devm_ioremap_resource(dev, res); 548 priv->io_base = devm_ioremap_resource(dev, res);
@@ -452,7 +579,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
452 /* unlock the device */ 579 /* unlock the device */
453 zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK); 580 zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
454 581
455 zynq_fpga_write(priv, INT_MASK_OFFSET, 0xFFFFFFFF); 582 zynq_fpga_set_irq(priv, 0);
456 zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK); 583 zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
457 err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev), 584 err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
458 priv); 585 priv);
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
new file mode 100644
index 000000000000..04c1a0efa7a7
--- /dev/null
+++ b/drivers/fsi/Kconfig
@@ -0,0 +1,12 @@
1#
2# FSI subsystem
3#
4
5menu "FSI support"
6
7config FSI
8 tristate "FSI support"
9 ---help---
10 FSI - the FRU Support Interface - is a simple bus for low-level
11 access to POWER-based hardware.
12endmenu
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
new file mode 100644
index 000000000000..db0e5e7c1655
--- /dev/null
+++ b/drivers/fsi/Makefile
@@ -0,0 +1,2 @@
1
2obj-$(CONFIG_FSI) += fsi-core.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
new file mode 100644
index 000000000000..3d55bd547178
--- /dev/null
+++ b/drivers/fsi/fsi-core.c
@@ -0,0 +1,59 @@
1/*
2 * FSI core driver
3 *
4 * Copyright (C) IBM Corporation 2016
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/device.h>
17#include <linux/fsi.h>
18#include <linux/module.h>
19
20/* FSI core & Linux bus type definitions */
21
22static int fsi_bus_match(struct device *dev, struct device_driver *drv)
23{
24 struct fsi_device *fsi_dev = to_fsi_dev(dev);
25 struct fsi_driver *fsi_drv = to_fsi_drv(drv);
26 const struct fsi_device_id *id;
27
28 if (!fsi_drv->id_table)
29 return 0;
30
31 for (id = fsi_drv->id_table; id->engine_type; id++) {
32 if (id->engine_type != fsi_dev->engine_type)
33 continue;
34 if (id->version == FSI_VERSION_ANY ||
35 id->version == fsi_dev->version)
36 return 1;
37 }
38
39 return 0;
40}
41
42struct bus_type fsi_bus_type = {
43 .name = "fsi",
44 .match = fsi_bus_match,
45};
46EXPORT_SYMBOL_GPL(fsi_bus_type);
47
48static int fsi_init(void)
49{
50 return bus_register(&fsi_bus_type);
51}
52
53static void fsi_exit(void)
54{
55 bus_unregister(&fsi_bus_type);
56}
57
58module_init(fsi_init);
59module_exit(fsi_exit);
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 5fb4c6d9209b..81a80c82f1bd 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -47,12 +47,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
47 * For channels marked as in "low latency" mode 47 * For channels marked as in "low latency" mode
48 * bypass the monitor page mechanism. 48 * bypass the monitor page mechanism.
49 */ 49 */
50 if ((channel->offermsg.monitor_allocated) && 50 if (channel->offermsg.monitor_allocated && !channel->low_latency) {
51 (!channel->low_latency)) { 51 vmbus_send_interrupt(channel->offermsg.child_relid);
52 /* Each u32 represents 32 channels */
53 sync_set_bit(channel->offermsg.child_relid & 31,
54 (unsigned long *) vmbus_connection.send_int_page +
55 (channel->offermsg.child_relid >> 5));
56 52
57 /* Get the child to parent monitor page */ 53 /* Get the child to parent monitor page */
58 monitorpage = vmbus_connection.monitor_pages[1]; 54 monitorpage = vmbus_connection.monitor_pages[1];
@@ -157,6 +153,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
157 } 153 }
158 154
159 init_completion(&open_info->waitevent); 155 init_completion(&open_info->waitevent);
156 open_info->waiting_channel = newchannel;
160 157
161 open_msg = (struct vmbus_channel_open_channel *)open_info->msg; 158 open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
162 open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL; 159 open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
@@ -181,7 +178,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
181 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 178 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
182 179
183 ret = vmbus_post_msg(open_msg, 180 ret = vmbus_post_msg(open_msg,
184 sizeof(struct vmbus_channel_open_channel)); 181 sizeof(struct vmbus_channel_open_channel), true);
185 182
186 if (ret != 0) { 183 if (ret != 0) {
187 err = ret; 184 err = ret;
@@ -194,6 +191,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
194 list_del(&open_info->msglistentry); 191 list_del(&open_info->msglistentry);
195 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 192 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
196 193
194 if (newchannel->rescind) {
195 err = -ENODEV;
196 goto error_free_gpadl;
197 }
198
197 if (open_info->response.open_result.status) { 199 if (open_info->response.open_result.status) {
198 err = -EAGAIN; 200 err = -EAGAIN;
199 goto error_free_gpadl; 201 goto error_free_gpadl;
@@ -233,7 +235,7 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
233 conn_msg.guest_endpoint_id = *shv_guest_servie_id; 235 conn_msg.guest_endpoint_id = *shv_guest_servie_id;
234 conn_msg.host_service_id = *shv_host_servie_id; 236 conn_msg.host_service_id = *shv_host_servie_id;
235 237
236 return vmbus_post_msg(&conn_msg, sizeof(conn_msg)); 238 return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
237} 239}
238EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request); 240EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
239 241
@@ -405,6 +407,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
405 return ret; 407 return ret;
406 408
407 init_completion(&msginfo->waitevent); 409 init_completion(&msginfo->waitevent);
410 msginfo->waiting_channel = channel;
408 411
409 gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg; 412 gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
410 gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER; 413 gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
@@ -419,7 +422,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
419 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 422 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
420 423
421 ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize - 424 ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
422 sizeof(*msginfo)); 425 sizeof(*msginfo), true);
423 if (ret != 0) 426 if (ret != 0)
424 goto cleanup; 427 goto cleanup;
425 428
@@ -433,14 +436,19 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
433 gpadl_body->gpadl = next_gpadl_handle; 436 gpadl_body->gpadl = next_gpadl_handle;
434 437
435 ret = vmbus_post_msg(gpadl_body, 438 ret = vmbus_post_msg(gpadl_body,
436 submsginfo->msgsize - 439 submsginfo->msgsize - sizeof(*submsginfo),
437 sizeof(*submsginfo)); 440 true);
438 if (ret != 0) 441 if (ret != 0)
439 goto cleanup; 442 goto cleanup;
440 443
441 } 444 }
442 wait_for_completion(&msginfo->waitevent); 445 wait_for_completion(&msginfo->waitevent);
443 446
447 if (channel->rescind) {
448 ret = -ENODEV;
449 goto cleanup;
450 }
451
444 /* At this point, we received the gpadl created msg */ 452 /* At this point, we received the gpadl created msg */
445 *gpadl_handle = gpadlmsg->gpadl; 453 *gpadl_handle = gpadlmsg->gpadl;
446 454
@@ -474,6 +482,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
474 return -ENOMEM; 482 return -ENOMEM;
475 483
476 init_completion(&info->waitevent); 484 init_completion(&info->waitevent);
485 info->waiting_channel = channel;
477 486
478 msg = (struct vmbus_channel_gpadl_teardown *)info->msg; 487 msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
479 488
@@ -485,14 +494,19 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
485 list_add_tail(&info->msglistentry, 494 list_add_tail(&info->msglistentry,
486 &vmbus_connection.chn_msg_list); 495 &vmbus_connection.chn_msg_list);
487 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 496 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
488 ret = vmbus_post_msg(msg, 497 ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
489 sizeof(struct vmbus_channel_gpadl_teardown)); 498 true);
490 499
491 if (ret) 500 if (ret)
492 goto post_msg_err; 501 goto post_msg_err;
493 502
494 wait_for_completion(&info->waitevent); 503 wait_for_completion(&info->waitevent);
495 504
505 if (channel->rescind) {
506 ret = -ENODEV;
507 goto post_msg_err;
508 }
509
496post_msg_err: 510post_msg_err:
497 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 511 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
498 list_del(&info->msglistentry); 512 list_del(&info->msglistentry);
@@ -516,7 +530,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
516 int ret; 530 int ret;
517 531
518 /* 532 /*
519 * process_chn_event(), running in the tasklet, can race 533 * vmbus_on_event(), running in the tasklet, can race
520 * with vmbus_close_internal() in the case of SMP guest, e.g., when 534 * with vmbus_close_internal() in the case of SMP guest, e.g., when
521 * the former is accessing channel->inbound.ring_buffer, the latter 535 * the former is accessing channel->inbound.ring_buffer, the latter
522 * could be freeing the ring_buffer pages. 536 * could be freeing the ring_buffer pages.
@@ -557,7 +571,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
557 msg->header.msgtype = CHANNELMSG_CLOSECHANNEL; 571 msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
558 msg->child_relid = channel->offermsg.child_relid; 572 msg->child_relid = channel->offermsg.child_relid;
559 573
560 ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel)); 574 ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
575 true);
561 576
562 if (ret) { 577 if (ret) {
563 pr_err("Close failed: close post msg return is %d\n", ret); 578 pr_err("Close failed: close post msg return is %d\n", ret);
@@ -628,15 +643,14 @@ void vmbus_close(struct vmbus_channel *channel)
628EXPORT_SYMBOL_GPL(vmbus_close); 643EXPORT_SYMBOL_GPL(vmbus_close);
629 644
630int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, 645int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
631 u32 bufferlen, u64 requestid, 646 u32 bufferlen, u64 requestid,
632 enum vmbus_packet_type type, u32 flags, bool kick_q) 647 enum vmbus_packet_type type, u32 flags)
633{ 648{
634 struct vmpacket_descriptor desc; 649 struct vmpacket_descriptor desc;
635 u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen; 650 u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
636 u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64)); 651 u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
637 struct kvec bufferlist[3]; 652 struct kvec bufferlist[3];
638 u64 aligned_data = 0; 653 u64 aligned_data = 0;
639 bool lock = channel->acquire_ring_lock;
640 int num_vecs = ((bufferlen != 0) ? 3 : 1); 654 int num_vecs = ((bufferlen != 0) ? 3 : 1);
641 655
642 656
@@ -655,9 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
655 bufferlist[2].iov_base = &aligned_data; 669 bufferlist[2].iov_base = &aligned_data;
656 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 670 bufferlist[2].iov_len = (packetlen_aligned - packetlen);
657 671
658 return hv_ringbuffer_write(channel, bufferlist, num_vecs, 672 return hv_ringbuffer_write(channel, bufferlist, num_vecs);
659 lock, kick_q);
660
661} 673}
662EXPORT_SYMBOL(vmbus_sendpacket_ctl); 674EXPORT_SYMBOL(vmbus_sendpacket_ctl);
663 675
@@ -680,7 +692,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
680 enum vmbus_packet_type type, u32 flags) 692 enum vmbus_packet_type type, u32 flags)
681{ 693{
682 return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid, 694 return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
683 type, flags, true); 695 type, flags);
684} 696}
685EXPORT_SYMBOL(vmbus_sendpacket); 697EXPORT_SYMBOL(vmbus_sendpacket);
686 698
@@ -692,11 +704,9 @@ EXPORT_SYMBOL(vmbus_sendpacket);
692 * explicitly. 704 * explicitly.
693 */ 705 */
694int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, 706int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
695 struct hv_page_buffer pagebuffers[], 707 struct hv_page_buffer pagebuffers[],
696 u32 pagecount, void *buffer, u32 bufferlen, 708 u32 pagecount, void *buffer, u32 bufferlen,
697 u64 requestid, 709 u64 requestid, u32 flags)
698 u32 flags,
699 bool kick_q)
700{ 710{
701 int i; 711 int i;
702 struct vmbus_channel_packet_page_buffer desc; 712 struct vmbus_channel_packet_page_buffer desc;
@@ -705,12 +715,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
705 u32 packetlen_aligned; 715 u32 packetlen_aligned;
706 struct kvec bufferlist[3]; 716 struct kvec bufferlist[3];
707 u64 aligned_data = 0; 717 u64 aligned_data = 0;
708 bool lock = channel->acquire_ring_lock;
709 718
710 if (pagecount > MAX_PAGE_BUFFER_COUNT) 719 if (pagecount > MAX_PAGE_BUFFER_COUNT)
711 return -EINVAL; 720 return -EINVAL;
712 721
713
714 /* 722 /*
715 * Adjust the size down since vmbus_channel_packet_page_buffer is the 723 * Adjust the size down since vmbus_channel_packet_page_buffer is the
716 * largest size we support 724 * largest size we support
@@ -742,8 +750,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
742 bufferlist[2].iov_base = &aligned_data; 750 bufferlist[2].iov_base = &aligned_data;
743 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 751 bufferlist[2].iov_len = (packetlen_aligned - packetlen);
744 752
745 return hv_ringbuffer_write(channel, bufferlist, 3, 753 return hv_ringbuffer_write(channel, bufferlist, 3);
746 lock, kick_q);
747} 754}
748EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl); 755EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
749 756
@@ -757,9 +764,10 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
757 u64 requestid) 764 u64 requestid)
758{ 765{
759 u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; 766 u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
767
760 return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount, 768 return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
761 buffer, bufferlen, requestid, 769 buffer, bufferlen,
762 flags, true); 770 requestid, flags);
763 771
764} 772}
765EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer); 773EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
@@ -778,7 +786,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
778 u32 packetlen_aligned; 786 u32 packetlen_aligned;
779 struct kvec bufferlist[3]; 787 struct kvec bufferlist[3];
780 u64 aligned_data = 0; 788 u64 aligned_data = 0;
781 bool lock = channel->acquire_ring_lock;
782 789
783 packetlen = desc_size + bufferlen; 790 packetlen = desc_size + bufferlen;
784 packetlen_aligned = ALIGN(packetlen, sizeof(u64)); 791 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -798,8 +805,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
798 bufferlist[2].iov_base = &aligned_data; 805 bufferlist[2].iov_base = &aligned_data;
799 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 806 bufferlist[2].iov_len = (packetlen_aligned - packetlen);
800 807
801 return hv_ringbuffer_write(channel, bufferlist, 3, 808 return hv_ringbuffer_write(channel, bufferlist, 3);
802 lock, true);
803} 809}
804EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc); 810EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
805 811
@@ -817,7 +823,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
817 u32 packetlen_aligned; 823 u32 packetlen_aligned;
818 struct kvec bufferlist[3]; 824 struct kvec bufferlist[3];
819 u64 aligned_data = 0; 825 u64 aligned_data = 0;
820 bool lock = channel->acquire_ring_lock;
821 u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, 826 u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
822 multi_pagebuffer->len); 827 multi_pagebuffer->len);
823 828
@@ -856,8 +861,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
856 bufferlist[2].iov_base = &aligned_data; 861 bufferlist[2].iov_base = &aligned_data;
857 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 862 bufferlist[2].iov_len = (packetlen_aligned - packetlen);
858 863
859 return hv_ringbuffer_write(channel, bufferlist, 3, 864 return hv_ringbuffer_write(channel, bufferlist, 3);
860 lock, true);
861} 865}
862EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer); 866EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
863 867
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 26b419203f16..f33465d78a02 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -31,6 +31,7 @@
31#include <linux/completion.h> 31#include <linux/completion.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/hyperv.h> 33#include <linux/hyperv.h>
34#include <asm/mshyperv.h>
34 35
35#include "hyperv_vmbus.h" 36#include "hyperv_vmbus.h"
36 37
@@ -147,6 +148,29 @@ static const struct {
147 { HV_RDV_GUID }, 148 { HV_RDV_GUID },
148}; 149};
149 150
151/*
152 * The rescinded channel may be blocked waiting for a response from the host;
153 * take care of that.
154 */
155static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
156{
157 struct vmbus_channel_msginfo *msginfo;
158 unsigned long flags;
159
160
161 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
162
163 list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
164 msglistentry) {
165
166 if (msginfo->waiting_channel == channel) {
167 complete(&msginfo->waitevent);
168 break;
169 }
170 }
171 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
172}
173
150static bool is_unsupported_vmbus_devs(const uuid_le *guid) 174static bool is_unsupported_vmbus_devs(const uuid_le *guid)
151{ 175{
152 int i; 176 int i;
@@ -180,33 +204,34 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
180 * @buf: Raw buffer channel data 204 * @buf: Raw buffer channel data
181 * 205 *
182 * @icmsghdrp is of type &struct icmsg_hdr. 206 * @icmsghdrp is of type &struct icmsg_hdr.
183 * @negop is of type &struct icmsg_negotiate.
184 * Set up and fill in default negotiate response message. 207 * Set up and fill in default negotiate response message.
185 * 208 *
186 * The fw_version specifies the framework version that 209 * The fw_version and fw_vercnt specifies the framework version that
187 * we can support and srv_version specifies the service 210 * we can support.
188 * version we can support. 211 *
212 * The srv_version and srv_vercnt specifies the service
213 * versions we can support.
214 *
215 * Versions are given in decreasing order.
216 *
217 * nego_fw_version and nego_srv_version store the selected protocol versions.
189 * 218 *
190 * Mainly used by Hyper-V drivers. 219 * Mainly used by Hyper-V drivers.
191 */ 220 */
192bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, 221bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
193 struct icmsg_negotiate *negop, u8 *buf, 222 u8 *buf, const int *fw_version, int fw_vercnt,
194 int fw_version, int srv_version) 223 const int *srv_version, int srv_vercnt,
224 int *nego_fw_version, int *nego_srv_version)
195{ 225{
196 int icframe_major, icframe_minor; 226 int icframe_major, icframe_minor;
197 int icmsg_major, icmsg_minor; 227 int icmsg_major, icmsg_minor;
198 int fw_major, fw_minor; 228 int fw_major, fw_minor;
199 int srv_major, srv_minor; 229 int srv_major, srv_minor;
200 int i; 230 int i, j;
201 bool found_match = false; 231 bool found_match = false;
232 struct icmsg_negotiate *negop;
202 233
203 icmsghdrp->icmsgsize = 0x10; 234 icmsghdrp->icmsgsize = 0x10;
204 fw_major = (fw_version >> 16);
205 fw_minor = (fw_version & 0xFFFF);
206
207 srv_major = (srv_version >> 16);
208 srv_minor = (srv_version & 0xFFFF);
209
210 negop = (struct icmsg_negotiate *)&buf[ 235 negop = (struct icmsg_negotiate *)&buf[
211 sizeof(struct vmbuspipe_hdr) + 236 sizeof(struct vmbuspipe_hdr) +
212 sizeof(struct icmsg_hdr)]; 237 sizeof(struct icmsg_hdr)];
@@ -222,13 +247,22 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
222 * support. 247 * support.
223 */ 248 */
224 249
225 for (i = 0; i < negop->icframe_vercnt; i++) { 250 for (i = 0; i < fw_vercnt; i++) {
226 if ((negop->icversion_data[i].major == fw_major) && 251 fw_major = (fw_version[i] >> 16);
227 (negop->icversion_data[i].minor == fw_minor)) { 252 fw_minor = (fw_version[i] & 0xFFFF);
228 icframe_major = negop->icversion_data[i].major; 253
229 icframe_minor = negop->icversion_data[i].minor; 254 for (j = 0; j < negop->icframe_vercnt; j++) {
230 found_match = true; 255 if ((negop->icversion_data[j].major == fw_major) &&
256 (negop->icversion_data[j].minor == fw_minor)) {
257 icframe_major = negop->icversion_data[j].major;
258 icframe_minor = negop->icversion_data[j].minor;
259 found_match = true;
260 break;
261 }
231 } 262 }
263
264 if (found_match)
265 break;
232 } 266 }
233 267
234 if (!found_match) 268 if (!found_match)
@@ -236,14 +270,26 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
236 270
237 found_match = false; 271 found_match = false;
238 272
239 for (i = negop->icframe_vercnt; 273 for (i = 0; i < srv_vercnt; i++) {
240 (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) { 274 srv_major = (srv_version[i] >> 16);
241 if ((negop->icversion_data[i].major == srv_major) && 275 srv_minor = (srv_version[i] & 0xFFFF);
242 (negop->icversion_data[i].minor == srv_minor)) { 276
243 icmsg_major = negop->icversion_data[i].major; 277 for (j = negop->icframe_vercnt;
244 icmsg_minor = negop->icversion_data[i].minor; 278 (j < negop->icframe_vercnt + negop->icmsg_vercnt);
245 found_match = true; 279 j++) {
280
281 if ((negop->icversion_data[j].major == srv_major) &&
282 (negop->icversion_data[j].minor == srv_minor)) {
283
284 icmsg_major = negop->icversion_data[j].major;
285 icmsg_minor = negop->icversion_data[j].minor;
286 found_match = true;
287 break;
288 }
246 } 289 }
290
291 if (found_match)
292 break;
247 } 293 }
248 294
249 /* 295 /*
@@ -260,6 +306,12 @@ fw_error:
260 negop->icmsg_vercnt = 1; 306 negop->icmsg_vercnt = 1;
261 } 307 }
262 308
309 if (nego_fw_version)
310 *nego_fw_version = (icframe_major << 16) | icframe_minor;
311
312 if (nego_srv_version)
313 *nego_srv_version = (icmsg_major << 16) | icmsg_minor;
314
263 negop->icversion_data[0].major = icframe_major; 315 negop->icversion_data[0].major = icframe_major;
264 negop->icversion_data[0].minor = icframe_minor; 316 negop->icversion_data[0].minor = icframe_minor;
265 negop->icversion_data[1].major = icmsg_major; 317 negop->icversion_data[1].major = icmsg_major;
@@ -280,13 +332,15 @@ static struct vmbus_channel *alloc_channel(void)
280 if (!channel) 332 if (!channel)
281 return NULL; 333 return NULL;
282 334
283 channel->acquire_ring_lock = true;
284 spin_lock_init(&channel->inbound_lock); 335 spin_lock_init(&channel->inbound_lock);
285 spin_lock_init(&channel->lock); 336 spin_lock_init(&channel->lock);
286 337
287 INIT_LIST_HEAD(&channel->sc_list); 338 INIT_LIST_HEAD(&channel->sc_list);
288 INIT_LIST_HEAD(&channel->percpu_list); 339 INIT_LIST_HEAD(&channel->percpu_list);
289 340
341 tasklet_init(&channel->callback_event,
342 vmbus_on_event, (unsigned long)channel);
343
290 return channel; 344 return channel;
291} 345}
292 346
@@ -295,15 +349,17 @@ static struct vmbus_channel *alloc_channel(void)
295 */ 349 */
296static void free_channel(struct vmbus_channel *channel) 350static void free_channel(struct vmbus_channel *channel)
297{ 351{
352 tasklet_kill(&channel->callback_event);
298 kfree(channel); 353 kfree(channel);
299} 354}
300 355
301static void percpu_channel_enq(void *arg) 356static void percpu_channel_enq(void *arg)
302{ 357{
303 struct vmbus_channel *channel = arg; 358 struct vmbus_channel *channel = arg;
304 int cpu = smp_processor_id(); 359 struct hv_per_cpu_context *hv_cpu
360 = this_cpu_ptr(hv_context.cpu_context);
305 361
306 list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]); 362 list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
307} 363}
308 364
309static void percpu_channel_deq(void *arg) 365static void percpu_channel_deq(void *arg)
@@ -321,24 +377,21 @@ static void vmbus_release_relid(u32 relid)
321 memset(&msg, 0, sizeof(struct vmbus_channel_relid_released)); 377 memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
322 msg.child_relid = relid; 378 msg.child_relid = relid;
323 msg.header.msgtype = CHANNELMSG_RELID_RELEASED; 379 msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
324 vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); 380 vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
381 true);
325} 382}
326 383
327void hv_event_tasklet_disable(struct vmbus_channel *channel) 384void hv_event_tasklet_disable(struct vmbus_channel *channel)
328{ 385{
329 struct tasklet_struct *tasklet; 386 tasklet_disable(&channel->callback_event);
330 tasklet = hv_context.event_dpc[channel->target_cpu];
331 tasklet_disable(tasklet);
332} 387}
333 388
334void hv_event_tasklet_enable(struct vmbus_channel *channel) 389void hv_event_tasklet_enable(struct vmbus_channel *channel)
335{ 390{
336 struct tasklet_struct *tasklet; 391 tasklet_enable(&channel->callback_event);
337 tasklet = hv_context.event_dpc[channel->target_cpu];
338 tasklet_enable(tasklet);
339 392
340 /* In case there is any pending event */ 393 /* In case there is any pending event */
341 tasklet_schedule(tasklet); 394 tasklet_schedule(&channel->callback_event);
342} 395}
343 396
344void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 397void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
@@ -673,9 +726,12 @@ static void vmbus_wait_for_unload(void)
673 break; 726 break;
674 727
675 for_each_online_cpu(cpu) { 728 for_each_online_cpu(cpu) {
676 page_addr = hv_context.synic_message_page[cpu]; 729 struct hv_per_cpu_context *hv_cpu
677 msg = (struct hv_message *)page_addr + 730 = per_cpu_ptr(hv_context.cpu_context, cpu);
678 VMBUS_MESSAGE_SINT; 731
732 page_addr = hv_cpu->synic_message_page;
733 msg = (struct hv_message *)page_addr
734 + VMBUS_MESSAGE_SINT;
679 735
680 message_type = READ_ONCE(msg->header.message_type); 736 message_type = READ_ONCE(msg->header.message_type);
681 if (message_type == HVMSG_NONE) 737 if (message_type == HVMSG_NONE)
@@ -699,7 +755,10 @@ static void vmbus_wait_for_unload(void)
699 * messages after we reconnect. 755 * messages after we reconnect.
700 */ 756 */
701 for_each_online_cpu(cpu) { 757 for_each_online_cpu(cpu) {
702 page_addr = hv_context.synic_message_page[cpu]; 758 struct hv_per_cpu_context *hv_cpu
759 = per_cpu_ptr(hv_context.cpu_context, cpu);
760
761 page_addr = hv_cpu->synic_message_page;
703 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; 762 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
704 msg->header.message_type = HVMSG_NONE; 763 msg->header.message_type = HVMSG_NONE;
705 } 764 }
@@ -728,7 +787,8 @@ void vmbus_initiate_unload(bool crash)
728 init_completion(&vmbus_connection.unload_event); 787 init_completion(&vmbus_connection.unload_event);
729 memset(&hdr, 0, sizeof(struct vmbus_channel_message_header)); 788 memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
730 hdr.msgtype = CHANNELMSG_UNLOAD; 789 hdr.msgtype = CHANNELMSG_UNLOAD;
731 vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header)); 790 vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
791 !crash);
732 792
733 /* 793 /*
734 * vmbus_initiate_unload() is also called on crash and the crash can be 794 * vmbus_initiate_unload() is also called on crash and the crash can be
@@ -759,13 +819,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
759 } 819 }
760 820
761 /* 821 /*
762 * By default we setup state to enable batched
763 * reading. A specific service can choose to
764 * disable this prior to opening the channel.
765 */
766 newchannel->batched_reading = true;
767
768 /*
769 * Setup state for signalling the host. 822 * Setup state for signalling the host.
770 */ 823 */
771 newchannel->sig_event = (struct hv_input_signal_event *) 824 newchannel->sig_event = (struct hv_input_signal_event *)
@@ -823,6 +876,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
823 channel->rescind = true; 876 channel->rescind = true;
824 spin_unlock_irqrestore(&channel->lock, flags); 877 spin_unlock_irqrestore(&channel->lock, flags);
825 878
879 vmbus_rescind_cleanup(channel);
880
826 if (channel->device_obj) { 881 if (channel->device_obj) {
827 if (channel->chn_rescind_callback) { 882 if (channel->chn_rescind_callback) {
828 channel->chn_rescind_callback(channel); 883 channel->chn_rescind_callback(channel);
@@ -1116,8 +1171,8 @@ int vmbus_request_offers(void)
1116 msg->msgtype = CHANNELMSG_REQUESTOFFERS; 1171 msg->msgtype = CHANNELMSG_REQUESTOFFERS;
1117 1172
1118 1173
1119 ret = vmbus_post_msg(msg, 1174 ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
1120 sizeof(struct vmbus_channel_message_header)); 1175 true);
1121 if (ret != 0) { 1176 if (ret != 0) {
1122 pr_err("Unable to request offers - %d\n", ret); 1177 pr_err("Unable to request offers - %d\n", ret);
1123 1178
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 6ce8b874e833..a8366fec1458 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -93,12 +93,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
93 * all the CPUs. This is needed for kexec to work correctly where 93 * all the CPUs. This is needed for kexec to work correctly where
94 * the CPU attempting to connect may not be CPU 0. 94 * the CPU attempting to connect may not be CPU 0.
95 */ 95 */
96 if (version >= VERSION_WIN8_1) { 96 if (version >= VERSION_WIN8_1)
97 msg->target_vcpu = hv_context.vp_index[get_cpu()]; 97 msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
98 put_cpu(); 98 else
99 } else {
100 msg->target_vcpu = 0; 99 msg->target_vcpu = 0;
101 }
102 100
103 /* 101 /*
104 * Add to list before we send the request since we may 102 * Add to list before we send the request since we may
@@ -111,7 +109,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
111 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 109 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
112 110
113 ret = vmbus_post_msg(msg, 111 ret = vmbus_post_msg(msg,
114 sizeof(struct vmbus_channel_initiate_contact)); 112 sizeof(struct vmbus_channel_initiate_contact),
113 true);
115 if (ret != 0) { 114 if (ret != 0) {
116 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 115 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
117 list_del(&msginfo->msglistentry); 116 list_del(&msginfo->msglistentry);
@@ -220,11 +219,8 @@ int vmbus_connect(void)
220 goto cleanup; 219 goto cleanup;
221 220
222 vmbus_proto_version = version; 221 vmbus_proto_version = version;
223 pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n", 222 pr_info("Vmbus version:%d.%d\n",
224 host_info_eax, host_info_ebx >> 16, 223 version >> 16, version & 0xFFFF);
225 host_info_ebx & 0xFFFF, host_info_ecx,
226 host_info_edx >> 24, host_info_edx & 0xFFFFFF,
227 version >> 16, version & 0xFFFF);
228 224
229 kfree(msginfo); 225 kfree(msginfo);
230 return 0; 226 return 0;
@@ -264,29 +260,6 @@ void vmbus_disconnect(void)
264} 260}
265 261
266/* 262/*
267 * Map the given relid to the corresponding channel based on the
268 * per-cpu list of channels that have been affinitized to this CPU.
269 * This will be used in the channel callback path as we can do this
270 * mapping in a lock-free fashion.
271 */
272static struct vmbus_channel *pcpu_relid2channel(u32 relid)
273{
274 struct vmbus_channel *channel;
275 struct vmbus_channel *found_channel = NULL;
276 int cpu = smp_processor_id();
277 struct list_head *pcpu_head = &hv_context.percpu_list[cpu];
278
279 list_for_each_entry(channel, pcpu_head, percpu_list) {
280 if (channel->offermsg.child_relid == relid) {
281 found_channel = channel;
282 break;
283 }
284 }
285
286 return found_channel;
287}
288
289/*
290 * relid2channel - Get the channel object given its 263 * relid2channel - Get the channel object given its
291 * child relative id (ie channel id) 264 * child relative id (ie channel id)
292 */ 265 */
@@ -322,23 +295,12 @@ struct vmbus_channel *relid2channel(u32 relid)
322} 295}
323 296
324/* 297/*
325 * process_chn_event - Process a channel event notification 298 * vmbus_on_event - Process a channel event notification
326 */ 299 */
327static void process_chn_event(u32 relid) 300void vmbus_on_event(unsigned long data)
328{ 301{
329 struct vmbus_channel *channel; 302 struct vmbus_channel *channel = (void *) data;
330 void *arg; 303 void (*callback_fn)(void *);
331 bool read_state;
332 u32 bytes_to_read;
333
334 /*
335 * Find the channel based on this relid and invokes the
336 * channel callback to process the event
337 */
338 channel = pcpu_relid2channel(relid);
339
340 if (!channel)
341 return;
342 304
343 /* 305 /*
344 * A channel once created is persistent even when there 306 * A channel once created is persistent even when there
@@ -348,10 +310,13 @@ static void process_chn_event(u32 relid)
348 * Thus, checking and invoking the driver specific callback takes 310 * Thus, checking and invoking the driver specific callback takes
349 * care of orderly unloading of the driver. 311 * care of orderly unloading of the driver.
350 */ 312 */
313 callback_fn = READ_ONCE(channel->onchannel_callback);
314 if (unlikely(callback_fn == NULL))
315 return;
351 316
352 if (channel->onchannel_callback != NULL) { 317 (*callback_fn)(channel->channel_callback_context);
353 arg = channel->channel_callback_context; 318
354 read_state = channel->batched_reading; 319 if (channel->callback_mode == HV_CALL_BATCHED) {
355 /* 320 /*
356 * This callback reads the messages sent by the host. 321 * This callback reads the messages sent by the host.
357 * We can optimize host to guest signaling by ensuring: 322 * We can optimize host to guest signaling by ensuring:
@@ -363,71 +328,10 @@ static void process_chn_event(u32 relid)
363 * state is set we check to see if additional packets are 328 * state is set we check to see if additional packets are
364 * available to read. In this case we repeat the process. 329 * available to read. In this case we repeat the process.
365 */ 330 */
331 if (hv_end_read(&channel->inbound) != 0) {
332 hv_begin_read(&channel->inbound);
366 333
367 do { 334 tasklet_schedule(&channel->callback_event);
368 if (read_state)
369 hv_begin_read(&channel->inbound);
370 channel->onchannel_callback(arg);
371 if (read_state)
372 bytes_to_read = hv_end_read(&channel->inbound);
373 else
374 bytes_to_read = 0;
375 } while (read_state && (bytes_to_read != 0));
376 }
377}
378
379/*
380 * vmbus_on_event - Handler for events
381 */
382void vmbus_on_event(unsigned long data)
383{
384 u32 dword;
385 u32 maxdword;
386 int bit;
387 u32 relid;
388 u32 *recv_int_page = NULL;
389 void *page_addr;
390 int cpu = smp_processor_id();
391 union hv_synic_event_flags *event;
392
393 if (vmbus_proto_version < VERSION_WIN8) {
394 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
395 recv_int_page = vmbus_connection.recv_int_page;
396 } else {
397 /*
398 * When the host is win8 and beyond, the event page
399 * can be directly checked to get the id of the channel
400 * that has the interrupt pending.
401 */
402 maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
403 page_addr = hv_context.synic_event_page[cpu];
404 event = (union hv_synic_event_flags *)page_addr +
405 VMBUS_MESSAGE_SINT;
406 recv_int_page = event->flags32;
407 }
408
409
410
411 /* Check events */
412 if (!recv_int_page)
413 return;
414 for (dword = 0; dword < maxdword; dword++) {
415 if (!recv_int_page[dword])
416 continue;
417 for (bit = 0; bit < 32; bit++) {
418 if (sync_test_and_clear_bit(bit,
419 (unsigned long *)&recv_int_page[dword])) {
420 relid = (dword << 5) + bit;
421
422 if (relid == 0)
423 /*
424 * Special case - vmbus
425 * channel protocol msg
426 */
427 continue;
428
429 process_chn_event(relid);
430 }
431 } 335 }
432 } 336 }
433} 337}
@@ -435,7 +339,7 @@ void vmbus_on_event(unsigned long data)
435/* 339/*
436 * vmbus_post_msg - Send a msg on the vmbus's message connection 340 * vmbus_post_msg - Send a msg on the vmbus's message connection
437 */ 341 */
438int vmbus_post_msg(void *buffer, size_t buflen) 342int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
439{ 343{
440 union hv_connection_id conn_id; 344 union hv_connection_id conn_id;
441 int ret = 0; 345 int ret = 0;
@@ -450,7 +354,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
450 * insufficient resources. Retry the operation a couple of 354 * insufficient resources. Retry the operation a couple of
451 * times before giving up. 355 * times before giving up.
452 */ 356 */
453 while (retries < 20) { 357 while (retries < 100) {
454 ret = hv_post_message(conn_id, 1, buffer, buflen); 358 ret = hv_post_message(conn_id, 1, buffer, buflen);
455 359
456 switch (ret) { 360 switch (ret) {
@@ -473,8 +377,14 @@ int vmbus_post_msg(void *buffer, size_t buflen)
473 } 377 }
474 378
475 retries++; 379 retries++;
476 udelay(usec); 380 if (can_sleep && usec > 1000)
477 if (usec < 2048) 381 msleep(usec / 1000);
382 else if (usec < MAX_UDELAY_MS * 1000)
383 udelay(usec);
384 else
385 mdelay(usec / 1000);
386
387 if (usec < 256000)
478 usec *= 2; 388 usec *= 2;
479 } 389 }
480 return ret; 390 return ret;
@@ -487,12 +397,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
487{ 397{
488 u32 child_relid = channel->offermsg.child_relid; 398 u32 child_relid = channel->offermsg.child_relid;
489 399
490 if (!channel->is_dedicated_interrupt) { 400 if (!channel->is_dedicated_interrupt)
491 /* Each u32 represents 32 channels */ 401 vmbus_send_interrupt(child_relid);
492 sync_set_bit(child_relid & 31,
493 (unsigned long *)vmbus_connection.send_int_page +
494 (child_relid >> 5));
495 }
496 402
497 hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL); 403 hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
498} 404}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index b44b32f21e61..665a64f1611e 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -36,7 +36,6 @@
36/* The one and only */ 36/* The one and only */
37struct hv_context hv_context = { 37struct hv_context hv_context = {
38 .synic_initialized = false, 38 .synic_initialized = false,
39 .hypercall_page = NULL,
40}; 39};
41 40
42#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */ 41#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
@@ -44,276 +43,20 @@ struct hv_context hv_context = {
44#define HV_MIN_DELTA_TICKS 1 43#define HV_MIN_DELTA_TICKS 1
45 44
46/* 45/*
47 * query_hypervisor_info - Get version info of the windows hypervisor
48 */
49unsigned int host_info_eax;
50unsigned int host_info_ebx;
51unsigned int host_info_ecx;
52unsigned int host_info_edx;
53
54static int query_hypervisor_info(void)
55{
56 unsigned int eax;
57 unsigned int ebx;
58 unsigned int ecx;
59 unsigned int edx;
60 unsigned int max_leaf;
61 unsigned int op;
62
63 /*
64 * Its assumed that this is called after confirming that Viridian
65 * is present. Query id and revision.
66 */
67 eax = 0;
68 ebx = 0;
69 ecx = 0;
70 edx = 0;
71 op = HVCPUID_VENDOR_MAXFUNCTION;
72 cpuid(op, &eax, &ebx, &ecx, &edx);
73
74 max_leaf = eax;
75
76 if (max_leaf >= HVCPUID_VERSION) {
77 eax = 0;
78 ebx = 0;
79 ecx = 0;
80 edx = 0;
81 op = HVCPUID_VERSION;
82 cpuid(op, &eax, &ebx, &ecx, &edx);
83 host_info_eax = eax;
84 host_info_ebx = ebx;
85 host_info_ecx = ecx;
86 host_info_edx = edx;
87 }
88 return max_leaf;
89}
90
91/*
92 * hv_do_hypercall- Invoke the specified hypercall
93 */
94u64 hv_do_hypercall(u64 control, void *input, void *output)
95{
96 u64 input_address = (input) ? virt_to_phys(input) : 0;
97 u64 output_address = (output) ? virt_to_phys(output) : 0;
98 void *hypercall_page = hv_context.hypercall_page;
99#ifdef CONFIG_X86_64
100 u64 hv_status = 0;
101
102 if (!hypercall_page)
103 return (u64)ULLONG_MAX;
104
105 __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
106 __asm__ __volatile__("call *%3" : "=a" (hv_status) :
107 "c" (control), "d" (input_address),
108 "m" (hypercall_page));
109
110 return hv_status;
111
112#else
113
114 u32 control_hi = control >> 32;
115 u32 control_lo = control & 0xFFFFFFFF;
116 u32 hv_status_hi = 1;
117 u32 hv_status_lo = 1;
118 u32 input_address_hi = input_address >> 32;
119 u32 input_address_lo = input_address & 0xFFFFFFFF;
120 u32 output_address_hi = output_address >> 32;
121 u32 output_address_lo = output_address & 0xFFFFFFFF;
122
123 if (!hypercall_page)
124 return (u64)ULLONG_MAX;
125
126 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
127 "=a"(hv_status_lo) : "d" (control_hi),
128 "a" (control_lo), "b" (input_address_hi),
129 "c" (input_address_lo), "D"(output_address_hi),
130 "S"(output_address_lo), "m" (hypercall_page));
131
132 return hv_status_lo | ((u64)hv_status_hi << 32);
133#endif /* !x86_64 */
134}
135EXPORT_SYMBOL_GPL(hv_do_hypercall);
136
137#ifdef CONFIG_X86_64
138static u64 read_hv_clock_tsc(struct clocksource *arg)
139{
140 u64 current_tick;
141 struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;
142
143 if (tsc_pg->tsc_sequence != 0) {
144 /*
145 * Use the tsc page to compute the value.
146 */
147
148 while (1) {
149 u64 tmp;
150 u32 sequence = tsc_pg->tsc_sequence;
151 u64 cur_tsc;
152 u64 scale = tsc_pg->tsc_scale;
153 s64 offset = tsc_pg->tsc_offset;
154
155 rdtscll(cur_tsc);
156 /* current_tick = ((cur_tsc *scale) >> 64) + offset */
157 asm("mulq %3"
158 : "=d" (current_tick), "=a" (tmp)
159 : "a" (cur_tsc), "r" (scale));
160
161 current_tick += offset;
162 if (tsc_pg->tsc_sequence == sequence)
163 return current_tick;
164
165 if (tsc_pg->tsc_sequence != 0)
166 continue;
167 /*
168 * Fallback using MSR method.
169 */
170 break;
171 }
172 }
173 rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
174 return current_tick;
175}
176
177static struct clocksource hyperv_cs_tsc = {
178 .name = "hyperv_clocksource_tsc_page",
179 .rating = 425,
180 .read = read_hv_clock_tsc,
181 .mask = CLOCKSOURCE_MASK(64),
182 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
183};
184#endif
185
186
187/*
188 * hv_init - Main initialization routine. 46 * hv_init - Main initialization routine.
189 * 47 *
190 * This routine must be called before any other routines in here are called 48 * This routine must be called before any other routines in here are called
191 */ 49 */
192int hv_init(void) 50int hv_init(void)
193{ 51{
194 int max_leaf; 52 if (!hv_is_hypercall_page_setup())
195 union hv_x64_msr_hypercall_contents hypercall_msr; 53 return -ENOTSUPP;
196 void *virtaddr = NULL;
197
198 memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
199 memset(hv_context.synic_message_page, 0,
200 sizeof(void *) * NR_CPUS);
201 memset(hv_context.post_msg_page, 0,
202 sizeof(void *) * NR_CPUS);
203 memset(hv_context.vp_index, 0,
204 sizeof(int) * NR_CPUS);
205 memset(hv_context.event_dpc, 0,
206 sizeof(void *) * NR_CPUS);
207 memset(hv_context.msg_dpc, 0,
208 sizeof(void *) * NR_CPUS);
209 memset(hv_context.clk_evt, 0,
210 sizeof(void *) * NR_CPUS);
211
212 max_leaf = query_hypervisor_info();
213 54
214 /* 55 hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
215 * Write our OS ID. 56 if (!hv_context.cpu_context)
216 */ 57 return -ENOMEM;
217 hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0);
218 wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
219
220 /* See if the hypercall page is already set */
221 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
222
223 virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
224
225 if (!virtaddr)
226 goto cleanup;
227
228 hypercall_msr.enable = 1;
229
230 hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
231 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
232
233 /* Confirm that hypercall page did get setup. */
234 hypercall_msr.as_uint64 = 0;
235 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
236
237 if (!hypercall_msr.enable)
238 goto cleanup;
239
240 hv_context.hypercall_page = virtaddr;
241
242#ifdef CONFIG_X86_64
243 if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
244 union hv_x64_msr_hypercall_contents tsc_msr;
245 void *va_tsc;
246
247 va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
248 if (!va_tsc)
249 goto cleanup;
250 hv_context.tsc_page = va_tsc;
251
252 rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
253 58
254 tsc_msr.enable = 1;
255 tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);
256
257 wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
258 clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
259 }
260#endif
261 return 0; 59 return 0;
262
263cleanup:
264 if (virtaddr) {
265 if (hypercall_msr.enable) {
266 hypercall_msr.as_uint64 = 0;
267 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
268 }
269
270 vfree(virtaddr);
271 }
272
273 return -ENOTSUPP;
274}
275
276/*
277 * hv_cleanup - Cleanup routine.
278 *
279 * This routine is called normally during driver unloading or exiting.
280 */
281void hv_cleanup(bool crash)
282{
283 union hv_x64_msr_hypercall_contents hypercall_msr;
284
285 /* Reset our OS id */
286 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
287
288 if (hv_context.hypercall_page) {
289 hypercall_msr.as_uint64 = 0;
290 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
291 if (!crash)
292 vfree(hv_context.hypercall_page);
293 hv_context.hypercall_page = NULL;
294 }
295
296#ifdef CONFIG_X86_64
297 /*
298 * Cleanup the TSC page based CS.
299 */
300 if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
301 /*
302 * Crash can happen in an interrupt context and unregistering
303 * a clocksource is impossible and redundant in this case.
304 */
305 if (!oops_in_progress) {
306 clocksource_change_rating(&hyperv_cs_tsc, 10);
307 clocksource_unregister(&hyperv_cs_tsc);
308 }
309
310 hypercall_msr.as_uint64 = 0;
311 wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
312 if (!crash)
313 vfree(hv_context.tsc_page);
314 hv_context.tsc_page = NULL;
315 }
316#endif
317} 60}
318 61
319/* 62/*
@@ -325,25 +68,24 @@ int hv_post_message(union hv_connection_id connection_id,
325 enum hv_message_type message_type, 68 enum hv_message_type message_type,
326 void *payload, size_t payload_size) 69 void *payload, size_t payload_size)
327{ 70{
328
329 struct hv_input_post_message *aligned_msg; 71 struct hv_input_post_message *aligned_msg;
72 struct hv_per_cpu_context *hv_cpu;
330 u64 status; 73 u64 status;
331 74
332 if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) 75 if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
333 return -EMSGSIZE; 76 return -EMSGSIZE;
334 77
335 aligned_msg = (struct hv_input_post_message *) 78 hv_cpu = get_cpu_ptr(hv_context.cpu_context);
336 hv_context.post_msg_page[get_cpu()]; 79 aligned_msg = hv_cpu->post_msg_page;
337
338 aligned_msg->connectionid = connection_id; 80 aligned_msg->connectionid = connection_id;
339 aligned_msg->reserved = 0; 81 aligned_msg->reserved = 0;
340 aligned_msg->message_type = message_type; 82 aligned_msg->message_type = message_type;
341 aligned_msg->payload_size = payload_size; 83 aligned_msg->payload_size = payload_size;
342 memcpy((void *)aligned_msg->payload, payload, payload_size); 84 memcpy((void *)aligned_msg->payload, payload, payload_size);
85 put_cpu_ptr(hv_cpu);
343 86
344 status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL); 87 status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
345 88
346 put_cpu();
347 return status & 0xFFFF; 89 return status & 0xFFFF;
348} 90}
349 91
@@ -354,16 +96,16 @@ static int hv_ce_set_next_event(unsigned long delta,
354 96
355 WARN_ON(!clockevent_state_oneshot(evt)); 97 WARN_ON(!clockevent_state_oneshot(evt));
356 98
357 rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick); 99 hv_get_current_tick(current_tick);
358 current_tick += delta; 100 current_tick += delta;
359 wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick); 101 hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
360 return 0; 102 return 0;
361} 103}
362 104
363static int hv_ce_shutdown(struct clock_event_device *evt) 105static int hv_ce_shutdown(struct clock_event_device *evt)
364{ 106{
365 wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0); 107 hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
366 wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0); 108 hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);
367 109
368 return 0; 110 return 0;
369} 111}
@@ -375,7 +117,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
375 timer_cfg.enable = 1; 117 timer_cfg.enable = 1;
376 timer_cfg.auto_enable = 1; 118 timer_cfg.auto_enable = 1;
377 timer_cfg.sintx = VMBUS_MESSAGE_SINT; 119 timer_cfg.sintx = VMBUS_MESSAGE_SINT;
378 wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64); 120 hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
379 121
380 return 0; 122 return 0;
381} 123}
@@ -400,8 +142,6 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
400 142
401int hv_synic_alloc(void) 143int hv_synic_alloc(void)
402{ 144{
403 size_t size = sizeof(struct tasklet_struct);
404 size_t ced_size = sizeof(struct clock_event_device);
405 int cpu; 145 int cpu;
406 146
407 hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids, 147 hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
@@ -411,52 +151,42 @@ int hv_synic_alloc(void)
411 goto err; 151 goto err;
412 } 152 }
413 153
414 for_each_online_cpu(cpu) { 154 for_each_present_cpu(cpu) {
415 hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); 155 struct hv_per_cpu_context *hv_cpu
416 if (hv_context.event_dpc[cpu] == NULL) { 156 = per_cpu_ptr(hv_context.cpu_context, cpu);
417 pr_err("Unable to allocate event dpc\n");
418 goto err;
419 }
420 tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
421 157
422 hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC); 158 memset(hv_cpu, 0, sizeof(*hv_cpu));
423 if (hv_context.msg_dpc[cpu] == NULL) { 159 tasklet_init(&hv_cpu->msg_dpc,
424 pr_err("Unable to allocate event dpc\n"); 160 vmbus_on_msg_dpc, (unsigned long) hv_cpu);
425 goto err;
426 }
427 tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);
428 161
429 hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC); 162 hv_cpu->clk_evt = kzalloc(sizeof(struct clock_event_device),
430 if (hv_context.clk_evt[cpu] == NULL) { 163 GFP_KERNEL);
164 if (hv_cpu->clk_evt == NULL) {
431 pr_err("Unable to allocate clock event device\n"); 165 pr_err("Unable to allocate clock event device\n");
432 goto err; 166 goto err;
433 } 167 }
168 hv_init_clockevent_device(hv_cpu->clk_evt, cpu);
434 169
435 hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu); 170 hv_cpu->synic_message_page =
436
437 hv_context.synic_message_page[cpu] =
438 (void *)get_zeroed_page(GFP_ATOMIC); 171 (void *)get_zeroed_page(GFP_ATOMIC);
439 172 if (hv_cpu->synic_message_page == NULL) {
440 if (hv_context.synic_message_page[cpu] == NULL) {
441 pr_err("Unable to allocate SYNIC message page\n"); 173 pr_err("Unable to allocate SYNIC message page\n");
442 goto err; 174 goto err;
443 } 175 }
444 176
445 hv_context.synic_event_page[cpu] = 177 hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
446 (void *)get_zeroed_page(GFP_ATOMIC); 178 if (hv_cpu->synic_event_page == NULL) {
447
448 if (hv_context.synic_event_page[cpu] == NULL) {
449 pr_err("Unable to allocate SYNIC event page\n"); 179 pr_err("Unable to allocate SYNIC event page\n");
450 goto err; 180 goto err;
451 } 181 }
452 182
453 hv_context.post_msg_page[cpu] = 183 hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
454 (void *)get_zeroed_page(GFP_ATOMIC); 184 if (hv_cpu->post_msg_page == NULL) {
455
456 if (hv_context.post_msg_page[cpu] == NULL) {
457 pr_err("Unable to allocate post msg page\n"); 185 pr_err("Unable to allocate post msg page\n");
458 goto err; 186 goto err;
459 } 187 }
188
189 INIT_LIST_HEAD(&hv_cpu->chan_list);
460 } 190 }
461 191
462 return 0; 192 return 0;
@@ -464,26 +194,24 @@ err:
464 return -ENOMEM; 194 return -ENOMEM;
465} 195}
466 196
467static void hv_synic_free_cpu(int cpu)
468{
469 kfree(hv_context.event_dpc[cpu]);
470 kfree(hv_context.msg_dpc[cpu]);
471 kfree(hv_context.clk_evt[cpu]);
472 if (hv_context.synic_event_page[cpu])
473 free_page((unsigned long)hv_context.synic_event_page[cpu]);
474 if (hv_context.synic_message_page[cpu])
475 free_page((unsigned long)hv_context.synic_message_page[cpu]);
476 if (hv_context.post_msg_page[cpu])
477 free_page((unsigned long)hv_context.post_msg_page[cpu]);
478}
479 197
480void hv_synic_free(void) 198void hv_synic_free(void)
481{ 199{
482 int cpu; 200 int cpu;
483 201
202 for_each_present_cpu(cpu) {
203 struct hv_per_cpu_context *hv_cpu
204 = per_cpu_ptr(hv_context.cpu_context, cpu);
205
206 if (hv_cpu->synic_event_page)
207 free_page((unsigned long)hv_cpu->synic_event_page);
208 if (hv_cpu->synic_message_page)
209 free_page((unsigned long)hv_cpu->synic_message_page);
210 if (hv_cpu->post_msg_page)
211 free_page((unsigned long)hv_cpu->post_msg_page);
212 }
213
484 kfree(hv_context.hv_numa_map); 214 kfree(hv_context.hv_numa_map);
485 for_each_online_cpu(cpu)
486 hv_synic_free_cpu(cpu);
487} 215}
488 216
489/* 217/*
@@ -493,54 +221,49 @@ void hv_synic_free(void)
493 * retrieve the initialized message and event pages. Otherwise, we create and 221 * retrieve the initialized message and event pages. Otherwise, we create and
494 * initialize the message and event pages. 222 * initialize the message and event pages.
495 */ 223 */
496void hv_synic_init(void *arg) 224int hv_synic_init(unsigned int cpu)
497{ 225{
498 u64 version; 226 struct hv_per_cpu_context *hv_cpu
227 = per_cpu_ptr(hv_context.cpu_context, cpu);
499 union hv_synic_simp simp; 228 union hv_synic_simp simp;
500 union hv_synic_siefp siefp; 229 union hv_synic_siefp siefp;
501 union hv_synic_sint shared_sint; 230 union hv_synic_sint shared_sint;
502 union hv_synic_scontrol sctrl; 231 union hv_synic_scontrol sctrl;
503 u64 vp_index; 232 u64 vp_index;
504 233
505 int cpu = smp_processor_id();
506
507 if (!hv_context.hypercall_page)
508 return;
509
510 /* Check the version */
511 rdmsrl(HV_X64_MSR_SVERSION, version);
512
513 /* Setup the Synic's message page */ 234 /* Setup the Synic's message page */
514 rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64); 235 hv_get_simp(simp.as_uint64);
515 simp.simp_enabled = 1; 236 simp.simp_enabled = 1;
516 simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu]) 237 simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
517 >> PAGE_SHIFT; 238 >> PAGE_SHIFT;
518 239
519 wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64); 240 hv_set_simp(simp.as_uint64);
520 241
521 /* Setup the Synic's event page */ 242 /* Setup the Synic's event page */
522 rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64); 243 hv_get_siefp(siefp.as_uint64);
523 siefp.siefp_enabled = 1; 244 siefp.siefp_enabled = 1;
524 siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu]) 245 siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
525 >> PAGE_SHIFT; 246 >> PAGE_SHIFT;
526 247
527 wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64); 248 hv_set_siefp(siefp.as_uint64);
528 249
529 /* Setup the shared SINT. */ 250 /* Setup the shared SINT. */
530 rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 251 hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
252 shared_sint.as_uint64);
531 253
532 shared_sint.as_uint64 = 0; 254 shared_sint.as_uint64 = 0;
533 shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR; 255 shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
534 shared_sint.masked = false; 256 shared_sint.masked = false;
535 shared_sint.auto_eoi = true; 257 shared_sint.auto_eoi = true;
536 258
537 wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 259 hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
260 shared_sint.as_uint64);
538 261
539 /* Enable the global synic bit */ 262 /* Enable the global synic bit */
540 rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64); 263 hv_get_synic_state(sctrl.as_uint64);
541 sctrl.enable = 1; 264 sctrl.enable = 1;
542 265
543 wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64); 266 hv_set_synic_state(sctrl.as_uint64);
544 267
545 hv_context.synic_initialized = true; 268 hv_context.synic_initialized = true;
546 269
@@ -549,20 +272,18 @@ void hv_synic_init(void *arg)
549 * of cpuid and Linux' notion of cpuid. 272 * of cpuid and Linux' notion of cpuid.
550 * This array will be indexed using Linux cpuid. 273 * This array will be indexed using Linux cpuid.
551 */ 274 */
552 rdmsrl(HV_X64_MSR_VP_INDEX, vp_index); 275 hv_get_vp_index(vp_index);
553 hv_context.vp_index[cpu] = (u32)vp_index; 276 hv_context.vp_index[cpu] = (u32)vp_index;
554 277
555 INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
556
557 /* 278 /*
558 * Register the per-cpu clockevent source. 279 * Register the per-cpu clockevent source.
559 */ 280 */
560 if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) 281 if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
561 clockevents_config_and_register(hv_context.clk_evt[cpu], 282 clockevents_config_and_register(hv_cpu->clk_evt,
562 HV_TIMER_FREQUENCY, 283 HV_TIMER_FREQUENCY,
563 HV_MIN_DELTA_TICKS, 284 HV_MIN_DELTA_TICKS,
564 HV_MAX_MAX_DELTA_TICKS); 285 HV_MAX_MAX_DELTA_TICKS);
565 return; 286 return 0;
566} 287}
567 288
568/* 289/*
@@ -575,52 +296,94 @@ void hv_synic_clockevents_cleanup(void)
575 if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)) 296 if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
576 return; 297 return;
577 298
578 for_each_present_cpu(cpu) 299 for_each_present_cpu(cpu) {
579 clockevents_unbind_device(hv_context.clk_evt[cpu], cpu); 300 struct hv_per_cpu_context *hv_cpu
301 = per_cpu_ptr(hv_context.cpu_context, cpu);
302
303 clockevents_unbind_device(hv_cpu->clk_evt, cpu);
304 }
580} 305}
581 306
582/* 307/*
583 * hv_synic_cleanup - Cleanup routine for hv_synic_init(). 308 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
584 */ 309 */
585void hv_synic_cleanup(void *arg) 310int hv_synic_cleanup(unsigned int cpu)
586{ 311{
587 union hv_synic_sint shared_sint; 312 union hv_synic_sint shared_sint;
588 union hv_synic_simp simp; 313 union hv_synic_simp simp;
589 union hv_synic_siefp siefp; 314 union hv_synic_siefp siefp;
590 union hv_synic_scontrol sctrl; 315 union hv_synic_scontrol sctrl;
591 int cpu = smp_processor_id(); 316 struct vmbus_channel *channel, *sc;
317 bool channel_found = false;
318 unsigned long flags;
592 319
593 if (!hv_context.synic_initialized) 320 if (!hv_context.synic_initialized)
594 return; 321 return -EFAULT;
322
323 /*
324 * Search for channels which are bound to the CPU we're about to
325 * cleanup. In case we find one and vmbus is still connected we need to
326 * fail, this will effectively prevent CPU offlining. There is no way
327 * we can re-bind channels to different CPUs for now.
328 */
329 mutex_lock(&vmbus_connection.channel_mutex);
330 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
331 if (channel->target_cpu == cpu) {
332 channel_found = true;
333 break;
334 }
335 spin_lock_irqsave(&channel->lock, flags);
336 list_for_each_entry(sc, &channel->sc_list, sc_list) {
337 if (sc->target_cpu == cpu) {
338 channel_found = true;
339 break;
340 }
341 }
342 spin_unlock_irqrestore(&channel->lock, flags);
343 if (channel_found)
344 break;
345 }
346 mutex_unlock(&vmbus_connection.channel_mutex);
347
348 if (channel_found && vmbus_connection.conn_state == CONNECTED)
349 return -EBUSY;
595 350
596 /* Turn off clockevent device */ 351 /* Turn off clockevent device */
597 if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) { 352 if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
598 clockevents_unbind_device(hv_context.clk_evt[cpu], cpu); 353 struct hv_per_cpu_context *hv_cpu
599 hv_ce_shutdown(hv_context.clk_evt[cpu]); 354 = this_cpu_ptr(hv_context.cpu_context);
355
356 clockevents_unbind_device(hv_cpu->clk_evt, cpu);
357 hv_ce_shutdown(hv_cpu->clk_evt);
358 put_cpu_ptr(hv_cpu);
600 } 359 }
601 360
602 rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 361 hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
362 shared_sint.as_uint64);
603 363
604 shared_sint.masked = 1; 364 shared_sint.masked = 1;
605 365
606 /* Need to correctly cleanup in the case of SMP!!! */ 366 /* Need to correctly cleanup in the case of SMP!!! */
607 /* Disable the interrupt */ 367 /* Disable the interrupt */
608 wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 368 hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
369 shared_sint.as_uint64);
609 370
610 rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64); 371 hv_get_simp(simp.as_uint64);
611 simp.simp_enabled = 0; 372 simp.simp_enabled = 0;
612 simp.base_simp_gpa = 0; 373 simp.base_simp_gpa = 0;
613 374
614 wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64); 375 hv_set_simp(simp.as_uint64);
615 376
616 rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64); 377 hv_get_siefp(siefp.as_uint64);
617 siefp.siefp_enabled = 0; 378 siefp.siefp_enabled = 0;
618 siefp.base_siefp_gpa = 0; 379 siefp.base_siefp_gpa = 0;
619 380
620 wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64); 381 hv_set_siefp(siefp.as_uint64);
621 382
622 /* Disable the global synic bit */ 383 /* Disable the global synic bit */
623 rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64); 384 hv_get_synic_state(sctrl.as_uint64);
624 sctrl.enable = 0; 385 sctrl.enable = 0;
625 wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64); 386 hv_set_synic_state(sctrl.as_uint64);
387
388 return 0;
626} 389}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 14c3dc4bd23c..5fd03e59cee5 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -587,6 +587,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
587 spin_lock_irqsave(&dm_device.ha_lock, flags); 587 spin_lock_irqsave(&dm_device.ha_lock, flags);
588 dm_device.num_pages_onlined += mem->nr_pages; 588 dm_device.num_pages_onlined += mem->nr_pages;
589 spin_unlock_irqrestore(&dm_device.ha_lock, flags); 589 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
590 /* Fall through */
590 case MEM_CANCEL_ONLINE: 591 case MEM_CANCEL_ONLINE:
591 if (dm_device.ha_waiting) { 592 if (dm_device.ha_waiting) {
592 dm_device.ha_waiting = false; 593 dm_device.ha_waiting = false;
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 8b2ba98831ec..9aee6014339d 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -31,6 +31,16 @@
31#define WIN8_SRV_MINOR 1 31#define WIN8_SRV_MINOR 1
32#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) 32#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
33 33
34#define FCOPY_VER_COUNT 1
35static const int fcopy_versions[] = {
36 WIN8_SRV_VERSION
37};
38
39#define FW_VER_COUNT 1
40static const int fw_versions[] = {
41 UTIL_FW_VERSION
42};
43
34/* 44/*
35 * Global state maintained for transaction that is being processed. 45 * Global state maintained for transaction that is being processed.
36 * For a class of integration services, including the "file copy service", 46 * For a class of integration services, including the "file copy service",
@@ -61,6 +71,7 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
61static const char fcopy_devname[] = "vmbus/hv_fcopy"; 71static const char fcopy_devname[] = "vmbus/hv_fcopy";
62static u8 *recv_buffer; 72static u8 *recv_buffer;
63static struct hvutil_transport *hvt; 73static struct hvutil_transport *hvt;
74static struct completion release_event;
64/* 75/*
65 * This state maintains the version number registered by the daemon. 76 * This state maintains the version number registered by the daemon.
66 */ 77 */
@@ -227,8 +238,6 @@ void hv_fcopy_onchannelcallback(void *context)
227 u64 requestid; 238 u64 requestid;
228 struct hv_fcopy_hdr *fcopy_msg; 239 struct hv_fcopy_hdr *fcopy_msg;
229 struct icmsg_hdr *icmsghdr; 240 struct icmsg_hdr *icmsghdr;
230 struct icmsg_negotiate *negop = NULL;
231 int util_fw_version;
232 int fcopy_srv_version; 241 int fcopy_srv_version;
233 242
234 if (fcopy_transaction.state > HVUTIL_READY) 243 if (fcopy_transaction.state > HVUTIL_READY)
@@ -242,10 +251,15 @@ void hv_fcopy_onchannelcallback(void *context)
242 icmsghdr = (struct icmsg_hdr *)&recv_buffer[ 251 icmsghdr = (struct icmsg_hdr *)&recv_buffer[
243 sizeof(struct vmbuspipe_hdr)]; 252 sizeof(struct vmbuspipe_hdr)];
244 if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) { 253 if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
245 util_fw_version = UTIL_FW_VERSION; 254 if (vmbus_prep_negotiate_resp(icmsghdr, recv_buffer,
246 fcopy_srv_version = WIN8_SRV_VERSION; 255 fw_versions, FW_VER_COUNT,
247 vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer, 256 fcopy_versions, FCOPY_VER_COUNT,
248 util_fw_version, fcopy_srv_version); 257 NULL, &fcopy_srv_version)) {
258
259 pr_info("FCopy IC version %d.%d\n",
260 fcopy_srv_version >> 16,
261 fcopy_srv_version & 0xFFFF);
262 }
249 } else { 263 } else {
250 fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[ 264 fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
251 sizeof(struct vmbuspipe_hdr) + 265 sizeof(struct vmbuspipe_hdr) +
@@ -317,6 +331,7 @@ static void fcopy_on_reset(void)
317 331
318 if (cancel_delayed_work_sync(&fcopy_timeout_work)) 332 if (cancel_delayed_work_sync(&fcopy_timeout_work))
319 fcopy_respond_to_host(HV_E_FAIL); 333 fcopy_respond_to_host(HV_E_FAIL);
334 complete(&release_event);
320} 335}
321 336
322int hv_fcopy_init(struct hv_util_service *srv) 337int hv_fcopy_init(struct hv_util_service *srv)
@@ -324,6 +339,7 @@ int hv_fcopy_init(struct hv_util_service *srv)
324 recv_buffer = srv->recv_buffer; 339 recv_buffer = srv->recv_buffer;
325 fcopy_transaction.recv_channel = srv->channel; 340 fcopy_transaction.recv_channel = srv->channel;
326 341
342 init_completion(&release_event);
327 /* 343 /*
328 * When this driver loads, the user level daemon that 344 * When this driver loads, the user level daemon that
329 * processes the host requests may not yet be running. 345 * processes the host requests may not yet be running.
@@ -345,4 +361,5 @@ void hv_fcopy_deinit(void)
345 fcopy_transaction.state = HVUTIL_DEVICE_DYING; 361 fcopy_transaction.state = HVUTIL_DEVICE_DYING;
346 cancel_delayed_work_sync(&fcopy_timeout_work); 362 cancel_delayed_work_sync(&fcopy_timeout_work);
347 hvutil_transport_destroy(hvt); 363 hvutil_transport_destroy(hvt);
364 wait_for_completion(&release_event);
348} 365}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 5e1fdc8d32ab..de263712e247 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -46,6 +46,19 @@
46#define WIN8_SRV_MINOR 0 46#define WIN8_SRV_MINOR 0
47#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) 47#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
48 48
49#define KVP_VER_COUNT 3
50static const int kvp_versions[] = {
51 WIN8_SRV_VERSION,
52 WIN7_SRV_VERSION,
53 WS2008_SRV_VERSION
54};
55
56#define FW_VER_COUNT 2
57static const int fw_versions[] = {
58 UTIL_FW_VERSION,
59 UTIL_WS2K8_FW_VERSION
60};
61
49/* 62/*
50 * Global state maintained for transaction that is being processed. For a class 63 * Global state maintained for transaction that is being processed. For a class
51 * of integration services, including the "KVP service", the specified protocol 64 * of integration services, including the "KVP service", the specified protocol
@@ -88,6 +101,7 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
88static const char kvp_devname[] = "vmbus/hv_kvp"; 101static const char kvp_devname[] = "vmbus/hv_kvp";
89static u8 *recv_buffer; 102static u8 *recv_buffer;
90static struct hvutil_transport *hvt; 103static struct hvutil_transport *hvt;
104static struct completion release_event;
91/* 105/*
92 * Register the kernel component with the user-level daemon. 106 * Register the kernel component with the user-level daemon.
93 * As part of this registration, pass the LIC version number. 107 * As part of this registration, pass the LIC version number.
@@ -609,8 +623,6 @@ void hv_kvp_onchannelcallback(void *context)
609 struct hv_kvp_msg *kvp_msg; 623 struct hv_kvp_msg *kvp_msg;
610 624
611 struct icmsg_hdr *icmsghdrp; 625 struct icmsg_hdr *icmsghdrp;
612 struct icmsg_negotiate *negop = NULL;
613 int util_fw_version;
614 int kvp_srv_version; 626 int kvp_srv_version;
615 static enum {NEGO_NOT_STARTED, 627 static enum {NEGO_NOT_STARTED,
616 NEGO_IN_PROGRESS, 628 NEGO_IN_PROGRESS,
@@ -639,28 +651,14 @@ void hv_kvp_onchannelcallback(void *context)
639 sizeof(struct vmbuspipe_hdr)]; 651 sizeof(struct vmbuspipe_hdr)];
640 652
641 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 653 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
642 /* 654 if (vmbus_prep_negotiate_resp(icmsghdrp,
643 * Based on the host, select appropriate 655 recv_buffer, fw_versions, FW_VER_COUNT,
644 * framework and service versions we will 656 kvp_versions, KVP_VER_COUNT,
645 * negotiate. 657 NULL, &kvp_srv_version)) {
646 */ 658 pr_info("KVP IC version %d.%d\n",
647 switch (vmbus_proto_version) { 659 kvp_srv_version >> 16,
648 case (VERSION_WS2008): 660 kvp_srv_version & 0xFFFF);
649 util_fw_version = UTIL_WS2K8_FW_VERSION;
650 kvp_srv_version = WS2008_SRV_VERSION;
651 break;
652 case (VERSION_WIN7):
653 util_fw_version = UTIL_FW_VERSION;
654 kvp_srv_version = WIN7_SRV_VERSION;
655 break;
656 default:
657 util_fw_version = UTIL_FW_VERSION;
658 kvp_srv_version = WIN8_SRV_VERSION;
659 } 661 }
660 vmbus_prep_negotiate_resp(icmsghdrp, negop,
661 recv_buffer, util_fw_version,
662 kvp_srv_version);
663
664 } else { 662 } else {
665 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ 663 kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
666 sizeof(struct vmbuspipe_hdr) + 664 sizeof(struct vmbuspipe_hdr) +
@@ -716,6 +714,7 @@ static void kvp_on_reset(void)
716 if (cancel_delayed_work_sync(&kvp_timeout_work)) 714 if (cancel_delayed_work_sync(&kvp_timeout_work))
717 kvp_respond_to_host(NULL, HV_E_FAIL); 715 kvp_respond_to_host(NULL, HV_E_FAIL);
718 kvp_transaction.state = HVUTIL_DEVICE_INIT; 716 kvp_transaction.state = HVUTIL_DEVICE_INIT;
717 complete(&release_event);
719} 718}
720 719
721int 720int
@@ -724,6 +723,7 @@ hv_kvp_init(struct hv_util_service *srv)
724 recv_buffer = srv->recv_buffer; 723 recv_buffer = srv->recv_buffer;
725 kvp_transaction.recv_channel = srv->channel; 724 kvp_transaction.recv_channel = srv->channel;
726 725
726 init_completion(&release_event);
727 /* 727 /*
728 * When this driver loads, the user level daemon that 728 * When this driver loads, the user level daemon that
729 * processes the host requests may not yet be running. 729 * processes the host requests may not yet be running.
@@ -747,4 +747,5 @@ void hv_kvp_deinit(void)
747 cancel_delayed_work_sync(&kvp_timeout_work); 747 cancel_delayed_work_sync(&kvp_timeout_work);
748 cancel_work_sync(&kvp_sendkey_work); 748 cancel_work_sync(&kvp_sendkey_work);
749 hvutil_transport_destroy(hvt); 749 hvutil_transport_destroy(hvt);
750 wait_for_completion(&release_event);
750} 751}
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index eee238cc60bd..bcc03f0748d6 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -31,6 +31,16 @@
31#define VSS_MINOR 0 31#define VSS_MINOR 0
32#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR) 32#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
33 33
34#define VSS_VER_COUNT 1
35static const int vss_versions[] = {
36 VSS_VERSION
37};
38
39#define FW_VER_COUNT 1
40static const int fw_versions[] = {
41 UTIL_FW_VERSION
42};
43
34/* 44/*
35 * Timeout values are based on expecations from host 45 * Timeout values are based on expecations from host
36 */ 46 */
@@ -69,6 +79,7 @@ static int dm_reg_value;
69static const char vss_devname[] = "vmbus/hv_vss"; 79static const char vss_devname[] = "vmbus/hv_vss";
70static __u8 *recv_buffer; 80static __u8 *recv_buffer;
71static struct hvutil_transport *hvt; 81static struct hvutil_transport *hvt;
82static struct completion release_event;
72 83
73static void vss_timeout_func(struct work_struct *dummy); 84static void vss_timeout_func(struct work_struct *dummy);
74static void vss_handle_request(struct work_struct *dummy); 85static void vss_handle_request(struct work_struct *dummy);
@@ -293,10 +304,9 @@ void hv_vss_onchannelcallback(void *context)
293 u32 recvlen; 304 u32 recvlen;
294 u64 requestid; 305 u64 requestid;
295 struct hv_vss_msg *vss_msg; 306 struct hv_vss_msg *vss_msg;
296 307 int vss_srv_version;
297 308
298 struct icmsg_hdr *icmsghdrp; 309 struct icmsg_hdr *icmsghdrp;
299 struct icmsg_negotiate *negop = NULL;
300 310
301 if (vss_transaction.state > HVUTIL_READY) 311 if (vss_transaction.state > HVUTIL_READY)
302 return; 312 return;
@@ -309,9 +319,15 @@ void hv_vss_onchannelcallback(void *context)
309 sizeof(struct vmbuspipe_hdr)]; 319 sizeof(struct vmbuspipe_hdr)];
310 320
311 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 321 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
312 vmbus_prep_negotiate_resp(icmsghdrp, negop, 322 if (vmbus_prep_negotiate_resp(icmsghdrp,
313 recv_buffer, UTIL_FW_VERSION, 323 recv_buffer, fw_versions, FW_VER_COUNT,
314 VSS_VERSION); 324 vss_versions, VSS_VER_COUNT,
325 NULL, &vss_srv_version)) {
326
327 pr_info("VSS IC version %d.%d\n",
328 vss_srv_version >> 16,
329 vss_srv_version & 0xFFFF);
330 }
315 } else { 331 } else {
316 vss_msg = (struct hv_vss_msg *)&recv_buffer[ 332 vss_msg = (struct hv_vss_msg *)&recv_buffer[
317 sizeof(struct vmbuspipe_hdr) + 333 sizeof(struct vmbuspipe_hdr) +
@@ -345,11 +361,13 @@ static void vss_on_reset(void)
345 if (cancel_delayed_work_sync(&vss_timeout_work)) 361 if (cancel_delayed_work_sync(&vss_timeout_work))
346 vss_respond_to_host(HV_E_FAIL); 362 vss_respond_to_host(HV_E_FAIL);
347 vss_transaction.state = HVUTIL_DEVICE_INIT; 363 vss_transaction.state = HVUTIL_DEVICE_INIT;
364 complete(&release_event);
348} 365}
349 366
350int 367int
351hv_vss_init(struct hv_util_service *srv) 368hv_vss_init(struct hv_util_service *srv)
352{ 369{
370 init_completion(&release_event);
353 if (vmbus_proto_version < VERSION_WIN8_1) { 371 if (vmbus_proto_version < VERSION_WIN8_1) {
354 pr_warn("Integration service 'Backup (volume snapshot)'" 372 pr_warn("Integration service 'Backup (volume snapshot)'"
355 " not supported on this host version.\n"); 373 " not supported on this host version.\n");
@@ -382,4 +400,5 @@ void hv_vss_deinit(void)
382 cancel_delayed_work_sync(&vss_timeout_work); 400 cancel_delayed_work_sync(&vss_timeout_work);
383 cancel_work_sync(&vss_handle_request_work); 401 cancel_work_sync(&vss_handle_request_work);
384 hvutil_transport_destroy(hvt); 402 hvutil_transport_destroy(hvt);
403 wait_for_completion(&release_event);
385} 404}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index e7707747f56d..3042eaa13062 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -27,6 +27,9 @@
27#include <linux/sysctl.h> 27#include <linux/sysctl.h>
28#include <linux/reboot.h> 28#include <linux/reboot.h>
29#include <linux/hyperv.h> 29#include <linux/hyperv.h>
30#include <linux/clockchips.h>
31#include <linux/ptp_clock_kernel.h>
32#include <asm/mshyperv.h>
30 33
31#include "hyperv_vmbus.h" 34#include "hyperv_vmbus.h"
32 35
@@ -57,7 +60,31 @@
57static int sd_srv_version; 60static int sd_srv_version;
58static int ts_srv_version; 61static int ts_srv_version;
59static int hb_srv_version; 62static int hb_srv_version;
60static int util_fw_version; 63
64#define SD_VER_COUNT 2
65static const int sd_versions[] = {
66 SD_VERSION,
67 SD_VERSION_1
68};
69
70#define TS_VER_COUNT 3
71static const int ts_versions[] = {
72 TS_VERSION,
73 TS_VERSION_3,
74 TS_VERSION_1
75};
76
77#define HB_VER_COUNT 2
78static const int hb_versions[] = {
79 HB_VERSION,
80 HB_VERSION_1
81};
82
83#define FW_VER_COUNT 2
84static const int fw_versions[] = {
85 UTIL_FW_VERSION,
86 UTIL_WS2K8_FW_VERSION
87};
61 88
62static void shutdown_onchannelcallback(void *context); 89static void shutdown_onchannelcallback(void *context);
63static struct hv_util_service util_shutdown = { 90static struct hv_util_service util_shutdown = {
@@ -118,7 +145,6 @@ static void shutdown_onchannelcallback(void *context)
118 struct shutdown_msg_data *shutdown_msg; 145 struct shutdown_msg_data *shutdown_msg;
119 146
120 struct icmsg_hdr *icmsghdrp; 147 struct icmsg_hdr *icmsghdrp;
121 struct icmsg_negotiate *negop = NULL;
122 148
123 vmbus_recvpacket(channel, shut_txf_buf, 149 vmbus_recvpacket(channel, shut_txf_buf,
124 PAGE_SIZE, &recvlen, &requestid); 150 PAGE_SIZE, &recvlen, &requestid);
@@ -128,9 +154,14 @@ static void shutdown_onchannelcallback(void *context)
128 sizeof(struct vmbuspipe_hdr)]; 154 sizeof(struct vmbuspipe_hdr)];
129 155
130 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 156 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
131 vmbus_prep_negotiate_resp(icmsghdrp, negop, 157 if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
132 shut_txf_buf, util_fw_version, 158 fw_versions, FW_VER_COUNT,
133 sd_srv_version); 159 sd_versions, SD_VER_COUNT,
160 NULL, &sd_srv_version)) {
161 pr_info("Shutdown IC version %d.%d\n",
162 sd_srv_version >> 16,
163 sd_srv_version & 0xFFFF);
164 }
134 } else { 165 } else {
135 shutdown_msg = 166 shutdown_msg =
136 (struct shutdown_msg_data *)&shut_txf_buf[ 167 (struct shutdown_msg_data *)&shut_txf_buf[
@@ -181,31 +212,17 @@ struct adj_time_work {
181 212
182static void hv_set_host_time(struct work_struct *work) 213static void hv_set_host_time(struct work_struct *work)
183{ 214{
184 struct adj_time_work *wrk; 215 struct adj_time_work *wrk;
185 s64 host_tns; 216 struct timespec64 host_ts;
186 u64 newtime; 217 u64 reftime, newtime;
187 struct timespec host_ts;
188 218
189 wrk = container_of(work, struct adj_time_work, work); 219 wrk = container_of(work, struct adj_time_work, work);
190 220
191 newtime = wrk->host_time; 221 reftime = hyperv_cs->read(hyperv_cs);
192 if (ts_srv_version > TS_VERSION_3) { 222 newtime = wrk->host_time + (reftime - wrk->ref_time);
193 /* 223 host_ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
194 * Some latency has been introduced since Hyper-V generated
195 * its time sample. Take that latency into account before
196 * using TSC reference time sample from Hyper-V.
197 *
198 * This sample is given by TimeSync v4 and above hosts.
199 */
200 u64 current_tick;
201
202 rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
203 newtime += (current_tick - wrk->ref_time);
204 }
205 host_tns = (newtime - WLTIMEDELTA) * 100;
206 host_ts = ns_to_timespec(host_tns);
207 224
208 do_settimeofday(&host_ts); 225 do_settimeofday64(&host_ts);
209} 226}
210 227
211/* 228/*
@@ -222,22 +239,60 @@ static void hv_set_host_time(struct work_struct *work)
222 * to discipline the clock. 239 * to discipline the clock.
223 */ 240 */
224static struct adj_time_work wrk; 241static struct adj_time_work wrk;
225static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 flags) 242
243/*
244 * The last time sample, received from the host. PTP device responds to
245 * requests by using this data and the current partition-wide time reference
246 * count.
247 */
248static struct {
249 u64 host_time;
250 u64 ref_time;
251 struct system_time_snapshot snap;
252 spinlock_t lock;
253} host_ts;
254
255static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
226{ 256{
257 unsigned long flags;
258 u64 cur_reftime;
227 259
228 /* 260 /*
229 * This check is safe since we are executing in the 261 * This check is safe since we are executing in the
230 * interrupt context and time synch messages arre always 262 * interrupt context and time synch messages are always
231 * delivered on the same CPU. 263 * delivered on the same CPU.
232 */ 264 */
233 if (work_pending(&wrk.work)) 265 if (adj_flags & ICTIMESYNCFLAG_SYNC) {
234 return; 266 /* Queue a job to do do_settimeofday64() */
235 267 if (work_pending(&wrk.work))
236 wrk.host_time = hosttime; 268 return;
237 wrk.ref_time = reftime; 269
238 wrk.flags = flags; 270 wrk.host_time = hosttime;
239 if ((flags & (ICTIMESYNCFLAG_SYNC | ICTIMESYNCFLAG_SAMPLE)) != 0) { 271 wrk.ref_time = reftime;
272 wrk.flags = adj_flags;
240 schedule_work(&wrk.work); 273 schedule_work(&wrk.work);
274 } else {
275 /*
276 * Save the adjusted time sample from the host and the snapshot
277 * of the current system time for PTP device.
278 */
279 spin_lock_irqsave(&host_ts.lock, flags);
280
281 cur_reftime = hyperv_cs->read(hyperv_cs);
282 host_ts.host_time = hosttime;
283 host_ts.ref_time = cur_reftime;
284 ktime_get_snapshot(&host_ts.snap);
285
286 /*
287 * TimeSync v4 messages contain reference time (guest's Hyper-V
288 * clocksource read when the time sample was generated), we can
289 * improve the precision by adding the delta between now and the
290 * time of generation.
291 */
292 if (ts_srv_version > TS_VERSION_3)
293 host_ts.host_time += (cur_reftime - reftime);
294
295 spin_unlock_irqrestore(&host_ts.lock, flags);
241 } 296 }
242} 297}
243 298
@@ -253,7 +308,6 @@ static void timesync_onchannelcallback(void *context)
253 struct ictimesync_data *timedatap; 308 struct ictimesync_data *timedatap;
254 struct ictimesync_ref_data *refdata; 309 struct ictimesync_ref_data *refdata;
255 u8 *time_txf_buf = util_timesynch.recv_buffer; 310 u8 *time_txf_buf = util_timesynch.recv_buffer;
256 struct icmsg_negotiate *negop = NULL;
257 311
258 vmbus_recvpacket(channel, time_txf_buf, 312 vmbus_recvpacket(channel, time_txf_buf,
259 PAGE_SIZE, &recvlen, &requestid); 313 PAGE_SIZE, &recvlen, &requestid);
@@ -263,12 +317,14 @@ static void timesync_onchannelcallback(void *context)
263 sizeof(struct vmbuspipe_hdr)]; 317 sizeof(struct vmbuspipe_hdr)];
264 318
265 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 319 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
266 vmbus_prep_negotiate_resp(icmsghdrp, negop, 320 if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
267 time_txf_buf, 321 fw_versions, FW_VER_COUNT,
268 util_fw_version, 322 ts_versions, TS_VER_COUNT,
269 ts_srv_version); 323 NULL, &ts_srv_version)) {
270 pr_info("Using TimeSync version %d.%d\n", 324 pr_info("TimeSync IC version %d.%d\n",
271 ts_srv_version >> 16, ts_srv_version & 0xFFFF); 325 ts_srv_version >> 16,
326 ts_srv_version & 0xFFFF);
327 }
272 } else { 328 } else {
273 if (ts_srv_version > TS_VERSION_3) { 329 if (ts_srv_version > TS_VERSION_3) {
274 refdata = (struct ictimesync_ref_data *) 330 refdata = (struct ictimesync_ref_data *)
@@ -312,7 +368,6 @@ static void heartbeat_onchannelcallback(void *context)
312 struct icmsg_hdr *icmsghdrp; 368 struct icmsg_hdr *icmsghdrp;
313 struct heartbeat_msg_data *heartbeat_msg; 369 struct heartbeat_msg_data *heartbeat_msg;
314 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; 370 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
315 struct icmsg_negotiate *negop = NULL;
316 371
317 while (1) { 372 while (1) {
318 373
@@ -326,9 +381,16 @@ static void heartbeat_onchannelcallback(void *context)
326 sizeof(struct vmbuspipe_hdr)]; 381 sizeof(struct vmbuspipe_hdr)];
327 382
328 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 383 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
329 vmbus_prep_negotiate_resp(icmsghdrp, negop, 384 if (vmbus_prep_negotiate_resp(icmsghdrp,
330 hbeat_txf_buf, util_fw_version, 385 hbeat_txf_buf,
331 hb_srv_version); 386 fw_versions, FW_VER_COUNT,
387 hb_versions, HB_VER_COUNT,
388 NULL, &hb_srv_version)) {
389
390 pr_info("Heartbeat IC version %d.%d\n",
391 hb_srv_version >> 16,
392 hb_srv_version & 0xFFFF);
393 }
332 } else { 394 } else {
333 heartbeat_msg = 395 heartbeat_msg =
334 (struct heartbeat_msg_data *)&hbeat_txf_buf[ 396 (struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -373,38 +435,10 @@ static int util_probe(struct hv_device *dev,
373 * Turn off batched reading for all util drivers before we open the 435 * Turn off batched reading for all util drivers before we open the
374 * channel. 436 * channel.
375 */ 437 */
376 438 set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
377 set_channel_read_state(dev->channel, false);
378 439
379 hv_set_drvdata(dev, srv); 440 hv_set_drvdata(dev, srv);
380 441
381 /*
382 * Based on the host; initialize the framework and
383 * service version numbers we will negotiate.
384 */
385 switch (vmbus_proto_version) {
386 case (VERSION_WS2008):
387 util_fw_version = UTIL_WS2K8_FW_VERSION;
388 sd_srv_version = SD_VERSION_1;
389 ts_srv_version = TS_VERSION_1;
390 hb_srv_version = HB_VERSION_1;
391 break;
392 case VERSION_WIN7:
393 case VERSION_WIN8:
394 case VERSION_WIN8_1:
395 util_fw_version = UTIL_FW_VERSION;
396 sd_srv_version = SD_VERSION;
397 ts_srv_version = TS_VERSION_3;
398 hb_srv_version = HB_VERSION;
399 break;
400 case VERSION_WIN10:
401 default:
402 util_fw_version = UTIL_FW_VERSION;
403 sd_srv_version = SD_VERSION;
404 ts_srv_version = TS_VERSION;
405 hb_srv_version = HB_VERSION;
406 }
407
408 ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0, 442 ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
409 srv->util_cb, dev->channel); 443 srv->util_cb, dev->channel);
410 if (ret) 444 if (ret)
@@ -470,14 +504,113 @@ static struct hv_driver util_drv = {
470 .remove = util_remove, 504 .remove = util_remove,
471}; 505};
472 506
507static int hv_ptp_enable(struct ptp_clock_info *info,
508 struct ptp_clock_request *request, int on)
509{
510 return -EOPNOTSUPP;
511}
512
513static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
514{
515 return -EOPNOTSUPP;
516}
517
518static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
519{
520 return -EOPNOTSUPP;
521}
522static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
523{
524 return -EOPNOTSUPP;
525}
526
527static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
528{
529 unsigned long flags;
530 u64 newtime, reftime;
531
532 spin_lock_irqsave(&host_ts.lock, flags);
533 reftime = hyperv_cs->read(hyperv_cs);
534 newtime = host_ts.host_time + (reftime - host_ts.ref_time);
535 *ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
536 spin_unlock_irqrestore(&host_ts.lock, flags);
537
538 return 0;
539}
540
541static int hv_ptp_get_syncdevicetime(ktime_t *device,
542 struct system_counterval_t *system,
543 void *ctx)
544{
545 system->cs = hyperv_cs;
546 system->cycles = host_ts.ref_time;
547 *device = ns_to_ktime((host_ts.host_time - WLTIMEDELTA) * 100);
548
549 return 0;
550}
551
552static int hv_ptp_getcrosststamp(struct ptp_clock_info *ptp,
553 struct system_device_crosststamp *xtstamp)
554{
555 unsigned long flags;
556 int ret;
557
558 spin_lock_irqsave(&host_ts.lock, flags);
559
560 /*
561 * host_ts contains the last time sample from the host and the snapshot
562 * of system time. We don't need to calculate the time delta between
563 * the reception and now as get_device_system_crosststamp() does the
564 * required interpolation.
565 */
566 ret = get_device_system_crosststamp(hv_ptp_get_syncdevicetime,
567 NULL, &host_ts.snap, xtstamp);
568
569 spin_unlock_irqrestore(&host_ts.lock, flags);
570
571 return ret;
572}
573
574static struct ptp_clock_info ptp_hyperv_info = {
575 .name = "hyperv",
576 .enable = hv_ptp_enable,
577 .adjtime = hv_ptp_adjtime,
578 .adjfreq = hv_ptp_adjfreq,
579 .gettime64 = hv_ptp_gettime,
580 .getcrosststamp = hv_ptp_getcrosststamp,
581 .settime64 = hv_ptp_settime,
582 .owner = THIS_MODULE,
583};
584
585static struct ptp_clock *hv_ptp_clock;
586
473static int hv_timesync_init(struct hv_util_service *srv) 587static int hv_timesync_init(struct hv_util_service *srv)
474{ 588{
589 /* TimeSync requires Hyper-V clocksource. */
590 if (!hyperv_cs)
591 return -ENODEV;
592
475 INIT_WORK(&wrk.work, hv_set_host_time); 593 INIT_WORK(&wrk.work, hv_set_host_time);
594
595 /*
596 * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
597 * disabled but the driver is still useful without the PTP device
598 * as it still handles the ICTIMESYNCFLAG_SYNC case.
599 */
600 hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
601 if (IS_ERR_OR_NULL(hv_ptp_clock)) {
602 pr_err("cannot register PTP clock: %ld\n",
603 PTR_ERR(hv_ptp_clock));
604 hv_ptp_clock = NULL;
605 }
606
476 return 0; 607 return 0;
477} 608}
478 609
479static void hv_timesync_deinit(void) 610static void hv_timesync_deinit(void)
480{ 611{
612 if (hv_ptp_clock)
613 ptp_clock_unregister(hv_ptp_clock);
481 cancel_work_sync(&wrk.work); 614 cancel_work_sync(&wrk.work);
482} 615}
483 616
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 0675b395ce5c..884f83bba1ab 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -29,6 +29,7 @@
29#include <asm/sync_bitops.h> 29#include <asm/sync_bitops.h>
30#include <linux/atomic.h> 30#include <linux/atomic.h>
31#include <linux/hyperv.h> 31#include <linux/hyperv.h>
32#include <linux/interrupt.h>
32 33
33/* 34/*
34 * Timeout for services such as KVP and fcopy. 35 * Timeout for services such as KVP and fcopy.
@@ -40,95 +41,9 @@
40 */ 41 */
41#define HV_UTIL_NEGO_TIMEOUT 55 42#define HV_UTIL_NEGO_TIMEOUT 55
42 43
43/*
44 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
45 * is set by CPUID(HVCPUID_VERSION_FEATURES).
46 */
47enum hv_cpuid_function {
48 HVCPUID_VERSION_FEATURES = 0x00000001,
49 HVCPUID_VENDOR_MAXFUNCTION = 0x40000000,
50 HVCPUID_INTERFACE = 0x40000001,
51
52 /*
53 * The remaining functions depend on the value of
54 * HVCPUID_INTERFACE
55 */
56 HVCPUID_VERSION = 0x40000002,
57 HVCPUID_FEATURES = 0x40000003,
58 HVCPUID_ENLIGHTENMENT_INFO = 0x40000004,
59 HVCPUID_IMPLEMENTATION_LIMITS = 0x40000005,
60};
61
62#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE 0x400
63
64#define HV_X64_MSR_CRASH_P0 0x40000100
65#define HV_X64_MSR_CRASH_P1 0x40000101
66#define HV_X64_MSR_CRASH_P2 0x40000102
67#define HV_X64_MSR_CRASH_P3 0x40000103
68#define HV_X64_MSR_CRASH_P4 0x40000104
69#define HV_X64_MSR_CRASH_CTL 0x40000105
70
71#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
72
73/* Define version of the synthetic interrupt controller. */
74#define HV_SYNIC_VERSION (1)
75
76#define HV_ANY_VP (0xFFFFFFFF)
77
78/* Define synthetic interrupt controller flag constants. */ 44/* Define synthetic interrupt controller flag constants. */
79#define HV_EVENT_FLAGS_COUNT (256 * 8) 45#define HV_EVENT_FLAGS_COUNT (256 * 8)
80#define HV_EVENT_FLAGS_BYTE_COUNT (256) 46#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
81#define HV_EVENT_FLAGS_DWORD_COUNT (256 / sizeof(u32))
82
83/* Define invalid partition identifier. */
84#define HV_PARTITION_ID_INVALID ((u64)0x0)
85
86/* Define port type. */
87enum hv_port_type {
88 HVPORT_MSG = 1,
89 HVPORT_EVENT = 2,
90 HVPORT_MONITOR = 3
91};
92
93/* Define port information structure. */
94struct hv_port_info {
95 enum hv_port_type port_type;
96 u32 padding;
97 union {
98 struct {
99 u32 target_sint;
100 u32 target_vp;
101 u64 rsvdz;
102 } message_port_info;
103 struct {
104 u32 target_sint;
105 u32 target_vp;
106 u16 base_flag_number;
107 u16 flag_count;
108 u32 rsvdz;
109 } event_port_info;
110 struct {
111 u64 monitor_address;
112 u64 rsvdz;
113 } monitor_port_info;
114 };
115};
116
117struct hv_connection_info {
118 enum hv_port_type port_type;
119 u32 padding;
120 union {
121 struct {
122 u64 rsvdz;
123 } message_connection_info;
124 struct {
125 u64 rsvdz;
126 } event_connection_info;
127 struct {
128 u64 monitor_address;
129 } monitor_connection_info;
130 };
131};
132 47
133/* 48/*
134 * Timer configuration register. 49 * Timer configuration register.
@@ -146,18 +61,10 @@ union hv_timer_config {
146 }; 61 };
147}; 62};
148 63
149/* Define the number of message buffers associated with each port. */
150#define HV_PORT_MESSAGE_BUFFER_COUNT (16)
151 64
152/* Define the synthetic interrupt controller event flags format. */ 65/* Define the synthetic interrupt controller event flags format. */
153union hv_synic_event_flags { 66union hv_synic_event_flags {
154 u8 flags8[HV_EVENT_FLAGS_BYTE_COUNT]; 67 unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
155 u32 flags32[HV_EVENT_FLAGS_DWORD_COUNT];
156};
157
158/* Define the synthetic interrupt flags page layout. */
159struct hv_synic_event_flags_page {
160 union hv_synic_event_flags sintevent_flags[HV_SYNIC_SINT_COUNT];
161}; 68};
162 69
163/* Define SynIC control register. */ 70/* Define SynIC control register. */
@@ -261,6 +168,8 @@ struct hv_monitor_page {
261 u8 rsvdz4[1984]; 168 u8 rsvdz4[1984];
262}; 169};
263 170
171#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)
172
264/* Definition of the hv_post_message hypercall input structure. */ 173/* Definition of the hv_post_message hypercall input structure. */
265struct hv_input_post_message { 174struct hv_input_post_message {
266 union hv_connection_id connectionid; 175 union hv_connection_id connectionid;
@@ -270,56 +179,6 @@ struct hv_input_post_message {
270 u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; 179 u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
271}; 180};
272 181
273/*
274 * Versioning definitions used for guests reporting themselves to the
275 * hypervisor, and visa versa.
276 */
277
278/* Version info reported by guest OS's */
279enum hv_guest_os_vendor {
280 HVGUESTOS_VENDOR_MICROSOFT = 0x0001
281};
282
283enum hv_guest_os_microsoft_ids {
284 HVGUESTOS_MICROSOFT_UNDEFINED = 0x00,
285 HVGUESTOS_MICROSOFT_MSDOS = 0x01,
286 HVGUESTOS_MICROSOFT_WINDOWS3X = 0x02,
287 HVGUESTOS_MICROSOFT_WINDOWS9X = 0x03,
288 HVGUESTOS_MICROSOFT_WINDOWSNT = 0x04,
289 HVGUESTOS_MICROSOFT_WINDOWSCE = 0x05
290};
291
292/*
293 * Declare the MSR used to identify the guest OS.
294 */
295#define HV_X64_MSR_GUEST_OS_ID 0x40000000
296
297union hv_x64_msr_guest_os_id_contents {
298 u64 as_uint64;
299 struct {
300 u64 build_number:16;
301 u64 service_version:8; /* Service Pack, etc. */
302 u64 minor_version:8;
303 u64 major_version:8;
304 u64 os_id:8; /* enum hv_guest_os_microsoft_ids (if Vendor=MS) */
305 u64 vendor_id:16; /* enum hv_guest_os_vendor */
306 };
307};
308
309/*
310 * Declare the MSR used to setup pages used to communicate with the hypervisor.
311 */
312#define HV_X64_MSR_HYPERCALL 0x40000001
313
314union hv_x64_msr_hypercall_contents {
315 u64 as_uint64;
316 struct {
317 u64 enable:1;
318 u64 reserved:11;
319 u64 guest_physical_address:52;
320 };
321};
322
323 182
324enum { 183enum {
325 VMBUS_MESSAGE_CONNECTION_ID = 1, 184 VMBUS_MESSAGE_CONNECTION_ID = 1,
@@ -331,111 +190,44 @@ enum {
331 VMBUS_MESSAGE_SINT = 2, 190 VMBUS_MESSAGE_SINT = 2,
332}; 191};
333 192
334/* #defines */
335
336#define HV_PRESENT_BIT 0x80000000
337
338/*
339 * The guest OS needs to register the guest ID with the hypervisor.
340 * The guest ID is a 64 bit entity and the structure of this ID is
341 * specified in the Hyper-V specification:
342 *
343 * http://msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
344 *
345 * While the current guideline does not specify how Linux guest ID(s)
346 * need to be generated, our plan is to publish the guidelines for
347 * Linux and other guest operating systems that currently are hosted
348 * on Hyper-V. The implementation here conforms to this yet
349 * unpublished guidelines.
350 *
351 *
352 * Bit(s)
353 * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
354 * 62:56 - Os Type; Linux is 0x100
355 * 55:48 - Distro specific identification
356 * 47:16 - Linux kernel version number
357 * 15:0 - Distro specific identification
358 *
359 *
360 */
361
362#define HV_LINUX_VENDOR_ID 0x8100
363
364/* 193/*
365 * Generate the guest ID based on the guideline described above. 194 * Per cpu state for channel handling
366 */ 195 */
196struct hv_per_cpu_context {
197 void *synic_message_page;
198 void *synic_event_page;
199 /*
200 * buffer to post messages to the host.
201 */
202 void *post_msg_page;
367 203
368static inline __u64 generate_guest_id(__u8 d_info1, __u32 kernel_version, 204 /*
369 __u16 d_info2) 205 * Starting with win8, we can take channel interrupts on any CPU;
370{ 206 * we will manage the tasklet that handles events messages on a per CPU
371 __u64 guest_id = 0; 207 * basis.
372 208 */
373 guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48); 209 struct tasklet_struct msg_dpc;
374 guest_id |= (((__u64)(d_info1)) << 48);
375 guest_id |= (((__u64)(kernel_version)) << 16);
376 guest_id |= ((__u64)(d_info2));
377
378 return guest_id;
379}
380
381
382#define HV_CPU_POWER_MANAGEMENT (1 << 0)
383#define HV_RECOMMENDATIONS_MAX 4
384
385#define HV_X64_MAX 5
386#define HV_CAPS_MAX 8
387
388
389#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)
390
391
392/* Service definitions */
393
394#define HV_SERVICE_PARENT_PORT (0)
395#define HV_SERVICE_PARENT_CONNECTION (0)
396
397#define HV_SERVICE_CONNECT_RESPONSE_SUCCESS (0)
398#define HV_SERVICE_CONNECT_RESPONSE_INVALID_PARAMETER (1)
399#define HV_SERVICE_CONNECT_RESPONSE_UNKNOWN_SERVICE (2)
400#define HV_SERVICE_CONNECT_RESPONSE_CONNECTION_REJECTED (3)
401
402#define HV_SERVICE_CONNECT_REQUEST_MESSAGE_ID (1)
403#define HV_SERVICE_CONNECT_RESPONSE_MESSAGE_ID (2)
404#define HV_SERVICE_DISCONNECT_REQUEST_MESSAGE_ID (3)
405#define HV_SERVICE_DISCONNECT_RESPONSE_MESSAGE_ID (4)
406#define HV_SERVICE_MAX_MESSAGE_ID (4)
407
408#define HV_SERVICE_PROTOCOL_VERSION (0x0010)
409#define HV_CONNECT_PAYLOAD_BYTE_COUNT 64
410
411/* #define VMBUS_REVISION_NUMBER 6 */
412
413/* Our local vmbus's port and connection id. Anything >0 is fine */
414/* #define VMBUS_PORT_ID 11 */
415 210
416/* 628180B8-308D-4c5e-B7DB-1BEB62E62EF4 */ 211 /*
417static const uuid_le VMBUS_SERVICE_ID = { 212 * To optimize the mapping of relid to channel, maintain
418 .b = { 213 * per-cpu list of the channels based on their CPU affinity.
419 0xb8, 0x80, 0x81, 0x62, 0x8d, 0x30, 0x5e, 0x4c, 214 */
420 0xb7, 0xdb, 0x1b, 0xeb, 0x62, 0xe6, 0x2e, 0xf4 215 struct list_head chan_list;
421 }, 216 struct clock_event_device *clk_evt;
422}; 217};
423 218
424
425
426struct hv_context { 219struct hv_context {
427 /* We only support running on top of Hyper-V 220 /* We only support running on top of Hyper-V
428 * So at this point this really can only contain the Hyper-V ID 221 * So at this point this really can only contain the Hyper-V ID
429 */ 222 */
430 u64 guestid; 223 u64 guestid;
431 224
432 void *hypercall_page;
433 void *tsc_page; 225 void *tsc_page;
434 226
435 bool synic_initialized; 227 bool synic_initialized;
436 228
437 void *synic_message_page[NR_CPUS]; 229 struct hv_per_cpu_context __percpu *cpu_context;
438 void *synic_event_page[NR_CPUS]; 230
439 /* 231 /*
440 * Hypervisor's notion of virtual processor ID is different from 232 * Hypervisor's notion of virtual processor ID is different from
441 * Linux' notion of CPU ID. This information can only be retrieved 233 * Linux' notion of CPU ID. This information can only be retrieved
@@ -446,26 +238,7 @@ struct hv_context {
446 * Linux cpuid 'a'. 238 * Linux cpuid 'a'.
447 */ 239 */
448 u32 vp_index[NR_CPUS]; 240 u32 vp_index[NR_CPUS];
449 /* 241
450 * Starting with win8, we can take channel interrupts on any CPU;
451 * we will manage the tasklet that handles events messages on a per CPU
452 * basis.
453 */
454 struct tasklet_struct *event_dpc[NR_CPUS];
455 struct tasklet_struct *msg_dpc[NR_CPUS];
456 /*
457 * To optimize the mapping of relid to channel, maintain
458 * per-cpu list of the channels based on their CPU affinity.
459 */
460 struct list_head percpu_list[NR_CPUS];
461 /*
462 * buffer to post messages to the host.
463 */
464 void *post_msg_page[NR_CPUS];
465 /*
466 * Support PV clockevent device.
467 */
468 struct clock_event_device *clk_evt[NR_CPUS];
469 /* 242 /*
470 * To manage allocations in a NUMA node. 243 * To manage allocations in a NUMA node.
471 * Array indexed by numa node ID. 244 * Array indexed by numa node ID.
@@ -475,14 +248,6 @@ struct hv_context {
475 248
476extern struct hv_context hv_context; 249extern struct hv_context hv_context;
477 250
478struct ms_hyperv_tsc_page {
479 volatile u32 tsc_sequence;
480 u32 reserved1;
481 volatile u64 tsc_scale;
482 volatile s64 tsc_offset;
483 u64 reserved2[509];
484};
485
486struct hv_ring_buffer_debug_info { 251struct hv_ring_buffer_debug_info {
487 u32 current_interrupt_mask; 252 u32 current_interrupt_mask;
488 u32 current_read_index; 253 u32 current_read_index;
@@ -495,8 +260,6 @@ struct hv_ring_buffer_debug_info {
495 260
496extern int hv_init(void); 261extern int hv_init(void);
497 262
498extern void hv_cleanup(bool crash);
499
500extern int hv_post_message(union hv_connection_id connection_id, 263extern int hv_post_message(union hv_connection_id connection_id,
501 enum hv_message_type message_type, 264 enum hv_message_type message_type,
502 void *payload, size_t payload_size); 265 void *payload, size_t payload_size);
@@ -505,20 +268,12 @@ extern int hv_synic_alloc(void);
505 268
506extern void hv_synic_free(void); 269extern void hv_synic_free(void);
507 270
508extern void hv_synic_init(void *irqarg); 271extern int hv_synic_init(unsigned int cpu);
509 272
510extern void hv_synic_cleanup(void *arg); 273extern int hv_synic_cleanup(unsigned int cpu);
511 274
512extern void hv_synic_clockevents_cleanup(void); 275extern void hv_synic_clockevents_cleanup(void);
513 276
514/*
515 * Host version information.
516 */
517extern unsigned int host_info_eax;
518extern unsigned int host_info_ebx;
519extern unsigned int host_info_ecx;
520extern unsigned int host_info_edx;
521
522/* Interface */ 277/* Interface */
523 278
524 279
@@ -528,20 +283,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
528void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info); 283void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
529 284
530int hv_ringbuffer_write(struct vmbus_channel *channel, 285int hv_ringbuffer_write(struct vmbus_channel *channel,
531 struct kvec *kv_list, 286 const struct kvec *kv_list, u32 kv_count);
532 u32 kv_count, bool lock,
533 bool kick_q);
534 287
535int hv_ringbuffer_read(struct vmbus_channel *channel, 288int hv_ringbuffer_read(struct vmbus_channel *channel,
536 void *buffer, u32 buflen, u32 *buffer_actual_len, 289 void *buffer, u32 buflen, u32 *buffer_actual_len,
537 u64 *requestid, bool raw); 290 u64 *requestid, bool raw);
538 291
539void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, 292void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
540 struct hv_ring_buffer_debug_info *debug_info); 293 struct hv_ring_buffer_debug_info *debug_info);
541
542void hv_begin_read(struct hv_ring_buffer_info *rbi);
543
544u32 hv_end_read(struct hv_ring_buffer_info *rbi);
545 294
546/* 295/*
547 * Maximum channels is determined by the size of the interrupt page 296 * Maximum channels is determined by the size of the interrupt page
@@ -608,6 +357,11 @@ struct vmbus_msginfo {
608 357
609extern struct vmbus_connection vmbus_connection; 358extern struct vmbus_connection vmbus_connection;
610 359
360static inline void vmbus_send_interrupt(u32 relid)
361{
362 sync_set_bit(relid, vmbus_connection.send_int_page);
363}
364
611enum vmbus_message_handler_type { 365enum vmbus_message_handler_type {
612 /* The related handler can sleep. */ 366 /* The related handler can sleep. */
613 VMHT_BLOCKING = 0, 367 VMHT_BLOCKING = 0,
@@ -625,41 +379,6 @@ struct vmbus_channel_message_table_entry {
625extern struct vmbus_channel_message_table_entry 379extern struct vmbus_channel_message_table_entry
626 channel_message_table[CHANNELMSG_COUNT]; 380 channel_message_table[CHANNELMSG_COUNT];
627 381
628/* Free the message slot and signal end-of-message if required */
629static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
630{
631 /*
632 * On crash we're reading some other CPU's message page and we need
633 * to be careful: this other CPU may already had cleared the header
634 * and the host may already had delivered some other message there.
635 * In case we blindly write msg->header.message_type we're going
636 * to lose it. We can still lose a message of the same type but
637 * we count on the fact that there can only be one
638 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
639 * on crash.
640 */
641 if (cmpxchg(&msg->header.message_type, old_msg_type,
642 HVMSG_NONE) != old_msg_type)
643 return;
644
645 /*
646 * Make sure the write to MessageType (ie set to
647 * HVMSG_NONE) happens before we read the
648 * MessagePending and EOMing. Otherwise, the EOMing
649 * will not deliver any more messages since there is
650 * no empty slot
651 */
652 mb();
653
654 if (msg->header.message_flags.msg_pending) {
655 /*
656 * This will cause message queue rescan to
657 * possibly deliver another msg from the
658 * hypervisor
659 */
660 wrmsrl(HV_X64_MSR_EOM, 0);
661 }
662}
663 382
664/* General vmbus interface */ 383/* General vmbus interface */
665 384
@@ -670,10 +389,6 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
670int vmbus_device_register(struct hv_device *child_device_obj); 389int vmbus_device_register(struct hv_device *child_device_obj);
671void vmbus_device_unregister(struct hv_device *device_obj); 390void vmbus_device_unregister(struct hv_device *device_obj);
672 391
673/* static void */
674/* VmbusChildDeviceDestroy( */
675/* struct hv_device *); */
676
677struct vmbus_channel *relid2channel(u32 relid); 392struct vmbus_channel *relid2channel(u32 relid);
678 393
679void vmbus_free_channels(void); 394void vmbus_free_channels(void);
@@ -683,7 +398,7 @@ void vmbus_free_channels(void);
683int vmbus_connect(void); 398int vmbus_connect(void);
684void vmbus_disconnect(void); 399void vmbus_disconnect(void);
685 400
686int vmbus_post_msg(void *buffer, size_t buflen); 401int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);
687 402
688void vmbus_on_event(unsigned long data); 403void vmbus_on_event(unsigned long data);
689void vmbus_on_msg_dpc(unsigned long data); 404void vmbus_on_msg_dpc(unsigned long data);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 308dbda700eb..87799e81af97 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -32,26 +32,6 @@
32 32
33#include "hyperv_vmbus.h" 33#include "hyperv_vmbus.h"
34 34
35void hv_begin_read(struct hv_ring_buffer_info *rbi)
36{
37 rbi->ring_buffer->interrupt_mask = 1;
38 virt_mb();
39}
40
41u32 hv_end_read(struct hv_ring_buffer_info *rbi)
42{
43
44 rbi->ring_buffer->interrupt_mask = 0;
45 virt_mb();
46
47 /*
48 * Now check to see if the ring buffer is still empty.
49 * If it is not, we raced and we need to process new
50 * incoming messages.
51 */
52 return hv_get_bytes_to_read(rbi);
53}
54
55/* 35/*
56 * When we write to the ring buffer, check if the host needs to 36 * When we write to the ring buffer, check if the host needs to
57 * be signaled. Here is the details of this protocol: 37 * be signaled. Here is the details of this protocol:
@@ -77,8 +57,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
77 * host logic is fixed. 57 * host logic is fixed.
78 */ 58 */
79 59
80static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel, 60static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
81 bool kick_q)
82{ 61{
83 struct hv_ring_buffer_info *rbi = &channel->outbound; 62 struct hv_ring_buffer_info *rbi = &channel->outbound;
84 63
@@ -117,11 +96,9 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
117 96
118/* Get the next read location for the specified ring buffer. */ 97/* Get the next read location for the specified ring buffer. */
119static inline u32 98static inline u32
120hv_get_next_read_location(struct hv_ring_buffer_info *ring_info) 99hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
121{ 100{
122 u32 next = ring_info->ring_buffer->read_index; 101 return ring_info->ring_buffer->read_index;
123
124 return next;
125} 102}
126 103
127/* 104/*
@@ -129,13 +106,14 @@ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
129 * This allows the caller to skip. 106 * This allows the caller to skip.
130 */ 107 */
131static inline u32 108static inline u32
132hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info, 109hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
133 u32 offset) 110 u32 offset)
134{ 111{
135 u32 next = ring_info->ring_buffer->read_index; 112 u32 next = ring_info->ring_buffer->read_index;
136 113
137 next += offset; 114 next += offset;
138 next %= ring_info->ring_datasize; 115 if (next >= ring_info->ring_datasize)
116 next -= ring_info->ring_datasize;
139 117
140 return next; 118 return next;
141} 119}
@@ -151,7 +129,7 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
151 129
152/* Get the size of the ring buffer. */ 130/* Get the size of the ring buffer. */
153static inline u32 131static inline u32
154hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info) 132hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
155{ 133{
156 return ring_info->ring_datasize; 134 return ring_info->ring_datasize;
157} 135}
@@ -168,7 +146,7 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
168 * Assume there is enough room. Handles wrap-around in src case only!! 146 * Assume there is enough room. Handles wrap-around in src case only!!
169 */ 147 */
170static u32 hv_copyfrom_ringbuffer( 148static u32 hv_copyfrom_ringbuffer(
171 struct hv_ring_buffer_info *ring_info, 149 const struct hv_ring_buffer_info *ring_info,
172 void *dest, 150 void *dest,
173 u32 destlen, 151 u32 destlen,
174 u32 start_read_offset) 152 u32 start_read_offset)
@@ -179,7 +157,8 @@ static u32 hv_copyfrom_ringbuffer(
179 memcpy(dest, ring_buffer + start_read_offset, destlen); 157 memcpy(dest, ring_buffer + start_read_offset, destlen);
180 158
181 start_read_offset += destlen; 159 start_read_offset += destlen;
182 start_read_offset %= ring_buffer_size; 160 if (start_read_offset >= ring_buffer_size)
161 start_read_offset -= ring_buffer_size;
183 162
184 return start_read_offset; 163 return start_read_offset;
185} 164}
@@ -192,7 +171,7 @@ static u32 hv_copyfrom_ringbuffer(
192static u32 hv_copyto_ringbuffer( 171static u32 hv_copyto_ringbuffer(
193 struct hv_ring_buffer_info *ring_info, 172 struct hv_ring_buffer_info *ring_info,
194 u32 start_write_offset, 173 u32 start_write_offset,
195 void *src, 174 const void *src,
196 u32 srclen) 175 u32 srclen)
197{ 176{
198 void *ring_buffer = hv_get_ring_buffer(ring_info); 177 void *ring_buffer = hv_get_ring_buffer(ring_info);
@@ -201,14 +180,15 @@ static u32 hv_copyto_ringbuffer(
201 memcpy(ring_buffer + start_write_offset, src, srclen); 180 memcpy(ring_buffer + start_write_offset, src, srclen);
202 181
203 start_write_offset += srclen; 182 start_write_offset += srclen;
204 start_write_offset %= ring_buffer_size; 183 if (start_write_offset >= ring_buffer_size)
184 start_write_offset -= ring_buffer_size;
205 185
206 return start_write_offset; 186 return start_write_offset;
207} 187}
208 188
209/* Get various debug metrics for the specified ring buffer. */ 189/* Get various debug metrics for the specified ring buffer. */
210void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, 190void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
211 struct hv_ring_buffer_debug_info *debug_info) 191 struct hv_ring_buffer_debug_info *debug_info)
212{ 192{
213 u32 bytes_avail_towrite; 193 u32 bytes_avail_towrite;
214 u32 bytes_avail_toread; 194 u32 bytes_avail_toread;
@@ -285,8 +265,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
285 265
286/* Write to the ring buffer. */ 266/* Write to the ring buffer. */
287int hv_ringbuffer_write(struct vmbus_channel *channel, 267int hv_ringbuffer_write(struct vmbus_channel *channel,
288 struct kvec *kv_list, u32 kv_count, bool lock, 268 const struct kvec *kv_list, u32 kv_count)
289 bool kick_q)
290{ 269{
291 int i = 0; 270 int i = 0;
292 u32 bytes_avail_towrite; 271 u32 bytes_avail_towrite;
@@ -298,13 +277,15 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
298 unsigned long flags = 0; 277 unsigned long flags = 0;
299 struct hv_ring_buffer_info *outring_info = &channel->outbound; 278 struct hv_ring_buffer_info *outring_info = &channel->outbound;
300 279
280 if (channel->rescind)
281 return -ENODEV;
282
301 for (i = 0; i < kv_count; i++) 283 for (i = 0; i < kv_count; i++)
302 totalbytes_towrite += kv_list[i].iov_len; 284 totalbytes_towrite += kv_list[i].iov_len;
303 285
304 totalbytes_towrite += sizeof(u64); 286 totalbytes_towrite += sizeof(u64);
305 287
306 if (lock) 288 spin_lock_irqsave(&outring_info->ring_lock, flags);
307 spin_lock_irqsave(&outring_info->ring_lock, flags);
308 289
309 bytes_avail_towrite = hv_get_bytes_to_write(outring_info); 290 bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
310 291
@@ -314,8 +295,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
314 * is empty since the read index == write index. 295 * is empty since the read index == write index.
315 */ 296 */
316 if (bytes_avail_towrite <= totalbytes_towrite) { 297 if (bytes_avail_towrite <= totalbytes_towrite) {
317 if (lock) 298 spin_unlock_irqrestore(&outring_info->ring_lock, flags);
318 spin_unlock_irqrestore(&outring_info->ring_lock, flags);
319 return -EAGAIN; 299 return -EAGAIN;
320 } 300 }
321 301
@@ -346,10 +326,13 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
346 hv_set_next_write_location(outring_info, next_write_location); 326 hv_set_next_write_location(outring_info, next_write_location);
347 327
348 328
349 if (lock) 329 spin_unlock_irqrestore(&outring_info->ring_lock, flags);
350 spin_unlock_irqrestore(&outring_info->ring_lock, flags); 330
331 hv_signal_on_write(old_write, channel);
332
333 if (channel->rescind)
334 return -ENODEV;
351 335
352 hv_signal_on_write(old_write, channel, kick_q);
353 return 0; 336 return 0;
354} 337}
355 338
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 230c62e7f567..f7f6b9144b07 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -54,31 +54,7 @@ static struct acpi_device *hv_acpi_dev;
54 54
55static struct completion probe_event; 55static struct completion probe_event;
56 56
57 57static int hyperv_cpuhp_online;
58static void hyperv_report_panic(struct pt_regs *regs)
59{
60 static bool panic_reported;
61
62 /*
63 * We prefer to report panic on 'die' chain as we have proper
64 * registers to report, but if we miss it (e.g. on BUG()) we need
65 * to report it on 'panic'.
66 */
67 if (panic_reported)
68 return;
69 panic_reported = true;
70
71 wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
72 wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
73 wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
74 wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
75 wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);
76
77 /*
78 * Let Hyper-V know there is crash data available
79 */
80 wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
81}
82 58
83static int hyperv_panic_event(struct notifier_block *nb, unsigned long val, 59static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
84 void *args) 60 void *args)
@@ -859,9 +835,10 @@ static void vmbus_onmessage_work(struct work_struct *work)
859 kfree(ctx); 835 kfree(ctx);
860} 836}
861 837
862static void hv_process_timer_expiration(struct hv_message *msg, int cpu) 838static void hv_process_timer_expiration(struct hv_message *msg,
839 struct hv_per_cpu_context *hv_cpu)
863{ 840{
864 struct clock_event_device *dev = hv_context.clk_evt[cpu]; 841 struct clock_event_device *dev = hv_cpu->clk_evt;
865 842
866 if (dev->event_handler) 843 if (dev->event_handler)
867 dev->event_handler(dev); 844 dev->event_handler(dev);
@@ -871,8 +848,8 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
871 848
872void vmbus_on_msg_dpc(unsigned long data) 849void vmbus_on_msg_dpc(unsigned long data)
873{ 850{
874 int cpu = smp_processor_id(); 851 struct hv_per_cpu_context *hv_cpu = (void *)data;
875 void *page_addr = hv_context.synic_message_page[cpu]; 852 void *page_addr = hv_cpu->synic_message_page;
876 struct hv_message *msg = (struct hv_message *)page_addr + 853 struct hv_message *msg = (struct hv_message *)page_addr +
877 VMBUS_MESSAGE_SINT; 854 VMBUS_MESSAGE_SINT;
878 struct vmbus_channel_message_header *hdr; 855 struct vmbus_channel_message_header *hdr;
@@ -908,16 +885,88 @@ msg_handled:
908 vmbus_signal_eom(msg, message_type); 885 vmbus_signal_eom(msg, message_type);
909} 886}
910 887
888
889/*
890 * Direct callback for channels using other deferred processing
891 */
892static void vmbus_channel_isr(struct vmbus_channel *channel)
893{
894 void (*callback_fn)(void *);
895
896 callback_fn = READ_ONCE(channel->onchannel_callback);
897 if (likely(callback_fn != NULL))
898 (*callback_fn)(channel->channel_callback_context);
899}
900
901/*
902 * Schedule all channels with events pending
903 */
904static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
905{
906 unsigned long *recv_int_page;
907 u32 maxbits, relid;
908
909 if (vmbus_proto_version < VERSION_WIN8) {
910 maxbits = MAX_NUM_CHANNELS_SUPPORTED;
911 recv_int_page = vmbus_connection.recv_int_page;
912 } else {
913 /*
914 * When the host is win8 and beyond, the event page
915 * can be directly checked to get the id of the channel
916 * that has the interrupt pending.
917 */
918 void *page_addr = hv_cpu->synic_event_page;
919 union hv_synic_event_flags *event
920 = (union hv_synic_event_flags *)page_addr +
921 VMBUS_MESSAGE_SINT;
922
923 maxbits = HV_EVENT_FLAGS_COUNT;
924 recv_int_page = event->flags;
925 }
926
927 if (unlikely(!recv_int_page))
928 return;
929
930 for_each_set_bit(relid, recv_int_page, maxbits) {
931 struct vmbus_channel *channel;
932
933 if (!sync_test_and_clear_bit(relid, recv_int_page))
934 continue;
935
936 /* Special case - vmbus channel protocol msg */
937 if (relid == 0)
938 continue;
939
940 /* Find channel based on relid */
941 list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
942 if (channel->offermsg.child_relid != relid)
943 continue;
944
945 switch (channel->callback_mode) {
946 case HV_CALL_ISR:
947 vmbus_channel_isr(channel);
948 break;
949
950 case HV_CALL_BATCHED:
951 hv_begin_read(&channel->inbound);
952 /* fallthrough */
953 case HV_CALL_DIRECT:
954 tasklet_schedule(&channel->callback_event);
955 }
956 }
957 }
958}
959
911static void vmbus_isr(void) 960static void vmbus_isr(void)
912{ 961{
913 int cpu = smp_processor_id(); 962 struct hv_per_cpu_context *hv_cpu
914 void *page_addr; 963 = this_cpu_ptr(hv_context.cpu_context);
964 void *page_addr = hv_cpu->synic_event_page;
915 struct hv_message *msg; 965 struct hv_message *msg;
916 union hv_synic_event_flags *event; 966 union hv_synic_event_flags *event;
917 bool handled = false; 967 bool handled = false;
918 968
919 page_addr = hv_context.synic_event_page[cpu]; 969 if (unlikely(page_addr == NULL))
920 if (page_addr == NULL)
921 return; 970 return;
922 971
923 event = (union hv_synic_event_flags *)page_addr + 972 event = (union hv_synic_event_flags *)page_addr +
@@ -932,10 +981,8 @@ static void vmbus_isr(void)
932 (vmbus_proto_version == VERSION_WIN7)) { 981 (vmbus_proto_version == VERSION_WIN7)) {
933 982
934 /* Since we are a child, we only need to check bit 0 */ 983 /* Since we are a child, we only need to check bit 0 */
935 if (sync_test_and_clear_bit(0, 984 if (sync_test_and_clear_bit(0, event->flags))
936 (unsigned long *) &event->flags32[0])) {
937 handled = true; 985 handled = true;
938 }
939 } else { 986 } else {
940 /* 987 /*
941 * Our host is win8 or above. The signaling mechanism 988 * Our host is win8 or above. The signaling mechanism
@@ -947,18 +994,17 @@ static void vmbus_isr(void)
947 } 994 }
948 995
949 if (handled) 996 if (handled)
950 tasklet_schedule(hv_context.event_dpc[cpu]); 997 vmbus_chan_sched(hv_cpu);
951
952 998
953 page_addr = hv_context.synic_message_page[cpu]; 999 page_addr = hv_cpu->synic_message_page;
954 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; 1000 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
955 1001
956 /* Check if there are actual msgs to be processed */ 1002 /* Check if there are actual msgs to be processed */
957 if (msg->header.message_type != HVMSG_NONE) { 1003 if (msg->header.message_type != HVMSG_NONE) {
958 if (msg->header.message_type == HVMSG_TIMER_EXPIRED) 1004 if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
959 hv_process_timer_expiration(msg, cpu); 1005 hv_process_timer_expiration(msg, hv_cpu);
960 else 1006 else
961 tasklet_schedule(hv_context.msg_dpc[cpu]); 1007 tasklet_schedule(&hv_cpu->msg_dpc);
962 } 1008 }
963 1009
964 add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); 1010 add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
@@ -986,7 +1032,7 @@ static int vmbus_bus_init(void)
986 1032
987 ret = bus_register(&hv_bus); 1033 ret = bus_register(&hv_bus);
988 if (ret) 1034 if (ret)
989 goto err_cleanup; 1035 return ret;
990 1036
991 hv_setup_vmbus_irq(vmbus_isr); 1037 hv_setup_vmbus_irq(vmbus_isr);
992 1038
@@ -997,14 +1043,16 @@ static int vmbus_bus_init(void)
997 * Initialize the per-cpu interrupt state and 1043 * Initialize the per-cpu interrupt state and
998 * connect to the host. 1044 * connect to the host.
999 */ 1045 */
1000 on_each_cpu(hv_synic_init, NULL, 1); 1046 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv:online",
1047 hv_synic_init, hv_synic_cleanup);
1048 if (ret < 0)
1049 goto err_alloc;
1050 hyperv_cpuhp_online = ret;
1051
1001 ret = vmbus_connect(); 1052 ret = vmbus_connect();
1002 if (ret) 1053 if (ret)
1003 goto err_connect; 1054 goto err_connect;
1004 1055
1005 if (vmbus_proto_version > VERSION_WIN7)
1006 cpu_hotplug_disable();
1007
1008 /* 1056 /*
1009 * Only register if the crash MSRs are available 1057 * Only register if the crash MSRs are available
1010 */ 1058 */
@@ -1019,16 +1067,13 @@ static int vmbus_bus_init(void)
1019 return 0; 1067 return 0;
1020 1068
1021err_connect: 1069err_connect:
1022 on_each_cpu(hv_synic_cleanup, NULL, 1); 1070 cpuhp_remove_state(hyperv_cpuhp_online);
1023err_alloc: 1071err_alloc:
1024 hv_synic_free(); 1072 hv_synic_free();
1025 hv_remove_vmbus_irq(); 1073 hv_remove_vmbus_irq();
1026 1074
1027 bus_unregister(&hv_bus); 1075 bus_unregister(&hv_bus);
1028 1076
1029err_cleanup:
1030 hv_cleanup(false);
1031
1032 return ret; 1077 return ret;
1033} 1078}
1034 1079
@@ -1478,13 +1523,13 @@ static struct acpi_driver vmbus_acpi_driver = {
1478 1523
1479static void hv_kexec_handler(void) 1524static void hv_kexec_handler(void)
1480{ 1525{
1481 int cpu;
1482
1483 hv_synic_clockevents_cleanup(); 1526 hv_synic_clockevents_cleanup();
1484 vmbus_initiate_unload(false); 1527 vmbus_initiate_unload(false);
1485 for_each_online_cpu(cpu) 1528 vmbus_connection.conn_state = DISCONNECTED;
1486 smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); 1529 /* Make sure conn_state is set as hv_synic_cleanup checks for it */
1487 hv_cleanup(false); 1530 mb();
1531 cpuhp_remove_state(hyperv_cpuhp_online);
1532 hyperv_cleanup();
1488}; 1533};
1489 1534
1490static void hv_crash_handler(struct pt_regs *regs) 1535static void hv_crash_handler(struct pt_regs *regs)
@@ -1495,8 +1540,9 @@ static void hv_crash_handler(struct pt_regs *regs)
1495 * doing the cleanup for current CPU only. This should be sufficient 1540 * doing the cleanup for current CPU only. This should be sufficient
1496 * for kdump. 1541 * for kdump.
1497 */ 1542 */
1498 hv_synic_cleanup(NULL); 1543 vmbus_connection.conn_state = DISCONNECTED;
1499 hv_cleanup(true); 1544 hv_synic_cleanup(smp_processor_id());
1545 hyperv_cleanup();
1500}; 1546};
1501 1547
1502static int __init hv_acpi_init(void) 1548static int __init hv_acpi_init(void)
@@ -1547,24 +1593,24 @@ static void __exit vmbus_exit(void)
1547 hv_synic_clockevents_cleanup(); 1593 hv_synic_clockevents_cleanup();
1548 vmbus_disconnect(); 1594 vmbus_disconnect();
1549 hv_remove_vmbus_irq(); 1595 hv_remove_vmbus_irq();
1550 for_each_online_cpu(cpu) 1596 for_each_online_cpu(cpu) {
1551 tasklet_kill(hv_context.msg_dpc[cpu]); 1597 struct hv_per_cpu_context *hv_cpu
1598 = per_cpu_ptr(hv_context.cpu_context, cpu);
1599
1600 tasklet_kill(&hv_cpu->msg_dpc);
1601 }
1552 vmbus_free_channels(); 1602 vmbus_free_channels();
1603
1553 if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { 1604 if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
1554 unregister_die_notifier(&hyperv_die_block); 1605 unregister_die_notifier(&hyperv_die_block);
1555 atomic_notifier_chain_unregister(&panic_notifier_list, 1606 atomic_notifier_chain_unregister(&panic_notifier_list,
1556 &hyperv_panic_block); 1607 &hyperv_panic_block);
1557 } 1608 }
1558 bus_unregister(&hv_bus); 1609 bus_unregister(&hv_bus);
1559 hv_cleanup(false); 1610
1560 for_each_online_cpu(cpu) { 1611 cpuhp_remove_state(hyperv_cpuhp_online);
1561 tasklet_kill(hv_context.event_dpc[cpu]);
1562 smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
1563 }
1564 hv_synic_free(); 1612 hv_synic_free();
1565 acpi_bus_unregister_driver(&vmbus_acpi_driver); 1613 acpi_bus_unregister_driver(&vmbus_acpi_driver);
1566 if (vmbus_proto_version > VERSION_WIN7)
1567 cpu_hotplug_enable();
1568} 1614}
1569 1615
1570 1616
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 17741969026e..26cfac3e6de7 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -242,6 +242,7 @@ static void *etm_setup_aux(int event_cpu, void **pages,
242 if (!sink_ops(sink)->alloc_buffer) 242 if (!sink_ops(sink)->alloc_buffer)
243 goto err; 243 goto err;
244 244
245 cpu = cpumask_first(mask);
245 /* Get the AUX specific data from the sink buffer */ 246 /* Get the AUX specific data from the sink buffer */
246 event_data->snk_config = 247 event_data->snk_config =
247 sink_ops(sink)->alloc_buffer(sink, cpu, pages, 248 sink_ops(sink)->alloc_buffer(sink, cpu, pages,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 031480f2c34d..d1340fb4e457 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -216,10 +216,14 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
216 goto out; 216 goto out;
217 217
218 /* Go from generic option to ETMv4 specifics */ 218 /* Go from generic option to ETMv4 specifics */
219 if (attr->config & BIT(ETM_OPT_CYCACC)) 219 if (attr->config & BIT(ETM_OPT_CYCACC)) {
220 config->cfg |= ETMv4_MODE_CYCACC; 220 config->cfg |= BIT(4);
221 /* TRM: Must program this for cycacc to work */
222 config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
223 }
221 if (attr->config & BIT(ETM_OPT_TS)) 224 if (attr->config & BIT(ETM_OPT_TS))
222 config->cfg |= ETMv4_MODE_TIMESTAMP; 225 /* bit[11], Global timestamp tracing bit */
226 config->cfg |= BIT(11);
223 227
224out: 228out:
225 return ret; 229 return ret;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index ba8d3f86de21..b3b5ea7b7fb3 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -146,6 +146,7 @@
146#define ETM_ARCH_V4 0x40 146#define ETM_ARCH_V4 0x40
147#define ETMv4_SYNC_MASK 0x1F 147#define ETMv4_SYNC_MASK 0x1F
148#define ETM_CYC_THRESHOLD_MASK 0xFFF 148#define ETM_CYC_THRESHOLD_MASK 0xFFF
149#define ETM_CYC_THRESHOLD_DEFAULT 0x100
149#define ETMv4_EVENT_MASK 0xFF 150#define ETMv4_EVENT_MASK 0xFF
150#define ETM_CNTR_MAX_VAL 0xFFFF 151#define ETM_CNTR_MAX_VAL 0xFFFF
151#define ETM_TRACEID_MASK 0x3f 152#define ETM_TRACEID_MASK 0x3f
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index e4c55c5f9988..93fc26f01bab 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -356,7 +356,7 @@ static void stm_generic_unlink(struct stm_data *stm_data,
356 if (!drvdata || !drvdata->csdev) 356 if (!drvdata || !drvdata->csdev)
357 return; 357 return;
358 358
359 stm_disable(drvdata->csdev, NULL); 359 coresight_disable(drvdata->csdev);
360} 360}
361 361
362static phys_addr_t 362static phys_addr_t
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index a579a0f25840..22c1aeeb6421 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -20,6 +20,7 @@
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_platform.h> 21#include <linux/of_platform.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/platform_data/ti-aemif.h>
23 24
24#define TA_SHIFT 2 25#define TA_SHIFT 2
25#define RHOLD_SHIFT 4 26#define RHOLD_SHIFT 4
@@ -335,6 +336,8 @@ static int aemif_probe(struct platform_device *pdev)
335 struct device_node *np = dev->of_node; 336 struct device_node *np = dev->of_node;
336 struct device_node *child_np; 337 struct device_node *child_np;
337 struct aemif_device *aemif; 338 struct aemif_device *aemif;
339 struct aemif_platform_data *pdata;
340 struct of_dev_auxdata *dev_lookup;
338 341
339 if (np == NULL) 342 if (np == NULL)
340 return 0; 343 return 0;
@@ -343,6 +346,9 @@ static int aemif_probe(struct platform_device *pdev)
343 if (!aemif) 346 if (!aemif)
344 return -ENOMEM; 347 return -ENOMEM;
345 348
349 pdata = dev_get_platdata(&pdev->dev);
350 dev_lookup = pdata ? pdata->dev_lookup : NULL;
351
346 platform_set_drvdata(pdev, aemif); 352 platform_set_drvdata(pdev, aemif);
347 353
348 aemif->clk = devm_clk_get(dev, NULL); 354 aemif->clk = devm_clk_get(dev, NULL);
@@ -390,7 +396,7 @@ static int aemif_probe(struct platform_device *pdev)
390 * parameters are set. 396 * parameters are set.
391 */ 397 */
392 for_each_available_child_of_node(np, child_np) { 398 for_each_available_child_of_node(np, child_np) {
393 ret = of_platform_populate(child_np, NULL, NULL, dev); 399 ret = of_platform_populate(child_np, NULL, dev_lookup, dev);
394 if (ret < 0) 400 if (ret < 0)
395 goto error; 401 goto error;
396 } 402 }
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 64971baf11fa..c290990d73ed 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -474,11 +474,15 @@ config SRAM
474 bool "Generic on-chip SRAM driver" 474 bool "Generic on-chip SRAM driver"
475 depends on HAS_IOMEM 475 depends on HAS_IOMEM
476 select GENERIC_ALLOCATOR 476 select GENERIC_ALLOCATOR
477 select SRAM_EXEC if ARM
477 help 478 help
478 This driver allows you to declare a memory region to be managed by 479 This driver allows you to declare a memory region to be managed by
479 the genalloc API. It is supposed to be used for small on-chip SRAM 480 the genalloc API. It is supposed to be used for small on-chip SRAM
480 areas found on many SoCs. 481 areas found on many SoCs.
481 482
483config SRAM_EXEC
484 bool
485
482config VEXPRESS_SYSCFG 486config VEXPRESS_SYSCFG
483 bool "Versatile Express System Configuration driver" 487 bool "Versatile Express System Configuration driver"
484 depends on VEXPRESS_CONFIG 488 depends on VEXPRESS_CONFIG
@@ -487,6 +491,7 @@ config VEXPRESS_SYSCFG
487 ARM Ltd. Versatile Express uses specialised platform configuration 491 ARM Ltd. Versatile Express uses specialised platform configuration
488 bus. System Configuration interface is one of the possible means 492 bus. System Configuration interface is one of the possible means
489 of generating transactions on this bus. 493 of generating transactions on this bus.
494
490config PANEL 495config PANEL
491 tristate "Parallel port LCD/Keypad Panel support" 496 tristate "Parallel port LCD/Keypad Panel support"
492 depends on PARPORT 497 depends on PARPORT
@@ -494,14 +499,14 @@ config PANEL
494 Say Y here if you have an HD44780 or KS-0074 LCD connected to your 499 Say Y here if you have an HD44780 or KS-0074 LCD connected to your
495 parallel port. This driver also features 4 and 6-key keypads. The LCD 500 parallel port. This driver also features 4 and 6-key keypads. The LCD
496 is accessible through the /dev/lcd char device (10, 156), and the 501 is accessible through the /dev/lcd char device (10, 156), and the
497 keypad through /dev/keypad (10, 185). Both require misc device to be 502 keypad through /dev/keypad (10, 185). This code can either be
498 enabled. This code can either be compiled as a module, or linked into 503 compiled as a module, or linked into the kernel and started at boot.
499 the kernel and started at boot. If you don't understand what all this 504 If you don't understand what all this is about, say N.
500 is about, say N. 505
506if PANEL
501 507
502config PANEL_PARPORT 508config PANEL_PARPORT
503 int "Default parallel port number (0=LPT1)" 509 int "Default parallel port number (0=LPT1)"
504 depends on PANEL
505 range 0 255 510 range 0 255
506 default "0" 511 default "0"
507 ---help--- 512 ---help---
@@ -513,7 +518,6 @@ config PANEL_PARPORT
513 518
514config PANEL_PROFILE 519config PANEL_PROFILE
515 int "Default panel profile (0-5, 0=custom)" 520 int "Default panel profile (0-5, 0=custom)"
516 depends on PANEL
517 range 0 5 521 range 0 5
518 default "5" 522 default "5"
519 ---help--- 523 ---help---
@@ -534,7 +538,7 @@ config PANEL_PROFILE
534 for experts. 538 for experts.
535 539
536config PANEL_KEYPAD 540config PANEL_KEYPAD
537 depends on PANEL && PANEL_PROFILE="0" 541 depends on PANEL_PROFILE="0"
538 int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)" 542 int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
539 range 0 3 543 range 0 3
540 default 0 544 default 0
@@ -551,7 +555,7 @@ config PANEL_KEYPAD
551 supports simultaneous keys pressed when the keypad supports them. 555 supports simultaneous keys pressed when the keypad supports them.
552 556
553config PANEL_LCD 557config PANEL_LCD
554 depends on PANEL && PANEL_PROFILE="0" 558 depends on PANEL_PROFILE="0"
555 int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)" 559 int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
556 range 0 5 560 range 0 5
557 default 0 561 default 0
@@ -574,7 +578,7 @@ config PANEL_LCD
574 that those values changed from the 2.4 driver for better consistency. 578 that those values changed from the 2.4 driver for better consistency.
575 579
576config PANEL_LCD_HEIGHT 580config PANEL_LCD_HEIGHT
577 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" 581 depends on PANEL_PROFILE="0" && PANEL_LCD="1"
578 int "Number of lines on the LCD (1-2)" 582 int "Number of lines on the LCD (1-2)"
579 range 1 2 583 range 1 2
580 default 2 584 default 2
@@ -583,7 +587,7 @@ config PANEL_LCD_HEIGHT
583 It can either be 1 or 2. 587 It can either be 1 or 2.
584 588
585config PANEL_LCD_WIDTH 589config PANEL_LCD_WIDTH
586 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" 590 depends on PANEL_PROFILE="0" && PANEL_LCD="1"
587 int "Number of characters per line on the LCD (1-40)" 591 int "Number of characters per line on the LCD (1-40)"
588 range 1 40 592 range 1 40
589 default 40 593 default 40
@@ -592,7 +596,7 @@ config PANEL_LCD_WIDTH
592 Common values are 16,20,24,40. 596 Common values are 16,20,24,40.
593 597
594config PANEL_LCD_BWIDTH 598config PANEL_LCD_BWIDTH
595 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" 599 depends on PANEL_PROFILE="0" && PANEL_LCD="1"
596 int "Internal LCD line width (1-40, 40 by default)" 600 int "Internal LCD line width (1-40, 40 by default)"
597 range 1 40 601 range 1 40
598 default 40 602 default 40
@@ -608,7 +612,7 @@ config PANEL_LCD_BWIDTH
608 If you don't know, put '40' here. 612 If you don't know, put '40' here.
609 613
610config PANEL_LCD_HWIDTH 614config PANEL_LCD_HWIDTH
611 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" 615 depends on PANEL_PROFILE="0" && PANEL_LCD="1"
612 int "Hardware LCD line width (1-64, 64 by default)" 616 int "Hardware LCD line width (1-64, 64 by default)"
613 range 1 64 617 range 1 64
614 default 64 618 default 64
@@ -622,7 +626,7 @@ config PANEL_LCD_HWIDTH
622 64 here for a 2x40. 626 64 here for a 2x40.
623 627
624config PANEL_LCD_CHARSET 628config PANEL_LCD_CHARSET
625 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" 629 depends on PANEL_PROFILE="0" && PANEL_LCD="1"
626 int "LCD character set (0=normal, 1=KS0074)" 630 int "LCD character set (0=normal, 1=KS0074)"
627 range 0 1 631 range 0 1
628 default 0 632 default 0
@@ -638,7 +642,7 @@ config PANEL_LCD_CHARSET
638 If you don't know, use the normal one (0). 642 If you don't know, use the normal one (0).
639 643
640config PANEL_LCD_PROTO 644config PANEL_LCD_PROTO
641 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" 645 depends on PANEL_PROFILE="0" && PANEL_LCD="1"
642 int "LCD communication mode (0=parallel 8 bits, 1=serial)" 646 int "LCD communication mode (0=parallel 8 bits, 1=serial)"
643 range 0 1 647 range 0 1
644 default 0 648 default 0
@@ -651,7 +655,7 @@ config PANEL_LCD_PROTO
651 parallel LCD, and 1 for a serial LCD. 655 parallel LCD, and 1 for a serial LCD.
652 656
653config PANEL_LCD_PIN_E 657config PANEL_LCD_PIN_E
654 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" 658 depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
655 int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) " 659 int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
656 range -17 17 660 range -17 17
657 default 14 661 default 14
@@ -666,7 +670,7 @@ config PANEL_LCD_PIN_E
666 Default for the 'E' pin in custom profile is '14' (AUTOFEED). 670 Default for the 'E' pin in custom profile is '14' (AUTOFEED).
667 671
668config PANEL_LCD_PIN_RS 672config PANEL_LCD_PIN_RS
669 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" 673 depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
670 int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) " 674 int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
671 range -17 17 675 range -17 17
672 default 17 676 default 17
@@ -681,7 +685,7 @@ config PANEL_LCD_PIN_RS
681 Default for the 'RS' pin in custom profile is '17' (SELECT IN). 685 Default for the 'RS' pin in custom profile is '17' (SELECT IN).
682 686
683config PANEL_LCD_PIN_RW 687config PANEL_LCD_PIN_RW
684 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" 688 depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
685 int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) " 689 int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
686 range -17 17 690 range -17 17
687 default 16 691 default 16
@@ -696,7 +700,7 @@ config PANEL_LCD_PIN_RW
696 Default for the 'RW' pin in custom profile is '16' (INIT). 700 Default for the 'RW' pin in custom profile is '16' (INIT).
697 701
698config PANEL_LCD_PIN_SCL 702config PANEL_LCD_PIN_SCL
699 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" 703 depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
700 int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) " 704 int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
701 range -17 17 705 range -17 17
702 default 1 706 default 1
@@ -711,7 +715,7 @@ config PANEL_LCD_PIN_SCL
711 Default for the 'SCL' pin in custom profile is '1' (STROBE). 715 Default for the 'SCL' pin in custom profile is '1' (STROBE).
712 716
713config PANEL_LCD_PIN_SDA 717config PANEL_LCD_PIN_SDA
714 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" 718 depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
715 int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) " 719 int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
716 range -17 17 720 range -17 17
717 default 2 721 default 2
@@ -726,7 +730,7 @@ config PANEL_LCD_PIN_SDA
726 Default for the 'SDA' pin in custom profile is '2' (D0). 730 Default for the 'SDA' pin in custom profile is '2' (D0).
727 731
728config PANEL_LCD_PIN_BL 732config PANEL_LCD_PIN_BL
729 depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" 733 depends on PANEL_PROFILE="0" && PANEL_LCD="1"
730 int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) " 734 int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
731 range -17 17 735 range -17 17
732 default 0 736 default 0
@@ -741,7 +745,6 @@ config PANEL_LCD_PIN_BL
741 Default for the 'BL' pin in custom profile is '0' (uncontrolled). 745 Default for the 'BL' pin in custom profile is '0' (uncontrolled).
742 746
743config PANEL_CHANGE_MESSAGE 747config PANEL_CHANGE_MESSAGE
744 depends on PANEL
745 bool "Change LCD initialization message ?" 748 bool "Change LCD initialization message ?"
746 default "n" 749 default "n"
747 ---help--- 750 ---help---
@@ -754,7 +757,7 @@ config PANEL_CHANGE_MESSAGE
754 say 'N' and keep the default message with the version. 757 say 'N' and keep the default message with the version.
755 758
756config PANEL_BOOT_MESSAGE 759config PANEL_BOOT_MESSAGE
757 depends on PANEL && PANEL_CHANGE_MESSAGE="y" 760 depends on PANEL_CHANGE_MESSAGE="y"
758 string "New initialization message" 761 string "New initialization message"
759 default "" 762 default ""
760 ---help--- 763 ---help---
@@ -766,6 +769,8 @@ config PANEL_BOOT_MESSAGE
766 An empty message will only clear the display at driver init time. Any other 769 An empty message will only clear the display at driver init time. Any other
767 printf()-formatted message is valid with newline and escape codes. 770 printf()-formatted message is valid with newline and escape codes.
768 771
772endif # PANEL
773
769source "drivers/misc/c2port/Kconfig" 774source "drivers/misc/c2port/Kconfig"
770source "drivers/misc/eeprom/Kconfig" 775source "drivers/misc/eeprom/Kconfig"
771source "drivers/misc/cb710/Kconfig" 776source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 31983366090a..7a3ea89339b4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_INTEL_MEI) += mei/
47obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/ 47obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
48obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o 48obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
49obj-$(CONFIG_SRAM) += sram.o 49obj-$(CONFIG_SRAM) += sram.o
50obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
50obj-y += mic/ 51obj-y += mic/
51obj-$(CONFIG_GENWQE) += genwqe/ 52obj-$(CONFIG_GENWQE) += genwqe/
52obj-$(CONFIG_ECHO) += echo/ 53obj-$(CONFIG_ECHO) += echo/
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index c4e41c26649e..de58762097c4 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -100,4 +100,14 @@ config EEPROM_DIGSY_MTC_CFG
100 100
101 If unsure, say N. 101 If unsure, say N.
102 102
103config EEPROM_IDT_89HPESX
104 tristate "IDT 89HPESx PCIe-swtiches EEPROM / CSR support"
105 depends on I2C && SYSFS
106 help
107 Enable this driver to get read/write access to EEPROM / CSRs
108 over IDT PCIe-swtich i2c-slave interface.
109
110 This driver can also be built as a module. If so, the module
111 will be called idt_89hpesx.
112
103endmenu 113endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index fc1e81d29267..90a52624ddeb 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
5obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o 5obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
6obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o 6obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
7obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o 7obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
8obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
new file mode 100644
index 000000000000..4a22a1d99395
--- /dev/null
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -0,0 +1,1581 @@
1/*
2 * This file is provided under a GPLv2 license. When using or
3 * redistributing this file, you may do so under that license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright (C) 2016 T-Platforms. All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, it can be found <http://www.gnu.org/licenses/>.
20 *
21 * The full GNU General Public License is included in this distribution in
22 * the file called "COPYING".
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
27 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
28 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
29 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
30 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
32 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
34 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * IDT PCIe-switch NTB Linux driver
37 *
38 * Contact Information:
39 * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru>
40 */
41/*
42 * NOTE of the IDT 89HPESx SMBus-slave interface driver
43 * This driver primarily is developed to have an access to EEPROM device of
44 * IDT PCIe-switches. IDT provides a simple SMBus interface to perform IO-
45 * operations from/to EEPROM, which is located at private (so called Master)
46 * SMBus of switches. Using that interface this the driver creates a simple
47 * binary sysfs-file in the device directory:
48 * /sys/bus/i2c/devices/<bus>-<devaddr>/eeprom
49 * In case if read-only flag is specified in the dts-node of device desription,
50 * User-space applications won't be able to write to the EEPROM sysfs-node.
51 * Additionally IDT 89HPESx SMBus interface has an ability to write/read
52 * data of device CSRs. This driver exposes debugf-file to perform simple IO
53 * operations using that ability for just basic debug purpose. Particularly
54 * next file is created in the specific debugfs-directory:
55 * /sys/kernel/debug/idt_csr/
56 * Format of the debugfs-node is:
57 * $ cat /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
58 * <CSR address>:<CSR value>
59 * So reading the content of the file gives current CSR address and it value.
60 * If User-space application wishes to change current CSR address,
61 * it can just write a proper value to the sysfs-file:
62 * $ echo "<CSR address>" > /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>
63 * If it wants to change the CSR value as well, the format of the write
64 * operation is:
65 * $ echo "<CSR address>:<CSR value>" > \
66 * /sys/kernel/debug/idt_csr/<bus>-<devaddr>/<devname>;
67 * CSR address and value can be any of hexadecimal, decimal or octal format.
68 */
69
70#include <linux/kernel.h>
71#include <linux/init.h>
72#include <linux/module.h>
73#include <linux/types.h>
74#include <linux/sizes.h>
75#include <linux/slab.h>
76#include <linux/mutex.h>
77#include <linux/sysfs.h>
78#include <linux/debugfs.h>
79#include <linux/mod_devicetable.h>
80#include <linux/of.h>
81#include <linux/i2c.h>
82#include <linux/pci_ids.h>
83#include <linux/delay.h>
84
85#define IDT_NAME "89hpesx"
86#define IDT_89HPESX_DESC "IDT 89HPESx SMBus-slave interface driver"
87#define IDT_89HPESX_VER "1.0"
88
89MODULE_DESCRIPTION(IDT_89HPESX_DESC);
90MODULE_VERSION(IDT_89HPESX_VER);
91MODULE_LICENSE("GPL v2");
92MODULE_AUTHOR("T-platforms");
93
94/*
95 * csr_dbgdir - CSR read/write operations Debugfs directory
96 */
97static struct dentry *csr_dbgdir;
98
99/*
100 * struct idt_89hpesx_dev - IDT 89HPESx device data structure
101 * @eesize: Size of EEPROM in bytes (calculated from "idt,eecompatible")
102 * @eero: EEPROM Read-only flag
103 * @eeaddr: EEPROM custom address
104 *
105 * @inieecmd: Initial cmd value for EEPROM read/write operations
106 * @inicsrcmd: Initial cmd value for CSR read/write operations
107 * @iniccode: Initialial command code value for IO-operations
108 *
109 * @csr: CSR address to perform read operation
110 *
111 * @smb_write: SMBus write method
112 * @smb_read: SMBus read method
113 * @smb_mtx: SMBus mutex
114 *
115 * @client: i2c client used to perform IO operations
116 *
117 * @ee_file: EEPROM read/write sysfs-file
118 * @csr_file: CSR read/write debugfs-node
119 */
120struct idt_smb_seq;
121struct idt_89hpesx_dev {
122 u32 eesize;
123 bool eero;
124 u8 eeaddr;
125
126 u8 inieecmd;
127 u8 inicsrcmd;
128 u8 iniccode;
129
130 u16 csr;
131
132 int (*smb_write)(struct idt_89hpesx_dev *, const struct idt_smb_seq *);
133 int (*smb_read)(struct idt_89hpesx_dev *, struct idt_smb_seq *);
134 struct mutex smb_mtx;
135
136 struct i2c_client *client;
137
138 struct bin_attribute *ee_file;
139 struct dentry *csr_dir;
140 struct dentry *csr_file;
141};
142
143/*
144 * struct idt_smb_seq - sequence of data to be read/written from/to IDT 89HPESx
145 * @ccode: SMBus command code
146 * @bytecnt: Byte count of operation
147 * @data: Data to by written
148 */
149struct idt_smb_seq {
150 u8 ccode;
151 u8 bytecnt;
152 u8 *data;
153};
154
155/*
156 * struct idt_eeprom_seq - sequence of data to be read/written from/to EEPROM
157 * @cmd: Transaction CMD
158 * @eeaddr: EEPROM custom address
159 * @memaddr: Internal memory address of EEPROM
160 * @data: Data to be written at the memory address
161 */
162struct idt_eeprom_seq {
163 u8 cmd;
164 u8 eeaddr;
165 u16 memaddr;
166 u8 data;
167} __packed;
168
169/*
170 * struct idt_csr_seq - sequence of data to be read/written from/to CSR
171 * @cmd: Transaction CMD
172 * @csraddr: Internal IDT device CSR address
173 * @data: Data to be read/written from/to the CSR address
174 */
175struct idt_csr_seq {
176 u8 cmd;
177 u16 csraddr;
178 u32 data;
179} __packed;
180
181/*
182 * SMBus command code macros
183 * @CCODE_END: Indicates the end of transaction
184 * @CCODE_START: Indicates the start of transaction
185 * @CCODE_CSR: CSR read/write transaction
186 * @CCODE_EEPROM: EEPROM read/write transaction
187 * @CCODE_BYTE: Supplied data has BYTE length
188 * @CCODE_WORD: Supplied data has WORD length
189 * @CCODE_BLOCK: Supplied data has variable length passed in bytecnt
190 * byte right following CCODE byte
191 */
192#define CCODE_END ((u8)0x01)
193#define CCODE_START ((u8)0x02)
194#define CCODE_CSR ((u8)0x00)
195#define CCODE_EEPROM ((u8)0x04)
196#define CCODE_BYTE ((u8)0x00)
197#define CCODE_WORD ((u8)0x20)
198#define CCODE_BLOCK ((u8)0x40)
199#define CCODE_PEC ((u8)0x80)
200
201/*
202 * EEPROM command macros
203 * @EEPROM_OP_WRITE: EEPROM write operation
204 * @EEPROM_OP_READ: EEPROM read operation
205 * @EEPROM_USA: Use specified address of EEPROM
206 * @EEPROM_NAERR: EEPROM device is not ready to respond
207 * @EEPROM_LAERR: EEPROM arbitration loss error
208 * @EEPROM_MSS: EEPROM misplace start & stop bits error
209 * @EEPROM_WR_CNT: Bytes count to perform write operation
210 * @EEPROM_WRRD_CNT: Bytes count to write before reading
211 * @EEPROM_RD_CNT: Bytes count to perform read operation
212 * @EEPROM_DEF_SIZE: Fall back size of EEPROM
213 * @EEPROM_DEF_ADDR: Defatul EEPROM address
214 * @EEPROM_TOUT: Timeout before retry read operation if eeprom is busy
215 */
216#define EEPROM_OP_WRITE ((u8)0x00)
217#define EEPROM_OP_READ ((u8)0x01)
218#define EEPROM_USA ((u8)0x02)
219#define EEPROM_NAERR ((u8)0x08)
220#define EEPROM_LAERR ((u8)0x10)
221#define EEPROM_MSS ((u8)0x20)
222#define EEPROM_WR_CNT ((u8)5)
223#define EEPROM_WRRD_CNT ((u8)4)
224#define EEPROM_RD_CNT ((u8)5)
225#define EEPROM_DEF_SIZE ((u16)4096)
226#define EEPROM_DEF_ADDR ((u8)0x50)
227#define EEPROM_TOUT (100)
228
/*
 * CSR command macros
 * @CSR_DWE:       Enable all four bytes of the operation
 * @CSR_OP_WRITE:  CSR write operation
 * @CSR_OP_READ:   CSR read operation
 * @CSR_RERR:      Read operation error
 * @CSR_WERR:      Write operation error
 * @CSR_WR_CNT:    Bytes count to perform write operation
 * @CSR_WRRD_CNT:  Bytes count to write before reading
 * @CSR_RD_CNT:    Bytes count to perform read operation
 * @CSR_MAX:       Maximum CSR address
 * @CSR_DEF:       Default CSR address
 * @CSR_REAL_ADDR: CSR real unshifted address
 */
#define CSR_DWE			((u8)0x0F)
#define CSR_OP_WRITE		((u8)0x00)
#define CSR_OP_READ		((u8)0x10)
#define CSR_RERR		((u8)0x40)
#define CSR_WERR		((u8)0x80)
#define CSR_WR_CNT		((u8)7)
#define CSR_WRRD_CNT		((u8)3)
#define CSR_RD_CNT		((u8)7)
#define CSR_MAX			((u32)0x3FFFF)
#define CSR_DEF			((u16)0x0000)
/* Parenthesize the argument so expressions like CSR_REAL_ADDR(a + b) shift
 * the whole expression rather than only its last operand */
#define CSR_REAL_ADDR(val)	((unsigned int)(val) << 2)
254
/*
 * IDT 89HPESx basic registers
 * @IDT_VIDDID_CSR: CSR address holding the PCIe VID and DID of IDT 89HPESx
 *                  (VID in the low word, DID in the high word)
 * @IDT_VID_MASK:   Mask extracting the VID from that register
 */
#define IDT_VIDDID_CSR	((u32)0x0000)
#define IDT_VID_MASK	((u32)0xFFFF)
262
/*
 * IDT 89HPESx can send NACK when a new command is sent before the previous one
 * finished execution. In this case the driver retries the operation a certain
 * number of times.
 * @RETRY_CNT:    Number of retries before giving up and failing
 * @idt_smb_safe: Generate a retry loop on the corresponding SMBus method;
 *                expands to the last i2c_smbus_*_data() status (s32).
 *                Note: due to the post-decrement in the loop condition, up to
 *                RETRY_CNT + 1 attempts are made in total.
 */
#define RETRY_CNT (128)
#define idt_smb_safe(ops, args...) ({ \
	int __retry = RETRY_CNT; \
	s32 __sts; \
	do { \
		__sts = i2c_smbus_ ## ops ## _data(args); \
	} while (__retry-- && __sts < 0); \
	__sts; \
})
279
280/*===========================================================================
281 * i2c bus level IO-operations
282 *===========================================================================
283 */
284
285/*
286 * idt_smb_write_byte() - SMBus write method when I2C_SMBUS_BYTE_DATA operation
287 * is only available
288 * @pdev: Pointer to the driver data
289 * @seq: Sequence of data to be written
290 */
291static int idt_smb_write_byte(struct idt_89hpesx_dev *pdev,
292 const struct idt_smb_seq *seq)
293{
294 s32 sts;
295 u8 ccode;
296 int idx;
297
298 /* Loop over the supplied data sending byte one-by-one */
299 for (idx = 0; idx < seq->bytecnt; idx++) {
300 /* Collect the command code byte */
301 ccode = seq->ccode | CCODE_BYTE;
302 if (idx == 0)
303 ccode |= CCODE_START;
304 if (idx == seq->bytecnt - 1)
305 ccode |= CCODE_END;
306
307 /* Send data to the device */
308 sts = idt_smb_safe(write_byte, pdev->client, ccode,
309 seq->data[idx]);
310 if (sts != 0)
311 return (int)sts;
312 }
313
314 return 0;
315}
316
317/*
318 * idt_smb_read_byte() - SMBus read method when I2C_SMBUS_BYTE_DATA operation
319 * is only available
320 * @pdev: Pointer to the driver data
321 * @seq: Buffer to read data to
322 */
323static int idt_smb_read_byte(struct idt_89hpesx_dev *pdev,
324 struct idt_smb_seq *seq)
325{
326 s32 sts;
327 u8 ccode;
328 int idx;
329
330 /* Loop over the supplied buffer receiving byte one-by-one */
331 for (idx = 0; idx < seq->bytecnt; idx++) {
332 /* Collect the command code byte */
333 ccode = seq->ccode | CCODE_BYTE;
334 if (idx == 0)
335 ccode |= CCODE_START;
336 if (idx == seq->bytecnt - 1)
337 ccode |= CCODE_END;
338
339 /* Read data from the device */
340 sts = idt_smb_safe(read_byte, pdev->client, ccode);
341 if (sts < 0)
342 return (int)sts;
343
344 seq->data[idx] = (u8)sts;
345 }
346
347 return 0;
348}
349
/*
 * idt_smb_write_word() - SMBus write method when I2C_SMBUS_BYTE_DATA and
 *                        I2C_FUNC_SMBUS_WORD_DATA operations are available
 * @pdev: Pointer to the driver data
 * @seq:  Sequence of data to be written
 *
 * Sends the buffer two bytes per transaction; a trailing odd byte, if any,
 * goes out as a final single-byte write carrying the END flag.
 */
static int idt_smb_write_word(struct idt_89hpesx_dev *pdev,
			      const struct idt_smb_seq *seq)
{
	s32 sts;
	u8 ccode;
	int idx, evencnt;

	/* Calculate the even count of data to send */
	evencnt = seq->bytecnt - (seq->bytecnt % 2);

	/* Loop over the supplied data sending two bytes at a time */
	for (idx = 0; idx < evencnt; idx += 2) {
		/* Collect the command code byte */
		ccode = seq->ccode | CCODE_WORD;
		if (idx == 0)
			ccode |= CCODE_START;
		if (idx == evencnt - 2)
			ccode |= CCODE_END;

		/*
		 * Send word data to the device.
		 * NOTE(review): this reinterprets two adjacent buffer bytes
		 * as a u16 in CPU byte order and may be unaligned -
		 * presumably acceptable on the platforms this driver targets,
		 * but get_unaligned() would be stricter; confirm.
		 */
		sts = idt_smb_safe(write_word, pdev->client, ccode,
				   *(u16 *)&seq->data[idx]);
		if (sts != 0)
			return (int)sts;
	}

	/* If there is odd number of bytes then send just one last byte */
	if (seq->bytecnt != evencnt) {
		/* Collect the command code byte */
		ccode = seq->ccode | CCODE_BYTE | CCODE_END;
		if (idx == 0)
			ccode |= CCODE_START;

		/* Send byte data to the device */
		sts = idt_smb_safe(write_byte, pdev->client, ccode,
				   seq->data[idx]);
		if (sts != 0)
			return (int)sts;
	}

	return 0;
}
398
/*
 * idt_smb_read_word() - SMBus read method when I2C_SMBUS_BYTE_DATA and
 *                       I2C_FUNC_SMBUS_WORD_DATA operations are available
 * @pdev: Pointer to the driver data
 * @seq:  Buffer to read data to
 *
 * Receives the buffer two bytes per transaction; a trailing odd byte, if any,
 * is fetched with a final single-byte read carrying the END flag.
 */
static int idt_smb_read_word(struct idt_89hpesx_dev *pdev,
			     struct idt_smb_seq *seq)
{
	s32 sts;
	u8 ccode;
	int idx, evencnt;

	/* Calculate the even count of data to send */
	evencnt = seq->bytecnt - (seq->bytecnt % 2);

	/* Loop over the supplied data reading two bytes at a time */
	for (idx = 0; idx < evencnt; idx += 2) {
		/* Collect the command code byte */
		ccode = seq->ccode | CCODE_WORD;
		if (idx == 0)
			ccode |= CCODE_START;
		if (idx == evencnt - 2)
			ccode |= CCODE_END;

		/* Read word data from the device */
		sts = idt_smb_safe(read_word, pdev->client, ccode);
		if (sts < 0)
			return (int)sts;

		/*
		 * NOTE(review): stores the word via a possibly-unaligned u16
		 * access in CPU byte order, mirroring idt_smb_write_word() -
		 * put_unaligned() would be stricter; confirm.
		 */
		*(u16 *)&seq->data[idx] = (u16)sts;
	}

	/* If there is odd number of bytes then receive just one last byte */
	if (seq->bytecnt != evencnt) {
		/* Collect the command code byte */
		ccode = seq->ccode | CCODE_BYTE | CCODE_END;
		if (idx == 0)
			ccode |= CCODE_START;

		/* Read last data byte from the device */
		sts = idt_smb_safe(read_byte, pdev->client, ccode);
		if (sts < 0)
			return (int)sts;

		seq->data[idx] = (u8)sts;
	}

	return 0;
}
449
450/*
451 * idt_smb_write_block() - SMBus write method when I2C_SMBUS_BLOCK_DATA
452 * operation is available
453 * @pdev: Pointer to the driver data
454 * @seq: Sequence of data to be written
455 */
456static int idt_smb_write_block(struct idt_89hpesx_dev *pdev,
457 const struct idt_smb_seq *seq)
458{
459 u8 ccode;
460
461 /* Return error if too much data passed to send */
462 if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
463 return -EINVAL;
464
465 /* Collect the command code byte */
466 ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
467
468 /* Send block of data to the device */
469 return idt_smb_safe(write_block, pdev->client, ccode, seq->bytecnt,
470 seq->data);
471}
472
473/*
474 * idt_smb_read_block() - SMBus read method when I2C_SMBUS_BLOCK_DATA
475 * operation is available
476 * @pdev: Pointer to the driver data
477 * @seq: Buffer to read data to
478 */
479static int idt_smb_read_block(struct idt_89hpesx_dev *pdev,
480 struct idt_smb_seq *seq)
481{
482 s32 sts;
483 u8 ccode;
484
485 /* Return error if too much data passed to send */
486 if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
487 return -EINVAL;
488
489 /* Collect the command code byte */
490 ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
491
492 /* Read block of data from the device */
493 sts = idt_smb_safe(read_block, pdev->client, ccode, seq->data);
494 if (sts != seq->bytecnt)
495 return (sts < 0 ? sts : -ENODATA);
496
497 return 0;
498}
499
500/*
501 * idt_smb_write_i2c_block() - SMBus write method when I2C_SMBUS_I2C_BLOCK_DATA
502 * operation is available
503 * @pdev: Pointer to the driver data
504 * @seq: Sequence of data to be written
505 *
506 * NOTE It's usual SMBus write block operation, except the actual data length is
507 * sent as first byte of data
508 */
509static int idt_smb_write_i2c_block(struct idt_89hpesx_dev *pdev,
510 const struct idt_smb_seq *seq)
511{
512 u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1];
513
514 /* Return error if too much data passed to send */
515 if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
516 return -EINVAL;
517
518 /* Collect the data to send. Length byte must be added prior the data */
519 buf[0] = seq->bytecnt;
520 memcpy(&buf[1], seq->data, seq->bytecnt);
521
522 /* Collect the command code byte */
523 ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
524
525 /* Send length and block of data to the device */
526 return idt_smb_safe(write_i2c_block, pdev->client, ccode,
527 seq->bytecnt + 1, buf);
528}
529
530/*
531 * idt_smb_read_i2c_block() - SMBus read method when I2C_SMBUS_I2C_BLOCK_DATA
532 * operation is available
533 * @pdev: Pointer to the driver data
534 * @seq: Buffer to read data to
535 *
536 * NOTE It's usual SMBus read block operation, except the actual data length is
537 * retrieved as first byte of data
538 */
539static int idt_smb_read_i2c_block(struct idt_89hpesx_dev *pdev,
540 struct idt_smb_seq *seq)
541{
542 u8 ccode, buf[I2C_SMBUS_BLOCK_MAX + 1];
543 s32 sts;
544
545 /* Return error if too much data passed to send */
546 if (seq->bytecnt > I2C_SMBUS_BLOCK_MAX)
547 return -EINVAL;
548
549 /* Collect the command code byte */
550 ccode = seq->ccode | CCODE_BLOCK | CCODE_START | CCODE_END;
551
552 /* Read length and block of data from the device */
553 sts = idt_smb_safe(read_i2c_block, pdev->client, ccode,
554 seq->bytecnt + 1, buf);
555 if (sts != seq->bytecnt + 1)
556 return (sts < 0 ? sts : -ENODATA);
557 if (buf[0] != seq->bytecnt)
558 return -ENODATA;
559
560 /* Copy retrieved data to the output data buffer */
561 memcpy(seq->data, &buf[1], seq->bytecnt);
562
563 return 0;
564}
565
566/*===========================================================================
567 * EEPROM IO-operations
568 *===========================================================================
569 */
570
571/*
572 * idt_eeprom_read_byte() - read just one byte from EEPROM
573 * @pdev: Pointer to the driver data
574 * @memaddr: Start EEPROM memory address
575 * @data: Data to be written to EEPROM
576 */
577static int idt_eeprom_read_byte(struct idt_89hpesx_dev *pdev, u16 memaddr,
578 u8 *data)
579{
580 struct device *dev = &pdev->client->dev;
581 struct idt_eeprom_seq eeseq;
582 struct idt_smb_seq smbseq;
583 int ret, retry;
584
585 /* Initialize SMBus sequence fields */
586 smbseq.ccode = pdev->iniccode | CCODE_EEPROM;
587 smbseq.data = (u8 *)&eeseq;
588
589 /*
590 * Sometimes EEPROM may respond with NACK if it's busy with previous
591 * operation, so we need to perform a few attempts of read cycle
592 */
593 retry = RETRY_CNT;
594 do {
595 /* Send EEPROM memory address to read data from */
596 smbseq.bytecnt = EEPROM_WRRD_CNT;
597 eeseq.cmd = pdev->inieecmd | EEPROM_OP_READ;
598 eeseq.eeaddr = pdev->eeaddr;
599 eeseq.memaddr = cpu_to_le16(memaddr);
600 ret = pdev->smb_write(pdev, &smbseq);
601 if (ret != 0) {
602 dev_err(dev, "Failed to init eeprom addr 0x%02hhx",
603 memaddr);
604 break;
605 }
606
607 /* Perform read operation */
608 smbseq.bytecnt = EEPROM_RD_CNT;
609 ret = pdev->smb_read(pdev, &smbseq);
610 if (ret != 0) {
611 dev_err(dev, "Failed to read eeprom data 0x%02hhx",
612 memaddr);
613 break;
614 }
615
616 /* Restart read operation if the device is busy */
617 if (retry && (eeseq.cmd & EEPROM_NAERR)) {
618 dev_dbg(dev, "EEPROM busy, retry reading after %d ms",
619 EEPROM_TOUT);
620 msleep(EEPROM_TOUT);
621 continue;
622 }
623
624 /* Check whether IDT successfully read data from EEPROM */
625 if (eeseq.cmd & (EEPROM_NAERR | EEPROM_LAERR | EEPROM_MSS)) {
626 dev_err(dev,
627 "Communication with eeprom failed, cmd 0x%hhx",
628 eeseq.cmd);
629 ret = -EREMOTEIO;
630 break;
631 }
632
633 /* Save retrieved data and exit the loop */
634 *data = eeseq.data;
635 break;
636 } while (retry--);
637
638 /* Return the status of operation */
639 return ret;
640}
641
/*
 * idt_eeprom_write() - EEPROM write operation
 * @pdev:    Pointer to the driver data
 * @memaddr: Start EEPROM memory address
 * @len:     Length of data to be written
 * @data:    Data to be written to EEPROM
 *
 * Writes one byte per iteration under pdev->smb_mtx, then reads the byte
 * back through idt_eeprom_read_byte() to verify it actually landed.
 *
 * Return: zero on success, otherwise a negative errno
 */
static int idt_eeprom_write(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len,
			    const u8 *data)
{
	struct device *dev = &pdev->client->dev;
	struct idt_eeprom_seq eeseq;
	struct idt_smb_seq smbseq;
	int ret;
	u16 idx;

	/* Initialize SMBus sequence fields */
	smbseq.ccode = pdev->iniccode | CCODE_EEPROM;
	smbseq.data = (u8 *)&eeseq;

	/* Send data byte-by-byte, checking if it is successfully written */
	for (idx = 0; idx < len; idx++, memaddr++) {
		/* Lock IDT SMBus device */
		mutex_lock(&pdev->smb_mtx);

		/* Perform write operation */
		smbseq.bytecnt = EEPROM_WR_CNT;
		eeseq.cmd = pdev->inieecmd | EEPROM_OP_WRITE;
		eeseq.eeaddr = pdev->eeaddr;
		eeseq.memaddr = cpu_to_le16(memaddr);
		eeseq.data = data[idx];
		ret = pdev->smb_write(pdev, &smbseq);
		if (ret != 0) {
			dev_err(dev,
				"Failed to write 0x%04hx:0x%02hhx to eeprom",
				memaddr, data[idx]);
			goto err_mutex_unlock;
		}

		/*
		 * Check whether the data is successfully written by reading
		 * from the same EEPROM memory address. Prime eeseq.data with
		 * the inverted value so a read that silently leaves it
		 * untouched cannot spuriously match below.
		 */
		eeseq.data = ~data[idx];
		ret = idt_eeprom_read_byte(pdev, memaddr, &eeseq.data);
		if (ret != 0)
			goto err_mutex_unlock;

		/* Check whether the read byte is the same as written one */
		if (eeseq.data != data[idx]) {
			dev_err(dev, "Values don't match 0x%02hhx != 0x%02hhx",
				eeseq.data, data[idx]);
			ret = -EREMOTEIO;
			goto err_mutex_unlock;
		}

		/*
		 * Unlock IDT SMBus device. The label lives inside the loop
		 * body on purpose: both the success path and every error
		 * path of one iteration fall through here, and errors then
		 * abort the whole loop via the check below.
		 */
err_mutex_unlock:
		mutex_unlock(&pdev->smb_mtx);
		if (ret != 0)
			return ret;
	}

	return 0;
}
707
708/*
709 * idt_eeprom_read() - EEPROM read operation
710 * @pdev: Pointer to the driver data
711 * @memaddr: Start EEPROM memory address
712 * @len: Length of data to read
713 * @buf: Buffer to read data to
714 */
715static int idt_eeprom_read(struct idt_89hpesx_dev *pdev, u16 memaddr, u16 len,
716 u8 *buf)
717{
718 int ret;
719 u16 idx;
720
721 /* Read data byte-by-byte, retrying if it wasn't successful */
722 for (idx = 0; idx < len; idx++, memaddr++) {
723 /* Lock IDT SMBus device */
724 mutex_lock(&pdev->smb_mtx);
725
726 /* Just read the byte to the buffer */
727 ret = idt_eeprom_read_byte(pdev, memaddr, &buf[idx]);
728
729 /* Unlock IDT SMBus device */
730 mutex_unlock(&pdev->smb_mtx);
731
732 /* Return error if read operation failed */
733 if (ret != 0)
734 return ret;
735 }
736
737 return 0;
738}
739
740/*===========================================================================
741 * CSR IO-operations
742 *===========================================================================
743 */
744
/*
 * idt_csr_write() - CSR write operation
 * @pdev:    Pointer to the driver data
 * @csraddr: CSR address (with no two LS bits)
 * @data:    Data to be written to CSR
 *
 * Performs the write, then issues a read-back of the same register purely to
 * retrieve the CMD status flags and confirm the device accepted the write.
 *
 * Return: zero on success, otherwise a negative errno
 */
static int idt_csr_write(struct idt_89hpesx_dev *pdev, u16 csraddr,
			 const u32 data)
{
	struct device *dev = &pdev->client->dev;
	struct idt_csr_seq csrseq;
	struct idt_smb_seq smbseq;
	int ret;

	/* Initialize SMBus sequence fields */
	smbseq.ccode = pdev->iniccode | CCODE_CSR;
	smbseq.data = (u8 *)&csrseq;

	/* Lock IDT SMBus device */
	mutex_lock(&pdev->smb_mtx);

	/* Perform write operation */
	smbseq.bytecnt = CSR_WR_CNT;
	csrseq.cmd = pdev->inicsrcmd | CSR_OP_WRITE;
	csrseq.csraddr = cpu_to_le16(csraddr);
	csrseq.data = cpu_to_le32(data);
	ret = pdev->smb_write(pdev, &smbseq);
	if (ret != 0) {
		dev_err(dev, "Failed to write 0x%04x: 0x%04x to csr",
			CSR_REAL_ADDR(csraddr), data);
		goto err_mutex_unlock;
	}

	/* Send CSR address to read data from */
	smbseq.bytecnt = CSR_WRRD_CNT;
	csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ;
	ret = pdev->smb_write(pdev, &smbseq);
	if (ret != 0) {
		dev_err(dev, "Failed to init csr address 0x%04x",
			CSR_REAL_ADDR(csraddr));
		goto err_mutex_unlock;
	}

	/* Perform read operation */
	smbseq.bytecnt = CSR_RD_CNT;
	ret = pdev->smb_read(pdev, &smbseq);
	if (ret != 0) {
		dev_err(dev, "Failed to read csr 0x%04x",
			CSR_REAL_ADDR(csraddr));
		goto err_mutex_unlock;
	}

	/* Check whether IDT successfully retrieved CSR data */
	if (csrseq.cmd & (CSR_RERR | CSR_WERR)) {
		dev_err(dev, "IDT failed to perform CSR r/w");
		ret = -EREMOTEIO;
		goto err_mutex_unlock;
	}

	/* Unlock IDT SMBus device */
err_mutex_unlock:
	mutex_unlock(&pdev->smb_mtx);

	return ret;
}
810
811/*
812 * idt_csr_read() - CSR read operation
813 * @pdev: Pointer to the driver data
814 * @csraddr: CSR address (with no two LS bits)
815 * @data: Data to be written to CSR
816 */
817static int idt_csr_read(struct idt_89hpesx_dev *pdev, u16 csraddr, u32 *data)
818{
819 struct device *dev = &pdev->client->dev;
820 struct idt_csr_seq csrseq;
821 struct idt_smb_seq smbseq;
822 int ret;
823
824 /* Initialize SMBus sequence fields */
825 smbseq.ccode = pdev->iniccode | CCODE_CSR;
826 smbseq.data = (u8 *)&csrseq;
827
828 /* Lock IDT SMBus device */
829 mutex_lock(&pdev->smb_mtx);
830
831 /* Send CSR register address before reading it */
832 smbseq.bytecnt = CSR_WRRD_CNT;
833 csrseq.cmd = pdev->inicsrcmd | CSR_OP_READ;
834 csrseq.csraddr = cpu_to_le16(csraddr);
835 ret = pdev->smb_write(pdev, &smbseq);
836 if (ret != 0) {
837 dev_err(dev, "Failed to init csr address 0x%04x",
838 CSR_REAL_ADDR(csraddr));
839 goto err_mutex_unlock;
840 }
841
842 /* Perform read operation */
843 smbseq.bytecnt = CSR_RD_CNT;
844 ret = pdev->smb_read(pdev, &smbseq);
845 if (ret != 0) {
846 dev_err(dev, "Failed to read csr 0x%04hx",
847 CSR_REAL_ADDR(csraddr));
848 goto err_mutex_unlock;
849 }
850
851 /* Check whether IDT successfully retrieved CSR data */
852 if (csrseq.cmd & (CSR_RERR | CSR_WERR)) {
853 dev_err(dev, "IDT failed to perform CSR r/w");
854 ret = -EREMOTEIO;
855 goto err_mutex_unlock;
856 }
857
858 /* Save data retrieved from IDT */
859 *data = le32_to_cpu(csrseq.data);
860
861 /* Unlock IDT SMBus device */
862err_mutex_unlock:
863 mutex_unlock(&pdev->smb_mtx);
864
865 return ret;
866}
867
868/*===========================================================================
869 * Sysfs/debugfs-nodes IO-operations
870 *===========================================================================
871 */
872
873/*
874 * eeprom_write() - EEPROM sysfs-node write callback
875 * @filep: Pointer to the file system node
876 * @kobj: Pointer to the kernel object related to the sysfs-node
877 * @attr: Attributes of the file
878 * @buf: Buffer to write data to
879 * @off: Offset at which data should be written to
880 * @count: Number of bytes to write
881 */
882static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
883 struct bin_attribute *attr,
884 char *buf, loff_t off, size_t count)
885{
886 struct idt_89hpesx_dev *pdev;
887 int ret;
888
889 /* Retrieve driver data */
890 pdev = dev_get_drvdata(kobj_to_dev(kobj));
891
892 /* Perform EEPROM write operation */
893 ret = idt_eeprom_write(pdev, (u16)off, (u16)count, (u8 *)buf);
894 return (ret != 0 ? ret : count);
895}
896
897/*
898 * eeprom_read() - EEPROM sysfs-node read callback
899 * @filep: Pointer to the file system node
900 * @kobj: Pointer to the kernel object related to the sysfs-node
901 * @attr: Attributes of the file
902 * @buf: Buffer to write data to
903 * @off: Offset at which data should be written to
904 * @count: Number of bytes to write
905 */
906static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
907 struct bin_attribute *attr,
908 char *buf, loff_t off, size_t count)
909{
910 struct idt_89hpesx_dev *pdev;
911 int ret;
912
913 /* Retrieve driver data */
914 pdev = dev_get_drvdata(kobj_to_dev(kobj));
915
916 /* Perform EEPROM read operation */
917 ret = idt_eeprom_read(pdev, (u16)off, (u16)count, (u8 *)buf);
918 return (ret != 0 ? ret : count);
919}
920
/*
 * idt_dbgfs_csr_write() - CSR debugfs-node write callback
 * @filep: Pointer to the file system file descriptor
 * @ubuf:  User-space buffer to read data from
 * @count: Size of the buffer
 * @offp:  Offset within the file
 *
 * It accepts either "0x<reg addr>:0x<value>" for saving register address
 * and writing value to specified DWORD register or "0x<reg addr>" for
 * just saving register address in order to perform next read operation.
 *
 * WARNING No spaces are allowed. Incoming string must be strictly formated as:
 * "<reg addr>:<value>". Register address must be aligned within 4 bytes
 * (one DWORD).
 *
 * Return: @count on success, otherwise a negative errno
 */
static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
				   size_t count, loff_t *offp)
{
	struct idt_89hpesx_dev *pdev = filep->private_data;
	char *colon_ch, *csraddr_str, *csrval_str;
	int ret, csraddr_len, csrval_len;
	u32 csraddr, csrval;
	char *buf;

	/* Copy data from User-space */
	buf = kmalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * NOTE(review): a short copy (ret < count) is not treated as an
	 * error here - presumably acceptable for a debugfs node; confirm.
	 */
	ret = simple_write_to_buffer(buf, count, offp, ubuf, count);
	if (ret < 0)
		goto free_buf;
	/* NUL-terminate so the string parsers below cannot overrun */
	buf[count] = 0;

	/* Find position of colon in the buffer */
	colon_ch = strnchr(buf, count, ':');

	/*
	 * If there is colon passed then new CSR value should be parsed as
	 * well, so allocate buffer for CSR address substring.
	 * If no colon is found, then string must have just one number with
	 * no new CSR value
	 */
	if (colon_ch != NULL) {
		csraddr_len = colon_ch - buf;
		csraddr_str =
			kmalloc(sizeof(char)*(csraddr_len + 1), GFP_KERNEL);
		if (csraddr_str == NULL) {
			ret = -ENOMEM;
			goto free_buf;
		}
		/* Copy the register address to the substring buffer */
		strncpy(csraddr_str, buf, csraddr_len);
		csraddr_str[csraddr_len] = '\0';
		/* Register value must follow the colon */
		csrval_str = colon_ch + 1;
		csrval_len = strnlen(csrval_str, count - csraddr_len);
	} else /* if (str_colon == NULL) */ {
		/* buf is already NUL-terminated, so parse it in place */
		csraddr_str = (char *)buf; /* Just to shut warning up */
		csraddr_len = strnlen(csraddr_str, count);
		csrval_str = NULL;
		csrval_len = 0;
	}

	/* Convert CSR address to u32 value */
	ret = kstrtou32(csraddr_str, 0, &csraddr);
	if (ret != 0)
		goto free_csraddr_str;

	/* Check whether passed register address is valid */
	if (csraddr > CSR_MAX || !IS_ALIGNED(csraddr, SZ_4)) {
		ret = -EINVAL;
		goto free_csraddr_str;
	}

	/* Shift register address to the right so to have u16 address */
	pdev->csr = (csraddr >> 2);

	/* Parse new CSR value and send it to IDT, if colon has been found */
	if (colon_ch != NULL) {
		ret = kstrtou32(csrval_str, 0, &csrval);
		if (ret != 0)
			goto free_csraddr_str;

		ret = idt_csr_write(pdev, pdev->csr, csrval);
		if (ret != 0)
			goto free_csraddr_str;
	}

	/*
	 * Free memory only if colon has been found: in the no-colon case
	 * csraddr_str aliases buf, which is freed below
	 */
free_csraddr_str:
	if (colon_ch != NULL)
		kfree(csraddr_str);

	/* Free buffer allocated for data retrieved from User-space */
free_buf:
	kfree(buf);

	return (ret != 0 ? ret : count);
}
1021
1022/*
1023 * idt_dbgfs_csr_read() - CSR debugfs-node read callback
1024 * @filep: Pointer to the file system file descriptor
1025 * @buf: Buffer to write data to
1026 * @count: Size of the buffer
1027 * @offp: Offset within the file
1028 *
1029 * It just prints the pair "0x<reg addr>:0x<value>" to passed buffer.
1030 */
1031#define CSRBUF_SIZE ((size_t)32)
1032static ssize_t idt_dbgfs_csr_read(struct file *filep, char __user *ubuf,
1033 size_t count, loff_t *offp)
1034{
1035 struct idt_89hpesx_dev *pdev = filep->private_data;
1036 u32 csraddr, csrval;
1037 char buf[CSRBUF_SIZE];
1038 int ret, size;
1039
1040 /* Perform CSR read operation */
1041 ret = idt_csr_read(pdev, pdev->csr, &csrval);
1042 if (ret != 0)
1043 return ret;
1044
1045 /* Shift register address to the left so to have real address */
1046 csraddr = ((u32)pdev->csr << 2);
1047
1048 /* Print the "0x<reg addr>:0x<value>" to buffer */
1049 size = snprintf(buf, CSRBUF_SIZE, "0x%05x:0x%08x\n",
1050 (unsigned int)csraddr, (unsigned int)csrval);
1051
1052 /* Copy data to User-space */
1053 return simple_read_from_buffer(ubuf, count, offp, buf, size);
1054}
1055
/*
 * eeprom attribute - EEPROM sysfs-node attributes
 *
 * NOTE Size will be changed in compliance with OF node. EEPROM attribute will
 * be read-only as well if the corresponding flag is specified in OF node.
 * This declares bin_attr_eeprom, which is copied and tweaked per device in
 * idt_create_sysfs_files().
 */
static BIN_ATTR_RW(eeprom, EEPROM_DEF_SIZE);

/*
 * csr_dbgfs_ops - CSR debugfs-node read/write operations
 */
static const struct file_operations csr_dbgfs_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = idt_dbgfs_csr_write,
	.read = idt_dbgfs_csr_read
};
1073
1074/*===========================================================================
1075 * Driver init/deinit methods
1076 *===========================================================================
1077 */
1078
1079/*
1080 * idt_set_defval() - disable EEPROM access by default
1081 * @pdev: Pointer to the driver data
1082 */
1083static void idt_set_defval(struct idt_89hpesx_dev *pdev)
1084{
1085 /* If OF info is missing then use next values */
1086 pdev->eesize = 0;
1087 pdev->eero = true;
1088 pdev->inieecmd = 0;
1089 pdev->eeaddr = 0;
1090}
1091
1092#ifdef CONFIG_OF
1093static const struct i2c_device_id ee_ids[];
1094/*
1095 * idt_ee_match_id() - check whether the node belongs to compatible EEPROMs
1096 */
1097static const struct i2c_device_id *idt_ee_match_id(struct device_node *node)
1098{
1099 const struct i2c_device_id *id = ee_ids;
1100 char devname[I2C_NAME_SIZE];
1101
1102 /* Retrieve the device name without manufacturer name */
1103 if (of_modalias_node(node, devname, sizeof(devname)))
1104 return NULL;
1105
1106 /* Search through the device name */
1107 while (id->name[0]) {
1108 if (strcmp(devname, id->name) == 0)
1109 return id;
1110 id++;
1111 }
1112 return NULL;
1113}
1114
/*
 * idt_get_ofdata() - get IDT i2c-device parameters from device tree
 * @pdev: Pointer to the driver data
 *
 * Looks for a supported child EEPROM node and fills in eesize, eeaddr,
 * inieecmd and eero accordingly; falls back to idt_set_defval() (EEPROM
 * access disabled) when nothing usable is found.
 */
static void idt_get_ofdata(struct idt_89hpesx_dev *pdev)
{
	const struct device_node *node = pdev->client->dev.of_node;
	struct device *dev = &pdev->client->dev;

	/* Read dts node parameters */
	if (node) {
		const struct i2c_device_id *ee_id = NULL;
		struct device_node *child;
		const __be32 *addr_be;
		int len;

		/*
		 * Walk through all child nodes looking for compatible one.
		 * NOTE(review): breaking out of
		 * for_each_available_child_of_node() leaves the child node
		 * refcount elevated and it is never of_node_put() here -
		 * looks like a reference leak; confirm against the OF API.
		 */
		for_each_available_child_of_node(node, child) {
			ee_id = idt_ee_match_id(child);
			if (IS_ERR_OR_NULL(ee_id)) {
				dev_warn(dev, "Skip unsupported child node %s",
					child->full_name);
				continue;
			} else
				break;
		}

		/* If there is no child EEPROM device, then set zero size */
		if (!ee_id) {
			idt_set_defval(pdev);
			return;
		}

		/* Retrieve EEPROM size stashed in the match-table entry */
		pdev->eesize = (u32)ee_id->driver_data;

		/* Get custom EEPROM address from 'reg' attribute */
		addr_be = of_get_property(child, "reg", &len);
		if (!addr_be || (len < sizeof(*addr_be))) {
			dev_warn(dev, "No reg on %s, use default address %d",
				child->full_name, EEPROM_DEF_ADDR);
			pdev->inieecmd = 0;
			pdev->eeaddr = EEPROM_DEF_ADDR << 1;
		} else {
			/* EEPROM_USA makes the device use this address */
			pdev->inieecmd = EEPROM_USA;
			pdev->eeaddr = be32_to_cpup(addr_be) << 1;
		}

		/* Check EEPROM 'read-only' flag */
		if (of_get_property(child, "read-only", NULL))
			pdev->eero = true;
		else /* if (!of_get_property(node, "read-only", NULL)) */
			pdev->eero = false;

		dev_dbg(dev, "EEPROM of %u bytes found by %hhu",
			pdev->eesize, pdev->eeaddr);
	} else {
		dev_warn(dev, "No dts node, EEPROM access disabled");
		idt_set_defval(pdev);
	}
}
1176#else
/*
 * idt_get_ofdata() - !CONFIG_OF stub: no device tree, so EEPROM parameters
 *                    cannot be discovered
 * @pdev: Pointer to the driver data
 */
static void idt_get_ofdata(struct idt_89hpesx_dev *pdev)
{
	struct device *dev = &pdev->client->dev;

	dev_warn(dev, "OF table is unsupported, EEPROM access disabled");

	/* Nothing we can do, just set the default values */
	idt_set_defval(pdev);
}
1186#endif /* CONFIG_OF */
1187
1188/*
1189 * idt_create_pdev() - create and init data structure of the driver
1190 * @client: i2c client of IDT PCIe-switch device
1191 */
1192static struct idt_89hpesx_dev *idt_create_pdev(struct i2c_client *client)
1193{
1194 struct idt_89hpesx_dev *pdev;
1195
1196 /* Allocate memory for driver data */
1197 pdev = devm_kmalloc(&client->dev, sizeof(struct idt_89hpesx_dev),
1198 GFP_KERNEL);
1199 if (pdev == NULL)
1200 return ERR_PTR(-ENOMEM);
1201
1202 /* Initialize basic fields of the data */
1203 pdev->client = client;
1204 i2c_set_clientdata(client, pdev);
1205
1206 /* Read OF nodes information */
1207 idt_get_ofdata(pdev);
1208
1209 /* Initialize basic CSR CMD field - use full DWORD-sized r/w ops */
1210 pdev->inicsrcmd = CSR_DWE;
1211 pdev->csr = CSR_DEF;
1212
1213 /* Enable Packet Error Checking if it's supported by adapter */
1214 if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) {
1215 pdev->iniccode = CCODE_PEC;
1216 client->flags |= I2C_CLIENT_PEC;
1217 } else /* PEC is unsupported */ {
1218 pdev->iniccode = 0;
1219 }
1220
1221 return pdev;
1222}
1223
/*
 * idt_free_pdev() - free data structure of the driver
 * @pdev: Pointer to the driver data
 *
 * The structure itself is devm-allocated, so only the i2c clientdata link
 * needs to be severed here.
 */
static void idt_free_pdev(struct idt_89hpesx_dev *pdev)
{
	/* Clear driver data from device private field */
	i2c_set_clientdata(pdev->client, NULL);
}
1233
/*
 * idt_set_smbus_ops() - set supported SMBus operations
 * @pdev: Pointer to the driver data
 *
 * Probes the adapter capabilities and wires up the fastest available
 * read/write methods, in order of preference: block, i2c-block, word+byte,
 * byte-only. Also initializes the SMBus serialization mutex.
 *
 * Return: zero on success, -EPFNOSUPPORT if no usable operation exists
 */
static int idt_set_smbus_ops(struct idt_89hpesx_dev *pdev)
{
	struct i2c_adapter *adapter = pdev->client->adapter;
	struct device *dev = &pdev->client->dev;

	/* Check i2c adapter read functionality */
	if (i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_READ_BLOCK_DATA)) {
		pdev->smb_read = idt_smb_read_block;
		dev_dbg(dev, "SMBus block-read op chosen");
	} else if (i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
		pdev->smb_read = idt_smb_read_i2c_block;
		dev_dbg(dev, "SMBus i2c-block-read op chosen");
	} else if (i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_READ_WORD_DATA) &&
		   i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
		/* Word path still needs byte reads for odd trailing bytes */
		pdev->smb_read = idt_smb_read_word;
		dev_warn(dev, "Use slow word/byte SMBus read ops");
	} else if (i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
		pdev->smb_read = idt_smb_read_byte;
		dev_warn(dev, "Use slow byte SMBus read op");
	} else /* no supported smbus read operations */ {
		dev_err(dev, "No supported SMBus read op");
		return -EPFNOSUPPORT;
	}

	/* Check i2c adapter write functionality */
	if (i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)) {
		pdev->smb_write = idt_smb_write_block;
		dev_dbg(dev, "SMBus block-write op chosen");
	} else if (i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
		pdev->smb_write = idt_smb_write_i2c_block;
		dev_dbg(dev, "SMBus i2c-block-write op chosen");
	} else if (i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_WRITE_WORD_DATA) &&
		   i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
		/* Word path still needs byte writes for odd trailing bytes */
		pdev->smb_write = idt_smb_write_word;
		dev_warn(dev, "Use slow word/byte SMBus write op");
	} else if (i2c_check_functionality(adapter,
				    I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) {
		pdev->smb_write = idt_smb_write_byte;
		dev_warn(dev, "Use slow byte SMBus write op");
	} else /* no supported smbus write operations */ {
		dev_err(dev, "No supported SMBus write op");
		return -EPFNOSUPPORT;
	}

	/* Initialize IDT SMBus slave interface mutex */
	mutex_init(&pdev->smb_mtx);

	return 0;
}
1297
1298/*
1299 * idt_check_dev() - check whether it's really IDT 89HPESx device
1300 * @pdev: Pointer to the driver data
1301 * Return status of i2c adapter check operation
1302 */
1303static int idt_check_dev(struct idt_89hpesx_dev *pdev)
1304{
1305 struct device *dev = &pdev->client->dev;
1306 u32 viddid;
1307 int ret;
1308
1309 /* Read VID and DID directly from IDT memory space */
1310 ret = idt_csr_read(pdev, IDT_VIDDID_CSR, &viddid);
1311 if (ret != 0) {
1312 dev_err(dev, "Failed to read VID/DID");
1313 return ret;
1314 }
1315
1316 /* Check whether it's IDT device */
1317 if ((viddid & IDT_VID_MASK) != PCI_VENDOR_ID_IDT) {
1318 dev_err(dev, "Got unsupported VID/DID: 0x%08x", viddid);
1319 return -ENODEV;
1320 }
1321
1322 dev_info(dev, "Found IDT 89HPES device VID:0x%04x, DID:0x%04x",
1323 (viddid & IDT_VID_MASK), (viddid >> 16));
1324
1325 return 0;
1326}
1327
1328/*
1329 * idt_create_sysfs_files() - create sysfs attribute files
1330 * @pdev: Pointer to the driver data
1331 * Return status of operation
1332 */
1333static int idt_create_sysfs_files(struct idt_89hpesx_dev *pdev)
1334{
1335 struct device *dev = &pdev->client->dev;
1336 int ret;
1337
1338 /* Don't do anything if EEPROM isn't accessible */
1339 if (pdev->eesize == 0) {
1340 dev_dbg(dev, "Skip creating sysfs-files");
1341 return 0;
1342 }
1343
1344 /* Allocate memory for attribute file */
1345 pdev->ee_file = devm_kmalloc(dev, sizeof(*pdev->ee_file), GFP_KERNEL);
1346 if (!pdev->ee_file)
1347 return -ENOMEM;
1348
1349 /* Copy the declared EEPROM attr structure to change some of fields */
1350 memcpy(pdev->ee_file, &bin_attr_eeprom, sizeof(*pdev->ee_file));
1351
1352 /* In case of read-only EEPROM get rid of write ability */
1353 if (pdev->eero) {
1354 pdev->ee_file->attr.mode &= ~0200;
1355 pdev->ee_file->write = NULL;
1356 }
1357 /* Create EEPROM sysfs file */
1358 pdev->ee_file->size = pdev->eesize;
1359 ret = sysfs_create_bin_file(&dev->kobj, pdev->ee_file);
1360 if (ret != 0) {
1361 dev_err(dev, "Failed to create EEPROM sysfs-node");
1362 return ret;
1363 }
1364
1365 return 0;
1366}
1367
1368/*
1369 * idt_remove_sysfs_files() - remove sysfs attribute files
1370 * @pdev: Pointer to the driver data
1371 */
1372static void idt_remove_sysfs_files(struct idt_89hpesx_dev *pdev)
1373{
1374 struct device *dev = &pdev->client->dev;
1375
1376 /* Don't do anything if EEPROM wasn't accessible */
1377 if (pdev->eesize == 0)
1378 return;
1379
1380 /* Remove EEPROM sysfs file */
1381 sysfs_remove_bin_file(&dev->kobj, pdev->ee_file);
1382}
1383
1384/*
1385 * idt_create_dbgfs_files() - create debugfs files
1386 * @pdev: Pointer to the driver data
1387 */
1388#define CSRNAME_LEN ((size_t)32)
1389static void idt_create_dbgfs_files(struct idt_89hpesx_dev *pdev)
1390{
1391 struct i2c_client *cli = pdev->client;
1392 char fname[CSRNAME_LEN];
1393
1394 /* Create Debugfs directory for CSR file */
1395 snprintf(fname, CSRNAME_LEN, "%d-%04hx", cli->adapter->nr, cli->addr);
1396 pdev->csr_dir = debugfs_create_dir(fname, csr_dbgdir);
1397
1398 /* Create Debugfs file for CSR read/write operations */
1399 pdev->csr_file = debugfs_create_file(cli->name, 0600,
1400 pdev->csr_dir, pdev, &csr_dbgfs_ops);
1401}
1402
/*
 * idt_remove_dbgfs_files() - remove debugfs files
 * @pdev: Pointer to the driver data
 *
 * Recursively deletes the per-device CSR directory together with the
 * CSR file created by idt_create_dbgfs_files().
 */
static void idt_remove_dbgfs_files(struct idt_89hpesx_dev *pdev)
{
	/* Remove the CSR directory and the debugfs node within it */
	debugfs_remove_recursive(pdev->csr_dir);
}
1412
/*
 * idt_probe() - IDT 89HPESx driver probe() callback method
 * @client: i2c client being bound
 * @id: matched entry of idt_ids
 *
 * Allocates the driver data, selects SMBus ops, verifies the device
 * identity, and publishes sysfs/debugfs nodes. Any failure before the
 * debugfs step releases the driver data again.
 *
 * Return: 0 on success, negative errno on failure
 */
static int idt_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct idt_89hpesx_dev *pdev;
	int ret;

	/* Create driver data */
	pdev = idt_create_pdev(client);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/*
	 * Run the fallible setup steps in order; the chain short-circuits
	 * on the first failure so later steps are skipped.
	 */
	ret = idt_set_smbus_ops(pdev);
	if (ret == 0)
		ret = idt_check_dev(pdev);
	if (ret == 0)
		ret = idt_create_sysfs_files(pdev);

	if (ret != 0) {
		idt_free_pdev(pdev);
		return ret;
	}

	/* Debugfs creation is best-effort and cannot fail the probe */
	idt_create_dbgfs_files(pdev);

	return 0;
}
1451
/*
 * idt_remove() - IDT 89HPESx driver remove() callback method
 * @client: i2c client the driver was bound to
 *
 * Tears the device down in reverse order of idt_probe(): debugfs nodes
 * first, then the sysfs EEPROM file, then the driver data itself.
 *
 * Return: always 0
 */
static int idt_remove(struct i2c_client *client)
{
	struct idt_89hpesx_dev *pdev = i2c_get_clientdata(client);

	/* Remove debugfs files first */
	idt_remove_dbgfs_files(pdev);

	/* Remove sysfs files */
	idt_remove_sysfs_files(pdev);

	/* Discard driver data structure */
	idt_free_pdev(pdev);

	return 0;
}
1470
/*
 * ee_ids - array of supported EEPROMs
 *
 * The driver_data field carries the EEPROM capacity in bytes
 * (e.g. 24c32 = 32 kbit = 4096 bytes).
 */
static const struct i2c_device_id ee_ids[] = {
	{ "24c32", 4096},
	{ "24c64", 8192},
	{ "24c128", 16384},
	{ "24c256", 32768},
	{ "24c512", 65536},
	{}
};
MODULE_DEVICE_TABLE(i2c, ee_ids);
1483
/*
 * idt_ids - supported IDT 89HPESx devices
 *
 * Name-only match table; driver_data is unused (0) for all entries.
 * Entries are grouped by device family (presumably NTB-capable switches,
 * then the N3/H/T switch lines — TODO confirm against the IDT datasheets).
 * Parts without an SMBus slave interface are intentionally commented out.
 */
static const struct i2c_device_id idt_ids[] = {
	{ "89hpes8nt2", 0 },
	{ "89hpes12nt3", 0 },

	{ "89hpes24nt6ag2", 0 },
	{ "89hpes32nt8ag2", 0 },
	{ "89hpes32nt8bg2", 0 },
	{ "89hpes12nt12g2", 0 },
	{ "89hpes16nt16g2", 0 },
	{ "89hpes24nt24g2", 0 },
	{ "89hpes32nt24ag2", 0 },
	{ "89hpes32nt24bg2", 0 },

	{ "89hpes12n3", 0 },
	{ "89hpes12n3a", 0 },
	{ "89hpes24n3", 0 },
	{ "89hpes24n3a", 0 },

	{ "89hpes32h8", 0 },
	{ "89hpes32h8g2", 0 },
	{ "89hpes48h12", 0 },
	{ "89hpes48h12g2", 0 },
	{ "89hpes48h12ag2", 0 },
	{ "89hpes16h16", 0 },
	{ "89hpes22h16", 0 },
	{ "89hpes22h16g2", 0 },
	{ "89hpes34h16", 0 },
	{ "89hpes34h16g2", 0 },
	{ "89hpes64h16", 0 },
	{ "89hpes64h16g2", 0 },
	{ "89hpes64h16ag2", 0 },

	/* { "89hpes3t3", 0 }, // No SMBus-slave iface */
	{ "89hpes12t3g2", 0 },
	{ "89hpes24t3g2", 0 },
	/* { "89hpes4t4", 0 }, // No SMBus-slave iface */
	{ "89hpes16t4", 0 },
	{ "89hpes4t4g2", 0 },
	{ "89hpes10t4g2", 0 },
	{ "89hpes16t4g2", 0 },
	{ "89hpes16t4ag2", 0 },
	{ "89hpes5t5", 0 },
	{ "89hpes6t5", 0 },
	{ "89hpes8t5", 0 },
	{ "89hpes8t5a", 0 },
	{ "89hpes24t6", 0 },
	{ "89hpes6t6g2", 0 },
	{ "89hpes24t6g2", 0 },
	{ "89hpes16t7", 0 },
	{ "89hpes32t8", 0 },
	{ "89hpes32t8g2", 0 },
	{ "89hpes48t12", 0 },
	{ "89hpes48t12g2", 0 },
	{ /* END OF LIST */ }
};
MODULE_DEVICE_TABLE(i2c, idt_ids);
1543
/*
 * idt_driver - IDT 89HPESx driver structure
 *
 * Plain i2c driver; devices are matched by name against idt_ids.
 */
static struct i2c_driver idt_driver = {
	.driver = {
		.name = IDT_NAME,
	},
	.probe = idt_probe,
	.remove = idt_remove,
	.id_table = idt_ids,
};
1555
1556/*
1557 * idt_init() - IDT 89HPESx driver init() callback method
1558 */
1559static int __init idt_init(void)
1560{
1561 /* Create Debugfs directory first */
1562 if (debugfs_initialized())
1563 csr_dbgdir = debugfs_create_dir("idt_csr", NULL);
1564
1565 /* Add new i2c-device driver */
1566 return i2c_add_driver(&idt_driver);
1567}
1568module_init(idt_init);
1569
/*
 * idt_exit() - IDT 89HPESx driver exit() callback method
 *
 * Mirror of idt_init(): drops the driver-wide debugfs directory (and any
 * per-device files still under it), then unregisters the i2c driver.
 */
static void __exit idt_exit(void)
{
	/* Discard debugfs directory and all files if any */
	debugfs_remove_recursive(csr_dbgdir);

	/* Unregister i2c-device driver */
	i2c_del_driver(&idt_driver);
}
module_exit(idt_exit);
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 6c1f49a85023..4fd21e86ad56 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -1336,7 +1336,6 @@ static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
1336static struct pci_error_handlers genwqe_err_handler = { 1336static struct pci_error_handlers genwqe_err_handler = {
1337 .error_detected = genwqe_err_error_detected, 1337 .error_detected = genwqe_err_error_detected,
1338 .mmio_enabled = genwqe_err_result_none, 1338 .mmio_enabled = genwqe_err_result_none,
1339 .link_reset = genwqe_err_result_none,
1340 .slot_reset = genwqe_err_slot_reset, 1339 .slot_reset = genwqe_err_slot_reset,
1341 .resume = genwqe_err_resume, 1340 .resume = genwqe_err_resume,
1342}; 1341};
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index cba0837aee2e..e3f4cd8876b5 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -81,12 +81,17 @@ void lkdtm_OVERFLOW(void)
81 (void) recursive_loop(recur_count); 81 (void) recursive_loop(recur_count);
82} 82}
83 83
84static noinline void __lkdtm_CORRUPT_STACK(void *stack)
85{
86 memset(stack, 'a', 64);
87}
88
84noinline void lkdtm_CORRUPT_STACK(void) 89noinline void lkdtm_CORRUPT_STACK(void)
85{ 90{
86 /* Use default char array length that triggers stack protection. */ 91 /* Use default char array length that triggers stack protection. */
87 char data[8]; 92 char data[8];
93 __lkdtm_CORRUPT_STACK(&data);
88 94
89 memset((void *)data, 'a', 64);
90 pr_info("Corrupted stack with '%16s'...\n", data); 95 pr_info("Corrupted stack with '%16s'...\n", data);
91} 96}
92 97
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 16e4cf110930..b9a4cd4a9b68 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -539,7 +539,9 @@ static void __exit lkdtm_module_exit(void)
539 /* Handle test-specific clean-up. */ 539 /* Handle test-specific clean-up. */
540 lkdtm_usercopy_exit(); 540 lkdtm_usercopy_exit();
541 541
542 unregister_jprobe(lkdtm_jprobe); 542 if (lkdtm_jprobe != NULL)
543 unregister_jprobe(lkdtm_jprobe);
544
543 pr_info("Crash point unregistered\n"); 545 pr_info("Crash point unregistered\n");
544} 546}
545 547
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 466afb2611c6..0e7406ccb6dd 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -132,8 +132,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
132 132
133 dev_dbg(dev->dev, "complete amthif cmd_list cb.\n"); 133 dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
134 134
135 cb = list_first_entry_or_null(&dev->amthif_cmd_list.list, 135 cb = list_first_entry_or_null(&dev->amthif_cmd_list, typeof(*cb), list);
136 typeof(*cb), list);
137 if (!cb) { 136 if (!cb) {
138 dev->iamthif_state = MEI_IAMTHIF_IDLE; 137 dev->iamthif_state = MEI_IAMTHIF_IDLE;
139 cl->fp = NULL; 138 cl->fp = NULL;
@@ -167,7 +166,7 @@ int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
167 166
168 struct mei_device *dev = cl->dev; 167 struct mei_device *dev = cl->dev;
169 168
170 list_add_tail(&cb->list, &dev->amthif_cmd_list.list); 169 list_add_tail(&cb->list, &dev->amthif_cmd_list);
171 170
172 /* 171 /*
173 * The previous request is still in processing, queue this one. 172 * The previous request is still in processing, queue this one.
@@ -211,7 +210,7 @@ unsigned int mei_amthif_poll(struct file *file, poll_table *wait)
211 * Return: 0, OK; otherwise, error. 210 * Return: 0, OK; otherwise, error.
212 */ 211 */
213int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 212int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
214 struct mei_cl_cb *cmpl_list) 213 struct list_head *cmpl_list)
215{ 214{
216 int ret; 215 int ret;
217 216
@@ -237,7 +236,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
237 */ 236 */
238int mei_amthif_irq_read_msg(struct mei_cl *cl, 237int mei_amthif_irq_read_msg(struct mei_cl *cl,
239 struct mei_msg_hdr *mei_hdr, 238 struct mei_msg_hdr *mei_hdr,
240 struct mei_cl_cb *cmpl_list) 239 struct list_head *cmpl_list)
241{ 240{
242 struct mei_device *dev; 241 struct mei_device *dev;
243 int ret; 242 int ret;
@@ -312,50 +311,30 @@ void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
312} 311}
313 312
314/** 313/**
315 * mei_clear_list - removes all callbacks associated with file
316 * from mei_cb_list
317 *
318 * @file: file structure
319 * @mei_cb_list: callbacks list
320 *
321 * mei_clear_list is called to clear resources associated with file
322 * when application calls close function or Ctrl-C was pressed
323 */
324static void mei_clear_list(const struct file *file,
325 struct list_head *mei_cb_list)
326{
327 struct mei_cl_cb *cb, *next;
328
329 list_for_each_entry_safe(cb, next, mei_cb_list, list)
330 if (file == cb->fp)
331 mei_io_cb_free(cb);
332}
333
334/**
335* mei_amthif_release - the release function 314* mei_amthif_release - the release function
336* 315*
337* @dev: device structure 316* @dev: device structure
338* @file: pointer to file structure 317* @fp: pointer to file structure
339* 318*
340* Return: 0 on success, <0 on error 319* Return: 0 on success, <0 on error
341*/ 320*/
342int mei_amthif_release(struct mei_device *dev, struct file *file) 321int mei_amthif_release(struct mei_device *dev, struct file *fp)
343{ 322{
344 struct mei_cl *cl = file->private_data; 323 struct mei_cl *cl = fp->private_data;
345 324
346 if (dev->iamthif_open_count > 0) 325 if (dev->iamthif_open_count > 0)
347 dev->iamthif_open_count--; 326 dev->iamthif_open_count--;
348 327
349 if (cl->fp == file && dev->iamthif_state != MEI_IAMTHIF_IDLE) { 328 if (cl->fp == fp && dev->iamthif_state != MEI_IAMTHIF_IDLE) {
350 329
351 dev_dbg(dev->dev, "amthif canceled iamthif state %d\n", 330 dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
352 dev->iamthif_state); 331 dev->iamthif_state);
353 dev->iamthif_canceled = true; 332 dev->iamthif_canceled = true;
354 } 333 }
355 334
356 mei_clear_list(file, &dev->amthif_cmd_list.list); 335 /* Don't clean ctrl_rd_list here, the reads has to be completed */
357 mei_clear_list(file, &cl->rd_completed); 336 mei_io_list_free_fp(&dev->amthif_cmd_list, fp);
358 mei_clear_list(file, &dev->ctrl_rd_list.list); 337 mei_io_list_free_fp(&cl->rd_completed, fp);
359 338
360 return 0; 339 return 0;
361} 340}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 2d9c5dd06e42..cb3e9e0ca049 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -499,6 +499,25 @@ out:
499EXPORT_SYMBOL_GPL(mei_cldev_enable); 499EXPORT_SYMBOL_GPL(mei_cldev_enable);
500 500
501/** 501/**
502 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
503 * callbacks.
504 *
505 * @cldev: client device
506 */
507static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
508{
509 if (cldev->rx_cb) {
510 cancel_work_sync(&cldev->rx_work);
511 cldev->rx_cb = NULL;
512 }
513
514 if (cldev->notif_cb) {
515 cancel_work_sync(&cldev->notif_work);
516 cldev->notif_cb = NULL;
517 }
518}
519
520/**
502 * mei_cldev_disable - disable me client device 521 * mei_cldev_disable - disable me client device
503 * disconnect form the me client 522 * disconnect form the me client
504 * 523 *
@@ -519,6 +538,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
519 538
520 bus = cldev->bus; 539 bus = cldev->bus;
521 540
541 mei_cldev_unregister_callbacks(cldev);
542
522 mutex_lock(&bus->device_lock); 543 mutex_lock(&bus->device_lock);
523 544
524 if (!mei_cl_is_connected(cl)) { 545 if (!mei_cl_is_connected(cl)) {
@@ -542,6 +563,37 @@ out:
542EXPORT_SYMBOL_GPL(mei_cldev_disable); 563EXPORT_SYMBOL_GPL(mei_cldev_disable);
543 564
544/** 565/**
566 * mei_cl_bus_module_get - acquire module of the underlying
567 * hw module.
568 *
569 * @cl: host client
570 *
571 * Return: true on success; false if the module was removed.
572 */
573bool mei_cl_bus_module_get(struct mei_cl *cl)
574{
575 struct mei_cl_device *cldev = cl->cldev;
576
577 if (!cldev)
578 return true;
579
580 return try_module_get(cldev->bus->dev->driver->owner);
581}
582
583/**
584 * mei_cl_bus_module_put - release the underlying hw module.
585 *
586 * @cl: host client
587 */
588void mei_cl_bus_module_put(struct mei_cl *cl)
589{
590 struct mei_cl_device *cldev = cl->cldev;
591
592 if (cldev)
593 module_put(cldev->bus->dev->driver->owner);
594}
595
596/**
545 * mei_cl_device_find - find matching entry in the driver id table 597 * mei_cl_device_find - find matching entry in the driver id table
546 * 598 *
547 * @cldev: me client device 599 * @cldev: me client device
@@ -665,19 +717,12 @@ static int mei_cl_device_remove(struct device *dev)
665 if (!cldev || !dev->driver) 717 if (!cldev || !dev->driver)
666 return 0; 718 return 0;
667 719
668 if (cldev->rx_cb) {
669 cancel_work_sync(&cldev->rx_work);
670 cldev->rx_cb = NULL;
671 }
672 if (cldev->notif_cb) {
673 cancel_work_sync(&cldev->notif_work);
674 cldev->notif_cb = NULL;
675 }
676
677 cldrv = to_mei_cl_driver(dev->driver); 720 cldrv = to_mei_cl_driver(dev->driver);
678 if (cldrv->remove) 721 if (cldrv->remove)
679 ret = cldrv->remove(cldev); 722 ret = cldrv->remove(cldev);
680 723
724 mei_cldev_unregister_callbacks(cldev);
725
681 module_put(THIS_MODULE); 726 module_put(THIS_MODULE);
682 dev->driver = NULL; 727 dev->driver = NULL;
683 return ret; 728 return ret;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index b0395601c6ae..68fe37b5bc52 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -377,19 +377,19 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
377} 377}
378 378
379/** 379/**
380 * __mei_io_list_flush - removes and frees cbs belonging to cl. 380 * __mei_io_list_flush_cl - removes and frees cbs belonging to cl.
381 * 381 *
382 * @list: an instance of our list structure 382 * @head: an instance of our list structure
383 * @cl: host client, can be NULL for flushing the whole list 383 * @cl: host client, can be NULL for flushing the whole list
384 * @free: whether to free the cbs 384 * @free: whether to free the cbs
385 */ 385 */
386static void __mei_io_list_flush(struct mei_cl_cb *list, 386static void __mei_io_list_flush_cl(struct list_head *head,
387 struct mei_cl *cl, bool free) 387 const struct mei_cl *cl, bool free)
388{ 388{
389 struct mei_cl_cb *cb, *next; 389 struct mei_cl_cb *cb, *next;
390 390
391 /* enable removing everything if no cl is specified */ 391 /* enable removing everything if no cl is specified */
392 list_for_each_entry_safe(cb, next, &list->list, list) { 392 list_for_each_entry_safe(cb, next, head, list) {
393 if (!cl || mei_cl_cmp_id(cl, cb->cl)) { 393 if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
394 list_del_init(&cb->list); 394 list_del_init(&cb->list);
395 if (free) 395 if (free)
@@ -399,25 +399,42 @@ static void __mei_io_list_flush(struct mei_cl_cb *list,
399} 399}
400 400
401/** 401/**
402 * mei_io_list_flush - removes list entry belonging to cl. 402 * mei_io_list_flush_cl - removes list entry belonging to cl.
403 * 403 *
404 * @list: An instance of our list structure 404 * @head: An instance of our list structure
405 * @cl: host client 405 * @cl: host client
406 */ 406 */
407void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) 407static inline void mei_io_list_flush_cl(struct list_head *head,
408 const struct mei_cl *cl)
408{ 409{
409 __mei_io_list_flush(list, cl, false); 410 __mei_io_list_flush_cl(head, cl, false);
410} 411}
411 412
412/** 413/**
413 * mei_io_list_free - removes cb belonging to cl and free them 414 * mei_io_list_free_cl - removes cb belonging to cl and free them
414 * 415 *
415 * @list: An instance of our list structure 416 * @head: An instance of our list structure
416 * @cl: host client 417 * @cl: host client
417 */ 418 */
418static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl) 419static inline void mei_io_list_free_cl(struct list_head *head,
420 const struct mei_cl *cl)
419{ 421{
420 __mei_io_list_flush(list, cl, true); 422 __mei_io_list_flush_cl(head, cl, true);
423}
424
425/**
426 * mei_io_list_free_fp - free cb from a list that matches file pointer
427 *
428 * @head: io list
429 * @fp: file pointer (matching cb file object), may be NULL
430 */
431void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
432{
433 struct mei_cl_cb *cb, *next;
434
435 list_for_each_entry_safe(cb, next, head, list)
436 if (!fp || fp == cb->fp)
437 mei_io_cb_free(cb);
421} 438}
422 439
423/** 440/**
@@ -479,7 +496,7 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
479 if (!cb) 496 if (!cb)
480 return NULL; 497 return NULL;
481 498
482 list_add_tail(&cb->list, &cl->dev->ctrl_wr_list.list); 499 list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
483 return cb; 500 return cb;
484} 501}
485 502
@@ -504,27 +521,6 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
504} 521}
505 522
506/** 523/**
507 * mei_cl_read_cb_flush - free client's read pending and completed cbs
508 * for a specific file
509 *
510 * @cl: host client
511 * @fp: file pointer (matching cb file object), may be NULL
512 */
513void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
514{
515 struct mei_cl_cb *cb, *next;
516
517 list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
518 if (!fp || fp == cb->fp)
519 mei_io_cb_free(cb);
520
521
522 list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
523 if (!fp || fp == cb->fp)
524 mei_io_cb_free(cb);
525}
526
527/**
528 * mei_cl_flush_queues - flushes queue lists belonging to cl. 524 * mei_cl_flush_queues - flushes queue lists belonging to cl.
529 * 525 *
530 * @cl: host client 526 * @cl: host client
@@ -542,18 +538,16 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
542 dev = cl->dev; 538 dev = cl->dev;
543 539
544 cl_dbg(dev, cl, "remove list entry belonging to cl\n"); 540 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
545 mei_io_list_free(&cl->dev->write_list, cl); 541 mei_io_list_free_cl(&cl->dev->write_list, cl);
546 mei_io_list_free(&cl->dev->write_waiting_list, cl); 542 mei_io_list_free_cl(&cl->dev->write_waiting_list, cl);
547 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); 543 mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
548 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); 544 mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
549 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); 545 mei_io_list_free_fp(&cl->rd_pending, fp);
550 546 mei_io_list_free_fp(&cl->rd_completed, fp);
551 mei_cl_read_cb_flush(cl, fp);
552 547
553 return 0; 548 return 0;
554} 549}
555 550
556
557/** 551/**
558 * mei_cl_init - initializes cl. 552 * mei_cl_init - initializes cl.
559 * 553 *
@@ -756,7 +750,7 @@ static void mei_cl_wake_all(struct mei_cl *cl)
756 * 750 *
757 * @cl: host client 751 * @cl: host client
758 */ 752 */
759void mei_cl_set_disconnected(struct mei_cl *cl) 753static void mei_cl_set_disconnected(struct mei_cl *cl)
760{ 754{
761 struct mei_device *dev = cl->dev; 755 struct mei_device *dev = cl->dev;
762 756
@@ -765,15 +759,18 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
765 return; 759 return;
766 760
767 cl->state = MEI_FILE_DISCONNECTED; 761 cl->state = MEI_FILE_DISCONNECTED;
768 mei_io_list_free(&dev->write_list, cl); 762 mei_io_list_free_cl(&dev->write_list, cl);
769 mei_io_list_free(&dev->write_waiting_list, cl); 763 mei_io_list_free_cl(&dev->write_waiting_list, cl);
770 mei_io_list_flush(&dev->ctrl_rd_list, cl); 764 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
771 mei_io_list_flush(&dev->ctrl_wr_list, cl); 765 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
766 mei_io_list_free_cl(&dev->amthif_cmd_list, cl);
772 mei_cl_wake_all(cl); 767 mei_cl_wake_all(cl);
773 cl->rx_flow_ctrl_creds = 0; 768 cl->rx_flow_ctrl_creds = 0;
774 cl->tx_flow_ctrl_creds = 0; 769 cl->tx_flow_ctrl_creds = 0;
775 cl->timer_count = 0; 770 cl->timer_count = 0;
776 771
772 mei_cl_bus_module_put(cl);
773
777 if (!cl->me_cl) 774 if (!cl->me_cl)
778 return; 775 return;
779 776
@@ -829,7 +826,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
829 return ret; 826 return ret;
830 } 827 }
831 828
832 list_move_tail(&cb->list, &dev->ctrl_rd_list.list); 829 list_move_tail(&cb->list, &dev->ctrl_rd_list);
833 cl->timer_count = MEI_CONNECT_TIMEOUT; 830 cl->timer_count = MEI_CONNECT_TIMEOUT;
834 mei_schedule_stall_timer(dev); 831 mei_schedule_stall_timer(dev);
835 832
@@ -847,7 +844,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
847 * Return: 0, OK; otherwise, error. 844 * Return: 0, OK; otherwise, error.
848 */ 845 */
849int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, 846int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
850 struct mei_cl_cb *cmpl_list) 847 struct list_head *cmpl_list)
851{ 848{
852 struct mei_device *dev = cl->dev; 849 struct mei_device *dev = cl->dev;
853 u32 msg_slots; 850 u32 msg_slots;
@@ -862,7 +859,7 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
862 859
863 ret = mei_cl_send_disconnect(cl, cb); 860 ret = mei_cl_send_disconnect(cl, cb);
864 if (ret) 861 if (ret)
865 list_move_tail(&cb->list, &cmpl_list->list); 862 list_move_tail(&cb->list, cmpl_list);
866 863
867 return ret; 864 return ret;
868} 865}
@@ -984,7 +981,7 @@ static bool mei_cl_is_other_connecting(struct mei_cl *cl)
984 981
985 dev = cl->dev; 982 dev = cl->dev;
986 983
987 list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) { 984 list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
988 if (cb->fop_type == MEI_FOP_CONNECT && 985 if (cb->fop_type == MEI_FOP_CONNECT &&
989 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl)) 986 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
990 return true; 987 return true;
@@ -1015,7 +1012,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
1015 return ret; 1012 return ret;
1016 } 1013 }
1017 1014
1018 list_move_tail(&cb->list, &dev->ctrl_rd_list.list); 1015 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1019 cl->timer_count = MEI_CONNECT_TIMEOUT; 1016 cl->timer_count = MEI_CONNECT_TIMEOUT;
1020 mei_schedule_stall_timer(dev); 1017 mei_schedule_stall_timer(dev);
1021 return 0; 1018 return 0;
@@ -1031,7 +1028,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
1031 * Return: 0, OK; otherwise, error. 1028 * Return: 0, OK; otherwise, error.
1032 */ 1029 */
1033int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, 1030int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1034 struct mei_cl_cb *cmpl_list) 1031 struct list_head *cmpl_list)
1035{ 1032{
1036 struct mei_device *dev = cl->dev; 1033 struct mei_device *dev = cl->dev;
1037 u32 msg_slots; 1034 u32 msg_slots;
@@ -1049,7 +1046,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1049 1046
1050 rets = mei_cl_send_connect(cl, cb); 1047 rets = mei_cl_send_connect(cl, cb);
1051 if (rets) 1048 if (rets)
1052 list_move_tail(&cb->list, &cmpl_list->list); 1049 list_move_tail(&cb->list, cmpl_list);
1053 1050
1054 return rets; 1051 return rets;
1055} 1052}
@@ -1077,13 +1074,17 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
1077 1074
1078 dev = cl->dev; 1075 dev = cl->dev;
1079 1076
1077 if (!mei_cl_bus_module_get(cl))
1078 return -ENODEV;
1079
1080 rets = mei_cl_set_connecting(cl, me_cl); 1080 rets = mei_cl_set_connecting(cl, me_cl);
1081 if (rets) 1081 if (rets)
1082 return rets; 1082 goto nortpm;
1083 1083
1084 if (mei_cl_is_fixed_address(cl)) { 1084 if (mei_cl_is_fixed_address(cl)) {
1085 cl->state = MEI_FILE_CONNECTED; 1085 cl->state = MEI_FILE_CONNECTED;
1086 return 0; 1086 rets = 0;
1087 goto nortpm;
1087 } 1088 }
1088 1089
1089 rets = pm_runtime_get(dev->dev); 1090 rets = pm_runtime_get(dev->dev);
@@ -1117,8 +1118,8 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
1117 1118
1118 if (!mei_cl_is_connected(cl)) { 1119 if (!mei_cl_is_connected(cl)) {
1119 if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) { 1120 if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
1120 mei_io_list_flush(&dev->ctrl_rd_list, cl); 1121 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
1121 mei_io_list_flush(&dev->ctrl_wr_list, cl); 1122 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
1122 /* ignore disconnect return valuue; 1123 /* ignore disconnect return valuue;
1123 * in case of failure reset will be invoked 1124 * in case of failure reset will be invoked
1124 */ 1125 */
@@ -1270,7 +1271,7 @@ enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1270 * Return: 0 on such and error otherwise. 1271 * Return: 0 on such and error otherwise.
1271 */ 1272 */
1272int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, 1273int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1273 struct mei_cl_cb *cmpl_list) 1274 struct list_head *cmpl_list)
1274{ 1275{
1275 struct mei_device *dev = cl->dev; 1276 struct mei_device *dev = cl->dev;
1276 u32 msg_slots; 1277 u32 msg_slots;
@@ -1288,11 +1289,11 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1288 ret = mei_hbm_cl_notify_req(dev, cl, request); 1289 ret = mei_hbm_cl_notify_req(dev, cl, request);
1289 if (ret) { 1290 if (ret) {
1290 cl->status = ret; 1291 cl->status = ret;
1291 list_move_tail(&cb->list, &cmpl_list->list); 1292 list_move_tail(&cb->list, cmpl_list);
1292 return ret; 1293 return ret;
1293 } 1294 }
1294 1295
1295 list_move_tail(&cb->list, &dev->ctrl_rd_list.list); 1296 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1296 return 0; 1297 return 0;
1297} 1298}
1298 1299
@@ -1325,6 +1326,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
1325 return -EOPNOTSUPP; 1326 return -EOPNOTSUPP;
1326 } 1327 }
1327 1328
1329 if (!mei_cl_is_connected(cl))
1330 return -ENODEV;
1331
1328 rets = pm_runtime_get(dev->dev); 1332 rets = pm_runtime_get(dev->dev);
1329 if (rets < 0 && rets != -EINPROGRESS) { 1333 if (rets < 0 && rets != -EINPROGRESS) {
1330 pm_runtime_put_noidle(dev->dev); 1334 pm_runtime_put_noidle(dev->dev);
@@ -1344,7 +1348,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
1344 rets = -ENODEV; 1348 rets = -ENODEV;
1345 goto out; 1349 goto out;
1346 } 1350 }
1347 list_move_tail(&cb->list, &dev->ctrl_rd_list.list); 1351 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1348 } 1352 }
1349 1353
1350 mutex_unlock(&dev->device_lock); 1354 mutex_unlock(&dev->device_lock);
@@ -1419,6 +1423,11 @@ int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1419 1423
1420 dev = cl->dev; 1424 dev = cl->dev;
1421 1425
1426 if (!dev->hbm_f_ev_supported) {
1427 cl_dbg(dev, cl, "notifications not supported\n");
1428 return -EOPNOTSUPP;
1429 }
1430
1422 if (!mei_cl_is_connected(cl)) 1431 if (!mei_cl_is_connected(cl))
1423 return -ENODEV; 1432 return -ENODEV;
1424 1433
@@ -1519,7 +1528,7 @@ nortpm:
1519 * Return: 0, OK; otherwise error. 1528 * Return: 0, OK; otherwise error.
1520 */ 1529 */
1521int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 1530int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1522 struct mei_cl_cb *cmpl_list) 1531 struct list_head *cmpl_list)
1523{ 1532{
1524 struct mei_device *dev; 1533 struct mei_device *dev;
1525 struct mei_msg_data *buf; 1534 struct mei_msg_data *buf;
@@ -1591,13 +1600,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1591 } 1600 }
1592 1601
1593 if (mei_hdr.msg_complete) 1602 if (mei_hdr.msg_complete)
1594 list_move_tail(&cb->list, &dev->write_waiting_list.list); 1603 list_move_tail(&cb->list, &dev->write_waiting_list);
1595 1604
1596 return 0; 1605 return 0;
1597 1606
1598err: 1607err:
1599 cl->status = rets; 1608 cl->status = rets;
1600 list_move_tail(&cb->list, &cmpl_list->list); 1609 list_move_tail(&cb->list, cmpl_list);
1601 return rets; 1610 return rets;
1602} 1611}
1603 1612
@@ -1687,9 +1696,9 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
1687 1696
1688out: 1697out:
1689 if (mei_hdr.msg_complete) 1698 if (mei_hdr.msg_complete)
1690 list_add_tail(&cb->list, &dev->write_waiting_list.list); 1699 list_add_tail(&cb->list, &dev->write_waiting_list);
1691 else 1700 else
1692 list_add_tail(&cb->list, &dev->write_list.list); 1701 list_add_tail(&cb->list, &dev->write_list);
1693 1702
1694 cb = NULL; 1703 cb = NULL;
1695 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { 1704 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index f2545af9be7b..545ae319ba90 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -83,17 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
83 * MEI IO Functions 83 * MEI IO Functions
84 */ 84 */
85void mei_io_cb_free(struct mei_cl_cb *priv_cb); 85void mei_io_cb_free(struct mei_cl_cb *priv_cb);
86 86void mei_io_list_free_fp(struct list_head *head, const struct file *fp);
87/**
88 * mei_io_list_init - Sets up a queue list.
89 *
90 * @list: An instance cl callback structure
91 */
92static inline void mei_io_list_init(struct mei_cl_cb *list)
93{
94 INIT_LIST_HEAD(&list->list);
95}
96void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
97 87
98/* 88/*
99 * MEI Host Client Functions 89 * MEI Host Client Functions
@@ -110,7 +100,6 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);
110 100
111struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, 101struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
112 const struct file *fp); 102 const struct file *fp);
113void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
114struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, 103struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
115 enum mei_cb_file_ops type, 104 enum mei_cb_file_ops type,
116 const struct file *fp); 105 const struct file *fp);
@@ -209,19 +198,18 @@ static inline u8 mei_cl_host_addr(const struct mei_cl *cl)
209} 198}
210 199
211int mei_cl_disconnect(struct mei_cl *cl); 200int mei_cl_disconnect(struct mei_cl *cl);
212void mei_cl_set_disconnected(struct mei_cl *cl);
213int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, 201int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
214 struct mei_cl_cb *cmpl_list); 202 struct list_head *cmpl_list);
215int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, 203int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
216 const struct file *file); 204 const struct file *file);
217int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, 205int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
218 struct mei_cl_cb *cmpl_list); 206 struct list_head *cmpl_list);
219int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp); 207int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
220int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr, 208int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
221 struct mei_cl_cb *cmpl_list); 209 struct list_head *cmpl_list);
222int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb); 210int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
223int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 211int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
224 struct mei_cl_cb *cmpl_list); 212 struct list_head *cmpl_list);
225 213
226void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); 214void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
227 215
@@ -232,7 +220,7 @@ enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
232int mei_cl_notify_request(struct mei_cl *cl, 220int mei_cl_notify_request(struct mei_cl *cl,
233 const struct file *file, u8 request); 221 const struct file *file, u8 request);
234int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, 222int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
235 struct mei_cl_cb *cmpl_list); 223 struct list_head *cmpl_list);
236int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev); 224int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
237void mei_cl_notify(struct mei_cl *cl); 225void mei_cl_notify(struct mei_cl *cl);
238 226
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 25b4a1ba522d..ba3a774c8d71 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -815,7 +815,7 @@ static void mei_hbm_cl_res(struct mei_device *dev,
815 struct mei_cl_cb *cb, *next; 815 struct mei_cl_cb *cb, *next;
816 816
817 cl = NULL; 817 cl = NULL;
818 list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { 818 list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {
819 819
820 cl = cb->cl; 820 cl = cb->cl;
821 821
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index a05375a3338a..71216affcab1 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -140,6 +140,19 @@ static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
140} 140}
141 141
142/** 142/**
143 * mei_hcsr_set_hig - set host interrupt (set H_IG)
144 *
145 * @dev: the device structure
146 */
147static inline void mei_hcsr_set_hig(struct mei_device *dev)
148{
149 u32 hcsr;
150
151 hcsr = mei_hcsr_read(dev) | H_IG;
152 mei_hcsr_set(dev, hcsr);
153}
154
155/**
143 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register 156 * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
144 * 157 *
145 * @dev: the device structure 158 * @dev: the device structure
@@ -381,6 +394,19 @@ static bool mei_me_hw_is_ready(struct mei_device *dev)
381} 394}
382 395
383/** 396/**
397 * mei_me_hw_is_resetting - check whether the me(hw) is in reset
398 *
399 * @dev: mei device
400 * Return: bool
401 */
402static bool mei_me_hw_is_resetting(struct mei_device *dev)
403{
404 u32 mecsr = mei_me_mecsr_read(dev);
405
406 return (mecsr & ME_RST_HRA) == ME_RST_HRA;
407}
408
409/**
384 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready 410 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
385 * or timeout is reached 411 * or timeout is reached
386 * 412 *
@@ -505,7 +531,6 @@ static int mei_me_hbuf_write(struct mei_device *dev,
505 unsigned long rem; 531 unsigned long rem;
506 unsigned long length = header->length; 532 unsigned long length = header->length;
507 u32 *reg_buf = (u32 *)buf; 533 u32 *reg_buf = (u32 *)buf;
508 u32 hcsr;
509 u32 dw_cnt; 534 u32 dw_cnt;
510 int i; 535 int i;
511 int empty_slots; 536 int empty_slots;
@@ -532,8 +557,7 @@ static int mei_me_hbuf_write(struct mei_device *dev,
532 mei_me_hcbww_write(dev, reg); 557 mei_me_hcbww_write(dev, reg);
533 } 558 }
534 559
535 hcsr = mei_hcsr_read(dev) | H_IG; 560 mei_hcsr_set_hig(dev);
536 mei_hcsr_set(dev, hcsr);
537 if (!mei_me_hw_is_ready(dev)) 561 if (!mei_me_hw_is_ready(dev))
538 return -EIO; 562 return -EIO;
539 563
@@ -580,7 +604,6 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
580 unsigned long buffer_length) 604 unsigned long buffer_length)
581{ 605{
582 u32 *reg_buf = (u32 *)buffer; 606 u32 *reg_buf = (u32 *)buffer;
583 u32 hcsr;
584 607
585 for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32)) 608 for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
586 *reg_buf++ = mei_me_mecbrw_read(dev); 609 *reg_buf++ = mei_me_mecbrw_read(dev);
@@ -591,8 +614,7 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
591 memcpy(reg_buf, &reg, buffer_length); 614 memcpy(reg_buf, &reg, buffer_length);
592 } 615 }
593 616
594 hcsr = mei_hcsr_read(dev) | H_IG; 617 mei_hcsr_set_hig(dev);
595 mei_hcsr_set(dev, hcsr);
596 return 0; 618 return 0;
597} 619}
598 620
@@ -1189,7 +1211,7 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
1189irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) 1211irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
1190{ 1212{
1191 struct mei_device *dev = (struct mei_device *) dev_id; 1213 struct mei_device *dev = (struct mei_device *) dev_id;
1192 struct mei_cl_cb complete_list; 1214 struct list_head cmpl_list;
1193 s32 slots; 1215 s32 slots;
1194 u32 hcsr; 1216 u32 hcsr;
1195 int rets = 0; 1217 int rets = 0;
@@ -1201,7 +1223,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
1201 hcsr = mei_hcsr_read(dev); 1223 hcsr = mei_hcsr_read(dev);
1202 me_intr_clear(dev, hcsr); 1224 me_intr_clear(dev, hcsr);
1203 1225
1204 mei_io_list_init(&complete_list); 1226 INIT_LIST_HEAD(&cmpl_list);
1205 1227
1206 /* check if ME wants a reset */ 1228 /* check if ME wants a reset */
1207 if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { 1229 if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
@@ -1210,6 +1232,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
1210 goto end; 1232 goto end;
1211 } 1233 }
1212 1234
1235 if (mei_me_hw_is_resetting(dev))
1236 mei_hcsr_set_hig(dev);
1237
1213 mei_me_pg_intr(dev, me_intr_src(hcsr)); 1238 mei_me_pg_intr(dev, me_intr_src(hcsr));
1214 1239
1215 /* check if we need to start the dev */ 1240 /* check if we need to start the dev */
@@ -1227,7 +1252,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
1227 slots = mei_count_full_read_slots(dev); 1252 slots = mei_count_full_read_slots(dev);
1228 while (slots > 0) { 1253 while (slots > 0) {
1229 dev_dbg(dev->dev, "slots to read = %08x\n", slots); 1254 dev_dbg(dev->dev, "slots to read = %08x\n", slots);
1230 rets = mei_irq_read_handler(dev, &complete_list, &slots); 1255 rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
1231 /* There is a race between ME write and interrupt delivery: 1256 /* There is a race between ME write and interrupt delivery:
1232 * Not all data is always available immediately after the 1257 * Not all data is always available immediately after the
1233 * interrupt, so try to read again on the next interrupt. 1258 * interrupt, so try to read again on the next interrupt.
@@ -1252,11 +1277,11 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
1252 */ 1277 */
1253 if (dev->pg_event != MEI_PG_EVENT_WAIT && 1278 if (dev->pg_event != MEI_PG_EVENT_WAIT &&
1254 dev->pg_event != MEI_PG_EVENT_RECEIVED) { 1279 dev->pg_event != MEI_PG_EVENT_RECEIVED) {
1255 rets = mei_irq_write_handler(dev, &complete_list); 1280 rets = mei_irq_write_handler(dev, &cmpl_list);
1256 dev->hbuf_is_ready = mei_hbuf_is_ready(dev); 1281 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
1257 } 1282 }
1258 1283
1259 mei_irq_compl_handler(dev, &complete_list); 1284 mei_irq_compl_handler(dev, &cmpl_list);
1260 1285
1261end: 1286end:
1262 dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); 1287 dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
@@ -1389,7 +1414,7 @@ const struct mei_cfg mei_me_pch8_sps_cfg = {
1389 * @pdev: The pci device structure 1414 * @pdev: The pci device structure
1390 * @cfg: per device generation config 1415 * @cfg: per device generation config
1391 * 1416 *
1392 * Return: The mei_device_device pointer on success, NULL on failure. 1417 * Return: The mei_device pointer on success, NULL on failure.
1393 */ 1418 */
1394struct mei_device *mei_me_dev_init(struct pci_dev *pdev, 1419struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
1395 const struct mei_cfg *cfg) 1420 const struct mei_cfg *cfg)
@@ -1397,8 +1422,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
1397 struct mei_device *dev; 1422 struct mei_device *dev;
1398 struct mei_me_hw *hw; 1423 struct mei_me_hw *hw;
1399 1424
1400 dev = kzalloc(sizeof(struct mei_device) + 1425 dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
1401 sizeof(struct mei_me_hw), GFP_KERNEL); 1426 sizeof(struct mei_me_hw), GFP_KERNEL);
1402 if (!dev) 1427 if (!dev)
1403 return NULL; 1428 return NULL;
1404 hw = to_me_hw(dev); 1429 hw = to_me_hw(dev);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index e9f8c0aeec13..24e4a4c96606 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1057,7 +1057,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
1057{ 1057{
1058 struct mei_device *dev = (struct mei_device *) dev_id; 1058 struct mei_device *dev = (struct mei_device *) dev_id;
1059 struct mei_txe_hw *hw = to_txe_hw(dev); 1059 struct mei_txe_hw *hw = to_txe_hw(dev);
1060 struct mei_cl_cb complete_list; 1060 struct list_head cmpl_list;
1061 s32 slots; 1061 s32 slots;
1062 int rets = 0; 1062 int rets = 0;
1063 1063
@@ -1069,7 +1069,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
1069 1069
1070 /* initialize our complete list */ 1070 /* initialize our complete list */
1071 mutex_lock(&dev->device_lock); 1071 mutex_lock(&dev->device_lock);
1072 mei_io_list_init(&complete_list); 1072 INIT_LIST_HEAD(&cmpl_list);
1073 1073
1074 if (pci_dev_msi_enabled(to_pci_dev(dev->dev))) 1074 if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
1075 mei_txe_check_and_ack_intrs(dev, true); 1075 mei_txe_check_and_ack_intrs(dev, true);
@@ -1126,7 +1126,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
1126 slots = mei_count_full_read_slots(dev); 1126 slots = mei_count_full_read_slots(dev);
1127 if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) { 1127 if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
1128 /* Read from TXE */ 1128 /* Read from TXE */
1129 rets = mei_irq_read_handler(dev, &complete_list, &slots); 1129 rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
1130 if (rets && dev->dev_state != MEI_DEV_RESETTING) { 1130 if (rets && dev->dev_state != MEI_DEV_RESETTING) {
1131 dev_err(dev->dev, 1131 dev_err(dev->dev,
1132 "mei_irq_read_handler ret = %d.\n", rets); 1132 "mei_irq_read_handler ret = %d.\n", rets);
@@ -1144,14 +1144,14 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
1144 if (hw->aliveness && dev->hbuf_is_ready) { 1144 if (hw->aliveness && dev->hbuf_is_ready) {
1145 /* get the real register value */ 1145 /* get the real register value */
1146 dev->hbuf_is_ready = mei_hbuf_is_ready(dev); 1146 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
1147 rets = mei_irq_write_handler(dev, &complete_list); 1147 rets = mei_irq_write_handler(dev, &cmpl_list);
1148 if (rets && rets != -EMSGSIZE) 1148 if (rets && rets != -EMSGSIZE)
1149 dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n", 1149 dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
1150 rets); 1150 rets);
1151 dev->hbuf_is_ready = mei_hbuf_is_ready(dev); 1151 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
1152 } 1152 }
1153 1153
1154 mei_irq_compl_handler(dev, &complete_list); 1154 mei_irq_compl_handler(dev, &cmpl_list);
1155 1155
1156end: 1156end:
1157 dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); 1157 dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
@@ -1207,8 +1207,8 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
1207 struct mei_device *dev; 1207 struct mei_device *dev;
1208 struct mei_txe_hw *hw; 1208 struct mei_txe_hw *hw;
1209 1209
1210 dev = kzalloc(sizeof(struct mei_device) + 1210 dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
1211 sizeof(struct mei_txe_hw), GFP_KERNEL); 1211 sizeof(struct mei_txe_hw), GFP_KERNEL);
1212 if (!dev) 1212 if (!dev)
1213 return NULL; 1213 return NULL;
1214 1214
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
index ce3ed0b88b0c..e1e8b66d7648 100644
--- a/drivers/misc/mei/hw-txe.h
+++ b/drivers/misc/mei/hw-txe.h
@@ -45,7 +45,7 @@
45 * @intr_cause: translated interrupt cause 45 * @intr_cause: translated interrupt cause
46 */ 46 */
47struct mei_txe_hw { 47struct mei_txe_hw {
48 void __iomem *mem_addr[NUM_OF_MEM_BARS]; 48 void __iomem * const *mem_addr;
49 u32 aliveness; 49 u32 aliveness;
50 u32 readiness; 50 u32 readiness;
51 u32 slots; 51 u32 slots;
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 41e5760a6886..cfb1cdf176fa 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -349,16 +349,16 @@ EXPORT_SYMBOL_GPL(mei_stop);
349bool mei_write_is_idle(struct mei_device *dev) 349bool mei_write_is_idle(struct mei_device *dev)
350{ 350{
351 bool idle = (dev->dev_state == MEI_DEV_ENABLED && 351 bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
352 list_empty(&dev->ctrl_wr_list.list) && 352 list_empty(&dev->ctrl_wr_list) &&
353 list_empty(&dev->write_list.list) && 353 list_empty(&dev->write_list) &&
354 list_empty(&dev->write_waiting_list.list)); 354 list_empty(&dev->write_waiting_list));
355 355
356 dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n", 356 dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
357 idle, 357 idle,
358 mei_dev_state_str(dev->dev_state), 358 mei_dev_state_str(dev->dev_state),
359 list_empty(&dev->ctrl_wr_list.list), 359 list_empty(&dev->ctrl_wr_list),
360 list_empty(&dev->write_list.list), 360 list_empty(&dev->write_list),
361 list_empty(&dev->write_waiting_list.list)); 361 list_empty(&dev->write_waiting_list));
362 362
363 return idle; 363 return idle;
364} 364}
@@ -388,17 +388,17 @@ void mei_device_init(struct mei_device *dev,
388 dev->dev_state = MEI_DEV_INITIALIZING; 388 dev->dev_state = MEI_DEV_INITIALIZING;
389 dev->reset_count = 0; 389 dev->reset_count = 0;
390 390
391 mei_io_list_init(&dev->write_list); 391 INIT_LIST_HEAD(&dev->write_list);
392 mei_io_list_init(&dev->write_waiting_list); 392 INIT_LIST_HEAD(&dev->write_waiting_list);
393 mei_io_list_init(&dev->ctrl_wr_list); 393 INIT_LIST_HEAD(&dev->ctrl_wr_list);
394 mei_io_list_init(&dev->ctrl_rd_list); 394 INIT_LIST_HEAD(&dev->ctrl_rd_list);
395 395
396 INIT_DELAYED_WORK(&dev->timer_work, mei_timer); 396 INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
397 INIT_WORK(&dev->reset_work, mei_reset_work); 397 INIT_WORK(&dev->reset_work, mei_reset_work);
398 INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work); 398 INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);
399 399
400 INIT_LIST_HEAD(&dev->iamthif_cl.link); 400 INIT_LIST_HEAD(&dev->iamthif_cl.link);
401 mei_io_list_init(&dev->amthif_cmd_list); 401 INIT_LIST_HEAD(&dev->amthif_cmd_list);
402 402
403 bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); 403 bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
404 dev->open_handle_count = 0; 404 dev->open_handle_count = 0;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index b584749bcc4a..406e9e2b2fff 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -35,14 +35,14 @@
35 * for the completed callbacks 35 * for the completed callbacks
36 * 36 *
37 * @dev: mei device 37 * @dev: mei device
38 * @compl_list: list of completed cbs 38 * @cmpl_list: list of completed cbs
39 */ 39 */
40void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list) 40void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
41{ 41{
42 struct mei_cl_cb *cb, *next; 42 struct mei_cl_cb *cb, *next;
43 struct mei_cl *cl; 43 struct mei_cl *cl;
44 44
45 list_for_each_entry_safe(cb, next, &compl_list->list, list) { 45 list_for_each_entry_safe(cb, next, cmpl_list, list) {
46 cl = cb->cl; 46 cl = cb->cl;
47 list_del_init(&cb->list); 47 list_del_init(&cb->list);
48 48
@@ -92,13 +92,13 @@ void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
92 * 92 *
93 * @cl: reading client 93 * @cl: reading client
94 * @mei_hdr: header of mei client message 94 * @mei_hdr: header of mei client message
95 * @complete_list: completion list 95 * @cmpl_list: completion list
96 * 96 *
97 * Return: always 0 97 * Return: always 0
98 */ 98 */
99int mei_cl_irq_read_msg(struct mei_cl *cl, 99int mei_cl_irq_read_msg(struct mei_cl *cl,
100 struct mei_msg_hdr *mei_hdr, 100 struct mei_msg_hdr *mei_hdr,
101 struct mei_cl_cb *complete_list) 101 struct list_head *cmpl_list)
102{ 102{
103 struct mei_device *dev = cl->dev; 103 struct mei_device *dev = cl->dev;
104 struct mei_cl_cb *cb; 104 struct mei_cl_cb *cb;
@@ -144,7 +144,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
144 144
145 if (mei_hdr->msg_complete) { 145 if (mei_hdr->msg_complete) {
146 cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); 146 cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
147 list_move_tail(&cb->list, &complete_list->list); 147 list_move_tail(&cb->list, cmpl_list);
148 } else { 148 } else {
149 pm_runtime_mark_last_busy(dev->dev); 149 pm_runtime_mark_last_busy(dev->dev);
150 pm_request_autosuspend(dev->dev); 150 pm_request_autosuspend(dev->dev);
@@ -154,7 +154,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
154 154
155discard: 155discard:
156 if (cb) 156 if (cb)
157 list_move_tail(&cb->list, &complete_list->list); 157 list_move_tail(&cb->list, cmpl_list);
158 mei_irq_discard_msg(dev, mei_hdr); 158 mei_irq_discard_msg(dev, mei_hdr);
159 return 0; 159 return 0;
160} 160}
@@ -169,7 +169,7 @@ discard:
169 * Return: 0, OK; otherwise, error. 169 * Return: 0, OK; otherwise, error.
170 */ 170 */
171static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, 171static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
172 struct mei_cl_cb *cmpl_list) 172 struct list_head *cmpl_list)
173{ 173{
174 struct mei_device *dev = cl->dev; 174 struct mei_device *dev = cl->dev;
175 u32 msg_slots; 175 u32 msg_slots;
@@ -183,7 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
183 return -EMSGSIZE; 183 return -EMSGSIZE;
184 184
185 ret = mei_hbm_cl_disconnect_rsp(dev, cl); 185 ret = mei_hbm_cl_disconnect_rsp(dev, cl);
186 list_move_tail(&cb->list, &cmpl_list->list); 186 list_move_tail(&cb->list, cmpl_list);
187 187
188 return ret; 188 return ret;
189} 189}
@@ -199,7 +199,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
199 * Return: 0, OK; otherwise, error. 199 * Return: 0, OK; otherwise, error.
200 */ 200 */
201static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, 201static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
202 struct mei_cl_cb *cmpl_list) 202 struct list_head *cmpl_list)
203{ 203{
204 struct mei_device *dev = cl->dev; 204 struct mei_device *dev = cl->dev;
205 u32 msg_slots; 205 u32 msg_slots;
@@ -219,7 +219,7 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
219 if (ret) { 219 if (ret) {
220 cl->status = ret; 220 cl->status = ret;
221 cb->buf_idx = 0; 221 cb->buf_idx = 0;
222 list_move_tail(&cb->list, &cmpl_list->list); 222 list_move_tail(&cb->list, cmpl_list);
223 return ret; 223 return ret;
224 } 224 }
225 225
@@ -249,7 +249,7 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
249 * Return: 0 on success, <0 on failure. 249 * Return: 0 on success, <0 on failure.
250 */ 250 */
251int mei_irq_read_handler(struct mei_device *dev, 251int mei_irq_read_handler(struct mei_device *dev,
252 struct mei_cl_cb *cmpl_list, s32 *slots) 252 struct list_head *cmpl_list, s32 *slots)
253{ 253{
254 struct mei_msg_hdr *mei_hdr; 254 struct mei_msg_hdr *mei_hdr;
255 struct mei_cl *cl; 255 struct mei_cl *cl;
@@ -347,12 +347,11 @@ EXPORT_SYMBOL_GPL(mei_irq_read_handler);
347 * 347 *
348 * Return: 0 on success, <0 on failure. 348 * Return: 0 on success, <0 on failure.
349 */ 349 */
350int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) 350int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
351{ 351{
352 352
353 struct mei_cl *cl; 353 struct mei_cl *cl;
354 struct mei_cl_cb *cb, *next; 354 struct mei_cl_cb *cb, *next;
355 struct mei_cl_cb *list;
356 s32 slots; 355 s32 slots;
357 int ret; 356 int ret;
358 357
@@ -367,19 +366,18 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
367 /* complete all waiting for write CB */ 366 /* complete all waiting for write CB */
368 dev_dbg(dev->dev, "complete all waiting for write cb.\n"); 367 dev_dbg(dev->dev, "complete all waiting for write cb.\n");
369 368
370 list = &dev->write_waiting_list; 369 list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
371 list_for_each_entry_safe(cb, next, &list->list, list) {
372 cl = cb->cl; 370 cl = cb->cl;
373 371
374 cl->status = 0; 372 cl->status = 0;
375 cl_dbg(dev, cl, "MEI WRITE COMPLETE\n"); 373 cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
376 cl->writing_state = MEI_WRITE_COMPLETE; 374 cl->writing_state = MEI_WRITE_COMPLETE;
377 list_move_tail(&cb->list, &cmpl_list->list); 375 list_move_tail(&cb->list, cmpl_list);
378 } 376 }
379 377
380 /* complete control write list CB */ 378 /* complete control write list CB */
381 dev_dbg(dev->dev, "complete control write list cb.\n"); 379 dev_dbg(dev->dev, "complete control write list cb.\n");
382 list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { 380 list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
383 cl = cb->cl; 381 cl = cb->cl;
384 switch (cb->fop_type) { 382 switch (cb->fop_type) {
385 case MEI_FOP_DISCONNECT: 383 case MEI_FOP_DISCONNECT:
@@ -423,7 +421,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
423 } 421 }
424 /* complete write list CB */ 422 /* complete write list CB */
425 dev_dbg(dev->dev, "complete write list cb.\n"); 423 dev_dbg(dev->dev, "complete write list cb.\n");
426 list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { 424 list_for_each_entry_safe(cb, next, &dev->write_list, list) {
427 cl = cb->cl; 425 cl = cb->cl;
428 if (cl == &dev->iamthif_cl) 426 if (cl == &dev->iamthif_cl)
429 ret = mei_amthif_irq_write(cl, cb, cmpl_list); 427 ret = mei_amthif_irq_write(cl, cb, cmpl_list);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index e1bf54481fd6..9d0b7050c79a 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -182,32 +182,36 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
182 goto out; 182 goto out;
183 } 183 }
184 184
185 if (rets == -EBUSY &&
186 !mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, file)) {
187 rets = -ENOMEM;
188 goto out;
189 }
190 185
191 do { 186again:
192 mutex_unlock(&dev->device_lock); 187 mutex_unlock(&dev->device_lock);
193 188 if (wait_event_interruptible(cl->rx_wait,
194 if (wait_event_interruptible(cl->rx_wait, 189 !list_empty(&cl->rd_completed) ||
195 (!list_empty(&cl->rd_completed)) || 190 !mei_cl_is_connected(cl))) {
196 (!mei_cl_is_connected(cl)))) { 191 if (signal_pending(current))
192 return -EINTR;
193 return -ERESTARTSYS;
194 }
195 mutex_lock(&dev->device_lock);
197 196
198 if (signal_pending(current)) 197 if (!mei_cl_is_connected(cl)) {
199 return -EINTR; 198 rets = -ENODEV;
200 return -ERESTARTSYS; 199 goto out;
201 } 200 }
202 201
203 mutex_lock(&dev->device_lock); 202 cb = mei_cl_read_cb(cl, file);
204 if (!mei_cl_is_connected(cl)) { 203 if (!cb) {
205 rets = -ENODEV; 204 /*
206 goto out; 205 * For amthif all the waiters are woken up,
207 } 206 * but only fp with matching cb->fp get the cb,
207 * the others have to return to wait on read.
208 */
209 if (cl == &dev->iamthif_cl)
210 goto again;
208 211
209 cb = mei_cl_read_cb(cl, file); 212 rets = 0;
210 } while (!cb); 213 goto out;
214 }
211 215
212copy_buffer: 216copy_buffer:
213 /* now copy the data to user space */ 217 /* now copy the data to user space */
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 8dadb98662a9..d41aac53a2ac 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -328,6 +328,8 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
328bool mei_cl_bus_rx_event(struct mei_cl *cl); 328bool mei_cl_bus_rx_event(struct mei_cl *cl);
329bool mei_cl_bus_notify_event(struct mei_cl *cl); 329bool mei_cl_bus_notify_event(struct mei_cl *cl);
330void mei_cl_bus_remove_devices(struct mei_device *bus); 330void mei_cl_bus_remove_devices(struct mei_device *bus);
331bool mei_cl_bus_module_get(struct mei_cl *cl);
332void mei_cl_bus_module_put(struct mei_cl *cl);
331int mei_cl_bus_init(void); 333int mei_cl_bus_init(void);
332void mei_cl_bus_exit(void); 334void mei_cl_bus_exit(void);
333 335
@@ -439,10 +441,10 @@ struct mei_device {
439 struct cdev cdev; 441 struct cdev cdev;
440 int minor; 442 int minor;
441 443
442 struct mei_cl_cb write_list; 444 struct list_head write_list;
443 struct mei_cl_cb write_waiting_list; 445 struct list_head write_waiting_list;
444 struct mei_cl_cb ctrl_wr_list; 446 struct list_head ctrl_wr_list;
445 struct mei_cl_cb ctrl_rd_list; 447 struct list_head ctrl_rd_list;
446 448
447 struct list_head file_list; 449 struct list_head file_list;
448 long open_handle_count; 450 long open_handle_count;
@@ -499,7 +501,7 @@ struct mei_device {
499 bool override_fixed_address; 501 bool override_fixed_address;
500 502
501 /* amthif list for cmd waiting */ 503 /* amthif list for cmd waiting */
502 struct mei_cl_cb amthif_cmd_list; 504 struct list_head amthif_cmd_list;
503 struct mei_cl iamthif_cl; 505 struct mei_cl iamthif_cl;
504 long iamthif_open_count; 506 long iamthif_open_count;
505 u32 iamthif_stall_timer; 507 u32 iamthif_stall_timer;
@@ -571,10 +573,10 @@ void mei_cancel_work(struct mei_device *dev);
571void mei_timer(struct work_struct *work); 573void mei_timer(struct work_struct *work);
572void mei_schedule_stall_timer(struct mei_device *dev); 574void mei_schedule_stall_timer(struct mei_device *dev);
573int mei_irq_read_handler(struct mei_device *dev, 575int mei_irq_read_handler(struct mei_device *dev,
574 struct mei_cl_cb *cmpl_list, s32 *slots); 576 struct list_head *cmpl_list, s32 *slots);
575 577
576int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list); 578int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list);
577void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list); 579void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);
578 580
579/* 581/*
580 * AMTHIF - AMT Host Interface Functions 582 * AMTHIF - AMT Host Interface Functions
@@ -590,12 +592,12 @@ int mei_amthif_release(struct mei_device *dev, struct file *file);
590int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb); 592int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
591int mei_amthif_run_next_cmd(struct mei_device *dev); 593int mei_amthif_run_next_cmd(struct mei_device *dev);
592int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 594int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
593 struct mei_cl_cb *cmpl_list); 595 struct list_head *cmpl_list);
594 596
595void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb); 597void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
596int mei_amthif_irq_read_msg(struct mei_cl *cl, 598int mei_amthif_irq_read_msg(struct mei_cl *cl,
597 struct mei_msg_hdr *mei_hdr, 599 struct mei_msg_hdr *mei_hdr,
598 struct mei_cl_cb *complete_list); 600 struct list_head *cmpl_list);
599int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); 601int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
600 602
601/* 603/*
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index f9c6ec4b98ab..0a668fdfbbe9 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -149,18 +149,18 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
149 return -ENODEV; 149 return -ENODEV;
150 150
151 /* enable pci dev */ 151 /* enable pci dev */
152 err = pci_enable_device(pdev); 152 err = pcim_enable_device(pdev);
153 if (err) { 153 if (err) {
154 dev_err(&pdev->dev, "failed to enable pci device.\n"); 154 dev_err(&pdev->dev, "failed to enable pci device.\n");
155 goto end; 155 goto end;
156 } 156 }
157 /* set PCI host mastering */ 157 /* set PCI host mastering */
158 pci_set_master(pdev); 158 pci_set_master(pdev);
159 /* pci request regions for mei driver */ 159 /* pci request regions and mapping IO device memory for mei driver */
160 err = pci_request_regions(pdev, KBUILD_MODNAME); 160 err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
161 if (err) { 161 if (err) {
162 dev_err(&pdev->dev, "failed to get pci regions.\n"); 162 dev_err(&pdev->dev, "failed to get pci regions.\n");
163 goto disable_device; 163 goto end;
164 } 164 }
165 165
166 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || 166 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
@@ -173,24 +173,18 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
173 } 173 }
174 if (err) { 174 if (err) {
175 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 175 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
176 goto release_regions; 176 goto end;
177 } 177 }
178 178
179
180 /* allocates and initializes the mei dev structure */ 179 /* allocates and initializes the mei dev structure */
181 dev = mei_me_dev_init(pdev, cfg); 180 dev = mei_me_dev_init(pdev, cfg);
182 if (!dev) { 181 if (!dev) {
183 err = -ENOMEM; 182 err = -ENOMEM;
184 goto release_regions; 183 goto end;
185 } 184 }
186 hw = to_me_hw(dev); 185 hw = to_me_hw(dev);
187 /* mapping IO device memory */ 186 hw->mem_addr = pcim_iomap_table(pdev)[0];
188 hw->mem_addr = pci_iomap(pdev, 0, 0); 187
189 if (!hw->mem_addr) {
190 dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
191 err = -ENOMEM;
192 goto free_device;
193 }
194 pci_enable_msi(pdev); 188 pci_enable_msi(pdev);
195 189
196 /* request and enable interrupt */ 190 /* request and enable interrupt */
@@ -203,7 +197,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
203 if (err) { 197 if (err) {
204 dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", 198 dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
205 pdev->irq); 199 pdev->irq);
206 goto disable_msi; 200 goto end;
207 } 201 }
208 202
209 if (mei_start(dev)) { 203 if (mei_start(dev)) {
@@ -242,15 +236,6 @@ release_irq:
242 mei_cancel_work(dev); 236 mei_cancel_work(dev);
243 mei_disable_interrupts(dev); 237 mei_disable_interrupts(dev);
244 free_irq(pdev->irq, dev); 238 free_irq(pdev->irq, dev);
245disable_msi:
246 pci_disable_msi(pdev);
247 pci_iounmap(pdev, hw->mem_addr);
248free_device:
249 kfree(dev);
250release_regions:
251 pci_release_regions(pdev);
252disable_device:
253 pci_disable_device(pdev);
254end: 239end:
255 dev_err(&pdev->dev, "initialization failed.\n"); 240 dev_err(&pdev->dev, "initialization failed.\n");
256 return err; 241 return err;
@@ -267,7 +252,6 @@ end:
267static void mei_me_remove(struct pci_dev *pdev) 252static void mei_me_remove(struct pci_dev *pdev)
268{ 253{
269 struct mei_device *dev; 254 struct mei_device *dev;
270 struct mei_me_hw *hw;
271 255
272 dev = pci_get_drvdata(pdev); 256 dev = pci_get_drvdata(pdev);
273 if (!dev) 257 if (!dev)
@@ -276,33 +260,19 @@ static void mei_me_remove(struct pci_dev *pdev)
276 if (mei_pg_is_enabled(dev)) 260 if (mei_pg_is_enabled(dev))
277 pm_runtime_get_noresume(&pdev->dev); 261 pm_runtime_get_noresume(&pdev->dev);
278 262
279 hw = to_me_hw(dev);
280
281
282 dev_dbg(&pdev->dev, "stop\n"); 263 dev_dbg(&pdev->dev, "stop\n");
283 mei_stop(dev); 264 mei_stop(dev);
284 265
285 if (!pci_dev_run_wake(pdev)) 266 if (!pci_dev_run_wake(pdev))
286 mei_me_unset_pm_domain(dev); 267 mei_me_unset_pm_domain(dev);
287 268
288 /* disable interrupts */
289 mei_disable_interrupts(dev); 269 mei_disable_interrupts(dev);
290 270
291 free_irq(pdev->irq, dev); 271 free_irq(pdev->irq, dev);
292 pci_disable_msi(pdev);
293
294 if (hw->mem_addr)
295 pci_iounmap(pdev, hw->mem_addr);
296 272
297 mei_deregister(dev); 273 mei_deregister(dev);
298
299 kfree(dev);
300
301 pci_release_regions(pdev);
302 pci_disable_device(pdev);
303
304
305} 274}
275
306#ifdef CONFIG_PM_SLEEP 276#ifdef CONFIG_PM_SLEEP
307static int mei_me_pci_suspend(struct device *device) 277static int mei_me_pci_suspend(struct device *device)
308{ 278{
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 58ffd30dcc91..fe088b40daf9 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -52,17 +52,6 @@ static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
52static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {} 52static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
53#endif /* CONFIG_PM */ 53#endif /* CONFIG_PM */
54 54
55static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
56{
57 int i;
58
59 for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
60 if (hw->mem_addr[i]) {
61 pci_iounmap(pdev, hw->mem_addr[i]);
62 hw->mem_addr[i] = NULL;
63 }
64 }
65}
66/** 55/**
67 * mei_txe_probe - Device Initialization Routine 56 * mei_txe_probe - Device Initialization Routine
68 * 57 *
@@ -75,22 +64,22 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
75{ 64{
76 struct mei_device *dev; 65 struct mei_device *dev;
77 struct mei_txe_hw *hw; 66 struct mei_txe_hw *hw;
67 const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
78 int err; 68 int err;
79 int i;
80 69
81 /* enable pci dev */ 70 /* enable pci dev */
82 err = pci_enable_device(pdev); 71 err = pcim_enable_device(pdev);
83 if (err) { 72 if (err) {
84 dev_err(&pdev->dev, "failed to enable pci device.\n"); 73 dev_err(&pdev->dev, "failed to enable pci device.\n");
85 goto end; 74 goto end;
86 } 75 }
87 /* set PCI host mastering */ 76 /* set PCI host mastering */
88 pci_set_master(pdev); 77 pci_set_master(pdev);
89 /* pci request regions for mei driver */ 78 /* pci request regions and mapping IO device memory for mei driver */
90 err = pci_request_regions(pdev, KBUILD_MODNAME); 79 err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
91 if (err) { 80 if (err) {
92 dev_err(&pdev->dev, "failed to get pci regions.\n"); 81 dev_err(&pdev->dev, "failed to get pci regions.\n");
93 goto disable_device; 82 goto end;
94 } 83 }
95 84
96 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 85 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
@@ -98,7 +87,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
98 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 87 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
99 if (err) { 88 if (err) {
100 dev_err(&pdev->dev, "No suitable DMA available.\n"); 89 dev_err(&pdev->dev, "No suitable DMA available.\n");
101 goto release_regions; 90 goto end;
102 } 91 }
103 } 92 }
104 93
@@ -106,20 +95,10 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
106 dev = mei_txe_dev_init(pdev); 95 dev = mei_txe_dev_init(pdev);
107 if (!dev) { 96 if (!dev) {
108 err = -ENOMEM; 97 err = -ENOMEM;
109 goto release_regions; 98 goto end;
110 } 99 }
111 hw = to_txe_hw(dev); 100 hw = to_txe_hw(dev);
112 101 hw->mem_addr = pcim_iomap_table(pdev);
113 /* mapping IO device memory */
114 for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
115 hw->mem_addr[i] = pci_iomap(pdev, i, 0);
116 if (!hw->mem_addr[i]) {
117 dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
118 err = -ENOMEM;
119 goto free_device;
120 }
121 }
122
123 102
124 pci_enable_msi(pdev); 103 pci_enable_msi(pdev);
125 104
@@ -140,7 +119,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
140 if (err) { 119 if (err) {
141 dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n", 120 dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
142 pdev->irq); 121 pdev->irq);
143 goto free_device; 122 goto end;
144 } 123 }
145 124
146 if (mei_start(dev)) { 125 if (mei_start(dev)) {
@@ -173,23 +152,9 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
173stop: 152stop:
174 mei_stop(dev); 153 mei_stop(dev);
175release_irq: 154release_irq:
176
177 mei_cancel_work(dev); 155 mei_cancel_work(dev);
178
179 /* disable interrupts */
180 mei_disable_interrupts(dev); 156 mei_disable_interrupts(dev);
181
182 free_irq(pdev->irq, dev); 157 free_irq(pdev->irq, dev);
183 pci_disable_msi(pdev);
184
185free_device:
186 mei_txe_pci_iounmap(pdev, hw);
187
188 kfree(dev);
189release_regions:
190 pci_release_regions(pdev);
191disable_device:
192 pci_disable_device(pdev);
193end: 158end:
194 dev_err(&pdev->dev, "initialization failed.\n"); 159 dev_err(&pdev->dev, "initialization failed.\n");
195 return err; 160 return err;
@@ -206,38 +171,24 @@ end:
206static void mei_txe_remove(struct pci_dev *pdev) 171static void mei_txe_remove(struct pci_dev *pdev)
207{ 172{
208 struct mei_device *dev; 173 struct mei_device *dev;
209 struct mei_txe_hw *hw;
210 174
211 dev = pci_get_drvdata(pdev); 175 dev = pci_get_drvdata(pdev);
212 if (!dev) { 176 if (!dev) {
213 dev_err(&pdev->dev, "mei: dev =NULL\n"); 177 dev_err(&pdev->dev, "mei: dev == NULL\n");
214 return; 178 return;
215 } 179 }
216 180
217 pm_runtime_get_noresume(&pdev->dev); 181 pm_runtime_get_noresume(&pdev->dev);
218 182
219 hw = to_txe_hw(dev);
220
221 mei_stop(dev); 183 mei_stop(dev);
222 184
223 if (!pci_dev_run_wake(pdev)) 185 if (!pci_dev_run_wake(pdev))
224 mei_txe_unset_pm_domain(dev); 186 mei_txe_unset_pm_domain(dev);
225 187
226 /* disable interrupts */
227 mei_disable_interrupts(dev); 188 mei_disable_interrupts(dev);
228 free_irq(pdev->irq, dev); 189 free_irq(pdev->irq, dev);
229 pci_disable_msi(pdev);
230
231 pci_set_drvdata(pdev, NULL);
232
233 mei_txe_pci_iounmap(pdev, hw);
234 190
235 mei_deregister(dev); 191 mei_deregister(dev);
236
237 kfree(dev);
238
239 pci_release_regions(pdev);
240 pci_disable_device(pdev);
241} 192}
242 193
243 194
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index 88e45234d527..fed992e2c258 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -292,7 +292,6 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
292 if (ret) { 292 if (ret) {
293 dev_err(vop_dev(vdev), "%s %d err %d\n", 293 dev_err(vop_dev(vdev), "%s %d err %d\n",
294 __func__, __LINE__, ret); 294 __func__, __LINE__, ret);
295 kfree(vdev);
296 return ret; 295 return ret;
297 } 296 }
298 297
diff --git a/drivers/misc/panel.c b/drivers/misc/panel.c
index 6030ac5b8c63..ef2ece0f26af 100644
--- a/drivers/misc/panel.c
+++ b/drivers/misc/panel.c
@@ -56,6 +56,7 @@
56#include <linux/list.h> 56#include <linux/list.h>
57#include <linux/notifier.h> 57#include <linux/notifier.h>
58#include <linux/reboot.h> 58#include <linux/reboot.h>
59#include <linux/workqueue.h>
59#include <generated/utsrelease.h> 60#include <generated/utsrelease.h>
60 61
61#include <linux/io.h> 62#include <linux/io.h>
@@ -64,8 +65,6 @@
64#define LCD_MINOR 156 65#define LCD_MINOR 156
65#define KEYPAD_MINOR 185 66#define KEYPAD_MINOR 185
66 67
67#define PANEL_VERSION "0.9.5"
68
69#define LCD_MAXBYTES 256 /* max burst write */ 68#define LCD_MAXBYTES 256 /* max burst write */
70 69
71#define KEYPAD_BUFFER 64 70#define KEYPAD_BUFFER 64
@@ -77,8 +76,8 @@
77/* a key repeats this times INPUT_POLL_TIME */ 76/* a key repeats this times INPUT_POLL_TIME */
78#define KEYPAD_REP_DELAY (2) 77#define KEYPAD_REP_DELAY (2)
79 78
80/* keep the light on this times INPUT_POLL_TIME for each flash */ 79/* keep the light on this many seconds for each flash */
81#define FLASH_LIGHT_TEMPO (200) 80#define FLASH_LIGHT_TEMPO (4)
82 81
83/* converts an r_str() input to an active high, bits string : 000BAOSE */ 82/* converts an r_str() input to an active high, bits string : 000BAOSE */
84#define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3) 83#define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3)
@@ -121,8 +120,6 @@
121#define PIN_SELECP 17 120#define PIN_SELECP 17
122#define PIN_NOT_SET 127 121#define PIN_NOT_SET 127
123 122
124#define LCD_FLAG_S 0x0001
125#define LCD_FLAG_ID 0x0002
126#define LCD_FLAG_B 0x0004 /* blink on */ 123#define LCD_FLAG_B 0x0004 /* blink on */
127#define LCD_FLAG_C 0x0008 /* cursor on */ 124#define LCD_FLAG_C 0x0008 /* cursor on */
128#define LCD_FLAG_D 0x0010 /* display on */ 125#define LCD_FLAG_D 0x0010 /* display on */
@@ -256,7 +253,10 @@ static struct {
256 int hwidth; 253 int hwidth;
257 int charset; 254 int charset;
258 int proto; 255 int proto;
259 int light_tempo; 256
257 struct delayed_work bl_work;
258 struct mutex bl_tempo_lock; /* Protects access to bl_tempo */
259 bool bl_tempo;
260 260
261 /* TODO: use union here? */ 261 /* TODO: use union here? */
262 struct { 262 struct {
@@ -661,8 +661,6 @@ static void lcd_get_bits(unsigned int port, int *val)
661 } 661 }
662} 662}
663 663
664static void init_scan_timer(void);
665
666/* sets data port bits according to current signals values */ 664/* sets data port bits according to current signals values */
667static int set_data_bits(void) 665static int set_data_bits(void)
668{ 666{
@@ -794,11 +792,8 @@ static void lcd_send_serial(int byte)
794} 792}
795 793
796/* turn the backlight on or off */ 794/* turn the backlight on or off */
797static void lcd_backlight(int on) 795static void __lcd_backlight(int on)
798{ 796{
799 if (lcd.pins.bl == PIN_NONE)
800 return;
801
802 /* The backlight is activated by setting the AUTOFEED line to +5V */ 797 /* The backlight is activated by setting the AUTOFEED line to +5V */
803 spin_lock_irq(&pprt_lock); 798 spin_lock_irq(&pprt_lock);
804 if (on) 799 if (on)
@@ -809,6 +804,44 @@ static void lcd_backlight(int on)
809 spin_unlock_irq(&pprt_lock); 804 spin_unlock_irq(&pprt_lock);
810} 805}
811 806
807static void lcd_backlight(int on)
808{
809 if (lcd.pins.bl == PIN_NONE)
810 return;
811
812 mutex_lock(&lcd.bl_tempo_lock);
813 if (!lcd.bl_tempo)
814 __lcd_backlight(on);
815 mutex_unlock(&lcd.bl_tempo_lock);
816}
817
818static void lcd_bl_off(struct work_struct *work)
819{
820 mutex_lock(&lcd.bl_tempo_lock);
821 if (lcd.bl_tempo) {
822 lcd.bl_tempo = false;
823 if (!(lcd.flags & LCD_FLAG_L))
824 __lcd_backlight(0);
825 }
826 mutex_unlock(&lcd.bl_tempo_lock);
827}
828
829/* turn the backlight on for a little while */
830static void lcd_poke(void)
831{
832 if (lcd.pins.bl == PIN_NONE)
833 return;
834
835 cancel_delayed_work_sync(&lcd.bl_work);
836
837 mutex_lock(&lcd.bl_tempo_lock);
838 if (!lcd.bl_tempo && !(lcd.flags & LCD_FLAG_L))
839 __lcd_backlight(1);
840 lcd.bl_tempo = true;
841 schedule_delayed_work(&lcd.bl_work, FLASH_LIGHT_TEMPO * HZ);
842 mutex_unlock(&lcd.bl_tempo_lock);
843}
844
812/* send a command to the LCD panel in serial mode */ 845/* send a command to the LCD panel in serial mode */
813static void lcd_write_cmd_s(int cmd) 846static void lcd_write_cmd_s(int cmd)
814{ 847{
@@ -907,6 +940,13 @@ static void lcd_gotoxy(void)
907 (lcd.hwidth - 1) : lcd.bwidth - 1)); 940 (lcd.hwidth - 1) : lcd.bwidth - 1));
908} 941}
909 942
943static void lcd_home(void)
944{
945 lcd.addr.x = 0;
946 lcd.addr.y = 0;
947 lcd_gotoxy();
948}
949
910static void lcd_print(char c) 950static void lcd_print(char c)
911{ 951{
912 if (lcd.addr.x < lcd.bwidth) { 952 if (lcd.addr.x < lcd.bwidth) {
@@ -925,9 +965,7 @@ static void lcd_clear_fast_s(void)
925{ 965{
926 int pos; 966 int pos;
927 967
928 lcd.addr.x = 0; 968 lcd_home();
929 lcd.addr.y = 0;
930 lcd_gotoxy();
931 969
932 spin_lock_irq(&pprt_lock); 970 spin_lock_irq(&pprt_lock);
933 for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) { 971 for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -939,9 +977,7 @@ static void lcd_clear_fast_s(void)
939 } 977 }
940 spin_unlock_irq(&pprt_lock); 978 spin_unlock_irq(&pprt_lock);
941 979
942 lcd.addr.x = 0; 980 lcd_home();
943 lcd.addr.y = 0;
944 lcd_gotoxy();
945} 981}
946 982
947/* fills the display with spaces and resets X/Y */ 983/* fills the display with spaces and resets X/Y */
@@ -949,9 +985,7 @@ static void lcd_clear_fast_p8(void)
949{ 985{
950 int pos; 986 int pos;
951 987
952 lcd.addr.x = 0; 988 lcd_home();
953 lcd.addr.y = 0;
954 lcd_gotoxy();
955 989
956 spin_lock_irq(&pprt_lock); 990 spin_lock_irq(&pprt_lock);
957 for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) { 991 for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -977,9 +1011,7 @@ static void lcd_clear_fast_p8(void)
977 } 1011 }
978 spin_unlock_irq(&pprt_lock); 1012 spin_unlock_irq(&pprt_lock);
979 1013
980 lcd.addr.x = 0; 1014 lcd_home();
981 lcd.addr.y = 0;
982 lcd_gotoxy();
983} 1015}
984 1016
985/* fills the display with spaces and resets X/Y */ 1017/* fills the display with spaces and resets X/Y */
@@ -987,9 +1019,7 @@ static void lcd_clear_fast_tilcd(void)
987{ 1019{
988 int pos; 1020 int pos;
989 1021
990 lcd.addr.x = 0; 1022 lcd_home();
991 lcd.addr.y = 0;
992 lcd_gotoxy();
993 1023
994 spin_lock_irq(&pprt_lock); 1024 spin_lock_irq(&pprt_lock);
995 for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) { 1025 for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -1000,9 +1030,7 @@ static void lcd_clear_fast_tilcd(void)
1000 1030
1001 spin_unlock_irq(&pprt_lock); 1031 spin_unlock_irq(&pprt_lock);
1002 1032
1003 lcd.addr.x = 0; 1033 lcd_home();
1004 lcd.addr.y = 0;
1005 lcd_gotoxy();
1006} 1034}
1007 1035
1008/* clears the display and resets X/Y */ 1036/* clears the display and resets X/Y */
@@ -1108,13 +1136,8 @@ static inline int handle_lcd_special_code(void)
1108 processed = 1; 1136 processed = 1;
1109 break; 1137 break;
1110 case '*': 1138 case '*':
1111 /* flash back light using the keypad timer */ 1139 /* flash back light */
1112 if (scan_timer.function) { 1140 lcd_poke();
1113 if (lcd.light_tempo == 0 &&
1114 ((lcd.flags & LCD_FLAG_L) == 0))
1115 lcd_backlight(1);
1116 lcd.light_tempo = FLASH_LIGHT_TEMPO;
1117 }
1118 processed = 1; 1141 processed = 1;
1119 break; 1142 break;
1120 case 'f': /* Small Font */ 1143 case 'f': /* Small Font */
@@ -1278,21 +1301,14 @@ static inline int handle_lcd_special_code(void)
1278 lcd_write_cmd(LCD_CMD_FUNCTION_SET 1301 lcd_write_cmd(LCD_CMD_FUNCTION_SET
1279 | LCD_CMD_DATA_LEN_8BITS 1302 | LCD_CMD_DATA_LEN_8BITS
1280 | ((lcd.flags & LCD_FLAG_F) 1303 | ((lcd.flags & LCD_FLAG_F)
1281 ? LCD_CMD_TWO_LINES : 0)
1282 | ((lcd.flags & LCD_FLAG_N)
1283 ? LCD_CMD_FONT_5X10_DOTS 1304 ? LCD_CMD_FONT_5X10_DOTS
1305 : 0)
1306 | ((lcd.flags & LCD_FLAG_N)
1307 ? LCD_CMD_TWO_LINES
1284 : 0)); 1308 : 0));
1285 /* check whether L flag was changed */ 1309 /* check whether L flag was changed */
1286 else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L)) { 1310 else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L))
1287 if (lcd.flags & (LCD_FLAG_L)) 1311 lcd_backlight(!!(lcd.flags & LCD_FLAG_L));
1288 lcd_backlight(1);
1289 else if (lcd.light_tempo == 0)
1290 /*
1291 * switch off the light only when the tempo
1292 * lighting is gone
1293 */
1294 lcd_backlight(0);
1295 }
1296 } 1312 }
1297 1313
1298 return processed; 1314 return processed;
@@ -1376,9 +1392,7 @@ static void lcd_write_char(char c)
1376 processed = 1; 1392 processed = 1;
1377 } else if (!strcmp(lcd.esc_seq.buf, "[H")) { 1393 } else if (!strcmp(lcd.esc_seq.buf, "[H")) {
1378 /* cursor to home */ 1394 /* cursor to home */
1379 lcd.addr.x = 0; 1395 lcd_home();
1380 lcd.addr.y = 0;
1381 lcd_gotoxy();
1382 processed = 1; 1396 processed = 1;
1383 } 1397 }
1384 /* codes starting with ^[[L */ 1398 /* codes starting with ^[[L */
@@ -1625,8 +1639,10 @@ static void lcd_init(void)
1625 else 1639 else
1626 lcd_char_conv = NULL; 1640 lcd_char_conv = NULL;
1627 1641
1628 if (lcd.pins.bl != PIN_NONE) 1642 if (lcd.pins.bl != PIN_NONE) {
1629 init_scan_timer(); 1643 mutex_init(&lcd.bl_tempo_lock);
1644 INIT_DELAYED_WORK(&lcd.bl_work, lcd_bl_off);
1645 }
1630 1646
1631 pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E], 1647 pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
1632 lcd_bits[LCD_PORT_C][LCD_BIT_E]); 1648 lcd_bits[LCD_PORT_C][LCD_BIT_E]);
@@ -1655,14 +1671,11 @@ static void lcd_init(void)
1655 panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE); 1671 panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
1656#endif 1672#endif
1657#else 1673#else
1658 panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\nPanel-" 1674 panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE);
1659 PANEL_VERSION);
1660#endif 1675#endif
1661 lcd.addr.x = 0;
1662 lcd.addr.y = 0;
1663 /* clear the display on the next device opening */ 1676 /* clear the display on the next device opening */
1664 lcd.must_clear = true; 1677 lcd.must_clear = true;
1665 lcd_gotoxy(); 1678 lcd_home();
1666} 1679}
1667 1680
1668/* 1681/*
@@ -1997,19 +2010,8 @@ static void panel_scan_timer(void)
1997 panel_process_inputs(); 2010 panel_process_inputs();
1998 } 2011 }
1999 2012
2000 if (lcd.enabled && lcd.initialized) { 2013 if (keypressed && lcd.enabled && lcd.initialized)
2001 if (keypressed) { 2014 lcd_poke();
2002 if (lcd.light_tempo == 0 &&
2003 ((lcd.flags & LCD_FLAG_L) == 0))
2004 lcd_backlight(1);
2005 lcd.light_tempo = FLASH_LIGHT_TEMPO;
2006 } else if (lcd.light_tempo > 0) {
2007 lcd.light_tempo--;
2008 if (lcd.light_tempo == 0 &&
2009 ((lcd.flags & LCD_FLAG_L) == 0))
2010 lcd_backlight(0);
2011 }
2012 }
2013 2015
2014 mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME); 2016 mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME);
2015} 2017}
@@ -2270,25 +2272,26 @@ static void panel_detach(struct parport *port)
2270 if (scan_timer.function) 2272 if (scan_timer.function)
2271 del_timer_sync(&scan_timer); 2273 del_timer_sync(&scan_timer);
2272 2274
2273 if (pprt) { 2275 if (keypad.enabled) {
2274 if (keypad.enabled) { 2276 misc_deregister(&keypad_dev);
2275 misc_deregister(&keypad_dev); 2277 keypad_initialized = 0;
2276 keypad_initialized = 0; 2278 }
2277 }
2278 2279
2279 if (lcd.enabled) { 2280 if (lcd.enabled) {
2280 panel_lcd_print("\x0cLCD driver " PANEL_VERSION 2281 panel_lcd_print("\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
2281 "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-"); 2282 misc_deregister(&lcd_dev);
2282 misc_deregister(&lcd_dev); 2283 if (lcd.pins.bl != PIN_NONE) {
2283 lcd.initialized = false; 2284 cancel_delayed_work_sync(&lcd.bl_work);
2285 __lcd_backlight(0);
2284 } 2286 }
2285 2287 lcd.initialized = false;
2286 /* TODO: free all input signals */
2287 parport_release(pprt);
2288 parport_unregister_device(pprt);
2289 pprt = NULL;
2290 unregister_reboot_notifier(&panel_notifier);
2291 } 2288 }
2289
2290 /* TODO: free all input signals */
2291 parport_release(pprt);
2292 parport_unregister_device(pprt);
2293 pprt = NULL;
2294 unregister_reboot_notifier(&panel_notifier);
2292} 2295}
2293 2296
2294static struct parport_driver panel_driver = { 2297static struct parport_driver panel_driver = {
@@ -2400,7 +2403,7 @@ static int __init panel_init_module(void)
2400 2403
2401 if (!lcd.enabled && !keypad.enabled) { 2404 if (!lcd.enabled && !keypad.enabled) {
2402 /* no device enabled, let's exit */ 2405 /* no device enabled, let's exit */
2403 pr_err("driver version " PANEL_VERSION " disabled.\n"); 2406 pr_err("panel driver disabled.\n");
2404 return -ENODEV; 2407 return -ENODEV;
2405 } 2408 }
2406 2409
@@ -2411,12 +2414,10 @@ static int __init panel_init_module(void)
2411 } 2414 }
2412 2415
2413 if (pprt) 2416 if (pprt)
2414 pr_info("driver version " PANEL_VERSION 2417 pr_info("panel driver registered on parport%d (io=0x%lx).\n",
2415 " registered on parport%d (io=0x%lx).\n", parport, 2418 parport, pprt->port->base);
2416 pprt->port->base);
2417 else 2419 else
2418 pr_info("driver version " PANEL_VERSION 2420 pr_info("panel driver not yet registered\n");
2419 " not yet registered\n");
2420 return 0; 2421 return 0;
2421} 2422}
2422 2423
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
new file mode 100644
index 000000000000..ac522417c462
--- /dev/null
+++ b/drivers/misc/sram-exec.c
@@ -0,0 +1,105 @@
1/*
2 * SRAM protect-exec region helper functions
3 *
4 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
5 * Dave Gerlach
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/device.h>
18#include <linux/genalloc.h>
19#include <linux/sram.h>
20
21#include <asm/cacheflush.h>
22
23#include "sram.h"
24
25static DEFINE_MUTEX(exec_pool_list_mutex);
26static LIST_HEAD(exec_pool_list);
27
28int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
29 struct sram_partition *part)
30{
31 unsigned long base = (unsigned long)part->base;
32 unsigned long end = base + block->size;
33
34 if (!PAGE_ALIGNED(base) || !PAGE_ALIGNED(end)) {
35 dev_err(sram->dev,
36 "SRAM pool marked with 'protect-exec' is not page aligned and will not be created.\n");
37 return -ENOMEM;
38 }
39
40 return 0;
41}
42
43int sram_add_protect_exec(struct sram_partition *part)
44{
45 mutex_lock(&exec_pool_list_mutex);
46 list_add_tail(&part->list, &exec_pool_list);
47 mutex_unlock(&exec_pool_list_mutex);
48
49 return 0;
50}
51
52/**
53 * sram_exec_copy - copy data to a protected executable region of sram
54 *
55 * @pool: struct gen_pool retrieved that is part of this sram
56 * @dst: Destination address for the copy, that must be inside pool
57 * @src: Source address for the data to copy
58 * @size: Size of copy to perform, which starting from dst, must reside in pool
59 *
60 * This helper function allows sram driver to act as central control location
61 * of 'protect-exec' pools which are normal sram pools but are always set
62 * read-only and executable except when copying data to them, at which point
63 * they are set to read-write non-executable, to make sure no memory is
64 * writeable and executable at the same time. This region must be page-aligned
65 * and is checked during probe, otherwise page attribute manipulation would
66 * not be possible.
67 */
68int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
69 size_t size)
70{
71 struct sram_partition *part = NULL, *p;
72 unsigned long base;
73 int pages;
74
75 mutex_lock(&exec_pool_list_mutex);
76 list_for_each_entry(p, &exec_pool_list, list) {
77 if (p->pool == pool)
78 part = p;
79 }
80 mutex_unlock(&exec_pool_list_mutex);
81
82 if (!part)
83 return -EINVAL;
84
85 if (!addr_in_gen_pool(pool, (unsigned long)dst, size))
86 return -EINVAL;
87
88 base = (unsigned long)part->base;
89 pages = PAGE_ALIGN(size) / PAGE_SIZE;
90
91 mutex_lock(&part->lock);
92
93 set_memory_nx((unsigned long)base, pages);
94 set_memory_rw((unsigned long)base, pages);
95
96 memcpy(dst, src, size);
97
98 set_memory_ro((unsigned long)base, pages);
99 set_memory_x((unsigned long)base, pages);
100
101 mutex_unlock(&part->lock);
102
103 return 0;
104}
105EXPORT_SYMBOL_GPL(sram_exec_copy);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index b33ab8ce47ab..d1185b78cf9a 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -31,35 +31,9 @@
31#include <linux/mfd/syscon.h> 31#include <linux/mfd/syscon.h>
32#include <soc/at91/atmel-secumod.h> 32#include <soc/at91/atmel-secumod.h>
33 33
34#define SRAM_GRANULARITY 32 34#include "sram.h"
35
36struct sram_partition {
37 void __iomem *base;
38
39 struct gen_pool *pool;
40 struct bin_attribute battr;
41 struct mutex lock;
42};
43
44struct sram_dev {
45 struct device *dev;
46 void __iomem *virt_base;
47
48 struct gen_pool *pool;
49 struct clk *clk;
50 35
51 struct sram_partition *partition; 36#define SRAM_GRANULARITY 32
52 u32 partitions;
53};
54
55struct sram_reserve {
56 struct list_head list;
57 u32 start;
58 u32 size;
59 bool export;
60 bool pool;
61 const char *label;
62};
63 37
64static ssize_t sram_read(struct file *filp, struct kobject *kobj, 38static ssize_t sram_read(struct file *filp, struct kobject *kobj,
65 struct bin_attribute *attr, 39 struct bin_attribute *attr,
@@ -148,6 +122,18 @@ static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
148 if (ret) 122 if (ret)
149 return ret; 123 return ret;
150 } 124 }
125 if (block->protect_exec) {
126 ret = sram_check_protect_exec(sram, block, part);
127 if (ret)
128 return ret;
129
130 ret = sram_add_pool(sram, block, start, part);
131 if (ret)
132 return ret;
133
134 sram_add_protect_exec(part);
135 }
136
151 sram->partitions++; 137 sram->partitions++;
152 138
153 return 0; 139 return 0;
@@ -233,7 +219,11 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
233 if (of_find_property(child, "pool", NULL)) 219 if (of_find_property(child, "pool", NULL))
234 block->pool = true; 220 block->pool = true;
235 221
236 if ((block->export || block->pool) && block->size) { 222 if (of_find_property(child, "protect-exec", NULL))
223 block->protect_exec = true;
224
225 if ((block->export || block->pool || block->protect_exec) &&
226 block->size) {
237 exports++; 227 exports++;
238 228
239 label = NULL; 229 label = NULL;
@@ -249,8 +239,10 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
249 239
250 block->label = devm_kstrdup(sram->dev, 240 block->label = devm_kstrdup(sram->dev,
251 label, GFP_KERNEL); 241 label, GFP_KERNEL);
252 if (!block->label) 242 if (!block->label) {
243 ret = -ENOMEM;
253 goto err_chunks; 244 goto err_chunks;
245 }
254 246
255 dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n", 247 dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
256 block->export ? "exported " : "", block->label, 248 block->export ? "exported " : "", block->label,
@@ -293,7 +285,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
293 goto err_chunks; 285 goto err_chunks;
294 } 286 }
295 287
296 if ((block->export || block->pool) && block->size) { 288 if ((block->export || block->pool || block->protect_exec) &&
289 block->size) {
297 ret = sram_add_partition(sram, block, 290 ret = sram_add_partition(sram, block,
298 res->start + block->start); 291 res->start + block->start);
299 if (ret) { 292 if (ret) {
diff --git a/drivers/misc/sram.h b/drivers/misc/sram.h
new file mode 100644
index 000000000000..c181ce4c8fca
--- /dev/null
+++ b/drivers/misc/sram.h
@@ -0,0 +1,58 @@
1/*
2 * Defines for the SRAM driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#ifndef __SRAM_H
9#define __SRAM_H
10
11struct sram_partition {
12 void __iomem *base;
13
14 struct gen_pool *pool;
15 struct bin_attribute battr;
16 struct mutex lock;
17 struct list_head list;
18};
19
20struct sram_dev {
21 struct device *dev;
22 void __iomem *virt_base;
23
24 struct gen_pool *pool;
25 struct clk *clk;
26
27 struct sram_partition *partition;
28 u32 partitions;
29};
30
31struct sram_reserve {
32 struct list_head list;
33 u32 start;
34 u32 size;
35 bool export;
36 bool pool;
37 bool protect_exec;
38 const char *label;
39};
40
41#ifdef CONFIG_SRAM_EXEC
42int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
43 struct sram_partition *part);
44int sram_add_protect_exec(struct sram_partition *part);
45#else
46static inline int sram_check_protect_exec(struct sram_dev *sram,
47 struct sram_reserve *block,
48 struct sram_partition *part)
49{
50 return -ENODEV;
51}
52
53static inline int sram_add_protect_exec(struct sram_partition *part)
54{
55 return -ENODEV;
56}
57#endif /* CONFIG_SRAM_EXEC */
58#endif /* __SRAM_H */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 189b32519748..9d659542a335 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -54,10 +54,7 @@ struct vmci_guest_device {
54 struct device *dev; /* PCI device we are attached to */ 54 struct device *dev; /* PCI device we are attached to */
55 void __iomem *iobase; 55 void __iomem *iobase;
56 56
57 unsigned int irq;
58 unsigned int intr_type;
59 bool exclusive_vectors; 57 bool exclusive_vectors;
60 struct msix_entry msix_entries[VMCI_MAX_INTRS];
61 58
62 struct tasklet_struct datagram_tasklet; 59 struct tasklet_struct datagram_tasklet;
63 struct tasklet_struct bm_tasklet; 60 struct tasklet_struct bm_tasklet;
@@ -369,30 +366,6 @@ static void vmci_process_bitmap(unsigned long data)
369} 366}
370 367
371/* 368/*
372 * Enable MSI-X. Try exclusive vectors first, then shared vectors.
373 */
374static int vmci_enable_msix(struct pci_dev *pdev,
375 struct vmci_guest_device *vmci_dev)
376{
377 int i;
378 int result;
379
380 for (i = 0; i < VMCI_MAX_INTRS; ++i) {
381 vmci_dev->msix_entries[i].entry = i;
382 vmci_dev->msix_entries[i].vector = i;
383 }
384
385 result = pci_enable_msix_exact(pdev,
386 vmci_dev->msix_entries, VMCI_MAX_INTRS);
387 if (result == 0)
388 vmci_dev->exclusive_vectors = true;
389 else if (result == -ENOSPC)
390 result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);
391
392 return result;
393}
394
395/*
396 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X 369 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
397 * interrupt (vector VMCI_INTR_DATAGRAM). 370 * interrupt (vector VMCI_INTR_DATAGRAM).
398 */ 371 */
@@ -406,7 +379,7 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
406 * Otherwise we must read the ICR to determine what to do. 379 * Otherwise we must read the ICR to determine what to do.
407 */ 380 */
408 381
409 if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) { 382 if (dev->exclusive_vectors) {
410 tasklet_schedule(&dev->datagram_tasklet); 383 tasklet_schedule(&dev->datagram_tasklet);
411 } else { 384 } else {
412 unsigned int icr; 385 unsigned int icr;
@@ -491,7 +464,6 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
491 } 464 }
492 465
493 vmci_dev->dev = &pdev->dev; 466 vmci_dev->dev = &pdev->dev;
494 vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
495 vmci_dev->exclusive_vectors = false; 467 vmci_dev->exclusive_vectors = false;
496 vmci_dev->iobase = iobase; 468 vmci_dev->iobase = iobase;
497 469
@@ -592,26 +564,26 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
592 * Enable interrupts. Try MSI-X first, then MSI, and then fallback on 564 * Enable interrupts. Try MSI-X first, then MSI, and then fallback on
593 * legacy interrupts. 565 * legacy interrupts.
594 */ 566 */
595 if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) { 567 error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
596 vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX; 568 PCI_IRQ_MSIX);
597 vmci_dev->irq = vmci_dev->msix_entries[0].vector; 569 if (error) {
598 } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) { 570 error = pci_alloc_irq_vectors(pdev, 1, 1,
599 vmci_dev->intr_type = VMCI_INTR_TYPE_MSI; 571 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
600 vmci_dev->irq = pdev->irq; 572 if (error)
573 goto err_remove_bitmap;
601 } else { 574 } else {
602 vmci_dev->intr_type = VMCI_INTR_TYPE_INTX; 575 vmci_dev->exclusive_vectors = true;
603 vmci_dev->irq = pdev->irq;
604 } 576 }
605 577
606 /* 578 /*
607 * Request IRQ for legacy or MSI interrupts, or for first 579 * Request IRQ for legacy or MSI interrupts, or for first
608 * MSI-X vector. 580 * MSI-X vector.
609 */ 581 */
610 error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED, 582 error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
611 KBUILD_MODNAME, vmci_dev); 583 IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
612 if (error) { 584 if (error) {
613 dev_err(&pdev->dev, "Irq %u in use: %d\n", 585 dev_err(&pdev->dev, "Irq %u in use: %d\n",
614 vmci_dev->irq, error); 586 pci_irq_vector(pdev, 0), error);
615 goto err_disable_msi; 587 goto err_disable_msi;
616 } 588 }
617 589
@@ -622,13 +594,13 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
622 * between the vectors. 594 * between the vectors.
623 */ 595 */
624 if (vmci_dev->exclusive_vectors) { 596 if (vmci_dev->exclusive_vectors) {
625 error = request_irq(vmci_dev->msix_entries[1].vector, 597 error = request_irq(pci_irq_vector(pdev, 1),
626 vmci_interrupt_bm, 0, KBUILD_MODNAME, 598 vmci_interrupt_bm, 0, KBUILD_MODNAME,
627 vmci_dev); 599 vmci_dev);
628 if (error) { 600 if (error) {
629 dev_err(&pdev->dev, 601 dev_err(&pdev->dev,
630 "Failed to allocate irq %u: %d\n", 602 "Failed to allocate irq %u: %d\n",
631 vmci_dev->msix_entries[1].vector, error); 603 pci_irq_vector(pdev, 1), error);
632 goto err_free_irq; 604 goto err_free_irq;
633 } 605 }
634 } 606 }
@@ -651,15 +623,12 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
651 return 0; 623 return 0;
652 624
653err_free_irq: 625err_free_irq:
654 free_irq(vmci_dev->irq, vmci_dev); 626 free_irq(pci_irq_vector(pdev, 0), vmci_dev);
655 tasklet_kill(&vmci_dev->datagram_tasklet); 627 tasklet_kill(&vmci_dev->datagram_tasklet);
656 tasklet_kill(&vmci_dev->bm_tasklet); 628 tasklet_kill(&vmci_dev->bm_tasklet);
657 629
658err_disable_msi: 630err_disable_msi:
659 if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) 631 pci_free_irq_vectors(pdev);
660 pci_disable_msix(pdev);
661 else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
662 pci_disable_msi(pdev);
663 632
664 vmci_err = vmci_event_unsubscribe(ctx_update_sub_id); 633 vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
665 if (vmci_err < VMCI_SUCCESS) 634 if (vmci_err < VMCI_SUCCESS)
@@ -719,14 +688,10 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
719 * MSI-X, we might have multiple vectors, each with their own 688 * MSI-X, we might have multiple vectors, each with their own
720 * IRQ, which we must free too. 689 * IRQ, which we must free too.
721 */ 690 */
722 free_irq(vmci_dev->irq, vmci_dev); 691 if (vmci_dev->exclusive_vectors)
723 if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) { 692 free_irq(pci_irq_vector(pdev, 1), vmci_dev);
724 if (vmci_dev->exclusive_vectors) 693 free_irq(pci_irq_vector(pdev, 0), vmci_dev);
725 free_irq(vmci_dev->msix_entries[1].vector, vmci_dev); 694 pci_free_irq_vectors(pdev);
726 pci_disable_msix(pdev);
727 } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
728 pci_disable_msi(pdev);
729 }
730 695
731 tasklet_kill(&vmci_dev->datagram_tasklet); 696 tasklet_kill(&vmci_dev->datagram_tasklet);
732 tasklet_kill(&vmci_dev->bm_tasklet); 697 tasklet_kill(&vmci_dev->bm_tasklet);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index fd6ebbefd919..d35ebd993b38 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -703,8 +703,6 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
703 char *dest = start + (section_index * net_device->send_section_size) 703 char *dest = start + (section_index * net_device->send_section_size)
704 + pend_size; 704 + pend_size;
705 int i; 705 int i;
706 bool is_data_pkt = (skb != NULL) ? true : false;
707 bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
708 u32 msg_size = 0; 706 u32 msg_size = 0;
709 u32 padding = 0; 707 u32 padding = 0;
710 u32 remain = packet->total_data_buflen % net_device->pkt_align; 708 u32 remain = packet->total_data_buflen % net_device->pkt_align;
@@ -712,7 +710,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
712 packet->page_buf_cnt; 710 packet->page_buf_cnt;
713 711
714 /* Add padding */ 712 /* Add padding */
715 if (is_data_pkt && xmit_more && remain && 713 if (skb && skb->xmit_more && remain &&
716 !packet->cp_partial) { 714 !packet->cp_partial) {
717 padding = net_device->pkt_align - remain; 715 padding = net_device->pkt_align - remain;
718 rndis_msg->msg_len += padding; 716 rndis_msg->msg_len += padding;
@@ -754,7 +752,6 @@ static inline int netvsc_send_pkt(
754 int ret; 752 int ret;
755 struct hv_page_buffer *pgbuf; 753 struct hv_page_buffer *pgbuf;
756 u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound); 754 u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
757 bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
758 755
759 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; 756 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
760 if (skb != NULL) { 757 if (skb != NULL) {
@@ -778,16 +775,6 @@ static inline int netvsc_send_pkt(
778 if (out_channel->rescind) 775 if (out_channel->rescind)
779 return -ENODEV; 776 return -ENODEV;
780 777
781 /*
782 * It is possible that once we successfully place this packet
783 * on the ringbuffer, we may stop the queue. In that case, we want
784 * to notify the host independent of the xmit_more flag. We don't
785 * need to be precise here; in the worst case we may signal the host
786 * unnecessarily.
787 */
788 if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
789 xmit_more = false;
790
791 if (packet->page_buf_cnt) { 778 if (packet->page_buf_cnt) {
792 pgbuf = packet->cp_partial ? (*pb) + 779 pgbuf = packet->cp_partial ? (*pb) +
793 packet->rmsg_pgcnt : (*pb); 780 packet->rmsg_pgcnt : (*pb);
@@ -797,15 +784,13 @@ static inline int netvsc_send_pkt(
797 &nvmsg, 784 &nvmsg,
798 sizeof(struct nvsp_message), 785 sizeof(struct nvsp_message),
799 req_id, 786 req_id,
800 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED, 787 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
801 !xmit_more);
802 } else { 788 } else {
803 ret = vmbus_sendpacket_ctl(out_channel, &nvmsg, 789 ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
804 sizeof(struct nvsp_message), 790 sizeof(struct nvsp_message),
805 req_id, 791 req_id,
806 VM_PKT_DATA_INBAND, 792 VM_PKT_DATA_INBAND,
807 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED, 793 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
808 !xmit_more);
809 } 794 }
810 795
811 if (ret == 0) { 796 if (ret == 0) {
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 398ea7f54826..408b521ee520 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -608,7 +608,7 @@ static struct nvmem_device *nvmem_find(const char *name)
608/** 608/**
609 * of_nvmem_device_get() - Get nvmem device from a given id 609 * of_nvmem_device_get() - Get nvmem device from a given id
610 * 610 *
611 * @dev node: Device tree node that uses the nvmem device 611 * @np: Device tree node that uses the nvmem device.
612 * @id: nvmem name from nvmem-names property. 612 * @id: nvmem name from nvmem-names property.
613 * 613 *
614 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 614 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
@@ -634,8 +634,8 @@ EXPORT_SYMBOL_GPL(of_nvmem_device_get);
634/** 634/**
635 * nvmem_device_get() - Get nvmem device from a given id 635 * nvmem_device_get() - Get nvmem device from a given id
636 * 636 *
637 * @dev : Device that uses the nvmem device 637 * @dev: Device that uses the nvmem device.
638 * @id: nvmem name from nvmem-names property. 638 * @dev_name: name of the requested nvmem device.
639 * 639 *
640 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 640 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
641 * on success. 641 * on success.
@@ -674,6 +674,7 @@ static void devm_nvmem_device_release(struct device *dev, void *res)
674/** 674/**
675 * devm_nvmem_device_put() - put alredy got nvmem device 675 * devm_nvmem_device_put() - put alredy got nvmem device
676 * 676 *
677 * @dev: Device that uses the nvmem device.
677 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(), 678 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
678 * that needs to be released. 679 * that needs to be released.
679 */ 680 */
@@ -702,8 +703,8 @@ EXPORT_SYMBOL_GPL(nvmem_device_put);
702/** 703/**
703 * devm_nvmem_device_get() - Get nvmem cell of device form a given id 704 * devm_nvmem_device_get() - Get nvmem cell of device form a given id
704 * 705 *
705 * @dev node: Device tree node that uses the nvmem cell 706 * @dev: Device that requests the nvmem device.
706 * @id: nvmem name in nvmems property. 707 * @id: name id for the requested nvmem device.
707 * 708 *
708 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell 709 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
709 * on success. The nvmem_cell will be freed by the automatically once the 710 * on success. The nvmem_cell will be freed by the automatically once the
@@ -745,8 +746,10 @@ static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
745/** 746/**
746 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id 747 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
747 * 748 *
748 * @dev node: Device tree node that uses the nvmem cell 749 * @np: Device tree node that uses the nvmem cell.
749 * @id: nvmem cell name from nvmem-cell-names property. 750 * @name: nvmem cell name from nvmem-cell-names property, or NULL
751 * for the cell at index 0 (the lone cell with no accompanying
752 * nvmem-cell-names property).
750 * 753 *
751 * Return: Will be an ERR_PTR() on error or a valid pointer 754 * Return: Will be an ERR_PTR() on error or a valid pointer
752 * to a struct nvmem_cell. The nvmem_cell will be freed by the 755 * to a struct nvmem_cell. The nvmem_cell will be freed by the
@@ -759,9 +762,12 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
759 struct nvmem_cell *cell; 762 struct nvmem_cell *cell;
760 struct nvmem_device *nvmem; 763 struct nvmem_device *nvmem;
761 const __be32 *addr; 764 const __be32 *addr;
762 int rval, len, index; 765 int rval, len;
766 int index = 0;
763 767
764 index = of_property_match_string(np, "nvmem-cell-names", name); 768 /* if cell name exists, find index to the name */
769 if (name)
770 index = of_property_match_string(np, "nvmem-cell-names", name);
765 771
766 cell_np = of_parse_phandle(np, "nvmem-cells", index); 772 cell_np = of_parse_phandle(np, "nvmem-cells", index);
767 if (!cell_np) 773 if (!cell_np)
@@ -830,8 +836,8 @@ EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
830/** 836/**
831 * nvmem_cell_get() - Get nvmem cell of device form a given cell name 837 * nvmem_cell_get() - Get nvmem cell of device form a given cell name
832 * 838 *
833 * @dev node: Device tree node that uses the nvmem cell 839 * @dev: Device that requests the nvmem cell.
834 * @id: nvmem cell name to get. 840 * @cell_id: nvmem cell name to get.
835 * 841 *
836 * Return: Will be an ERR_PTR() on error or a valid pointer 842 * Return: Will be an ERR_PTR() on error or a valid pointer
837 * to a struct nvmem_cell. The nvmem_cell will be freed by the 843 * to a struct nvmem_cell. The nvmem_cell will be freed by the
@@ -859,8 +865,8 @@ static void devm_nvmem_cell_release(struct device *dev, void *res)
859/** 865/**
860 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id 866 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id
861 * 867 *
862 * @dev node: Device tree node that uses the nvmem cell 868 * @dev: Device that requests the nvmem cell.
863 * @id: nvmem id in nvmem-names property. 869 * @id: nvmem cell name id to get.
864 * 870 *
865 * Return: Will be an ERR_PTR() on error or a valid pointer 871 * Return: Will be an ERR_PTR() on error or a valid pointer
866 * to a struct nvmem_cell. The nvmem_cell will be freed by the 872 * to a struct nvmem_cell. The nvmem_cell will be freed by the
@@ -900,7 +906,8 @@ static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
900 * devm_nvmem_cell_put() - Release previously allocated nvmem cell 906 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
901 * from devm_nvmem_cell_get. 907 * from devm_nvmem_cell_get.
902 * 908 *
903 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get() 909 * @dev: Device that requests the nvmem cell.
910 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
904 */ 911 */
905void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell) 912void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
906{ 913{
@@ -916,7 +923,7 @@ EXPORT_SYMBOL(devm_nvmem_cell_put);
916/** 923/**
917 * nvmem_cell_put() - Release previously allocated nvmem cell. 924 * nvmem_cell_put() - Release previously allocated nvmem cell.
918 * 925 *
919 * @cell: Previously allocated nvmem cell by nvmem_cell_get() 926 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
920 */ 927 */
921void nvmem_cell_put(struct nvmem_cell *cell) 928void nvmem_cell_put(struct nvmem_cell *cell)
922{ 929{
@@ -970,7 +977,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
970 if (cell->bit_offset || cell->nbits) 977 if (cell->bit_offset || cell->nbits)
971 nvmem_shift_read_buffer_in_place(cell, buf); 978 nvmem_shift_read_buffer_in_place(cell, buf);
972 979
973 *len = cell->bytes; 980 if (len)
981 *len = cell->bytes;
974 982
975 return 0; 983 return 0;
976} 984}
@@ -979,7 +987,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
979 * nvmem_cell_read() - Read a given nvmem cell 987 * nvmem_cell_read() - Read a given nvmem cell
980 * 988 *
981 * @cell: nvmem cell to be read. 989 * @cell: nvmem cell to be read.
982 * @len: pointer to length of cell which will be populated on successful read. 990 * @len: pointer to length of cell which will be populated on successful read;
991 * can be NULL.
983 * 992 *
984 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The 993 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
985 * buffer should be freed by the consumer with a kfree(). 994 * buffer should be freed by the consumer with a kfree().
@@ -1126,7 +1135,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1126 * nvmem_device_cell_write() - Write cell to a given nvmem device 1135 * nvmem_device_cell_write() - Write cell to a given nvmem device
1127 * 1136 *
1128 * @nvmem: nvmem device to be written to. 1137 * @nvmem: nvmem device to be written to.
1129 * @info: nvmem cell info to be written 1138 * @info: nvmem cell info to be written.
1130 * @buf: buffer to be written to cell. 1139 * @buf: buffer to be written to cell.
1131 * 1140 *
1132 * Return: length of bytes written or negative error code on failure. 1141 * Return: length of bytes written or negative error code on failure.
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 8e7b120696fa..b8ca1e677b01 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -73,6 +73,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
73 { .compatible = "fsl,imx6q-ocotp", (void *)128 }, 73 { .compatible = "fsl,imx6q-ocotp", (void *)128 },
74 { .compatible = "fsl,imx6sl-ocotp", (void *)64 }, 74 { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
75 { .compatible = "fsl,imx6sx-ocotp", (void *)128 }, 75 { .compatible = "fsl,imx6sx-ocotp", (void *)128 },
76 { .compatible = "fsl,imx6ul-ocotp", (void *)128 },
76 { }, 77 { },
77}; 78};
78MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids); 79MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c
index 1f52462f4cdd..dd9ea463c2a4 100644
--- a/drivers/platform/goldfish/pdev_bus.c
+++ b/drivers/platform/goldfish/pdev_bus.c
@@ -157,23 +157,26 @@ static int goldfish_new_pdev(void)
157static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id) 157static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id)
158{ 158{
159 irqreturn_t ret = IRQ_NONE; 159 irqreturn_t ret = IRQ_NONE;
160
160 while (1) { 161 while (1) {
161 u32 op = readl(pdev_bus_base + PDEV_BUS_OP); 162 u32 op = readl(pdev_bus_base + PDEV_BUS_OP);
162 switch (op) {
163 case PDEV_BUS_OP_DONE:
164 return IRQ_NONE;
165 163
164 switch (op) {
166 case PDEV_BUS_OP_REMOVE_DEV: 165 case PDEV_BUS_OP_REMOVE_DEV:
167 goldfish_pdev_remove(); 166 goldfish_pdev_remove();
167 ret = IRQ_HANDLED;
168 break; 168 break;
169 169
170 case PDEV_BUS_OP_ADD_DEV: 170 case PDEV_BUS_OP_ADD_DEV:
171 goldfish_new_pdev(); 171 goldfish_new_pdev();
172 ret = IRQ_HANDLED;
172 break; 173 break;
174
175 case PDEV_BUS_OP_DONE:
176 default:
177 return ret;
173 } 178 }
174 ret = IRQ_HANDLED;
175 } 179 }
176 return ret;
177} 180}
178 181
179static int goldfish_pdev_bus_probe(struct platform_device *pdev) 182static int goldfish_pdev_bus_probe(struct platform_device *pdev)
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 50958f167305..48d5327d38d4 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -125,7 +125,7 @@ hv_uio_probe(struct hv_device *dev,
125 goto fail; 125 goto fail;
126 126
127 dev->channel->inbound.ring_buffer->interrupt_mask = 1; 127 dev->channel->inbound.ring_buffer->interrupt_mask = 1;
128 dev->channel->batched_reading = false; 128 set_channel_read_mode(dev->channel, HV_CALL_DIRECT);
129 129
130 /* Fill general uio info */ 130 /* Fill general uio info */
131 pdata->info.name = "uio_hv_generic"; 131 pdata->info.name = "uio_hv_generic";
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index bdbadaa47ef3..0035cf79760a 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1625,10 +1625,25 @@ static int vme_bus_probe(struct device *dev)
1625 return retval; 1625 return retval;
1626} 1626}
1627 1627
1628static int vme_bus_remove(struct device *dev)
1629{
1630 int retval = -ENODEV;
1631 struct vme_driver *driver;
1632 struct vme_dev *vdev = dev_to_vme_dev(dev);
1633
1634 driver = dev->platform_data;
1635
1636 if (driver->remove != NULL)
1637 retval = driver->remove(vdev);
1638
1639 return retval;
1640}
1641
1628struct bus_type vme_bus_type = { 1642struct bus_type vme_bus_type = {
1629 .name = "vme", 1643 .name = "vme",
1630 .match = vme_bus_match, 1644 .match = vme_bus_match,
1631 .probe = vme_bus_probe, 1645 .probe = vme_bus_probe,
1646 .remove = vme_bus_remove,
1632}; 1647};
1633EXPORT_SYMBOL(vme_bus_type); 1648EXPORT_SYMBOL(vme_bus_type);
1634 1649
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 049a884a756f..be77b7914fad 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -153,6 +153,9 @@ struct ds_device
153 */ 153 */
154 u16 spu_bit; 154 u16 spu_bit;
155 155
156 u8 st_buf[ST_SIZE];
157 u8 byte_buf;
158
156 struct w1_bus_master master; 159 struct w1_bus_master master;
157}; 160};
158 161
@@ -174,7 +177,6 @@ struct ds_status
174 u8 data_in_buffer_status; 177 u8 data_in_buffer_status;
175 u8 reserved1; 178 u8 reserved1;
176 u8 reserved2; 179 u8 reserved2;
177
178}; 180};
179 181
180static struct usb_device_id ds_id_table [] = { 182static struct usb_device_id ds_id_table [] = {
@@ -244,28 +246,6 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
244 return err; 246 return err;
245} 247}
246 248
247static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
248 unsigned char *buf, int size)
249{
250 int count, err;
251
252 memset(st, 0, sizeof(*st));
253
254 count = 0;
255 err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
256 dev->ep[EP_STATUS]), buf, size, &count, 1000);
257 if (err < 0) {
258 pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
259 dev->ep[EP_STATUS], err);
260 return err;
261 }
262
263 if (count >= sizeof(*st))
264 memcpy(st, buf, sizeof(*st));
265
266 return count;
267}
268
269static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off) 249static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
270{ 250{
271 pr_info("%45s: %8x\n", str, buf[off]); 251 pr_info("%45s: %8x\n", str, buf[off]);
@@ -324,6 +304,35 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
324 } 304 }
325} 305}
326 306
307static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
308 bool dump)
309{
310 int count, err;
311
312 if (st)
313 memset(st, 0, sizeof(*st));
314
315 count = 0;
316 err = usb_interrupt_msg(dev->udev,
317 usb_rcvintpipe(dev->udev,
318 dev->ep[EP_STATUS]),
319 dev->st_buf, sizeof(dev->st_buf),
320 &count, 1000);
321 if (err < 0) {
322 pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
323 dev->ep[EP_STATUS], err);
324 return err;
325 }
326
327 if (dump)
328 ds_dump_status(dev, dev->st_buf, count);
329
330 if (st && count >= sizeof(*st))
331 memcpy(st, dev->st_buf, sizeof(*st));
332
333 return count;
334}
335
327static void ds_reset_device(struct ds_device *dev) 336static void ds_reset_device(struct ds_device *dev)
328{ 337{
329 ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0); 338 ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0);
@@ -344,7 +353,6 @@ static void ds_reset_device(struct ds_device *dev)
344static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size) 353static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
345{ 354{
346 int count, err; 355 int count, err;
347 struct ds_status st;
348 356
349 /* Careful on size. If size is less than what is available in 357 /* Careful on size. If size is less than what is available in
350 * the input buffer, the device fails the bulk transfer and 358 * the input buffer, the device fails the bulk transfer and
@@ -359,14 +367,9 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
359 err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]), 367 err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
360 buf, size, &count, 1000); 368 buf, size, &count, 1000);
361 if (err < 0) { 369 if (err < 0) {
362 u8 buf[ST_SIZE];
363 int count;
364
365 pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]); 370 pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
366 usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN])); 371 usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
367 372 ds_recv_status(dev, NULL, true);
368 count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
369 ds_dump_status(dev, buf, count);
370 return err; 373 return err;
371 } 374 }
372 375
@@ -404,7 +407,6 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
404{ 407{
405 struct ds_status st; 408 struct ds_status st;
406 int count = 0, err = 0; 409 int count = 0, err = 0;
407 u8 buf[ST_SIZE];
408 410
409 do { 411 do {
410 err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0); 412 err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
@@ -413,7 +415,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
413 err = ds_send_control(dev, CTL_RESUME_EXE, 0); 415 err = ds_send_control(dev, CTL_RESUME_EXE, 0);
414 if (err) 416 if (err)
415 break; 417 break;
416 err = ds_recv_status_nodump(dev, &st, buf, sizeof(buf)); 418 err = ds_recv_status(dev, &st, false);
417 if (err) 419 if (err)
418 break; 420 break;
419 421
@@ -456,18 +458,17 @@ int ds_detect(struct ds_device *dev, struct ds_status *st)
456 458
457static int ds_wait_status(struct ds_device *dev, struct ds_status *st) 459static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
458{ 460{
459 u8 buf[ST_SIZE];
460 int err, count = 0; 461 int err, count = 0;
461 462
462 do { 463 do {
463 st->status = 0; 464 st->status = 0;
464 err = ds_recv_status_nodump(dev, st, buf, sizeof(buf)); 465 err = ds_recv_status(dev, st, false);
465#if 0 466#if 0
466 if (err >= 0) { 467 if (err >= 0) {
467 int i; 468 int i;
468 printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err); 469 printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err);
469 for (i=0; i<err; ++i) 470 for (i=0; i<err; ++i)
470 printk("%02x ", buf[i]); 471 printk("%02x ", dev->st_buf[i]);
471 printk("\n"); 472 printk("\n");
472 } 473 }
473#endif 474#endif
@@ -485,7 +486,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
485 * can do something with it). 486 * can do something with it).
486 */ 487 */
487 if (err > 16 || count >= 100 || err < 0) 488 if (err > 16 || count >= 100 || err < 0)
488 ds_dump_status(dev, buf, err); 489 ds_dump_status(dev, dev->st_buf, err);
489 490
490 /* Extended data isn't an error. Well, a short is, but the dump 491 /* Extended data isn't an error. Well, a short is, but the dump
491 * would have already told the user that and we can't do anything 492 * would have already told the user that and we can't do anything
@@ -608,7 +609,6 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
608{ 609{
609 int err; 610 int err;
610 struct ds_status st; 611 struct ds_status st;
611 u8 rbyte;
612 612
613 err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte); 613 err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte);
614 if (err) 614 if (err)
@@ -621,11 +621,11 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
621 if (err) 621 if (err)
622 return err; 622 return err;
623 623
624 err = ds_recv_data(dev, &rbyte, sizeof(rbyte)); 624 err = ds_recv_data(dev, &dev->byte_buf, 1);
625 if (err < 0) 625 if (err < 0)
626 return err; 626 return err;
627 627
628 return !(byte == rbyte); 628 return !(byte == dev->byte_buf);
629} 629}
630 630
631static int ds_read_byte(struct ds_device *dev, u8 *byte) 631static int ds_read_byte(struct ds_device *dev, u8 *byte)
@@ -712,7 +712,6 @@ static void ds9490r_search(void *data, struct w1_master *master,
712 int err; 712 int err;
713 u16 value, index; 713 u16 value, index;
714 struct ds_status st; 714 struct ds_status st;
715 u8 st_buf[ST_SIZE];
716 int search_limit; 715 int search_limit;
717 int found = 0; 716 int found = 0;
718 int i; 717 int i;
@@ -724,7 +723,12 @@ static void ds9490r_search(void *data, struct w1_master *master,
724 /* FIFO 128 bytes, bulk packet size 64, read a multiple of the 723 /* FIFO 128 bytes, bulk packet size 64, read a multiple of the
725 * packet size. 724 * packet size.
726 */ 725 */
727 u64 buf[2*64/8]; 726 const size_t bufsize = 2 * 64;
727 u64 *buf;
728
729 buf = kmalloc(bufsize, GFP_KERNEL);
730 if (!buf)
731 return;
728 732
729 mutex_lock(&master->bus_mutex); 733 mutex_lock(&master->bus_mutex);
730 734
@@ -745,10 +749,9 @@ static void ds9490r_search(void *data, struct w1_master *master,
745 do { 749 do {
746 schedule_timeout(jtime); 750 schedule_timeout(jtime);
747 751
748 if (ds_recv_status_nodump(dev, &st, st_buf, sizeof(st_buf)) < 752 err = ds_recv_status(dev, &st, false);
749 sizeof(st)) { 753 if (err < 0 || err < sizeof(st))
750 break; 754 break;
751 }
752 755
753 if (st.data_in_buffer_status) { 756 if (st.data_in_buffer_status) {
754 /* Bulk in can receive partial ids, but when it does 757 /* Bulk in can receive partial ids, but when it does
@@ -758,7 +761,7 @@ static void ds9490r_search(void *data, struct w1_master *master,
758 * bulk without first checking if status says there 761 * bulk without first checking if status says there
759 * is data to read. 762 * is data to read.
760 */ 763 */
761 err = ds_recv_data(dev, (u8 *)buf, sizeof(buf)); 764 err = ds_recv_data(dev, (u8 *)buf, bufsize);
762 if (err < 0) 765 if (err < 0)
763 break; 766 break;
764 for (i = 0; i < err/8; ++i) { 767 for (i = 0; i < err/8; ++i) {
@@ -794,9 +797,14 @@ static void ds9490r_search(void *data, struct w1_master *master,
794 } 797 }
795search_out: 798search_out:
796 mutex_unlock(&master->bus_mutex); 799 mutex_unlock(&master->bus_mutex);
800 kfree(buf);
797} 801}
798 802
799#if 0 803#if 0
804/*
805 * FIXME: if this disabled code is ever used in the future all ds_send_data()
806 * calls must be changed to use a DMAable buffer.
807 */
800static int ds_match_access(struct ds_device *dev, u64 init) 808static int ds_match_access(struct ds_device *dev, u64 init)
801{ 809{
802 int err; 810 int err;
@@ -845,13 +853,12 @@ static int ds_set_path(struct ds_device *dev, u64 init)
845 853
846static u8 ds9490r_touch_bit(void *data, u8 bit) 854static u8 ds9490r_touch_bit(void *data, u8 bit)
847{ 855{
848 u8 ret;
849 struct ds_device *dev = data; 856 struct ds_device *dev = data;
850 857
851 if (ds_touch_bit(dev, bit, &ret)) 858 if (ds_touch_bit(dev, bit, &dev->byte_buf))
852 return 0; 859 return 0;
853 860
854 return ret; 861 return dev->byte_buf;
855} 862}
856 863
857#if 0 864#if 0
@@ -866,13 +873,12 @@ static u8 ds9490r_read_bit(void *data)
866{ 873{
867 struct ds_device *dev = data; 874 struct ds_device *dev = data;
868 int err; 875 int err;
869 u8 bit = 0;
870 876
871 err = ds_touch_bit(dev, 1, &bit); 877 err = ds_touch_bit(dev, 1, &dev->byte_buf);
872 if (err) 878 if (err)
873 return 0; 879 return 0;
874 880
875 return bit & 1; 881 return dev->byte_buf & 1;
876} 882}
877#endif 883#endif
878 884
@@ -887,32 +893,51 @@ static u8 ds9490r_read_byte(void *data)
887{ 893{
888 struct ds_device *dev = data; 894 struct ds_device *dev = data;
889 int err; 895 int err;
890 u8 byte = 0;
891 896
892 err = ds_read_byte(dev, &byte); 897 err = ds_read_byte(dev, &dev->byte_buf);
893 if (err) 898 if (err)
894 return 0; 899 return 0;
895 900
896 return byte; 901 return dev->byte_buf;
897} 902}
898 903
899static void ds9490r_write_block(void *data, const u8 *buf, int len) 904static void ds9490r_write_block(void *data, const u8 *buf, int len)
900{ 905{
901 struct ds_device *dev = data; 906 struct ds_device *dev = data;
907 u8 *tbuf;
908
909 if (len <= 0)
910 return;
911
912 tbuf = kmemdup(buf, len, GFP_KERNEL);
913 if (!tbuf)
914 return;
902 915
903 ds_write_block(dev, (u8 *)buf, len); 916 ds_write_block(dev, tbuf, len);
917
918 kfree(tbuf);
904} 919}
905 920
906static u8 ds9490r_read_block(void *data, u8 *buf, int len) 921static u8 ds9490r_read_block(void *data, u8 *buf, int len)
907{ 922{
908 struct ds_device *dev = data; 923 struct ds_device *dev = data;
909 int err; 924 int err;
925 u8 *tbuf;
910 926
911 err = ds_read_block(dev, buf, len); 927 if (len <= 0)
912 if (err < 0) 928 return 0;
929
930 tbuf = kmalloc(len, GFP_KERNEL);
931 if (!tbuf)
913 return 0; 932 return 0;
914 933
915 return len; 934 err = ds_read_block(dev, tbuf, len);
935 if (err >= 0)
936 memcpy(buf, tbuf, len);
937
938 kfree(tbuf);
939
940 return err >= 0 ? len : 0;
916} 941}
917 942
918static u8 ds9490r_reset(void *data) 943static u8 ds9490r_reset(void *data)
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index bb09de633939..fb190c259607 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -715,7 +715,7 @@ static int omap_hdq_probe(struct platform_device *pdev)
715 ret = _omap_hdq_reset(hdq_data); 715 ret = _omap_hdq_reset(hdq_data);
716 if (ret) { 716 if (ret) {
717 dev_dbg(&pdev->dev, "reset failed\n"); 717 dev_dbg(&pdev->dev, "reset failed\n");
718 return -EINVAL; 718 goto err_irq;
719 } 719 }
720 720
721 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION); 721 rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index cfe74d09932e..0ef9f2663dbd 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -16,6 +16,14 @@ config W1_SLAVE_SMEM
16 Say Y here if you want to connect 1-wire 16 Say Y here if you want to connect 1-wire
17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire. 17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
18 18
19config W1_SLAVE_DS2405
20 tristate "DS2405 Addressable Switch"
21 help
22 Say Y or M here if you want to use a DS2405 1-wire
23 single-channel addressable switch.
24 This device can also work as a single-channel
25 binary remote sensor.
26
19config W1_SLAVE_DS2408 27config W1_SLAVE_DS2408
20 tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)" 28 tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
21 help 29 help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 1e9989afe7bf..b4a358955ef9 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o 5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o 6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
7obj-$(CONFIG_W1_SLAVE_DS2405) += w1_ds2405.o
7obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o 8obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
8obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o 9obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
9obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o 10obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
diff --git a/drivers/w1/slaves/w1_ds2405.c b/drivers/w1/slaves/w1_ds2405.c
new file mode 100644
index 000000000000..d5d54876cb64
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2405.c
@@ -0,0 +1,227 @@
1/*
2 * w1_ds2405.c
3 *
4 * Copyright (c) 2017 Maciej S. Szmigiero <mail@maciej.szmigiero.name>
5 * Based on w1_therm.c copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the therms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/device.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/mutex.h>
24#include <linux/string.h>
25#include <linux/types.h>
26
27#include "../w1.h"
28#include "../w1_family.h"
29
30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
32MODULE_DESCRIPTION("Driver for 1-wire Dallas DS2405 PIO.");
33MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2405));
34
35static int w1_ds2405_select(struct w1_slave *sl, bool only_active)
36{
37 struct w1_master *dev = sl->master;
38
39 u64 dev_addr = le64_to_cpu(*(u64 *)&sl->reg_num);
40 unsigned int bit_ctr;
41
42 if (w1_reset_bus(dev) != 0)
43 return 0;
44
45 /*
46 * We cannot use a normal Match ROM command
47 * since doing so would toggle PIO state
48 */
49 w1_write_8(dev, only_active ? W1_ALARM_SEARCH : W1_SEARCH);
50
51 for (bit_ctr = 0; bit_ctr < 64; bit_ctr++) {
52 int bit2send = !!(dev_addr & BIT(bit_ctr));
53 u8 ret;
54
55 ret = w1_triplet(dev, bit2send);
56
57 if ((ret & (BIT(0) | BIT(1))) ==
58 (BIT(0) | BIT(1))) /* no devices found */
59 return 0;
60
61 if (!!(ret & BIT(2)) != bit2send)
62 /* wrong direction taken - no such device */
63 return 0;
64 }
65
66 return 1;
67}
68
69static int w1_ds2405_read_pio(struct w1_slave *sl)
70{
71 if (w1_ds2405_select(sl, true))
72 return 0; /* "active" means PIO is low */
73
74 if (w1_ds2405_select(sl, false))
75 return 1;
76
77 return -ENODEV;
78}
79
80static ssize_t state_show(struct device *device,
81 struct device_attribute *attr, char *buf)
82{
83 struct w1_slave *sl = dev_to_w1_slave(device);
84 struct w1_master *dev = sl->master;
85
86 int ret;
87 ssize_t f_retval;
88 u8 state;
89
90 ret = mutex_lock_interruptible(&dev->bus_mutex);
91 if (ret)
92 return ret;
93
94 if (!w1_ds2405_select(sl, false)) {
95 f_retval = -ENODEV;
96 goto out_unlock;
97 }
98
99 state = w1_read_8(dev);
100 if (state != 0 &&
101 state != 0xff) {
102 dev_err(device, "non-consistent state %x\n", state);
103 f_retval = -EIO;
104 goto out_unlock;
105 }
106
107 *buf = state ? '1' : '0';
108 f_retval = 1;
109
110out_unlock:
111 w1_reset_bus(dev);
112 mutex_unlock(&dev->bus_mutex);
113
114 return f_retval;
115}
116
117static ssize_t output_show(struct device *device,
118 struct device_attribute *attr, char *buf)
119{
120 struct w1_slave *sl = dev_to_w1_slave(device);
121 struct w1_master *dev = sl->master;
122
123 int ret;
124 ssize_t f_retval;
125
126 ret = mutex_lock_interruptible(&dev->bus_mutex);
127 if (ret)
128 return ret;
129
130 ret = w1_ds2405_read_pio(sl);
131 if (ret < 0) {
132 f_retval = ret;
133 goto out_unlock;
134 }
135
136 *buf = ret ? '1' : '0';
137 f_retval = 1;
138
139out_unlock:
140 w1_reset_bus(dev);
141 mutex_unlock(&dev->bus_mutex);
142
143 return f_retval;
144}
145
146static ssize_t output_store(struct device *device,
147 struct device_attribute *attr,
148 const char *buf, size_t count)
149{
150 struct w1_slave *sl = dev_to_w1_slave(device);
151 struct w1_master *dev = sl->master;
152
153 int ret, current_pio;
154 unsigned int val;
155 ssize_t f_retval;
156
157 if (count < 1)
158 return -EINVAL;
159
160 if (sscanf(buf, " %u%n", &val, &ret) < 1)
161 return -EINVAL;
162
163 if (val != 0 && val != 1)
164 return -EINVAL;
165
166 f_retval = ret;
167
168 ret = mutex_lock_interruptible(&dev->bus_mutex);
169 if (ret)
170 return ret;
171
172 current_pio = w1_ds2405_read_pio(sl);
173 if (current_pio < 0) {
174 f_retval = current_pio;
175 goto out_unlock;
176 }
177
178 if (current_pio == val)
179 goto out_unlock;
180
181 if (w1_reset_bus(dev) != 0) {
182 f_retval = -ENODEV;
183 goto out_unlock;
184 }
185
186 /*
187 * can't use w1_reset_select_slave() here since it uses Skip ROM if
188 * there is only one device on bus
189 */
190 do {
191 u64 dev_addr = le64_to_cpu(*(u64 *)&sl->reg_num);
192 u8 cmd[9];
193
194 cmd[0] = W1_MATCH_ROM;
195 memcpy(&cmd[1], &dev_addr, sizeof(dev_addr));
196
197 w1_write_block(dev, cmd, sizeof(cmd));
198 } while (0);
199
200out_unlock:
201 w1_reset_bus(dev);
202 mutex_unlock(&dev->bus_mutex);
203
204 return f_retval;
205}
206
207static DEVICE_ATTR_RO(state);
208static DEVICE_ATTR_RW(output);
209
210static struct attribute *w1_ds2405_attrs[] = {
211 &dev_attr_state.attr,
212 &dev_attr_output.attr,
213 NULL
214};
215
216ATTRIBUTE_GROUPS(w1_ds2405);
217
218static struct w1_family_ops w1_ds2405_fops = {
219 .groups = w1_ds2405_groups
220};
221
222static struct w1_family w1_family_ds2405 = {
223 .fid = W1_FAMILY_DS2405,
224 .fops = &w1_ds2405_fops
225};
226
227module_w1_family(w1_family_ds2405);
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index e213c678bbfe..90a3d9338fd2 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1.c
3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#include <linux/delay.h> 15#include <linux/delay.h>
@@ -763,6 +756,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
763 dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__, 756 dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__,
764 sl->name); 757 sl->name);
765 w1_family_put(sl->family); 758 w1_family_put(sl->family);
759 atomic_dec(&sl->master->refcnt);
766 kfree(sl); 760 kfree(sl);
767 return err; 761 return err;
768 } 762 }
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 129895f562b0..758a7a6322e9 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1.h
3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#ifndef __W1_H 15#ifndef __W1_H
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 1dc3051f7d76..df1c9bb90eb5 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1_family.c
3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 10a7a0767187..c4a6b257a367 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1_family.h
3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#ifndef __W1_FAMILY_H 15#ifndef __W1_FAMILY_H
@@ -30,6 +23,7 @@
30#define W1_FAMILY_BQ27000 0x01 23#define W1_FAMILY_BQ27000 0x01
31#define W1_FAMILY_SMEM_01 0x01 24#define W1_FAMILY_SMEM_01 0x01
32#define W1_FAMILY_SMEM_81 0x81 25#define W1_FAMILY_SMEM_81 0x81
26#define W1_FAMILY_DS2405 0x05
33#define W1_THERM_DS18S20 0x10 27#define W1_THERM_DS18S20 0x10
34#define W1_FAMILY_DS28E04 0x1C 28#define W1_FAMILY_DS28E04 0x1C
35#define W1_COUNTER_DS2423 0x1D 29#define W1_COUNTER_DS2423 0x1D
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 20f766afa4c7..4ce1b66d5092 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1_int.c
3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#include <linux/kernel.h> 15#include <linux/kernel.h>
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h
index 2ad7d4414bed..371989159216 100644
--- a/drivers/w1/w1_int.h
+++ b/drivers/w1/w1_int.h
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1_int.h
3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#ifndef __W1_INT_H 15#ifndef __W1_INT_H
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index f4bc8c100a01..de8bebc27896 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1_io.c
3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#include <asm/io.h> 15#include <asm/io.h>
@@ -233,6 +226,7 @@ u8 w1_triplet(struct w1_master *dev, int bdir)
233 return retval; 226 return retval;
234 } 227 }
235} 228}
229EXPORT_SYMBOL_GPL(w1_triplet);
236 230
237/** 231/**
238 * w1_read_8() - Reads 8 bits. 232 * w1_read_8() - Reads 8 bits.
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h
index f9eecff23b8d..dd1422b6afbb 100644
--- a/drivers/w1/w1_log.h
+++ b/drivers/w1/w1_log.h
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1_log.h
3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#ifndef __W1_LOG_H 15#ifndef __W1_LOG_H
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 881597a191b8..49e520ca79c5 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1_netlink.c
3 *
4 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#include <linux/slab.h> 15#include <linux/slab.h>
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index c99a9ce05e62..b389e5ff5fa5 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -1,9 +1,6 @@
1/* 1/*
2 * w1_netlink.h
3 *
4 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net> 2 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
5 * 3 *
6 *
7 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
@@ -13,10 +10,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 12 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 13 */
21 14
22#ifndef __W1_NETLINK_H 15#ifndef __W1_NETLINK_H
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b871c0cb1f02..7010fb01a81a 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -46,7 +46,18 @@
46#define EXTCON_USB 1 46#define EXTCON_USB 1
47#define EXTCON_USB_HOST 2 47#define EXTCON_USB_HOST 2
48 48
49/* Charging external connector */ 49/*
50 * Charging external connector
51 *
52 * When one SDP charger connector was reported, we should also report
53 * the USB connector, which means EXTCON_CHG_USB_SDP should always
54 * appear together with EXTCON_USB. The same as ACA charger connector,
55 * EXTCON_CHG_USB_ACA would normally appear with EXTCON_USB_HOST.
56 *
57 * The EXTCON_CHG_USB_SLOW connector can provide at least 500mA of
58 * current at 5V. The EXTCON_CHG_USB_FAST connector can provide at
59 * least 1A of current at 5V.
60 */
50#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */ 61#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */
51#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */ 62#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */
52#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */ 63#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */
@@ -54,6 +65,7 @@
54#define EXTCON_CHG_USB_FAST 9 65#define EXTCON_CHG_USB_FAST 9
55#define EXTCON_CHG_USB_SLOW 10 66#define EXTCON_CHG_USB_SLOW 10
56#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */ 67#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */
68#define EXTCON_CHG_USB_PD 12 /* USB Power Delivery */
57 69
58/* Jack external connector */ 70/* Jack external connector */
59#define EXTCON_JACK_MICROPHONE 20 71#define EXTCON_JACK_MICROPHONE 20
@@ -160,62 +172,7 @@ union extcon_property_value {
160}; 172};
161 173
162struct extcon_cable; 174struct extcon_cable;
163 175struct extcon_dev;
164/**
165 * struct extcon_dev - An extcon device represents one external connector.
166 * @name: The name of this extcon device. Parent device name is
167 * used if NULL.
168 * @supported_cable: Array of supported cable names ending with EXTCON_NONE.
169 * If supported_cable is NULL, cable name related APIs
170 * are disabled.
171 * @mutually_exclusive: Array of mutually exclusive set of cables that cannot
172 * be attached simultaneously. The array should be
173 * ending with NULL or be NULL (no mutually exclusive
174 * cables). For example, if it is { 0x7, 0x30, 0}, then,
175 * {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
176 * be attached simulataneously. {0x7, 0} is equivalent to
177 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
178 * can be no simultaneous connections.
179 * @dev: Device of this extcon.
180 * @state: Attach/detach state of this extcon. Do not provide at
181 * register-time.
182 * @nh: Notifier for the state change events from this extcon
183 * @entry: To support list of extcon devices so that users can
184 * search for extcon devices based on the extcon name.
185 * @lock:
186 * @max_supported: Internal value to store the number of cables.
187 * @extcon_dev_type: Device_type struct to provide attribute_groups
188 * customized for each extcon device.
189 * @cables: Sysfs subdirectories. Each represents one cable.
190 *
191 * In most cases, users only need to provide "User initializing data" of
192 * this struct when registering an extcon. In some exceptional cases,
193 * optional callbacks may be needed. However, the values in "internal data"
194 * are overwritten by register function.
195 */
196struct extcon_dev {
197 /* Optional user initializing data */
198 const char *name;
199 const unsigned int *supported_cable;
200 const u32 *mutually_exclusive;
201
202 /* Internal data. Please do not set. */
203 struct device dev;
204 struct raw_notifier_head *nh;
205 struct list_head entry;
206 int max_supported;
207 spinlock_t lock; /* could be called by irq handler */
208 u32 state;
209
210 /* /sys/class/extcon/.../cable.n/... */
211 struct device_type extcon_dev_type;
212 struct extcon_cable *cables;
213
214 /* /sys/class/extcon/.../mutually_exclusive/... */
215 struct attribute_group attr_g_muex;
216 struct attribute **attrs_muex;
217 struct device_attribute *d_attrs_muex;
218};
219 176
220#if IS_ENABLED(CONFIG_EXTCON) 177#if IS_ENABLED(CONFIG_EXTCON)
221 178
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index a0e03b13b449..2aa32075bca1 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -59,7 +59,7 @@ struct adc_jack_pdata {
59 const char *name; 59 const char *name;
60 const char *consumer_channel; 60 const char *consumer_channel;
61 61
62 const enum extcon *cable_names; 62 const unsigned int *cable_names;
63 63
64 /* The last entry's state should be 0 */ 64 /* The last entry's state should be 0 */
65 struct adc_jack_cond *adc_conditions; 65 struct adc_jack_cond *adc_conditions;
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 16551d5eac36..57beb5d09bfc 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -22,6 +22,7 @@
22#define _LINUX_FPGA_MGR_H 22#define _LINUX_FPGA_MGR_H
23 23
24struct fpga_manager; 24struct fpga_manager;
25struct sg_table;
25 26
26/** 27/**
27 * enum fpga_mgr_states - fpga framework states 28 * enum fpga_mgr_states - fpga framework states
@@ -88,6 +89,7 @@ struct fpga_image_info {
88 * @state: returns an enum value of the FPGA's state 89 * @state: returns an enum value of the FPGA's state
89 * @write_init: prepare the FPGA to receive confuration data 90 * @write_init: prepare the FPGA to receive confuration data
90 * @write: write count bytes of configuration data to the FPGA 91 * @write: write count bytes of configuration data to the FPGA
92 * @write_sg: write the scatter list of configuration data to the FPGA
91 * @write_complete: set FPGA to operating state after writing is done 93 * @write_complete: set FPGA to operating state after writing is done
92 * @fpga_remove: optional: Set FPGA into a specific state during driver remove 94 * @fpga_remove: optional: Set FPGA into a specific state during driver remove
93 * 95 *
@@ -102,6 +104,7 @@ struct fpga_manager_ops {
102 struct fpga_image_info *info, 104 struct fpga_image_info *info,
103 const char *buf, size_t count); 105 const char *buf, size_t count);
104 int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); 106 int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
107 int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt);
105 int (*write_complete)(struct fpga_manager *mgr, 108 int (*write_complete)(struct fpga_manager *mgr,
106 struct fpga_image_info *info); 109 struct fpga_image_info *info);
107 void (*fpga_remove)(struct fpga_manager *mgr); 110 void (*fpga_remove)(struct fpga_manager *mgr);
@@ -129,6 +132,8 @@ struct fpga_manager {
129 132
130int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, 133int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
131 const char *buf, size_t count); 134 const char *buf, size_t count);
135int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
136 struct sg_table *sgt);
132 137
133int fpga_mgr_firmware_load(struct fpga_manager *mgr, 138int fpga_mgr_firmware_load(struct fpga_manager *mgr,
134 struct fpga_image_info *info, 139 struct fpga_image_info *info,
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
new file mode 100644
index 000000000000..273cbf6400ea
--- /dev/null
+++ b/include/linux/fsi.h
@@ -0,0 +1,50 @@
1/* FSI device & driver interfaces
2 *
3 * Copyright (C) IBM Corporation 2016
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef LINUX_FSI_H
16#define LINUX_FSI_H
17
18#include <linux/device.h>
19
20struct fsi_device {
21 struct device dev;
22 u8 engine_type;
23 u8 version;
24};
25
26struct fsi_device_id {
27 u8 engine_type;
28 u8 version;
29};
30
31#define FSI_VERSION_ANY 0
32
33#define FSI_DEVICE(t) \
34 .engine_type = (t), .version = FSI_VERSION_ANY,
35
36#define FSI_DEVICE_VERSIONED(t, v) \
37 .engine_type = (t), .version = (v),
38
39
40struct fsi_driver {
41 struct device_driver drv;
42 const struct fsi_device_id *id_table;
43};
44
45#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
46#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
47
48extern struct bus_type fsi_bus_type;
49
50#endif /* LINUX_FSI_H */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 183efde54269..62bbf3c1aa4a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -32,11 +32,10 @@
32#include <linux/scatterlist.h> 32#include <linux/scatterlist.h>
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/timer.h> 34#include <linux/timer.h>
35#include <linux/workqueue.h>
36#include <linux/completion.h> 35#include <linux/completion.h>
37#include <linux/device.h> 36#include <linux/device.h>
38#include <linux/mod_devicetable.h> 37#include <linux/mod_devicetable.h>
39 38#include <linux/interrupt.h>
40 39
41#define MAX_PAGE_BUFFER_COUNT 32 40#define MAX_PAGE_BUFFER_COUNT 32
42#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ 41#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -139,8 +138,8 @@ struct hv_ring_buffer_info {
139 * for the specified ring buffer 138 * for the specified ring buffer
140 */ 139 */
141static inline void 140static inline void
142hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, 141hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
143 u32 *read, u32 *write) 142 u32 *read, u32 *write)
144{ 143{
145 u32 read_loc, write_loc, dsize; 144 u32 read_loc, write_loc, dsize;
146 145
@@ -154,7 +153,7 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
154 *read = dsize - *write; 153 *read = dsize - *write;
155} 154}
156 155
157static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) 156static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
158{ 157{
159 u32 read_loc, write_loc, dsize, read; 158 u32 read_loc, write_loc, dsize, read;
160 159
@@ -168,7 +167,7 @@ static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
168 return read; 167 return read;
169} 168}
170 169
171static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) 170static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
172{ 171{
173 u32 read_loc, write_loc, dsize, write; 172 u32 read_loc, write_loc, dsize, write;
174 173
@@ -641,6 +640,7 @@ struct vmbus_channel_msginfo {
641 640
642 /* Synchronize the request/response if needed */ 641 /* Synchronize the request/response if needed */
643 struct completion waitevent; 642 struct completion waitevent;
643 struct vmbus_channel *waiting_channel;
644 union { 644 union {
645 struct vmbus_channel_version_supported version_supported; 645 struct vmbus_channel_version_supported version_supported;
646 struct vmbus_channel_open_result open_result; 646 struct vmbus_channel_open_result open_result;
@@ -683,11 +683,6 @@ struct hv_input_signal_event_buffer {
683 struct hv_input_signal_event event; 683 struct hv_input_signal_event event;
684}; 684};
685 685
686enum hv_signal_policy {
687 HV_SIGNAL_POLICY_DEFAULT = 0,
688 HV_SIGNAL_POLICY_EXPLICIT,
689};
690
691enum hv_numa_policy { 686enum hv_numa_policy {
692 HV_BALANCED = 0, 687 HV_BALANCED = 0,
693 HV_LOCALIZED, 688 HV_LOCALIZED,
@@ -747,26 +742,27 @@ struct vmbus_channel {
747 742
748 struct vmbus_close_msg close_msg; 743 struct vmbus_close_msg close_msg;
749 744
750 /* Channel callback are invoked in this workqueue context */ 745 /* Channel callback's invoked in softirq context */
751 /* HANDLE dataWorkQueue; */ 746 struct tasklet_struct callback_event;
752
753 void (*onchannel_callback)(void *context); 747 void (*onchannel_callback)(void *context);
754 void *channel_callback_context; 748 void *channel_callback_context;
755 749
756 /* 750 /*
757 * A channel can be marked for efficient (batched) 751 * A channel can be marked for one of three modes of reading:
758 * reading: 752 * BATCHED - callback called from taslket and should read
759 * If batched_reading is set to "true", we read until the 753 * channel until empty. Interrupts from the host
760 * channel is empty and hold off interrupts from the host 754 * are masked while read is in process (default).
761 * during the entire read process. 755 * DIRECT - callback called from tasklet (softirq).
762 * If batched_reading is set to "false", the client is not 756 * ISR - callback called in interrupt context and must
763 * going to perform batched reading. 757 * invoke its own deferred processing.
764 * 758 * Host interrupts are disabled and must be re-enabled
765 * By default we will enable batched reading; specific 759 * when ring is empty.
766 * drivers that don't want this behavior can turn it off.
767 */ 760 */
768 761 enum hv_callback_mode {
769 bool batched_reading; 762 HV_CALL_BATCHED,
763 HV_CALL_DIRECT,
764 HV_CALL_ISR
765 } callback_mode;
770 766
771 bool is_dedicated_interrupt; 767 bool is_dedicated_interrupt;
772 struct hv_input_signal_event_buffer sig_buf; 768 struct hv_input_signal_event_buffer sig_buf;
@@ -850,23 +846,6 @@ struct vmbus_channel {
850 */ 846 */
851 struct list_head percpu_list; 847 struct list_head percpu_list;
852 /* 848 /*
853 * Host signaling policy: The default policy will be
854 * based on the ring buffer state. We will also support
855 * a policy where the client driver can have explicit
856 * signaling control.
857 */
858 enum hv_signal_policy signal_policy;
859 /*
860 * On the channel send side, many of the VMBUS
861 * device drivers explicity serialize access to the
862 * outgoing ring buffer. Give more control to the
863 * VMBUS device drivers in terms how to serialize
864 * accesss to the outgoing ring buffer.
865 * The default behavior will be to aquire the
866 * ring lock to preserve the current behavior.
867 */
868 bool acquire_ring_lock;
869 /*
870 * For performance critical channels (storage, networking 849 * For performance critical channels (storage, networking
871 * etc,), Hyper-V has a mechanism to enhance the throughput 850 * etc,), Hyper-V has a mechanism to enhance the throughput
872 * at the expense of latency: 851 * at the expense of latency:
@@ -906,32 +885,22 @@ struct vmbus_channel {
906 885
907}; 886};
908 887
909static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
910{
911 c->acquire_ring_lock = state;
912}
913
914static inline bool is_hvsock_channel(const struct vmbus_channel *c) 888static inline bool is_hvsock_channel(const struct vmbus_channel *c)
915{ 889{
916 return !!(c->offermsg.offer.chn_flags & 890 return !!(c->offermsg.offer.chn_flags &
917 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); 891 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
918} 892}
919 893
920static inline void set_channel_signal_state(struct vmbus_channel *c,
921 enum hv_signal_policy policy)
922{
923 c->signal_policy = policy;
924}
925
926static inline void set_channel_affinity_state(struct vmbus_channel *c, 894static inline void set_channel_affinity_state(struct vmbus_channel *c,
927 enum hv_numa_policy policy) 895 enum hv_numa_policy policy)
928{ 896{
929 c->affinity_policy = policy; 897 c->affinity_policy = policy;
930} 898}
931 899
932static inline void set_channel_read_state(struct vmbus_channel *c, bool state) 900static inline void set_channel_read_mode(struct vmbus_channel *c,
901 enum hv_callback_mode mode)
933{ 902{
934 c->batched_reading = state; 903 c->callback_mode = mode;
935} 904}
936 905
937static inline void set_per_channel_state(struct vmbus_channel *c, void *s) 906static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
@@ -1054,8 +1023,7 @@ extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
1054 u32 bufferLen, 1023 u32 bufferLen,
1055 u64 requestid, 1024 u64 requestid,
1056 enum vmbus_packet_type type, 1025 enum vmbus_packet_type type,
1057 u32 flags, 1026 u32 flags);
1058 bool kick_q);
1059 1027
1060extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, 1028extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1061 struct hv_page_buffer pagebuffers[], 1029 struct hv_page_buffer pagebuffers[],
@@ -1070,8 +1038,7 @@ extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
1070 void *buffer, 1038 void *buffer,
1071 u32 bufferlen, 1039 u32 bufferlen,
1072 u64 requestid, 1040 u64 requestid,
1073 u32 flags, 1041 u32 flags);
1074 bool kick_q);
1075 1042
1076extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, 1043extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
1077 struct hv_multipage_buffer *mpb, 1044 struct hv_multipage_buffer *mpb,
@@ -1458,9 +1425,10 @@ struct hyperv_service_callback {
1458}; 1425};
1459 1426
1460#define MAX_SRV_VER 0x7ffffff 1427#define MAX_SRV_VER 0x7ffffff
1461extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *, 1428extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1462 struct icmsg_negotiate *, u8 *, int, 1429 const int *fw_version, int fw_vercnt,
1463 int); 1430 const int *srv_version, int srv_vercnt,
1431 int *nego_fw_version, int *nego_srv_version);
1464 1432
1465void hv_event_tasklet_disable(struct vmbus_channel *channel); 1433void hv_event_tasklet_disable(struct vmbus_channel *channel);
1466void hv_event_tasklet_enable(struct vmbus_channel *channel); 1434void hv_event_tasklet_enable(struct vmbus_channel *channel);
@@ -1480,9 +1448,9 @@ void vmbus_set_event(struct vmbus_channel *channel);
1480 1448
1481/* Get the start of the ring buffer. */ 1449/* Get the start of the ring buffer. */
1482static inline void * 1450static inline void *
1483hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) 1451hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
1484{ 1452{
1485 return (void *)ring_info->ring_buffer->buffer; 1453 return ring_info->ring_buffer->buffer;
1486} 1454}
1487 1455
1488/* 1456/*
@@ -1545,6 +1513,36 @@ init_cached_read_index(struct vmbus_channel *channel)
1545} 1513}
1546 1514
1547/* 1515/*
1516 * Mask off host interrupt callback notifications
1517 */
1518static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
1519{
1520 rbi->ring_buffer->interrupt_mask = 1;
1521
1522 /* make sure mask update is not reordered */
1523 virt_mb();
1524}
1525
1526/*
1527 * Re-enable host callback and return number of outstanding bytes
1528 */
1529static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
1530{
1531
1532 rbi->ring_buffer->interrupt_mask = 0;
1533
1534 /* make sure mask update is not reordered */
1535 virt_mb();
1536
1537 /*
1538 * Now check to see if the ring buffer is still empty.
1539 * If it is not, we raced and we need to process new
1540 * incoming messages.
1541 */
1542 return hv_get_bytes_to_read(rbi);
1543}
1544
1545/*
1548 * An API to support in-place processing of incoming VMBUS packets. 1546 * An API to support in-place processing of incoming VMBUS packets.
1549 */ 1547 */
1550#define VMBUS_PKT_TRAILER 8 1548#define VMBUS_PKT_TRAILER 8
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index ed30d5d713e3..0590263c462c 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -22,6 +22,7 @@
22/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ 22/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
23#define WATCHDOG_MINOR 130 /* Watchdog timer */ 23#define WATCHDOG_MINOR 130 /* Watchdog timer */
24#define TEMP_MINOR 131 /* Temperature Sensor */ 24#define TEMP_MINOR 131 /* Temperature Sensor */
25#define APM_MINOR_DEV 134
25#define RTC_MINOR 135 26#define RTC_MINOR 135
26#define EFI_RTC_MINOR 136 /* EFI Time services */ 27#define EFI_RTC_MINOR 136 /* EFI Time services */
27#define VHCI_MINOR 137 28#define VHCI_MINOR 137
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
new file mode 100644
index 000000000000..ac72e115093c
--- /dev/null
+++ b/include/linux/platform_data/ti-aemif.h
@@ -0,0 +1,23 @@
1/*
2 * TI DaVinci AEMIF platform glue.
3 *
4 * Copyright (C) 2017 BayLibre SAS
5 *
6 * Author:
7 * Bartosz Golaszewski <bgolaszewski@baylibre.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef __TI_DAVINCI_AEMIF_DATA_H__
15#define __TI_DAVINCI_AEMIF_DATA_H__
16
17#include <linux/of_platform.h>
18
19struct aemif_platform_data {
20 struct of_dev_auxdata *dev_lookup;
21};
22
23#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
diff --git a/include/linux/sram.h b/include/linux/sram.h
new file mode 100644
index 000000000000..c97dcbe8ce25
--- /dev/null
+++ b/include/linux/sram.h
@@ -0,0 +1,27 @@
1/*
2 * Generic SRAM Driver Interface
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#ifndef __LINUX_SRAM_H__
14#define __LINUX_SRAM_H__
15
16struct gen_pool;
17
18#ifdef CONFIG_SRAM_EXEC
19int sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size);
20#else
21static inline int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
22 size_t size)
23{
24 return -ENODEV;
25}
26#endif /* CONFIG_SRAM_EXEC */
27#endif /* __LINUX_SRAM_H__ */
diff --git a/include/linux/vme.h b/include/linux/vme.h
index 8c589176c2f8..ec5e8bf6118e 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -108,7 +108,6 @@ struct vme_dev {
108}; 108};
109 109
110struct vme_driver { 110struct vme_driver {
111 struct list_head node;
112 const char *name; 111 const char *name;
113 int (*match)(struct vme_dev *); 112 int (*match)(struct vme_dev *);
114 int (*probe)(struct vme_dev *); 113 int (*probe)(struct vme_dev *);
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 1bd31a38c51e..b724ef7005de 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -54,13 +54,6 @@
54#define VMCI_IMR_DATAGRAM 0x1 54#define VMCI_IMR_DATAGRAM 0x1
55#define VMCI_IMR_NOTIFICATION 0x2 55#define VMCI_IMR_NOTIFICATION 0x2
56 56
57/* Interrupt type. */
58enum {
59 VMCI_INTR_TYPE_INTX = 0,
60 VMCI_INTR_TYPE_MSI = 1,
61 VMCI_INTR_TYPE_MSIX = 2,
62};
63
64/* Maximum MSI/MSI-X interrupt vectors in the device. */ 57/* Maximum MSI/MSI-X interrupt vectors in the device. */
65#define VMCI_MAX_INTRS 2 58#define VMCI_MAX_INTRS 2
66 59
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 41420e341e75..51f891fb1b18 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -33,6 +33,8 @@ enum {
33 BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), 33 BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
34 BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), 34 BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
35 BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), 35 BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
36 BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
37 BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
36}; 38};
37 39
38enum { 40enum {
@@ -48,6 +50,14 @@ typedef __u64 binder_size_t;
48typedef __u64 binder_uintptr_t; 50typedef __u64 binder_uintptr_t;
49#endif 51#endif
50 52
53/**
54 * struct binder_object_header - header shared by all binder metadata objects.
55 * @type: type of the object
56 */
57struct binder_object_header {
58 __u32 type;
59};
60
51/* 61/*
52 * This is the flattened representation of a Binder object for transfer 62 * This is the flattened representation of a Binder object for transfer
53 * between processes. The 'offsets' supplied as part of a binder transaction 63 * between processes. The 'offsets' supplied as part of a binder transaction
@@ -56,9 +66,8 @@ typedef __u64 binder_uintptr_t;
56 * between processes. 66 * between processes.
57 */ 67 */
58struct flat_binder_object { 68struct flat_binder_object {
59 /* 8 bytes for large_flat_header. */ 69 struct binder_object_header hdr;
60 __u32 type; 70 __u32 flags;
61 __u32 flags;
62 71
63 /* 8 bytes of data. */ 72 /* 8 bytes of data. */
64 union { 73 union {
@@ -70,6 +79,84 @@ struct flat_binder_object {
70 binder_uintptr_t cookie; 79 binder_uintptr_t cookie;
71}; 80};
72 81
82/**
83 * struct binder_fd_object - describes a filedescriptor to be fixed up.
84 * @hdr: common header structure
85 * @pad_flags: padding to remain compatible with old userspace code
86 * @pad_binder: padding to remain compatible with old userspace code
87 * @fd: file descriptor
88 * @cookie: opaque data, used by user-space
89 */
90struct binder_fd_object {
91 struct binder_object_header hdr;
92 __u32 pad_flags;
93 union {
94 binder_uintptr_t pad_binder;
95 __u32 fd;
96 };
97
98 binder_uintptr_t cookie;
99};
100
101/* struct binder_buffer_object - object describing a userspace buffer
102 * @hdr: common header structure
103 * @flags: one or more BINDER_BUFFER_* flags
104 * @buffer: address of the buffer
105 * @length: length of the buffer
106 * @parent: index in offset array pointing to parent buffer
107 * @parent_offset: offset in @parent pointing to this buffer
108 *
109 * A binder_buffer object represents an object that the
110 * binder kernel driver can copy verbatim to the target
111 * address space. A buffer itself may be pointed to from
112 * within another buffer, meaning that the pointer inside
113 * that other buffer needs to be fixed up as well. This
114 * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
115 * flag in @flags, by setting @parent buffer to the index
116 * in the offset array pointing to the parent binder_buffer_object,
117 * and by setting @parent_offset to the offset in the parent buffer
118 * at which the pointer to this buffer is located.
119 */
120struct binder_buffer_object {
121 struct binder_object_header hdr;
122 __u32 flags;
123 binder_uintptr_t buffer;
124 binder_size_t length;
125 binder_size_t parent;
126 binder_size_t parent_offset;
127};
128
129enum {
130 BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
131};
132
133/* struct binder_fd_array_object - object describing an array of fds in a buffer
134 * @hdr: common header structure
135 * @num_fds: number of file descriptors in the buffer
136 * @parent: index in offset array to buffer holding the fd array
137 * @parent_offset: start offset of fd array in the buffer
138 *
139 * A binder_fd_array object represents an array of file
140 * descriptors embedded in a binder_buffer_object. It is
141 * different from a regular binder_buffer_object because it
142 * describes a list of file descriptors to fix up, not an opaque
143 * blob of memory, and hence the kernel needs to treat it differently.
144 *
145 * An example of how this would be used is with Android's
146 * native_handle_t object, which is a struct with a list of integers
147 * and a list of file descriptors. The native_handle_t struct itself
148 * will be represented by a struct binder_buffer_objct, whereas the
149 * embedded list of file descriptors is represented by a
150 * struct binder_fd_array_object with that binder_buffer_object as
151 * a parent.
152 */
153struct binder_fd_array_object {
154 struct binder_object_header hdr;
155 binder_size_t num_fds;
156 binder_size_t parent;
157 binder_size_t parent_offset;
158};
159
73/* 160/*
74 * On 64-bit platforms where user code may run in 32-bits the driver must 161 * On 64-bit platforms where user code may run in 32-bits the driver must
75 * translate the buffer (and local binder) addresses appropriately. 162 * translate the buffer (and local binder) addresses appropriately.
@@ -162,6 +249,11 @@ struct binder_transaction_data {
162 } data; 249 } data;
163}; 250};
164 251
252struct binder_transaction_data_sg {
253 struct binder_transaction_data transaction_data;
254 binder_size_t buffers_size;
255};
256
165struct binder_ptr_cookie { 257struct binder_ptr_cookie {
166 binder_uintptr_t ptr; 258 binder_uintptr_t ptr;
167 binder_uintptr_t cookie; 259 binder_uintptr_t cookie;
@@ -346,6 +438,12 @@ enum binder_driver_command_protocol {
346 /* 438 /*
347 * void *: cookie 439 * void *: cookie
348 */ 440 */
441
442 BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
443 BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
444 /*
445 * binder_transaction_data_sg: the sent command.
446 */
349}; 447};
350 448
351#endif /* _UAPI_LINUX_BINDER_H */ 449#endif /* _UAPI_LINUX_BINDER_H */
diff --git a/init/Kconfig b/init/Kconfig
index 2655abb8f310..55bb6fbc294e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1706,6 +1706,13 @@ config PERF_USE_VMALLOC
1706 help 1706 help
1707 See tools/perf/design.txt for details 1707 See tools/perf/design.txt for details
1708 1708
1709config PC104
1710 bool "PC/104 support"
1711 help
1712 Expose PC/104 form factor device drivers and options available for
1713 selection and configuration. Enable this option if your target
1714 machine has a PC/104 bus.
1715
1709menu "Kernel Performance Events And Counters" 1716menu "Kernel Performance Events And Counters"
1710 1717
1711config PERF_EVENTS 1718config PERF_EVENTS
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index a3e8ec3fb1c5..09371b0a9baf 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -42,12 +42,6 @@ static const struct file_operations test_fw_fops = {
42 .read = test_fw_misc_read, 42 .read = test_fw_misc_read,
43}; 43};
44 44
45static struct miscdevice test_fw_misc_device = {
46 .minor = MISC_DYNAMIC_MINOR,
47 .name = "test_firmware",
48 .fops = &test_fw_fops,
49};
50
51static ssize_t trigger_request_store(struct device *dev, 45static ssize_t trigger_request_store(struct device *dev,
52 struct device_attribute *attr, 46 struct device_attribute *attr,
53 const char *buf, size_t count) 47 const char *buf, size_t count)
@@ -132,39 +126,81 @@ out:
132} 126}
133static DEVICE_ATTR_WO(trigger_async_request); 127static DEVICE_ATTR_WO(trigger_async_request);
134 128
135static int __init test_firmware_init(void) 129static ssize_t trigger_custom_fallback_store(struct device *dev,
130 struct device_attribute *attr,
131 const char *buf, size_t count)
136{ 132{
137 int rc; 133 int rc;
134 char *name;
138 135
139 rc = misc_register(&test_fw_misc_device); 136 name = kstrndup(buf, count, GFP_KERNEL);
137 if (!name)
138 return -ENOSPC;
139
140 pr_info("loading '%s' using custom fallback mechanism\n", name);
141
142 mutex_lock(&test_fw_mutex);
143 release_firmware(test_firmware);
144 test_firmware = NULL;
145 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
146 dev, GFP_KERNEL, NULL,
147 trigger_async_request_cb);
140 if (rc) { 148 if (rc) {
141 pr_err("could not register misc device: %d\n", rc); 149 pr_info("async load of '%s' failed: %d\n", name, rc);
142 return rc; 150 kfree(name);
151 goto out;
143 } 152 }
144 rc = device_create_file(test_fw_misc_device.this_device, 153 /* Free 'name' ASAP, to test for race conditions */
145 &dev_attr_trigger_request); 154 kfree(name);
146 if (rc) { 155
147 pr_err("could not create sysfs interface: %d\n", rc); 156 wait_for_completion(&async_fw_done);
148 goto dereg; 157
158 if (test_firmware) {
159 pr_info("loaded: %zu\n", test_firmware->size);
160 rc = count;
161 } else {
162 pr_err("failed to async load firmware\n");
163 rc = -ENODEV;
149 } 164 }
150 165
151 rc = device_create_file(test_fw_misc_device.this_device, 166out:
152 &dev_attr_trigger_async_request); 167 mutex_unlock(&test_fw_mutex);
168
169 return rc;
170}
171static DEVICE_ATTR_WO(trigger_custom_fallback);
172
173#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
174
175static struct attribute *test_dev_attrs[] = {
176 TEST_FW_DEV_ATTR(trigger_request),
177 TEST_FW_DEV_ATTR(trigger_async_request),
178 TEST_FW_DEV_ATTR(trigger_custom_fallback),
179 NULL,
180};
181
182ATTRIBUTE_GROUPS(test_dev);
183
184static struct miscdevice test_fw_misc_device = {
185 .minor = MISC_DYNAMIC_MINOR,
186 .name = "test_firmware",
187 .fops = &test_fw_fops,
188 .groups = test_dev_groups,
189};
190
191static int __init test_firmware_init(void)
192{
193 int rc;
194
195 rc = misc_register(&test_fw_misc_device);
153 if (rc) { 196 if (rc) {
154 pr_err("could not create async sysfs interface: %d\n", rc); 197 pr_err("could not register misc device: %d\n", rc);
155 goto remove_file; 198 return rc;
156 } 199 }
157 200
158 pr_warn("interface ready\n"); 201 pr_warn("interface ready\n");
159 202
160 return 0; 203 return 0;
161
162remove_file:
163 device_remove_file(test_fw_misc_device.this_device,
164 &dev_attr_trigger_async_request);
165dereg:
166 misc_deregister(&test_fw_misc_device);
167 return rc;
168} 204}
169 205
170module_init(test_firmware_init); 206module_init(test_firmware_init);
@@ -172,10 +208,6 @@ module_init(test_firmware_init);
172static void __exit test_firmware_exit(void) 208static void __exit test_firmware_exit(void)
173{ 209{
174 release_firmware(test_firmware); 210 release_firmware(test_firmware);
175 device_remove_file(test_fw_misc_device.this_device,
176 &dev_attr_trigger_async_request);
177 device_remove_file(test_fw_misc_device.this_device,
178 &dev_attr_trigger_request);
179 misc_deregister(&test_fw_misc_device); 211 misc_deregister(&test_fw_misc_device);
180 pr_warn("removed interface\n"); 212 pr_warn("removed interface\n");
181} 213}
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index 3820f00b066a..8cd16c65d3c5 100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -2,7 +2,7 @@
2 2
3"""Find Kconfig symbols that are referenced but not defined.""" 3"""Find Kconfig symbols that are referenced but not defined."""
4 4
5# (c) 2014-2016 Valentin Rothberg <valentinrothberg@gmail.com> 5# (c) 2014-2017 Valentin Rothberg <valentinrothberg@gmail.com>
6# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de> 6# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
7# 7#
8# Licensed under the terms of the GNU GPL License version 2 8# Licensed under the terms of the GNU GPL License version 2
@@ -24,7 +24,7 @@ SYMBOL = r"(?:\w*[A-Z0-9]\w*){2,}"
24DEF = r"^\s*(?:menu){,1}config\s+(" + SYMBOL + r")\s*" 24DEF = r"^\s*(?:menu){,1}config\s+(" + SYMBOL + r")\s*"
25EXPR = r"(?:" + OPERATORS + r"|\s|" + SYMBOL + r")+" 25EXPR = r"(?:" + OPERATORS + r"|\s|" + SYMBOL + r")+"
26DEFAULT = r"default\s+.*?(?:if\s.+){,1}" 26DEFAULT = r"default\s+.*?(?:if\s.+){,1}"
27STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR 27STMT = r"^\s*(?:if|select|imply|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
28SOURCE_SYMBOL = r"(?:\W|\b)+[D]{,1}CONFIG_(" + SYMBOL + r")" 28SOURCE_SYMBOL = r"(?:\W|\b)+[D]{,1}CONFIG_(" + SYMBOL + r")"
29 29
30# regex objects 30# regex objects
@@ -269,7 +269,7 @@ def find_sims(symbol, ignore, defined=[]):
269 """Return a list of max. ten Kconfig symbols that are string-similar to 269 """Return a list of max. ten Kconfig symbols that are string-similar to
270 @symbol.""" 270 @symbol."""
271 if defined: 271 if defined:
272 return sorted(difflib.get_close_matches(symbol, set(defined), 10)) 272 return difflib.get_close_matches(symbol, set(defined), 10)
273 273
274 pool = Pool(cpu_count(), init_worker) 274 pool = Pool(cpu_count(), init_worker)
275 kfiles = [] 275 kfiles = []
@@ -284,7 +284,7 @@ def find_sims(symbol, ignore, defined=[]):
284 for res in pool.map(parse_kconfig_files, arglist): 284 for res in pool.map(parse_kconfig_files, arglist):
285 defined.extend(res[0]) 285 defined.extend(res[0])
286 286
287 return sorted(difflib.get_close_matches(symbol, set(defined), 10)) 287 return difflib.get_close_matches(symbol, set(defined), 10)
288 288
289 289
290def get_files(): 290def get_files():
diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile
index 9bf82234855b..1894d625af2d 100644
--- a/tools/testing/selftests/firmware/Makefile
+++ b/tools/testing/selftests/firmware/Makefile
@@ -3,7 +3,7 @@
3# No binaries, but make sure arg-less "make" doesn't trigger "run_tests" 3# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
4all: 4all:
5 5
6TEST_PROGS := fw_filesystem.sh fw_userhelper.sh 6TEST_PROGS := fw_filesystem.sh fw_fallback.sh
7 7
8include ../lib.mk 8include ../lib.mk
9 9
diff --git a/tools/testing/selftests/firmware/fw_fallback.sh b/tools/testing/selftests/firmware/fw_fallback.sh
new file mode 100755
index 000000000000..2e4c22d5abf7
--- /dev/null
+++ b/tools/testing/selftests/firmware/fw_fallback.sh
@@ -0,0 +1,224 @@
#!/bin/sh
# This validates that the kernel will fall back to using the fallback mechanism
# to load firmware it can't find on disk itself. We must request a firmware
# that the kernel won't find, and any installed helper (e.g. udev) also
# won't find so that we can do the load ourself manually.
set -e

modprobe test_firmware

DIR=/sys/devices/virtual/misc/test_firmware

# CONFIG_FW_LOADER_USER_HELPER exposes a sysfs class under
# /sys/class/firmware/; use the directory's presence as the indicator
# for whether that config option is enabled.
if [ -d /sys/class/firmware/ ]; then
	HAS_FW_LOADER_USER_HELPER=yes
else
	HAS_FW_LOADER_USER_HELPER=no
fi

if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
	OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
else
	echo "usermode helper disabled so ignoring test"
	exit 0
fi

FWPATH=$(mktemp -d)
FW="$FWPATH/test-firmware.bin"
26
test_finish()
{
	# Restore the fallback timeout saved at startup, then remove the
	# scratch firmware file and its temporary directory.
	printf '%s\n' "$OLD_TIMEOUT" >/sys/class/firmware/timeout
	rm -f "$FW"
	rmdir "$FWPATH"
}
33
# Feed firmware $2 to a pending request named $1 through the sysfs
# fallback interface, simulating what udev's firmware helper used to do.
load_fw()
{
	local name="$1"
	local file="$2"

	# This will block until our load (below) has finished, so run it in
	# the background and reap it with the wait at the end.  Use printf:
	# 'echo -n' is implementation-defined under #!/bin/sh.
	printf '%s' "$name" >"$DIR"/trigger_request &

	# Give kernel a chance to react: poll up to ~1s for the fallback
	# interface for this request to appear.
	local timeout=10
	while [ ! -e "$DIR"/"$name"/loading ]; do
		sleep 0.1
		timeout=$(( timeout - 1 ))
		if [ "$timeout" -eq 0 ]; then
			echo "$0: firmware interface never appeared" >&2
			exit 1
		fi
	done

	# Sysfs fallback protocol: signal start, write the payload,
	# signal completion.
	echo 1 >"$DIR"/"$name"/loading
	cat "$file" >"$DIR"/"$name"/data
	echo 0 >"$DIR"/"$name"/loading

	# Wait for the backgrounded trigger_request write to finish.
	wait
}
60
# Cancel a pending firmware request named $1 via the sysfs fallback
# interface.  $2 is accepted for symmetry with load_fw() but unused.
load_fw_cancel()
{
	local name="$1"
	local file="$2"

	# This will block until our cancel (below) has finished; write errors
	# are expected once the request is aborted, so silence stderr.  Use
	# printf: 'echo -n' is implementation-defined under #!/bin/sh.
	printf '%s' "$name" >"$DIR"/trigger_request 2>/dev/null &

	# Give kernel a chance to react: poll up to ~1s for the fallback
	# interface for this request to appear.
	local timeout=10
	while [ ! -e "$DIR"/"$name"/loading ]; do
		sleep 0.1
		timeout=$(( timeout - 1 ))
		if [ "$timeout" -eq 0 ]; then
			echo "$0: firmware interface never appeared" >&2
			exit 1
		fi
	done

	# Writing -1 to loading aborts the pending firmware request.
	echo -1 >"$DIR"/"$name"/loading

	# Wait for the backgrounded trigger write to finish.
	wait
}
85
# Like load_fw(), but drive the custom fallback trigger
# (trigger_custom_fallback) instead of the regular trigger_request.
load_fw_custom()
{
	local name="$1"
	local file="$2"

	# Blocks until our load (below) has finished; run in the background
	# and reap with wait.  Use printf: 'echo -n' is
	# implementation-defined under #!/bin/sh.
	printf '%s' "$name" >"$DIR"/trigger_custom_fallback 2>/dev/null &

	# Give kernel a chance to react: poll up to ~1s for the fallback
	# interface for this request to appear.
	local timeout=10
	while [ ! -e "$DIR"/"$name"/loading ]; do
		sleep 0.1
		timeout=$(( timeout - 1 ))
		if [ "$timeout" -eq 0 ]; then
			echo "$0: firmware interface never appeared" >&2
			exit 1
		fi
	done

	# Sysfs fallback protocol: signal start, write the payload,
	# signal completion.
	echo 1 >"$DIR"/"$name"/loading
	cat "$file" >"$DIR"/"$name"/data
	echo 0 >"$DIR"/"$name"/loading

	# Wait for the backgrounded trigger write to finish.
	wait
}
111
112
# Like load_fw_cancel(), but abort a request raised through the custom
# fallback trigger.  $2 is accepted for symmetry but unused.
load_fw_custom_cancel()
{
	local name="$1"
	local file="$2"

	# Write errors are expected once the request is aborted, so silence
	# stderr.  Use printf: 'echo -n' is implementation-defined under
	# #!/bin/sh.
	printf '%s' "$name" >"$DIR"/trigger_custom_fallback 2>/dev/null &

	# Give kernel a chance to react: poll up to ~1s for the fallback
	# interface for this request to appear.
	local timeout=10
	while [ ! -e "$DIR"/"$name"/loading ]; do
		sleep 0.1
		timeout=$(( timeout - 1 ))
		if [ "$timeout" -eq 0 ]; then
			echo "$0: firmware interface never appeared" >&2
			exit 1
		fi
	done

	# Writing -1 to loading aborts the pending firmware request.
	echo -1 >"$DIR"/"$name"/loading

	# Wait for the backgrounded trigger write to finish.
	wait
}
136
137
trap "test_finish" EXIT

# This is an unlikely real-world firmware content. :)
echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")

DEVPATH="$DIR"/"nope-$NAME"/loading

# Test failure when doing nothing (timeout works).  Use printf for the
# newline-less writes: 'echo -n' is implementation-defined under #!/bin/sh.
printf '%s' 2 >/sys/class/firmware/timeout
printf '%s' "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &

# Give the kernel some time to load the loading file, must be less
# than the timeout above.
sleep 1
if [ ! -f "$DEVPATH" ]; then
	echo "$0: fallback mechanism immediately cancelled"
	echo ""
	echo "The file never appeared: $DEVPATH"
	echo ""
	echo "This might be a distribution udev rule setup by your distribution"
	echo "to immediately cancel all fallback requests, this must be"
	echo "removed before running these tests. To confirm look for"
	echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
	echo "and see if you have something like this:"
	echo ""
	echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
	echo ""
	echo "If you do remove this file or comment out this line before"
	echo "proceeding with these tests."
	exit 1
fi

if diff -q "$FW" /dev/test_firmware >/dev/null ; then
	echo "$0: firmware was not expected to match" >&2
	exit 1
else
	echo "$0: timeout works"
fi

# Put timeout high enough for us to do work but not so long that failures
# slow down this test too much.
echo 4 >/sys/class/firmware/timeout

# Load this script instead of the desired firmware.
load_fw "$NAME" "$0"
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
	echo "$0: firmware was not expected to match" >&2
	exit 1
else
	echo "$0: firmware comparison works"
fi

# Do a proper load, which should work correctly.
load_fw "$NAME" "$FW"
if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
	echo "$0: firmware was not loaded" >&2
	exit 1
else
	echo "$0: fallback mechanism works"
fi

load_fw_cancel "nope-$NAME" "$FW"
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
	echo "$0: firmware was expected to be cancelled" >&2
	exit 1
else
	echo "$0: cancelling fallback mechanism works"
fi

load_fw_custom "$NAME" "$FW"
if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
	echo "$0: firmware was not loaded" >&2
	exit 1
else
	echo "$0: custom fallback loading mechanism works"
fi

load_fw_custom_cancel "nope-$NAME" "$FW"
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
	echo "$0: firmware was expected to be cancelled" >&2
	exit 1
else
	echo "$0: cancelling custom fallback mechanism works"
fi

exit 0
diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
deleted file mode 100755
index b9983f8e09f6..000000000000
--- a/tools/testing/selftests/firmware/fw_userhelper.sh
+++ /dev/null
@@ -1,99 +0,0 @@
1#!/bin/sh
2# This validates that the kernel will fall back to using the user helper
3# to load firmware it can't find on disk itself. We must request a firmware
4# that the kernel won't find, and any installed helper (e.g. udev) also
5# won't find so that we can do the load ourself manually.
6set -e
7
8modprobe test_firmware
9
10DIR=/sys/devices/virtual/misc/test_firmware
11
12# CONFIG_FW_LOADER_USER_HELPER has a sysfs class under /sys/class/firmware/
13# These days no one enables CONFIG_FW_LOADER_USER_HELPER so check for that
14# as an indicator for CONFIG_FW_LOADER_USER_HELPER.
15HAS_FW_LOADER_USER_HELPER=$(if [ -d /sys/class/firmware/ ]; then echo yes; else echo no; fi)
16
17if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
18 OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
19else
20 echo "usermode helper disabled so ignoring test"
21 exit 0
22fi
23
24FWPATH=$(mktemp -d)
25FW="$FWPATH/test-firmware.bin"
26
27test_finish()
28{
29 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
30 rm -f "$FW"
31 rmdir "$FWPATH"
32}
33
34load_fw()
35{
36 local name="$1"
37 local file="$2"
38
39 # This will block until our load (below) has finished.
40 echo -n "$name" >"$DIR"/trigger_request &
41
42 # Give kernel a chance to react.
43 local timeout=10
44 while [ ! -e "$DIR"/"$name"/loading ]; do
45 sleep 0.1
46 timeout=$(( $timeout - 1 ))
47 if [ "$timeout" -eq 0 ]; then
48 echo "$0: firmware interface never appeared" >&2
49 exit 1
50 fi
51 done
52
53 echo 1 >"$DIR"/"$name"/loading
54 cat "$file" >"$DIR"/"$name"/data
55 echo 0 >"$DIR"/"$name"/loading
56
57 # Wait for request to finish.
58 wait
59}
60
61trap "test_finish" EXIT
62
63# This is an unlikely real-world firmware content. :)
64echo "ABCD0123" >"$FW"
65NAME=$(basename "$FW")
66
67# Test failure when doing nothing (timeout works).
68echo 1 >/sys/class/firmware/timeout
69echo -n "$NAME" >"$DIR"/trigger_request
70if diff -q "$FW" /dev/test_firmware >/dev/null ; then
71 echo "$0: firmware was not expected to match" >&2
72 exit 1
73else
74 echo "$0: timeout works"
75fi
76
77# Put timeout high enough for us to do work but not so long that failures
78# slow down this test too much.
79echo 4 >/sys/class/firmware/timeout
80
81# Load this script instead of the desired firmware.
82load_fw "$NAME" "$0"
83if diff -q "$FW" /dev/test_firmware >/dev/null ; then
84 echo "$0: firmware was not expected to match" >&2
85 exit 1
86else
87 echo "$0: firmware comparison works"
88fi
89
90# Do a proper load, which should work correctly.
91load_fw "$NAME" "$FW"
92if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
93 echo "$0: firmware was not loaded" >&2
94 exit 1
95else
96 echo "$0: user helper firmware loading works"
97fi
98
99exit 0