-rw-r--r--  Documentation/devicetree/bindings/arm/coresight.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt | 75
-rw-r--r--  Documentation/devicetree/bindings/misc/lis302.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt | 6
-rw-r--r--  Documentation/driver-model/devres.txt | 1
-rw-r--r--  Documentation/trace/coresight.txt | 2
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  arch/arm/Kconfig.debug | 55
-rw-r--r--  arch/arm/boot/dts/hip04.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-beagle-xm.dts | 1
-rw-r--r--  arch/arm/boot/dts/omap3-beagle.dts | 1
-rw-r--r--  arch/arm/boot/dts/omap3-n900.dts | 52
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | 1
-rw-r--r--  arch/arm64/Kconfig.debug | 2
-rw-r--r--  arch/m32r/include/asm/io.h | 1
-rw-r--r--  arch/x86/include/uapi/asm/hyperv.h | 2
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/char/hw_random/core.c | 34
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c | 2
-rw-r--r--  drivers/char/hw_random/powernv-rng.c | 2
-rw-r--r--  drivers/char/hw_random/ppc4xx-rng.c | 2
-rw-r--r--  drivers/char/i8k.c | 16
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 4
-rw-r--r--  drivers/char/misc.c | 20
-rw-r--r--  drivers/char/virtio_console.c | 2
-rw-r--r--  drivers/char/xillybus/xillybus_core.c | 2
-rw-r--r--  drivers/char/xillybus/xillybus_of.c | 2
-rw-r--r--  drivers/extcon/Kconfig | 17
-rw-r--r--  drivers/extcon/Makefile | 4
-rw-r--r--  drivers/extcon/extcon-arizona.c | 49
-rw-r--r--  drivers/extcon/extcon-max14577.c | 5
-rw-r--r--  drivers/extcon/extcon-max77693.c | 37
-rw-r--r--  drivers/extcon/extcon-max77843.c | 881
-rw-r--r--  drivers/extcon/extcon-max8997.c | 5
-rw-r--r--  drivers/extcon/extcon-rt8973a.c | 6
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 6
-rw-r--r--  drivers/extcon/extcon-usb-gpio.c | 237
-rw-r--r--  drivers/extcon/extcon.c (renamed from drivers/extcon/extcon-class.c) | 36
-rw-r--r--  drivers/hv/channel.c | 125
-rw-r--r--  drivers/hv/channel_mgmt.c | 223
-rw-r--r--  drivers/hv/connection.c | 40
-rw-r--r--  drivers/hv/hv.c | 34
-rw-r--r--  drivers/hv/hv_balloon.c | 143
-rw-r--r--  drivers/hv/hv_util.c | 13
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 31
-rw-r--r--  drivers/hv/vmbus_drv.c | 136
-rw-r--r--  drivers/hwtracing/coresight/Kconfig | 61
-rw-r--r--  drivers/hwtracing/coresight/Makefile (renamed from drivers/coresight/Makefile) | 0
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c (renamed from drivers/coresight/coresight-etb10.c) | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-cp14.c (renamed from drivers/coresight/coresight-etm-cp14.c) | 0
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm.h (renamed from drivers/coresight/coresight-etm.h) | 0
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c (renamed from drivers/coresight/coresight-etm3x.c) | 0
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c (renamed from drivers/coresight/coresight-funnel.c) | 0
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h (renamed from drivers/coresight/coresight-priv.h) | 0
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c (renamed from drivers/coresight/coresight-replicator.c) | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c (renamed from drivers/coresight/coresight-tmc.c) | 60
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c (renamed from drivers/coresight/coresight-tpiu.c) | 0
-rw-r--r--  drivers/hwtracing/coresight/coresight.c (renamed from drivers/coresight/coresight.c) | 4
-rw-r--r--  drivers/hwtracing/coresight/of_coresight.c (renamed from drivers/coresight/of_coresight.c) | 18
-rw-r--r--  drivers/mcb/mcb-pci.c | 4
-rw-r--r--  drivers/memory/Kconfig | 9
-rw-r--r--  drivers/memory/Makefile | 1
-rw-r--r--  drivers/memory/jz4780-nemc.c | 391
-rw-r--r--  drivers/misc/bh1780gli.c | 2
-rw-r--r--  drivers/misc/carma/carma-fpga-program.c | 12
-rw-r--r--  drivers/misc/carma/carma-fpga.c | 2
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.c | 56
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d_i2c.c | 2
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d_spi.c | 2
-rw-r--r--  drivers/misc/mei/Makefile | 3
-rw-r--r--  drivers/misc/mei/amthif.c | 423
-rw-r--r--  drivers/misc/mei/bus.c | 105
-rw-r--r--  drivers/misc/mei/client.c | 478
-rw-r--r--  drivers/misc/mei/client.h | 42
-rw-r--r--  drivers/misc/mei/debugfs.c | 21
-rw-r--r--  drivers/misc/mei/hbm.c | 8
-rw-r--r--  drivers/misc/mei/hw-me.c | 170
-rw-r--r--  drivers/misc/mei/hw-me.h | 4
-rw-r--r--  drivers/misc/mei/hw-txe.c | 2
-rw-r--r--  drivers/misc/mei/init.c | 2
-rw-r--r--  drivers/misc/mei/interrupt.c | 171
-rw-r--r--  drivers/misc/mei/main.c | 146
-rw-r--r--  drivers/misc/mei/mei-trace.c | 25
-rw-r--r--  drivers/misc/mei/mei-trace.h | 74
-rw-r--r--  drivers/misc/mei/mei_dev.h | 40
-rw-r--r--  drivers/misc/mei/nfc.c | 43
-rw-r--r--  drivers/misc/mei/pci-me.c | 4
-rw-r--r--  drivers/misc/mei/pci-txe.c | 4
-rw-r--r--  drivers/misc/mei/wd.c | 36
-rw-r--r--  drivers/misc/mic/host/mic_boot.c | 14
-rw-r--r--  drivers/misc/mic/host/mic_intr.c | 2
-rw-r--r--  drivers/misc/sram.c | 19
-rw-r--r--  drivers/misc/tifm_7xx1.c | 5
-rw-r--r--  drivers/misc/vmw_vmci/vmci_driver.c | 2
-rw-r--r--  drivers/misc/vmw_vmci/vmci_host.c | 6
-rw-r--r--  drivers/misc/vmw_vmci/vmci_queue_pair.c | 37
-rw-r--r--  drivers/pcmcia/omap_cf.c | 4
-rw-r--r--  drivers/pcmcia/pd6729.c | 8
-rw-r--r--  drivers/pcmcia/soc_common.c | 5
-rw-r--r--  drivers/pcmcia/yenta_socket.c | 8
-rw-r--r--  drivers/spmi/Kconfig | 1
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 319
-rw-r--r--  drivers/spmi/spmi.c | 9
-rw-r--r--  drivers/uio/uio.c | 12
-rw-r--r--  drivers/w1/masters/mxc_w1.c | 2
-rw-r--r--  drivers/w1/masters/omap_hdq.c | 2
-rw-r--r--  drivers/w1/masters/w1-gpio.c | 2
-rw-r--r--  include/linux/hyperv.h | 37
-rw-r--r--  include/linux/io.h | 2
-rw-r--r--  include/linux/jz4780-nemc.h | 43
-rw-r--r--  include/linux/mfd/arizona/core.h | 2
-rw-r--r--  include/linux/miscdevice.h | 2
-rw-r--r--  kernel/time/clockevents.c | 2
-rw-r--r--  lib/devres.c | 28
-rwxr-xr-x [-rw-r--r--]  scripts/checkkconfigsymbols.py | 147
-rw-r--r--  sound/soc/codecs/arizona.c | 4
-rw-r--r--  tools/hv/Makefile | 2
-rw-r--r--  tools/hv/hv_vss_daemon.c | 10
119 files changed, 4033 insertions, 1482 deletions
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index a3089359aaa6..88602b75418e 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -61,7 +61,6 @@ Example:
61 compatible = "arm,coresight-etb10", "arm,primecell"; 61 compatible = "arm,coresight-etb10", "arm,primecell";
62 reg = <0 0x20010000 0 0x1000>; 62 reg = <0 0x20010000 0 0x1000>;
63 63
64 coresight-default-sink;
65 clocks = <&oscclk6a>; 64 clocks = <&oscclk6a>;
66 clock-names = "apb_pclk"; 65 clock-names = "apb_pclk";
67 port { 66 port {
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
new file mode 100644
index 000000000000..af0b903de293
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
@@ -0,0 +1,18 @@
1USB GPIO Extcon device
2
3This is a virtual device used to generate USB cable states from the USB ID pin
4connected to a GPIO pin.
5
6Required properties:
7- compatible: Should be "linux,extcon-usb-gpio"
8- id-gpio: gpio for USB ID pin. See gpio binding.
9
10Example: extcon-usb-gpio node and its consumer, as in dra7-evm.dts:
11 extcon_usb1 {
12 compatible = "linux,extcon-usb-gpio";
13 id-gpio = <&gpio6 1 GPIO_ACTIVE_HIGH>;
14 }
15
16 &omap_dwc3_1 {
17 extcon = <&extcon_usb1>;
18 };
diff --git a/Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt b/Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt
new file mode 100644
index 000000000000..f936b5589b19
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt
@@ -0,0 +1,75 @@
1* Ingenic JZ4780 NAND/external memory controller (NEMC)
2
3This file documents the device tree bindings for the NEMC external memory
4controller in Ingenic JZ4780
5
6Required properties:
7- compatible: Should be set to one of:
8 "ingenic,jz4780-nemc" (JZ4780)
9- reg: Should specify the NEMC controller registers location and length.
10- clocks: Clock for the NEMC controller.
11- #address-cells: Must be set to 2.
12- #size-cells: Must be set to 1.
13- ranges: A set of ranges for each bank describing the physical memory layout.
14 Each should specify the following 4 integer values:
15
16 <cs number> 0 <physical address of mapping> <size of mapping>
17
18Each child of the NEMC node describes a device connected to the NEMC.
19
20Required child node properties:
21- reg: Should contain at least one register specifier, given in the following
22 format:
23
24 <cs number> <offset> <size>
25
26 Multiple registers can be specified across multiple banks. This is needed,
27 for example, for packaged NAND devices with multiple dies. Such devices
28 should be grouped into a single node.
29
30Optional child node properties:
31- ingenic,nemc-bus-width: Specifies the bus width in bits. Defaults to 8 bits.
32- ingenic,nemc-tAS: Address setup time in nanoseconds.
33- ingenic,nemc-tAH: Address hold time in nanoseconds.
34- ingenic,nemc-tBP: Burst pitch time in nanoseconds.
35- ingenic,nemc-tAW: Access wait time in nanoseconds.
36- ingenic,nemc-tSTRV: Static memory recovery time in nanoseconds.
37
38If a child node references multiple banks in its "reg" property, the same value
39for all optional parameters will be configured for all banks. If any optional
40parameters are omitted, they will be left unchanged from whatever they are
41configured to when the NEMC device is probed (which may be the reset value as
42given in the hardware reference manual, or a value configured by the boot
43loader).
44
45Example (NEMC node with a NAND child device attached at CS1):
46
47nemc: nemc@13410000 {
48 compatible = "ingenic,jz4780-nemc";
49 reg = <0x13410000 0x10000>;
50
51 #address-cells = <2>;
52 #size-cells = <1>;
53
54 ranges = <1 0 0x1b000000 0x1000000
55 2 0 0x1a000000 0x1000000
56 3 0 0x19000000 0x1000000
57 4 0 0x18000000 0x1000000
58 5 0 0x17000000 0x1000000
59 6 0 0x16000000 0x1000000>;
60
61 clocks = <&cgu JZ4780_CLK_NEMC>;
62
63 nand: nand@1 {
64 compatible = "ingenic,jz4780-nand";
65 reg = <1 0 0x1000000>;
66
67 ingenic,nemc-tAS = <10>;
68 ingenic,nemc-tAH = <5>;
69 ingenic,nemc-tBP = <10>;
70 ingenic,nemc-tAW = <15>;
71 ingenic,nemc-tSTRV = <100>;
72
73 ...
74 };
75};
diff --git a/Documentation/devicetree/bindings/misc/lis302.txt b/Documentation/devicetree/bindings/misc/lis302.txt
index 6def86f6b053..2a19bff9693f 100644
--- a/Documentation/devicetree/bindings/misc/lis302.txt
+++ b/Documentation/devicetree/bindings/misc/lis302.txt
@@ -46,11 +46,18 @@ Optional properties for all bus drivers:
46 interrupt 2 46 interrupt 2
47 - st,wakeup-{x,y,z}-{lo,hi}: set wakeup condition on x/y/z axis for 47 - st,wakeup-{x,y,z}-{lo,hi}: set wakeup condition on x/y/z axis for
48 upper/lower limit 48 upper/lower limit
49 - st,wakeup-threshold: set wakeup threshold
50 - st,wakeup2-{x,y,z}-{lo,hi}: set wakeup condition on x/y/z axis for
51 upper/lower limit for second wakeup
52 engine.
53 - st,wakeup2-threshold: set wakeup threshold for second wakeup
54 engine.
49 - st,highpass-cutoff-hz=: 1, 2, 4 or 8 for 1Hz, 2Hz, 4Hz or 8Hz of 55 - st,highpass-cutoff-hz=: 1, 2, 4 or 8 for 1Hz, 2Hz, 4Hz or 8Hz of
50 highpass cut-off frequency 56 highpass cut-off frequency
51 - st,hipass{1,2}-disable: disable highpass 1/2. 57 - st,hipass{1,2}-disable: disable highpass 1/2.
52 - st,default-rate=: set the default rate 58 - st,default-rate=: set the default rate
53 - st,axis-{x,y,z}=: set the axis to map to the three coordinates 59 - st,axis-{x,y,z}=: set the axis to map to the three coordinates.
60 Negative values can be used for inverted axis.
54 - st,{min,max}-limit-{x,y,z} set the min/max limits for x/y/z axis 61 - st,{min,max}-limit-{x,y,z} set the min/max limits for x/y/z axis
55 (used by self-test) 62 (used by self-test)
56 63
diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
index 715d0998af8e..e16b9b5afc70 100644
--- a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
+++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
@@ -1,6 +1,6 @@
1Qualcomm SPMI Controller (PMIC Arbiter) 1Qualcomm SPMI Controller (PMIC Arbiter)
2 2
3The SPMI PMIC Arbiter is found on the Snapdragon 800 Series. It is an SPMI 3The SPMI PMIC Arbiter is found on Snapdragon chipsets. It is an SPMI
4controller with wrapping arbitration logic to allow for multiple on-chip 4controller with wrapping arbitration logic to allow for multiple on-chip
5devices to control a single SPMI master. 5devices to control a single SPMI master.
6 6
@@ -19,6 +19,10 @@ Required properties:
19 "core" - core registers 19 "core" - core registers
20 "intr" - interrupt controller registers 20 "intr" - interrupt controller registers
21 "cnfg" - configuration registers 21 "cnfg" - configuration registers
22 Registers used only for V2 PMIC Arbiter:
23 "chnls" - tx-channel per virtual slave registers.
24 "obsrvr" - rx-channel (called observer) per virtual slave registers.
25
22- reg : address + size pairs describing the PMIC arb register sets; order must 26- reg : address + size pairs describing the PMIC arb register sets; order must
23 correspond with the order of entries in reg-names 27 correspond with the order of entries in reg-names
24- #address-cells : must be set to 2 28- #address-cells : must be set to 2
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index e1e2bbd7a404..831a5363f6be 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -276,6 +276,7 @@ IOMAP
276 devm_ioport_unmap() 276 devm_ioport_unmap()
277 devm_ioremap() 277 devm_ioremap()
278 devm_ioremap_nocache() 278 devm_ioremap_nocache()
279 devm_ioremap_wc()
279 devm_ioremap_resource() : checks resource, requests memory region, ioremaps 280 devm_ioremap_resource() : checks resource, requests memory region, ioremaps
280 devm_iounmap() 281 devm_iounmap()
281 pcim_iomap() 282 pcim_iomap()
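
For reference, the devm_ioremap_wc() helper documented above is meant to be called from a device's probe path; a minimal sketch of such a caller (hypothetical foo_probe() and resource layout, not code from this patch) could look like:

#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Write-combined mapping, unmapped automatically on driver detach */
	base = devm_ioremap_wc(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	writel(0x0, base);	/* example access through the WC mapping */
	return 0;
}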
diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt
index 02361552a3ea..77d14d51a670 100644
--- a/Documentation/trace/coresight.txt
+++ b/Documentation/trace/coresight.txt
@@ -14,7 +14,7 @@ document is concerned with the latter.
14HW assisted tracing is becoming increasingly useful when dealing with systems 14HW assisted tracing is becoming increasingly useful when dealing with systems
15that have many SoCs and other components like GPU and DMA engines. ARM has 15that have many SoCs and other components like GPU and DMA engines. ARM has
16developed a HW assisted tracing solution by means of different components, each 16developed a HW assisted tracing solution by means of different components, each
17being added to a design at systhesis time to cater to specific tracing needs. 17being added to a design at synthesis time to cater to specific tracing needs.
18Components are generally categorised as sources, links and sinks and are 18Components are generally categorised as sources, links and sinks and are
19(usually) discovered using the AMBA bus. 19(usually) discovered using the AMBA bus.
20 20
diff --git a/MAINTAINERS b/MAINTAINERS
index 3264719740f5..d4aaab5ea139 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -958,7 +958,7 @@ ARM/CORESIGHT FRAMEWORK AND DRIVERS
958M: Mathieu Poirier <mathieu.poirier@linaro.org> 958M: Mathieu Poirier <mathieu.poirier@linaro.org>
959L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 959L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
960S: Maintained 960S: Maintained
961F: drivers/coresight/* 961F: drivers/hwtracing/coresight/*
962F: Documentation/trace/coresight.txt 962F: Documentation/trace/coresight.txt
963F: Documentation/devicetree/bindings/arm/coresight.txt 963F: Documentation/devicetree/bindings/arm/coresight.txt
964F: Documentation/ABI/testing/sysfs-bus-coresight-devices-* 964F: Documentation/ABI/testing/sysfs-bus-coresight-devices-*
@@ -1828,7 +1828,7 @@ S: Supported
1828F: drivers/spi/spi-atmel.* 1828F: drivers/spi/spi-atmel.*
1829 1829
1830ATMEL SSC DRIVER 1830ATMEL SSC DRIVER
1831M: Bo Shen <voice.shen@atmel.com> 1831M: Nicolas Ferre <nicolas.ferre@atmel.com>
1832L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1832L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1833S: Supported 1833S: Supported
1834F: drivers/misc/atmel-ssc.c 1834F: drivers/misc/atmel-ssc.c
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 970de7518341..8b0183a9a300 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1610,59 +1610,6 @@ config DEBUG_SET_MODULE_RONX
1610 against certain classes of kernel exploits. 1610 against certain classes of kernel exploits.
1611 If in doubt, say "N". 1611 If in doubt, say "N".
1612 1612
1613menuconfig CORESIGHT 1613source "drivers/hwtracing/coresight/Kconfig"
1614 bool "CoreSight Tracing Support"
1615 select ARM_AMBA
1616 help
1617 This framework provides a kernel interface for the CoreSight debug
1618 and trace drivers to register themselves with. It's intended to build
1619 a topological view of the CoreSight components based on a DT
1620 specification and configure the right serie of components when a
1621 trace source gets enabled.
1622
1623if CORESIGHT
1624config CORESIGHT_LINKS_AND_SINKS
1625 bool "CoreSight Link and Sink drivers"
1626 help
1627 This enables support for CoreSight link and sink drivers that are
1628 responsible for transporting and collecting the trace data
1629 respectively. Link and sinks are dynamically aggregated with a trace
1630 entity at run time to form a complete trace path.
1631
1632config CORESIGHT_LINK_AND_SINK_TMC
1633 bool "Coresight generic TMC driver"
1634 depends on CORESIGHT_LINKS_AND_SINKS
1635 help
1636 This enables support for the Trace Memory Controller driver. Depending
1637 on its configuration the device can act as a link (embedded trace router
1638 - ETR) or sink (embedded trace FIFO). The driver complies with the
1639 generic implementation of the component without special enhancement or
1640 added features.
1641
1642config CORESIGHT_SINK_TPIU
1643 bool "Coresight generic TPIU driver"
1644 depends on CORESIGHT_LINKS_AND_SINKS
1645 help
1646 This enables support for the Trace Port Interface Unit driver, responsible
1647 for bridging the gap between the on-chip coresight components and a trace
1648 port collection engine, typically connected to an external host for use
1649 case capturing more traces than the on-board coresight memory can handle.
1650
1651config CORESIGHT_SINK_ETBV10
1652 bool "Coresight ETBv1.0 driver"
1653 depends on CORESIGHT_LINKS_AND_SINKS
1654 help
1655 This enables support for the Embedded Trace Buffer version 1.0 driver
1656 that complies with the generic implementation of the component without
1657 special enhancement or added features.
1658 1614
1659config CORESIGHT_SOURCE_ETM3X
1660 bool "CoreSight Embedded Trace Macrocell 3.x driver"
1661 select CORESIGHT_LINKS_AND_SINKS
1662 help
1663 This driver provides support for processor ETM3.x and PTM1.x modules,
1664 which allows tracing the instructions that a processor is executing
1665 This is primarily useful for instruction level tracing. Depending
1666 the ETM version data tracing may also be available.
1667endif
1668endmenu 1615endmenu
diff --git a/arch/arm/boot/dts/hip04.dtsi b/arch/arm/boot/dts/hip04.dtsi
index 238814596a87..44044f275115 100644
--- a/arch/arm/boot/dts/hip04.dtsi
+++ b/arch/arm/boot/dts/hip04.dtsi
@@ -275,7 +275,6 @@
275 compatible = "arm,coresight-etb10", "arm,primecell"; 275 compatible = "arm,coresight-etb10", "arm,primecell";
276 reg = <0 0xe3c42000 0 0x1000>; 276 reg = <0 0xe3c42000 0 0x1000>;
277 277
278 coresight-default-sink;
279 clocks = <&clk_375m>; 278 clocks = <&clk_375m>;
280 clock-names = "apb_pclk"; 279 clock-names = "apb_pclk";
281 port { 280 port {
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 25f7b0a22114..8cdca51b6984 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -150,7 +150,6 @@
150 compatible = "arm,coresight-etb10", "arm,primecell"; 150 compatible = "arm,coresight-etb10", "arm,primecell";
151 reg = <0x5401b000 0x1000>; 151 reg = <0x5401b000 0x1000>;
152 152
153 coresight-default-sink;
154 clocks = <&emu_src_ck>; 153 clocks = <&emu_src_ck>;
155 clock-names = "apb_pclk"; 154 clock-names = "apb_pclk";
156 port { 155 port {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index c792391ef090..6d4c46be8c39 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -145,7 +145,6 @@
145 compatible = "arm,coresight-etb10", "arm,primecell"; 145 compatible = "arm,coresight-etb10", "arm,primecell";
146 reg = <0x5401b000 0x1000>; 146 reg = <0x5401b000 0x1000>;
147 147
148 coresight-default-sink;
149 clocks = <&emu_src_ck>; 148 clocks = <&emu_src_ck>;
150 clock-names = "apb_pclk"; 149 clock-names = "apb_pclk";
151 port { 150 port {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index db80f9d376fa..2cab149b191c 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -609,6 +609,58 @@
609 pinctrl-0 = <&i2c3_pins>; 609 pinctrl-0 = <&i2c3_pins>;
610 610
611 clock-frequency = <400000>; 611 clock-frequency = <400000>;
612
613 lis302dl: lis3lv02d@1d {
614 compatible = "st,lis3lv02d";
615 reg = <0x1d>;
616
617 Vdd-supply = <&vaux1>;
618 Vdd_IO-supply = <&vio>;
619
620 interrupt-parent = <&gpio6>;
621 interrupts = <21 20>; /* 181 and 180 */
622
623 /* click flags */
624 st,click-single-x;
625 st,click-single-y;
626 st,click-single-z;
627
628 /* Limits are 0.5g * value */
629 st,click-threshold-x = <8>;
630 st,click-threshold-y = <8>;
631 st,click-threshold-z = <10>;
632
633 /* Click must be longer than time limit */
634 st,click-time-limit = <9>;
635
636 /* Kind of debounce filter */
637 st,click-latency = <50>;
638
639 /* Interrupt line 2 for click detection */
640 st,irq2-click;
641
642 st,wakeup-x-hi;
643 st,wakeup-y-hi;
644 st,wakeup-threshold = <(800/18)>; /* millig-value / 18 to get HW values */
645
646 st,wakeup2-z-hi;
647 st,wakeup2-threshold = <(900/18)>; /* millig-value / 18 to get HW values */
648
649 st,hipass1-disable;
650 st,hipass2-disable;
651
652 st,axis-x = <1>; /* LIS3_DEV_X */
653 st,axis-y = <(-2)>; /* LIS3_INV_DEV_Y */
654 st,axis-z = <(-3)>; /* LIS3_INV_DEV_Z */
655
656 st,min-limit-x = <(-32)>;
657 st,min-limit-y = <3>;
658 st,min-limit-z = <3>;
659
660 st,max-limit-x = <(-3)>;
661 st,max-limit-y = <32>;
662 st,max-limit-z = <32>;
663 };
612}; 664};
613 665
614&mmc1 { 666&mmc1 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 33920df03640..7a2aeacd62c0 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -362,7 +362,6 @@
362 compatible = "arm,coresight-etb10", "arm,primecell"; 362 compatible = "arm,coresight-etb10", "arm,primecell";
363 reg = <0 0x20010000 0 0x1000>; 363 reg = <0 0x20010000 0 0x1000>;
364 364
365 coresight-default-sink;
366 clocks = <&oscclk6a>; 365 clocks = <&oscclk6a>;
367 clock-names = "apb_pclk"; 366 clock-names = "apb_pclk";
368 port { 367 port {
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 4a8741073c90..d6285ef9b5f9 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -89,4 +89,6 @@ config DEBUG_ALIGN_RODATA
89 89
90 If in doubt, say N 90 If in doubt, say N
91 91
92source "drivers/hwtracing/coresight/Kconfig"
93
92endmenu 94endmenu
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 6e7787f3dac7..9cc00dbd59ce 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -67,6 +67,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
67 67
68extern void iounmap(volatile void __iomem *addr); 68extern void iounmap(volatile void __iomem *addr);
69#define ioremap_nocache(off,size) ioremap(off,size) 69#define ioremap_nocache(off,size) ioremap(off,size)
70#define ioremap_wc ioremap_nocache
70 71
71/* 72/*
72 * IO bus memory addresses are also 1:1 with the physical address 73 * IO bus memory addresses are also 1:1 with the physical address
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 90c458e66e13..ce6068dbcfbc 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -225,6 +225,8 @@
225#define HV_STATUS_INVALID_HYPERCALL_CODE 2 225#define HV_STATUS_INVALID_HYPERCALL_CODE 2
226#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 226#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
227#define HV_STATUS_INVALID_ALIGNMENT 4 227#define HV_STATUS_INVALID_ALIGNMENT 4
228#define HV_STATUS_INSUFFICIENT_MEMORY 11
229#define HV_STATUS_INVALID_CONNECTION_ID 18
228#define HV_STATUS_INSUFFICIENT_BUFFERS 19 230#define HV_STATUS_INSUFFICIENT_BUFFERS 19
229 231
230typedef struct _HV_REFERENCE_TSC_PAGE { 232typedef struct _HV_REFERENCE_TSC_PAGE {
diff --git a/drivers/Makefile b/drivers/Makefile
index 527a6da8d539..46d2554be404 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -163,5 +163,5 @@ obj-$(CONFIG_POWERCAP) += powercap/
163obj-$(CONFIG_MCB) += mcb/ 163obj-$(CONFIG_MCB) += mcb/
164obj-$(CONFIG_RAS) += ras/ 164obj-$(CONFIG_RAS) += ras/
165obj-$(CONFIG_THUNDERBOLT) += thunderbolt/ 165obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
166obj-$(CONFIG_CORESIGHT) += coresight/ 166obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
167obj-$(CONFIG_ANDROID) += android/ 167obj-$(CONFIG_ANDROID) += android/
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 571ef61f8ea9..da8faf78536a 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -300,11 +300,14 @@ static const struct file_operations rng_chrdev_ops = {
300 .llseek = noop_llseek, 300 .llseek = noop_llseek,
301}; 301};
302 302
303static const struct attribute_group *rng_dev_groups[];
304
303static struct miscdevice rng_miscdev = { 305static struct miscdevice rng_miscdev = {
304 .minor = RNG_MISCDEV_MINOR, 306 .minor = RNG_MISCDEV_MINOR,
305 .name = RNG_MODULE_NAME, 307 .name = RNG_MODULE_NAME,
306 .nodename = "hwrng", 308 .nodename = "hwrng",
307 .fops = &rng_chrdev_ops, 309 .fops = &rng_chrdev_ops,
310 .groups = rng_dev_groups,
308}; 311};
309 312
310 313
@@ -377,37 +380,22 @@ static DEVICE_ATTR(rng_available, S_IRUGO,
377 hwrng_attr_available_show, 380 hwrng_attr_available_show,
378 NULL); 381 NULL);
379 382
383static struct attribute *rng_dev_attrs[] = {
384 &dev_attr_rng_current.attr,
385 &dev_attr_rng_available.attr,
386 NULL
387};
388
389ATTRIBUTE_GROUPS(rng_dev);
380 390
381static void __exit unregister_miscdev(void) 391static void __exit unregister_miscdev(void)
382{ 392{
383 device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
384 device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
385 misc_deregister(&rng_miscdev); 393 misc_deregister(&rng_miscdev);
386} 394}
387 395
388static int __init register_miscdev(void) 396static int __init register_miscdev(void)
389{ 397{
390 int err; 398 return misc_register(&rng_miscdev);
391
392 err = misc_register(&rng_miscdev);
393 if (err)
394 goto out;
395 err = device_create_file(rng_miscdev.this_device,
396 &dev_attr_rng_current);
397 if (err)
398 goto err_misc_dereg;
399 err = device_create_file(rng_miscdev.this_device,
400 &dev_attr_rng_available);
401 if (err)
402 goto err_remove_current;
403out:
404 return err;
405
406err_remove_current:
407 device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
408err_misc_dereg:
409 misc_deregister(&rng_miscdev);
410 goto out;
411} 399}
412 400
413static int hwrng_fillfn(void *unused) 401static int hwrng_fillfn(void *unused)
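
The hw_random conversion above works because the misc core can now create sysfs attributes from a .groups pointer when the device is created. As an illustration only (hypothetical names, not code from this patch), the same pattern in a minimal misc driver looks like:

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(version);

static struct attribute *foo_dev_attrs[] = {
	&dev_attr_version.attr,
	NULL
};
ATTRIBUTE_GROUPS(foo_dev);		/* provides foo_dev_groups */

static const struct file_operations foo_fops = {
	.owner	= THIS_MODULE,
};

static struct miscdevice foo_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "foo",
	.fops	= &foo_fops,
	.groups	= foo_dev_groups,	/* attributes created/removed with the device */
};

static int __init foo_init(void)
{
	return misc_register(&foo_miscdev);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	misc_deregister(&foo_miscdev);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");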
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 3eb7bdd7f93b..51cb1d5cc489 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -133,7 +133,7 @@ static int rng_remove(struct platform_device *dev)
133 return 0; 133 return 0;
134} 134}
135 135
136static struct of_device_id rng_match[] = { 136static const struct of_device_id rng_match[] = {
137 { .compatible = "1682m-rng", }, 137 { .compatible = "1682m-rng", },
138 { .compatible = "pasemi,pwrficient-rng", }, 138 { .compatible = "pasemi,pwrficient-rng", },
139 { }, 139 { },
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 3f4f63204560..263a5bb8e605 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -61,7 +61,7 @@ static int powernv_rng_probe(struct platform_device *pdev)
61 return 0; 61 return 0;
62} 62}
63 63
64static struct of_device_id powernv_rng_match[] = { 64static const struct of_device_id powernv_rng_match[] = {
65 { .compatible = "ibm,power-rng",}, 65 { .compatible = "ibm,power-rng",},
66 {}, 66 {},
67}; 67};
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index c85d31a5f9e3..b2cfda0fa93e 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -123,7 +123,7 @@ static int ppc4xx_rng_remove(struct platform_device *dev)
123 return 0; 123 return 0;
124} 124}
125 125
126static struct of_device_id ppc4xx_rng_match[] = { 126static const struct of_device_id ppc4xx_rng_match[] = {
127 { .compatible = "ppc4xx-rng", }, 127 { .compatible = "ppc4xx-rng", },
128 { .compatible = "amcc,ppc460ex-rng", }, 128 { .compatible = "amcc,ppc460ex-rng", },
129 { .compatible = "amcc,ppc440epx-rng", }, 129 { .compatible = "amcc,ppc440epx-rng", },
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 24cc4ed9a780..a43048b5b05f 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -510,13 +510,15 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
510 * 9) AC power 510 * 9) AC power
511 * 10) Fn Key status 511 * 10) Fn Key status
512 */ 512 */
513 return seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", 513 seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
514 I8K_PROC_FMT, 514 I8K_PROC_FMT,
515 bios_version, 515 bios_version,
516 i8k_get_dmi_data(DMI_PRODUCT_SERIAL), 516 i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
517 cpu_temp, 517 cpu_temp,
518 left_fan, right_fan, left_speed, right_speed, 518 left_fan, right_fan, left_speed, right_speed,
519 ac_power, fn_key); 519 ac_power, fn_key);
520
521 return 0;
520} 522}
521 523
522static int i8k_open_fs(struct inode *inode, struct file *file) 524static int i8k_open_fs(struct inode *inode, struct file *file)
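
The i8k change reflects the rule that a seq_file show() callback returns 0 on success instead of propagating seq_printf()'s value; overflow is handled by the seq_file core, which retries with a larger buffer. A minimal sketch of the expected shape, with hypothetical names:

#include <linux/seq_file.h>

static int foo_proc_show(struct seq_file *seq, void *offset)
{
	/* seq_printf()'s result is deliberately ignored; the seq_file
	 * core detects overflow and re-invokes show() with a bigger buffer */
	seq_printf(seq, "fan: %d rpm\n", 4200);
	return 0;
}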
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 518585c1ce94..5e90a18afbaf 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2667,7 +2667,7 @@ static struct pci_driver ipmi_pci_driver = {
2667}; 2667};
2668#endif /* CONFIG_PCI */ 2668#endif /* CONFIG_PCI */
2669 2669
2670static struct of_device_id ipmi_match[]; 2670static const struct of_device_id ipmi_match[];
2671static int ipmi_probe(struct platform_device *dev) 2671static int ipmi_probe(struct platform_device *dev)
2672{ 2672{
2673#ifdef CONFIG_OF 2673#ifdef CONFIG_OF
@@ -2764,7 +2764,7 @@ static int ipmi_remove(struct platform_device *dev)
2764 return 0; 2764 return 0;
2765} 2765}
2766 2766
2767static struct of_device_id ipmi_match[] = 2767static const struct of_device_id ipmi_match[] =
2768{ 2768{
2769 { .type = "ipmi", .compatible = "ipmi-kcs", 2769 { .type = "ipmi", .compatible = "ipmi-kcs",
2770 .data = (void *)(unsigned long) SI_KCS }, 2770 .data = (void *)(unsigned long) SI_KCS },
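
Several hunks in this patch (pasemi-rng, powernv-rng, ppc4xx-rng, ipmi_si) apply the same constification of OF match tables. The resulting pattern, shown here in generic form with hypothetical names rather than code from any of those drivers:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

/* const match table: never written at runtime, so it can live in rodata */
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name		= "foo",
		.of_match_table	= foo_of_match,
	},
};
module_platform_driver(foo_driver);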
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index ffa97d261cf3..9fd5a91e0d81 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -140,12 +140,17 @@ static int misc_open(struct inode * inode, struct file * file)
140 goto fail; 140 goto fail;
141 } 141 }
142 142
143 /*
144 * Place the miscdevice in the file's
145 * private_data so it can be used by the
146 * file operations, including f_op->open below
147 */
148 file->private_data = c;
149
143 err = 0; 150 err = 0;
144 replace_fops(file, new_fops); 151 replace_fops(file, new_fops);
145 if (file->f_op->open) { 152 if (file->f_op->open)
146 file->private_data = c;
147 err = file->f_op->open(inode,file); 153 err = file->f_op->open(inode,file);
148 }
149fail: 154fail:
150 mutex_unlock(&misc_mtx); 155 mutex_unlock(&misc_mtx);
151 return err; 156 return err;
@@ -169,7 +174,9 @@ static const struct file_operations misc_fops = {
169 * the minor number requested is used. 174 * the minor number requested is used.
170 * 175 *
171 * The structure passed is linked into the kernel and may not be 176 * The structure passed is linked into the kernel and may not be
172 * destroyed until it has been unregistered. 177 * destroyed until it has been unregistered. By default, an open()
178 * syscall to the device sets file->private_data to point to the
179 * structure. Drivers don't need open in fops for this.
173 * 180 *
174 * A zero is returned on success and a negative errno code for 181 * A zero is returned on success and a negative errno code for
175 * failure. 182 * failure.
@@ -205,8 +212,9 @@ int misc_register(struct miscdevice * misc)
205 212
206 dev = MKDEV(MISC_MAJOR, misc->minor); 213 dev = MKDEV(MISC_MAJOR, misc->minor);
207 214
208 misc->this_device = device_create(misc_class, misc->parent, dev, 215 misc->this_device =
209 misc, "%s", misc->name); 216 device_create_with_groups(misc_class, misc->parent, dev,
217 misc, misc->groups, "%s", misc->name);
210 if (IS_ERR(misc->this_device)) { 218 if (IS_ERR(misc->this_device)) {
211 int i = DYNAMIC_MINORS - misc->minor - 1; 219 int i = DYNAMIC_MINORS - misc->minor - 1;
212 if (i < DYNAMIC_MINORS && i >= 0) 220 if (i < DYNAMIC_MINORS && i >= 0)
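
With misc_open() now setting file->private_data to the miscdevice before calling the driver's open() (and leaving it set even when no open() is provided), a driver that embeds its miscdevice can recover its own state without extra bookkeeping. A hedged sketch of that pattern, with hypothetical structure names:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>

struct foo_device {
	struct miscdevice miscdev;
	int some_state;
};

static int foo_open(struct inode *inode, struct file *file)
{
	/* The misc core has already stored the registered miscdevice here */
	struct miscdevice *misc = file->private_data;
	struct foo_device *foo = container_of(misc, struct foo_device, miscdev);

	file->private_data = foo;	/* hand per-device state to read()/write() */
	return 0;
}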
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 72d7028f779b..50754d203310 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -355,7 +355,7 @@ static inline bool use_multiport(struct ports_device *portdev)
355 * early_init 355 * early_init
356 */ 356 */
357 if (!portdev->vdev) 357 if (!portdev->vdev)
358 return 0; 358 return false;
359 return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT); 359 return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
360} 360}
361 361
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index b827fa095f1b..77d6c127e691 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1237,6 +1237,8 @@ static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
1237 unsigned char *tail; 1237 unsigned char *tail;
1238 int i; 1238 int i;
1239 1239
1240 howmany = 0;
1241
1240 end_offset_plus1 = bufpos >> 1242 end_offset_plus1 = bufpos >>
1241 channel->log2_element_size; 1243 channel->log2_element_size;
1242 1244
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 2002a3a28146..781865084dc1 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL v2");
31static const char xillyname[] = "xillybus_of"; 31static const char xillyname[] = "xillybus_of";
32 32
33/* Match table for of_platform binding */ 33/* Match table for of_platform binding */
34static struct of_device_id xillybus_of_match[] = { 34static const struct of_device_id xillybus_of_match[] = {
35 { .compatible = "xillybus,xillybus-1.00.a", }, 35 { .compatible = "xillybus,xillybus-1.00.a", },
36 { .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */ 36 { .compatible = "xlnx,xillybus-1.00.a", }, /* Deprecated */
37 {} 37 {}
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 6a1f7de6fa54..fdc0bf0543ce 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -55,6 +55,16 @@ config EXTCON_MAX77693
55 Maxim MAX77693 PMIC. The MAX77693 MUIC is a USB port accessory 55 Maxim MAX77693 PMIC. The MAX77693 MUIC is a USB port accessory
56 detector and switch. 56 detector and switch.
57 57
58config EXTCON_MAX77843
59 tristate "MAX77843 EXTCON Support"
60 depends on MFD_MAX77843
61 select IRQ_DOMAIN
62 select REGMAP_I2C
63 help
64 If you say yes here you get support for the MUIC device of
65 Maxim MAX77843. The MAX77843 MUIC is a USB port accessory
66 detector and switch.
67
58config EXTCON_MAX8997 68config EXTCON_MAX8997
59 tristate "MAX8997 EXTCON Support" 69 tristate "MAX8997 EXTCON Support"
60 depends on MFD_MAX8997 && IRQ_DOMAIN 70 depends on MFD_MAX8997 && IRQ_DOMAIN
@@ -93,4 +103,11 @@ config EXTCON_SM5502
93 Silicon Mitus SM5502. The SM5502 is a USB port accessory 103 Silicon Mitus SM5502. The SM5502 is a USB port accessory
94 detector and switch. 104 detector and switch.
95 105
106config EXTCON_USB_GPIO
107 tristate "USB GPIO extcon support"
108 depends on GPIOLIB
109 help
110 Say Y here to enable GPIO based USB cable detection extcon support.
111 Used typically if GPIO is used for USB ID pin detection.
112
96endif # MULTISTATE_SWITCH 113endif # MULTISTATE_SWITCH
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0370b42e5a27..9204114791a3 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -2,13 +2,15 @@
2# Makefile for external connector class (extcon) devices 2# Makefile for external connector class (extcon) devices
3# 3#
4 4
5obj-$(CONFIG_EXTCON) += extcon-class.o 5obj-$(CONFIG_EXTCON) += extcon.o
6obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o 6obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
7obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o 7obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
8obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o 8obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
9obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o 9obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
10obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o 10obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
11obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
11obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o 12obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
12obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o 13obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
13obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o 14obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
14obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o 15obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
16obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 6b5e795f3fe2..a0ed35b336e4 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -136,18 +136,35 @@ static const char *arizona_cable[] = {
136 136
137static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info); 137static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info);
138 138
139static void arizona_extcon_do_magic(struct arizona_extcon_info *info, 139static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
140 unsigned int magic) 140 bool clamp)
141{ 141{
142 struct arizona *arizona = info->arizona; 142 struct arizona *arizona = info->arizona;
143 unsigned int mask = 0, val = 0;
143 int ret; 144 int ret;
144 145
146 switch (arizona->type) {
147 case WM5110:
148 mask = ARIZONA_HP1L_SHRTO | ARIZONA_HP1L_FLWR |
149 ARIZONA_HP1L_SHRTI;
150 if (clamp)
151 val = ARIZONA_HP1L_SHRTO;
152 else
153 val = ARIZONA_HP1L_FLWR | ARIZONA_HP1L_SHRTI;
154 break;
155 default:
156 mask = ARIZONA_RMV_SHRT_HP1L;
157 if (clamp)
158 val = ARIZONA_RMV_SHRT_HP1L;
159 break;
160 };
161
145 mutex_lock(&arizona->dapm->card->dapm_mutex); 162 mutex_lock(&arizona->dapm->card->dapm_mutex);
146 163
147 arizona->hpdet_magic = magic; 164 arizona->hpdet_clamp = clamp;
148 165
149 /* Keep the HP output stages disabled while doing the magic */ 166 /* Keep the HP output stages disabled while doing the clamp */
150 if (magic) { 167 if (clamp) {
151 ret = regmap_update_bits(arizona->regmap, 168 ret = regmap_update_bits(arizona->regmap,
152 ARIZONA_OUTPUT_ENABLES_1, 169 ARIZONA_OUTPUT_ENABLES_1,
153 ARIZONA_OUT1L_ENA | 170 ARIZONA_OUT1L_ENA |
@@ -158,20 +175,20 @@ static void arizona_extcon_do_magic(struct arizona_extcon_info *info,
158 ret); 175 ret);
159 } 176 }
160 177
161 ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 178 ret = regmap_update_bits(arizona->regmap, ARIZONA_HP_CTRL_1L,
162 magic); 179 mask, val);
163 if (ret != 0) 180 if (ret != 0)
164 dev_warn(arizona->dev, "Failed to do magic: %d\n", 181 dev_warn(arizona->dev, "Failed to do clamp: %d\n",
165 ret); 182 ret);
166 183
167 ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 184 ret = regmap_update_bits(arizona->regmap, ARIZONA_HP_CTRL_1R,
168 magic); 185 mask, val);
169 if (ret != 0) 186 if (ret != 0)
170 dev_warn(arizona->dev, "Failed to do magic: %d\n", 187 dev_warn(arizona->dev, "Failed to do clamp: %d\n",
171 ret); 188 ret);
172 189
173 /* Restore the desired state while not doing the magic */ 190 /* Restore the desired state while not doing the clamp */
174 if (!magic) { 191 if (!clamp) {
175 ret = regmap_update_bits(arizona->regmap, 192 ret = regmap_update_bits(arizona->regmap,
176 ARIZONA_OUTPUT_ENABLES_1, 193 ARIZONA_OUTPUT_ENABLES_1,
177 ARIZONA_OUT1L_ENA | 194 ARIZONA_OUT1L_ENA |
@@ -603,7 +620,7 @@ done:
603 ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL, 620 ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
604 0); 621 0);
605 622
606 arizona_extcon_do_magic(info, 0); 623 arizona_extcon_hp_clamp(info, false);
607 624
608 if (id_gpio) 625 if (id_gpio)
609 gpio_set_value_cansleep(id_gpio, 0); 626 gpio_set_value_cansleep(id_gpio, 0);
@@ -648,7 +665,7 @@ static void arizona_identify_headphone(struct arizona_extcon_info *info)
648 if (info->mic) 665 if (info->mic)
649 arizona_stop_mic(info); 666 arizona_stop_mic(info);
650 667
651 arizona_extcon_do_magic(info, 0x4000); 668 arizona_extcon_hp_clamp(info, true);
652 669
653 ret = regmap_update_bits(arizona->regmap, 670 ret = regmap_update_bits(arizona->regmap,
654 ARIZONA_ACCESSORY_DETECT_MODE_1, 671 ARIZONA_ACCESSORY_DETECT_MODE_1,
@@ -699,7 +716,7 @@ static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
699 716
700 info->hpdet_active = true; 717 info->hpdet_active = true;
701 718
702 arizona_extcon_do_magic(info, 0x4000); 719 arizona_extcon_hp_clamp(info, true);
703 720
704 ret = regmap_update_bits(arizona->regmap, 721 ret = regmap_update_bits(arizona->regmap,
705 ARIZONA_ACCESSORY_DETECT_MODE_1, 722 ARIZONA_ACCESSORY_DETECT_MODE_1,
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index c1bf0cf747b0..3823aa4a3a80 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -539,8 +539,6 @@ static void max14577_muic_irq_work(struct work_struct *work)
539 dev_err(info->dev, "failed to handle MUIC interrupt\n"); 539 dev_err(info->dev, "failed to handle MUIC interrupt\n");
540 540
541 mutex_unlock(&info->mutex); 541 mutex_unlock(&info->mutex);
542
543 return;
544} 542}
545 543
546/* 544/*
@@ -730,8 +728,7 @@ static int max14577_muic_probe(struct platform_device *pdev)
730 muic_irq->name, info); 728 muic_irq->name, info);
731 if (ret) { 729 if (ret) {
732 dev_err(&pdev->dev, 730 dev_err(&pdev->dev,
733 "failed: irq request (IRQ: %d," 731 "failed: irq request (IRQ: %d, error :%d)\n",
734 " error :%d)\n",
735 muic_irq->irq, ret); 732 muic_irq->irq, ret);
736 return ret; 733 return ret;
737 } 734 }
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index af165fd0c6f5..a66bec8f6252 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -190,8 +190,8 @@ enum max77693_muic_acc_type {
190 /* The below accessories have same ADC value so ADCLow and 190 /* The below accessories have same ADC value so ADCLow and
191 ADC1K bit is used to separate specific accessory */ 191 ADC1K bit is used to separate specific accessory */
192 /* ADC|VBVolot|ADCLow|ADC1K| */ 192 /* ADC|VBVolot|ADCLow|ADC1K| */
193 MAX77693_MUIC_GND_USB_OTG = 0x100, /* 0x0| 0| 0| 0| */ 193 MAX77693_MUIC_GND_USB_HOST = 0x100, /* 0x0| 0| 0| 0| */
194 MAX77693_MUIC_GND_USB_OTG_VB = 0x104, /* 0x0| 1| 0| 0| */ 194 MAX77693_MUIC_GND_USB_HOST_VB = 0x104, /* 0x0| 1| 0| 0| */
195 MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* 0x0| 0| 1| 0| */ 195 MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* 0x0| 0| 1| 0| */
196 MAX77693_MUIC_GND_MHL = 0x103, /* 0x0| 0| 1| 1| */ 196 MAX77693_MUIC_GND_MHL = 0x103, /* 0x0| 0| 1| 1| */
197 MAX77693_MUIC_GND_MHL_VB = 0x107, /* 0x0| 1| 1| 1| */ 197 MAX77693_MUIC_GND_MHL_VB = 0x107, /* 0x0| 1| 1| 1| */
@@ -228,7 +228,7 @@ static const char *max77693_extcon_cable[] = {
228 [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger", 228 [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
229 [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream", 229 [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
230 [EXTCON_CABLE_MHL] = "MHL", 230 [EXTCON_CABLE_MHL] = "MHL",
231 [EXTCON_CABLE_MHL_TA] = "MHL_TA", 231 [EXTCON_CABLE_MHL_TA] = "MHL-TA",
232 [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON", 232 [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
233 [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF", 233 [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
234 [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF", 234 [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
@@ -403,8 +403,8 @@ static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
403 403
404 /** 404 /**
405 * [0x1|VBVolt|ADCLow|ADC1K] 405 * [0x1|VBVolt|ADCLow|ADC1K]
406 * [0x1| 0| 0| 0] USB_OTG 406 * [0x1| 0| 0| 0] USB_HOST
407 * [0x1| 1| 0| 0] USB_OTG_VB 407 * [0x1| 1| 0| 0] USB_HOST_VB
408 * [0x1| 0| 1| 0] Audio Video cable with load 408 * [0x1| 0| 1| 0] Audio Video cable with load
409 * [0x1| 0| 1| 1] MHL without charging cable 409 * [0x1| 0| 1| 1] MHL without charging cable
410 * [0x1| 1| 1| 1] MHL with charging cable 410 * [0x1| 1| 1| 1] MHL with charging cable
@@ -523,7 +523,7 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
523 * - Support charging and data connection through micro-usb port 523 * - Support charging and data connection through micro-usb port
524 * if USB cable is connected between target and host 524 * if USB cable is connected between target and host
525 * device. 525 * device.
526 * - Support OTG device (Mouse/Keyboard) 526 * - Support OTG(On-The-Go) device (Ex: Mouse/Keyboard)
527 */ 527 */
528 ret = max77693_muic_set_path(info, info->path_usb, attached); 528 ret = max77693_muic_set_path(info, info->path_usb, attached);
529 if (ret < 0) 529 if (ret < 0)
@@ -609,9 +609,9 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
609 MAX77693_CABLE_GROUP_ADC_GND, &attached); 609 MAX77693_CABLE_GROUP_ADC_GND, &attached);
610 610
611 switch (cable_type_gnd) { 611 switch (cable_type_gnd) {
612 case MAX77693_MUIC_GND_USB_OTG: 612 case MAX77693_MUIC_GND_USB_HOST:
613 case MAX77693_MUIC_GND_USB_OTG_VB: 613 case MAX77693_MUIC_GND_USB_HOST_VB:
614 /* USB_OTG, PATH: AP_USB */ 614 /* USB_HOST, PATH: AP_USB */
615 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached); 615 ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
616 if (ret < 0) 616 if (ret < 0)
617 return ret; 617 return ret;
@@ -704,7 +704,7 @@ static int max77693_muic_adc_handler(struct max77693_muic_info *info)
704 704
705 switch (cable_type) { 705 switch (cable_type) {
706 case MAX77693_MUIC_ADC_GROUND: 706 case MAX77693_MUIC_ADC_GROUND:
707 /* USB_OTG/MHL/Audio */ 707 /* USB_HOST/MHL/Audio */
708 max77693_muic_adc_ground_handler(info); 708 max77693_muic_adc_ground_handler(info);
709 break; 709 break;
710 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: 710 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
@@ -823,19 +823,19 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
823 case MAX77693_MUIC_GND_MHL: 823 case MAX77693_MUIC_GND_MHL:
824 case MAX77693_MUIC_GND_MHL_VB: 824 case MAX77693_MUIC_GND_MHL_VB:
825 /* 825 /*
826 * MHL cable with MHL_TA(USB/TA) cable 826 * MHL cable with MHL-TA(USB/TA) cable
827 * - MHL cable include two port(HDMI line and separate 827 * - MHL cable include two port(HDMI line and separate
828 * micro-usb port. When the target connect MHL cable, 828 * micro-usb port. When the target connect MHL cable,
829 * extcon driver check whether MHL_TA(USB/TA) cable is 829 * extcon driver check whether MHL-TA(USB/TA) cable is
830 * connected. If MHL_TA cable is connected, extcon 830 * connected. If MHL-TA cable is connected, extcon
831 * driver notify state to notifiee for charging battery. 831 * driver notify state to notifiee for charging battery.
832 * 832 *
833 * Features of 'MHL_TA(USB/TA) with MHL cable' 833 * Features of 'MHL-TA(USB/TA) with MHL cable'
834 * - Support MHL 834 * - Support MHL
835 * - Support charging through micro-usb port without 835 * - Support charging through micro-usb port without
836 * data connection 836 * data connection
837 */ 837 */
838 extcon_set_cable_state(info->edev, "MHL_TA", attached); 838 extcon_set_cable_state(info->edev, "MHL-TA", attached);
839 if (!cable_attached) 839 if (!cable_attached)
840 extcon_set_cable_state(info->edev, 840 extcon_set_cable_state(info->edev,
841 "MHL", cable_attached); 841 "MHL", cable_attached);
@@ -886,7 +886,7 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
886 * - Support charging and data connection through micro- 886 * - Support charging and data connection through micro-
887 * usb port if USB cable is connected between target 887 * usb port if USB cable is connected between target
888 * and host device 888 * and host device
889 * - Support OTG device (Mouse/Keyboard) 889 * - Support OTG(On-The-Go) device (Ex: Mouse/Keyboard)
890 */ 890 */
891 ret = max77693_muic_set_path(info, info->path_usb, 891 ret = max77693_muic_set_path(info, info->path_usb,
892 attached); 892 attached);
@@ -1019,8 +1019,6 @@ static void max77693_muic_irq_work(struct work_struct *work)
1019 dev_err(info->dev, "failed to handle MUIC interrupt\n"); 1019 dev_err(info->dev, "failed to handle MUIC interrupt\n");
1020 1020
1021 mutex_unlock(&info->mutex); 1021 mutex_unlock(&info->mutex);
1022
1023 return;
1024} 1022}
1025 1023
1026static irqreturn_t max77693_muic_irq_handler(int irq, void *data) 1024static irqreturn_t max77693_muic_irq_handler(int irq, void *data)
@@ -1171,8 +1169,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
1171 muic_irq->name, info); 1169 muic_irq->name, info);
1172 if (ret) { 1170 if (ret) {
1173 dev_err(&pdev->dev, 1171 dev_err(&pdev->dev,
1174 "failed: irq request (IRQ: %d," 1172 "failed: irq request (IRQ: %d, error :%d)\n",
1175 " error :%d)\n",
1176 muic_irq->irq, ret); 1173 muic_irq->irq, ret);
1177 return ret; 1174 return ret;
1178 } 1175 }
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
new file mode 100644
index 000000000000..8db6a926ea07
--- /dev/null
+++ b/drivers/extcon/extcon-max77843.c
@@ -0,0 +1,881 @@
1/*
2 * extcon-max77843.c - Maxim MAX77843 extcon driver to support
3 * MUIC(Micro USB Interface Controller)
4 *
5 * Copyright (C) 2015 Samsung Electronics
6 * Author: Jaewon Kim <jaewon02.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/extcon.h>
15#include <linux/i2c.h>
16#include <linux/interrupt.h>
17#include <linux/kernel.h>
18#include <linux/mfd/max77843-private.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/workqueue.h>
22
23#define DELAY_MS_DEFAULT 15000 /* unit: millisecond */
24
25enum max77843_muic_status {
26 MAX77843_MUIC_STATUS1 = 0,
27 MAX77843_MUIC_STATUS2,
28 MAX77843_MUIC_STATUS3,
29
30 MAX77843_MUIC_STATUS_NUM,
31};
32
33struct max77843_muic_info {
34 struct device *dev;
35 struct max77843 *max77843;
36 struct extcon_dev *edev;
37
38 struct mutex mutex;
39 struct work_struct irq_work;
40 struct delayed_work wq_detcable;
41
42 u8 status[MAX77843_MUIC_STATUS_NUM];
43 int prev_cable_type;
44 int prev_chg_type;
45 int prev_gnd_type;
46
47 bool irq_adc;
48 bool irq_chg;
49};
50
51enum max77843_muic_cable_group {
52 MAX77843_CABLE_GROUP_ADC = 0,
53 MAX77843_CABLE_GROUP_ADC_GND,
54 MAX77843_CABLE_GROUP_CHG,
55};
56
57enum max77843_muic_adc_debounce_time {
58 MAX77843_DEBOUNCE_TIME_5MS = 0,
59 MAX77843_DEBOUNCE_TIME_10MS,
60 MAX77843_DEBOUNCE_TIME_25MS,
61 MAX77843_DEBOUNCE_TIME_38_62MS,
62};
63
64/* Define accessory cable type */
65enum max77843_muic_accessory_type {
66 MAX77843_MUIC_ADC_GROUND = 0,
67 MAX77843_MUIC_ADC_SEND_END_BUTTON,
68 MAX77843_MUIC_ADC_REMOTE_S1_BUTTON,
69 MAX77843_MUIC_ADC_REMOTE_S2_BUTTON,
70 MAX77843_MUIC_ADC_REMOTE_S3_BUTTON,
71 MAX77843_MUIC_ADC_REMOTE_S4_BUTTON,
72 MAX77843_MUIC_ADC_REMOTE_S5_BUTTON,
73 MAX77843_MUIC_ADC_REMOTE_S6_BUTTON,
74 MAX77843_MUIC_ADC_REMOTE_S7_BUTTON,
75 MAX77843_MUIC_ADC_REMOTE_S8_BUTTON,
76 MAX77843_MUIC_ADC_REMOTE_S9_BUTTON,
77 MAX77843_MUIC_ADC_REMOTE_S10_BUTTON,
78 MAX77843_MUIC_ADC_REMOTE_S11_BUTTON,
79 MAX77843_MUIC_ADC_REMOTE_S12_BUTTON,
80 MAX77843_MUIC_ADC_RESERVED_ACC_1,
81 MAX77843_MUIC_ADC_RESERVED_ACC_2,
82 MAX77843_MUIC_ADC_RESERVED_ACC_3,
83 MAX77843_MUIC_ADC_RESERVED_ACC_4,
84 MAX77843_MUIC_ADC_RESERVED_ACC_5,
85 MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2,
86 MAX77843_MUIC_ADC_PHONE_POWERED_DEV,
87 MAX77843_MUIC_ADC_TTY_CONVERTER,
88 MAX77843_MUIC_ADC_UART_CABLE,
89 MAX77843_MUIC_ADC_CEA936A_TYPE1_CHG,
90 MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF,
91 MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON,
92 MAX77843_MUIC_ADC_AV_CABLE_NOLOAD,
93 MAX77843_MUIC_ADC_CEA936A_TYPE2_CHG,
94 MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF,
95 MAX77843_MUIC_ADC_FACTORY_MODE_UART_ON,
96 MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1,
97 MAX77843_MUIC_ADC_OPEN,
98
99 /* The below accessories should check
100 not only ADC value but also ADC1K and VBVolt value. */
101 /* Offset|ADC1K|VBVolt| */
102 MAX77843_MUIC_GND_USB_HOST = 0x100, /* 0x1| 0| 0| */
103 MAX77843_MUIC_GND_USB_HOST_VB = 0x101, /* 0x1| 0| 1| */
104 MAX77843_MUIC_GND_MHL = 0x102, /* 0x1| 1| 0| */
105 MAX77843_MUIC_GND_MHL_VB = 0x103, /* 0x1| 1| 1| */
106};
107
108/* Define charger cable type */
109enum max77843_muic_charger_type {
110 MAX77843_MUIC_CHG_NONE = 0,
111 MAX77843_MUIC_CHG_USB,
112 MAX77843_MUIC_CHG_DOWNSTREAM,
113 MAX77843_MUIC_CHG_DEDICATED,
114 MAX77843_MUIC_CHG_SPECIAL_500MA,
115 MAX77843_MUIC_CHG_SPECIAL_1A,
116 MAX77843_MUIC_CHG_SPECIAL_BIAS,
117 MAX77843_MUIC_CHG_RESERVED,
118 MAX77843_MUIC_CHG_GND,
119};
120
121enum {
122 MAX77843_CABLE_USB = 0,
123 MAX77843_CABLE_USB_HOST,
124 MAX77843_CABLE_TA,
125 MAX77843_CABLE_CHARGE_DOWNSTREAM,
126 MAX77843_CABLE_FAST_CHARGER,
127 MAX77843_CABLE_SLOW_CHARGER,
128 MAX77843_CABLE_MHL,
129 MAX77843_CABLE_MHL_TA,
130 MAX77843_CABLE_JIG_USB_ON,
131 MAX77843_CABLE_JIG_USB_OFF,
132 MAX77843_CABLE_JIG_UART_ON,
133 MAX77843_CABLE_JIG_UART_OFF,
134
135 MAX77843_CABLE_NUM,
136};
137
138static const char *max77843_extcon_cable[] = {
139 [MAX77843_CABLE_USB] = "USB",
140 [MAX77843_CABLE_USB_HOST] = "USB-HOST",
141 [MAX77843_CABLE_TA] = "TA",
142 [MAX77843_CABLE_CHARGE_DOWNSTREAM] = "CHARGER-DOWNSTREAM",
143 [MAX77843_CABLE_FAST_CHARGER] = "FAST-CHARGER",
144 [MAX77843_CABLE_SLOW_CHARGER] = "SLOW-CHARGER",
145 [MAX77843_CABLE_MHL] = "MHL",
146 [MAX77843_CABLE_MHL_TA] = "MHL-TA",
147 [MAX77843_CABLE_JIG_USB_ON] = "JIG-USB-ON",
148 [MAX77843_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
149 [MAX77843_CABLE_JIG_UART_ON] = "JIG-UART-ON",
150 [MAX77843_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
151};
152
153struct max77843_muic_irq {
154 unsigned int irq;
155 const char *name;
156 unsigned int virq;
157};
158
159static struct max77843_muic_irq max77843_muic_irqs[] = {
160 { MAX77843_MUIC_IRQ_INT1_ADC, "MUIC-ADC" },
161 { MAX77843_MUIC_IRQ_INT1_ADCERROR, "MUIC-ADC_ERROR" },
162 { MAX77843_MUIC_IRQ_INT1_ADC1K, "MUIC-ADC1K" },
163 { MAX77843_MUIC_IRQ_INT2_CHGTYP, "MUIC-CHGTYP" },
164 { MAX77843_MUIC_IRQ_INT2_CHGDETRUN, "MUIC-CHGDETRUN" },
165 { MAX77843_MUIC_IRQ_INT2_DCDTMR, "MUIC-DCDTMR" },
166 { MAX77843_MUIC_IRQ_INT2_DXOVP, "MUIC-DXOVP" },
167 { MAX77843_MUIC_IRQ_INT2_VBVOLT, "MUIC-VBVOLT" },
168 { MAX77843_MUIC_IRQ_INT3_VBADC, "MUIC-VBADC" },
169 { MAX77843_MUIC_IRQ_INT3_VDNMON, "MUIC-VDNMON" },
170 { MAX77843_MUIC_IRQ_INT3_DNRES, "MUIC-DNRES" },
171 { MAX77843_MUIC_IRQ_INT3_MPNACK, "MUIC-MPNACK"},
172 { MAX77843_MUIC_IRQ_INT3_MRXBUFOW, "MUIC-MRXBUFOW"},
173 { MAX77843_MUIC_IRQ_INT3_MRXTRF, "MUIC-MRXTRF"},
174 { MAX77843_MUIC_IRQ_INT3_MRXPERR, "MUIC-MRXPERR"},
175 { MAX77843_MUIC_IRQ_INT3_MRXRDY, "MUIC-MRXRDY"},
176};
177
178static const struct regmap_config max77843_muic_regmap_config = {
179 .reg_bits = 8,
180 .val_bits = 8,
181 .max_register = MAX77843_MUIC_REG_END,
182};
183
184static const struct regmap_irq max77843_muic_irq[] = {
185 /* INT1 interrupt */
186 { .reg_offset = 0, .mask = MAX77843_MUIC_ADC, },
187 { .reg_offset = 0, .mask = MAX77843_MUIC_ADCERROR, },
188 { .reg_offset = 0, .mask = MAX77843_MUIC_ADC1K, },
189
190 /* INT2 interrupt */
191 { .reg_offset = 1, .mask = MAX77843_MUIC_CHGTYP, },
192 { .reg_offset = 1, .mask = MAX77843_MUIC_CHGDETRUN, },
193 { .reg_offset = 1, .mask = MAX77843_MUIC_DCDTMR, },
194 { .reg_offset = 1, .mask = MAX77843_MUIC_DXOVP, },
195 { .reg_offset = 1, .mask = MAX77843_MUIC_VBVOLT, },
196
197 /* INT3 interrupt */
198 { .reg_offset = 2, .mask = MAX77843_MUIC_VBADC, },
199 { .reg_offset = 2, .mask = MAX77843_MUIC_VDNMON, },
200 { .reg_offset = 2, .mask = MAX77843_MUIC_DNRES, },
201 { .reg_offset = 2, .mask = MAX77843_MUIC_MPNACK, },
202 { .reg_offset = 2, .mask = MAX77843_MUIC_MRXBUFOW, },
203 { .reg_offset = 2, .mask = MAX77843_MUIC_MRXTRF, },
204 { .reg_offset = 2, .mask = MAX77843_MUIC_MRXPERR, },
205 { .reg_offset = 2, .mask = MAX77843_MUIC_MRXRDY, },
206};
207
208static const struct regmap_irq_chip max77843_muic_irq_chip = {
209 .name = "max77843-muic",
210 .status_base = MAX77843_MUIC_REG_INT1,
211 .mask_base = MAX77843_MUIC_REG_INTMASK1,
212 .mask_invert = true,
213 .num_regs = 3,
214 .irqs = max77843_muic_irq,
215 .num_irqs = ARRAY_SIZE(max77843_muic_irq),
216};
217
218static int max77843_muic_set_path(struct max77843_muic_info *info,
219 u8 val, bool attached)
220{
221 struct max77843 *max77843 = info->max77843;
222 int ret = 0;
223 unsigned int ctrl1, ctrl2;
224
225 if (attached)
226 ctrl1 = val;
227 else
228 ctrl1 = CONTROL1_SW_OPEN;
229
230 ret = regmap_update_bits(max77843->regmap_muic,
231 MAX77843_MUIC_REG_CONTROL1,
232 CONTROL1_COM_SW, ctrl1);
233 if (ret < 0) {
234 dev_err(info->dev, "Cannot switch MUIC port\n");
235 return ret;
236 }
237
238 if (attached)
239 ctrl2 = MAX77843_MUIC_CONTROL2_CPEN_MASK;
240 else
241 ctrl2 = MAX77843_MUIC_CONTROL2_LOWPWR_MASK;
242
243 ret = regmap_update_bits(max77843->regmap_muic,
244 MAX77843_MUIC_REG_CONTROL2,
245 MAX77843_MUIC_CONTROL2_LOWPWR_MASK |
246 MAX77843_MUIC_CONTROL2_CPEN_MASK, ctrl2);
247 if (ret < 0) {
248 dev_err(info->dev, "Cannot update lowpower mode\n");
249 return ret;
250 }
251
252 dev_dbg(info->dev,
253 "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
254 ctrl1, ctrl2, attached ? "attached" : "detached");
255
256 return 0;
257}
258
259static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
260 enum max77843_muic_cable_group group, bool *attached)
261{
262 int adc, chg_type, cable_type, gnd_type;
263
264 adc = info->status[MAX77843_MUIC_STATUS1] &
265 MAX77843_MUIC_STATUS1_ADC_MASK;
266 adc >>= STATUS1_ADC_SHIFT;
267
268 switch (group) {
269 case MAX77843_CABLE_GROUP_ADC:
270 if (adc == MAX77843_MUIC_ADC_OPEN) {
271 *attached = false;
272 cable_type = info->prev_cable_type;
273 info->prev_cable_type = MAX77843_MUIC_ADC_OPEN;
274 } else {
275 *attached = true;
276 cable_type = info->prev_cable_type = adc;
277 }
278 break;
279 case MAX77843_CABLE_GROUP_CHG:
280 chg_type = info->status[MAX77843_MUIC_STATUS2] &
281 MAX77843_MUIC_STATUS2_CHGTYP_MASK;
282
283 /* Check GROUND accessory with charger cable */
284 if (adc == MAX77843_MUIC_ADC_GROUND) {
285 if (chg_type == MAX77843_MUIC_CHG_NONE) {
 286			/* This state occurs when the charger cable is
 287			 * disconnected but the GROUND accessory is still
 288			 * connected */
289 *attached = false;
290 cable_type = info->prev_chg_type;
291 info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
292 } else {
293
 294			/* This state occurs when the charger cable is
 295			 * connected to the GROUND accessory */
296 *attached = true;
297 cable_type = MAX77843_MUIC_CHG_GND;
298 info->prev_chg_type = MAX77843_MUIC_CHG_GND;
299 }
300 break;
301 }
302
303 if (chg_type == MAX77843_MUIC_CHG_NONE) {
304 *attached = false;
305 cable_type = info->prev_chg_type;
306 info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
307 } else {
308 *attached = true;
309 cable_type = info->prev_chg_type = chg_type;
310 }
311 break;
312 case MAX77843_CABLE_GROUP_ADC_GND:
313 if (adc == MAX77843_MUIC_ADC_OPEN) {
314 *attached = false;
315 cable_type = info->prev_gnd_type;
316 info->prev_gnd_type = MAX77843_MUIC_ADC_OPEN;
317 } else {
318 *attached = true;
319
320 /* Offset|ADC1K|VBVolt|
321 * 0x1| 0| 0| USB-HOST
322 * 0x1| 0| 1| USB-HOST with VB
323 * 0x1| 1| 0| MHL
324 * 0x1| 1| 1| MHL with VB */
325 /* Get ADC1K register bit */
326 gnd_type = (info->status[MAX77843_MUIC_STATUS1] &
327 MAX77843_MUIC_STATUS1_ADC1K_MASK);
328
329 /* Get VBVolt register bit */
330 gnd_type |= (info->status[MAX77843_MUIC_STATUS2] &
331 MAX77843_MUIC_STATUS2_VBVOLT_MASK);
332 gnd_type >>= STATUS2_VBVOLT_SHIFT;
333
334 /* Offset of GND cable */
335 gnd_type |= MAX77843_MUIC_GND_USB_HOST;
336 cable_type = info->prev_gnd_type = gnd_type;
337 }
338 break;
339 default:
340 dev_err(info->dev, "Unknown cable group (%d)\n", group);
341 cable_type = -EINVAL;
342 break;
343 }
344
345 return cable_type;
346}
347
348static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
349{
350 int ret, gnd_cable_type;
351 bool attached;
352
353 gnd_cable_type = max77843_muic_get_cable_type(info,
354 MAX77843_CABLE_GROUP_ADC_GND, &attached);
355 dev_dbg(info->dev, "external connector is %s (gnd:0x%02x)\n",
356 attached ? "attached" : "detached", gnd_cable_type);
357
358 switch (gnd_cable_type) {
359 case MAX77843_MUIC_GND_USB_HOST:
360 case MAX77843_MUIC_GND_USB_HOST_VB:
361 ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
362 if (ret < 0)
363 return ret;
364
365 extcon_set_cable_state(info->edev, "USB-HOST", attached);
366 break;
367 case MAX77843_MUIC_GND_MHL_VB:
368 case MAX77843_MUIC_GND_MHL:
369 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
370 if (ret < 0)
371 return ret;
372
373 extcon_set_cable_state(info->edev, "MHL", attached);
374 break;
375 default:
 376		dev_err(info->dev, "failed to detect %s accessory (gnd:0x%x)\n",
377 attached ? "attached" : "detached", gnd_cable_type);
378 return -EINVAL;
379 }
380
381 return 0;
382}
383
384static int max77843_muic_jig_handler(struct max77843_muic_info *info,
385 int cable_type, bool attached)
386{
387 int ret;
388
389 dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n",
390 attached ? "attached" : "detached", cable_type);
391
392 switch (cable_type) {
393 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
394 ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
395 if (ret < 0)
396 return ret;
397 extcon_set_cable_state(info->edev, "JIG-USB-OFF", attached);
398 break;
399 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
400 ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
401 if (ret < 0)
402 return ret;
403 extcon_set_cable_state(info->edev, "JIG-USB-ON", attached);
404 break;
405 case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
406 ret = max77843_muic_set_path(info, CONTROL1_SW_UART, attached);
407 if (ret < 0)
408 return ret;
409 extcon_set_cable_state(info->edev, "JIG-UART-OFF", attached);
410 break;
411 default:
412 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
413 if (ret < 0)
414 return ret;
415 break;
416 }
417
418 return 0;
419}
420
421static int max77843_muic_adc_handler(struct max77843_muic_info *info)
422{
423 int ret, cable_type;
424 bool attached;
425
426 cable_type = max77843_muic_get_cable_type(info,
427 MAX77843_CABLE_GROUP_ADC, &attached);
428
429 dev_dbg(info->dev,
430 "external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
431 attached ? "attached" : "detached", cable_type,
432 info->prev_cable_type);
433
434 switch (cable_type) {
435 case MAX77843_MUIC_ADC_GROUND:
436 ret = max77843_muic_adc_gnd_handler(info);
437 if (ret < 0)
438 return ret;
439 break;
440 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
441 case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
442 case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
443 ret = max77843_muic_jig_handler(info, cable_type, attached);
444 if (ret < 0)
445 return ret;
446 break;
447 case MAX77843_MUIC_ADC_SEND_END_BUTTON:
448 case MAX77843_MUIC_ADC_REMOTE_S1_BUTTON:
449 case MAX77843_MUIC_ADC_REMOTE_S2_BUTTON:
450 case MAX77843_MUIC_ADC_REMOTE_S3_BUTTON:
451 case MAX77843_MUIC_ADC_REMOTE_S4_BUTTON:
452 case MAX77843_MUIC_ADC_REMOTE_S5_BUTTON:
453 case MAX77843_MUIC_ADC_REMOTE_S6_BUTTON:
454 case MAX77843_MUIC_ADC_REMOTE_S7_BUTTON:
455 case MAX77843_MUIC_ADC_REMOTE_S8_BUTTON:
456 case MAX77843_MUIC_ADC_REMOTE_S9_BUTTON:
457 case MAX77843_MUIC_ADC_REMOTE_S10_BUTTON:
458 case MAX77843_MUIC_ADC_REMOTE_S11_BUTTON:
459 case MAX77843_MUIC_ADC_REMOTE_S12_BUTTON:
460 case MAX77843_MUIC_ADC_RESERVED_ACC_1:
461 case MAX77843_MUIC_ADC_RESERVED_ACC_2:
462 case MAX77843_MUIC_ADC_RESERVED_ACC_3:
463 case MAX77843_MUIC_ADC_RESERVED_ACC_4:
464 case MAX77843_MUIC_ADC_RESERVED_ACC_5:
465 case MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2:
466 case MAX77843_MUIC_ADC_PHONE_POWERED_DEV:
467 case MAX77843_MUIC_ADC_TTY_CONVERTER:
468 case MAX77843_MUIC_ADC_UART_CABLE:
469 case MAX77843_MUIC_ADC_CEA936A_TYPE1_CHG:
470 case MAX77843_MUIC_ADC_AV_CABLE_NOLOAD:
471 case MAX77843_MUIC_ADC_CEA936A_TYPE2_CHG:
472 case MAX77843_MUIC_ADC_FACTORY_MODE_UART_ON:
473 case MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1:
474 case MAX77843_MUIC_ADC_OPEN:
475 dev_err(info->dev,
 476			"accessory is %s but it isn't supported (adc:0x%x)\n",
477 attached ? "attached" : "detached", cable_type);
478 return -EAGAIN;
479 default:
480 dev_err(info->dev,
481 "failed to detect %s accessory (adc:0x%x)\n",
482 attached ? "attached" : "detached", cable_type);
483 return -EINVAL;
484 }
485
486 return 0;
487}
488
489static int max77843_muic_chg_handler(struct max77843_muic_info *info)
490{
491 int ret, chg_type, gnd_type;
492 bool attached;
493
494 chg_type = max77843_muic_get_cable_type(info,
495 MAX77843_CABLE_GROUP_CHG, &attached);
496
497 dev_dbg(info->dev,
498 "external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
499 attached ? "attached" : "detached",
500 chg_type, info->prev_chg_type);
501
502 switch (chg_type) {
503 case MAX77843_MUIC_CHG_USB:
504 ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
505 if (ret < 0)
506 return ret;
507
508 extcon_set_cable_state(info->edev, "USB", attached);
509 break;
510 case MAX77843_MUIC_CHG_DOWNSTREAM:
511 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
512 if (ret < 0)
513 return ret;
514
515 extcon_set_cable_state(info->edev,
516 "CHARGER-DOWNSTREAM", attached);
517 break;
518 case MAX77843_MUIC_CHG_DEDICATED:
519 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
520 if (ret < 0)
521 return ret;
522
523 extcon_set_cable_state(info->edev, "TA", attached);
524 break;
525 case MAX77843_MUIC_CHG_SPECIAL_500MA:
526 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
527 if (ret < 0)
528 return ret;
529
 530		extcon_set_cable_state(info->edev, "SLOW-CHARGER", attached);
531 break;
532 case MAX77843_MUIC_CHG_SPECIAL_1A:
533 ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
534 if (ret < 0)
535 return ret;
536
537 extcon_set_cable_state(info->edev, "FAST-CHARGER", attached);
538 break;
539 case MAX77843_MUIC_CHG_GND:
540 gnd_type = max77843_muic_get_cable_type(info,
541 MAX77843_CABLE_GROUP_ADC_GND, &attached);
542
 543		/* Charger cable attached to or detached from the MHL accessory */
544 if (gnd_type == MAX77843_MUIC_GND_MHL_VB)
545 extcon_set_cable_state(info->edev, "MHL-TA", true);
546 else if (gnd_type == MAX77843_MUIC_GND_MHL)
547 extcon_set_cable_state(info->edev, "MHL-TA", false);
548 break;
549 case MAX77843_MUIC_CHG_NONE:
550 break;
551 default:
552 dev_err(info->dev,
553 "failed to detect %s accessory (chg_type:0x%x)\n",
554 attached ? "attached" : "detached", chg_type);
555
556 max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
557 return -EINVAL;
558 }
559
560 return 0;
561}
562
563static void max77843_muic_irq_work(struct work_struct *work)
564{
565 struct max77843_muic_info *info = container_of(work,
566 struct max77843_muic_info, irq_work);
567 struct max77843 *max77843 = info->max77843;
568 int ret = 0;
569
570 mutex_lock(&info->mutex);
571
572 ret = regmap_bulk_read(max77843->regmap_muic,
573 MAX77843_MUIC_REG_STATUS1, info->status,
574 MAX77843_MUIC_STATUS_NUM);
575 if (ret) {
576 dev_err(info->dev, "Cannot read STATUS registers\n");
577 mutex_unlock(&info->mutex);
578 return;
579 }
580
581 if (info->irq_adc) {
582 ret = max77843_muic_adc_handler(info);
583 if (ret)
584 dev_err(info->dev, "Unknown cable type\n");
585 info->irq_adc = false;
586 }
587
588 if (info->irq_chg) {
589 ret = max77843_muic_chg_handler(info);
590 if (ret)
591 dev_err(info->dev, "Unknown charger type\n");
592 info->irq_chg = false;
593 }
594
595 mutex_unlock(&info->mutex);
596}
597
598static irqreturn_t max77843_muic_irq_handler(int irq, void *data)
599{
600 struct max77843_muic_info *info = data;
601 int i, irq_type = -1;
602
603 for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++)
604 if (irq == max77843_muic_irqs[i].virq)
605 irq_type = max77843_muic_irqs[i].irq;
606
607 switch (irq_type) {
608 case MAX77843_MUIC_IRQ_INT1_ADC:
609 case MAX77843_MUIC_IRQ_INT1_ADCERROR:
610 case MAX77843_MUIC_IRQ_INT1_ADC1K:
611 info->irq_adc = true;
612 break;
613 case MAX77843_MUIC_IRQ_INT2_CHGTYP:
614 case MAX77843_MUIC_IRQ_INT2_CHGDETRUN:
615 case MAX77843_MUIC_IRQ_INT2_DCDTMR:
616 case MAX77843_MUIC_IRQ_INT2_DXOVP:
617 case MAX77843_MUIC_IRQ_INT2_VBVOLT:
618 info->irq_chg = true;
619 break;
620 case MAX77843_MUIC_IRQ_INT3_VBADC:
621 case MAX77843_MUIC_IRQ_INT3_VDNMON:
622 case MAX77843_MUIC_IRQ_INT3_DNRES:
623 case MAX77843_MUIC_IRQ_INT3_MPNACK:
624 case MAX77843_MUIC_IRQ_INT3_MRXBUFOW:
625 case MAX77843_MUIC_IRQ_INT3_MRXTRF:
626 case MAX77843_MUIC_IRQ_INT3_MRXPERR:
627 case MAX77843_MUIC_IRQ_INT3_MRXRDY:
628 break;
629 default:
630 dev_err(info->dev, "Cannot recognize IRQ(%d)\n", irq_type);
631 break;
632 }
633
634 schedule_work(&info->irq_work);
635
636 return IRQ_HANDLED;
637}
638
639static void max77843_muic_detect_cable_wq(struct work_struct *work)
640{
641 struct max77843_muic_info *info = container_of(to_delayed_work(work),
642 struct max77843_muic_info, wq_detcable);
643 struct max77843 *max77843 = info->max77843;
644 int chg_type, adc, ret;
645 bool attached;
646
647 mutex_lock(&info->mutex);
648
649 ret = regmap_bulk_read(max77843->regmap_muic,
650 MAX77843_MUIC_REG_STATUS1, info->status,
651 MAX77843_MUIC_STATUS_NUM);
652 if (ret) {
653 dev_err(info->dev, "Cannot read STATUS registers\n");
654 goto err_cable_wq;
655 }
656
657 adc = max77843_muic_get_cable_type(info,
658 MAX77843_CABLE_GROUP_ADC, &attached);
659 if (attached && adc != MAX77843_MUIC_ADC_OPEN) {
660 ret = max77843_muic_adc_handler(info);
661 if (ret < 0) {
662 dev_err(info->dev, "Cannot detect accessory\n");
663 goto err_cable_wq;
664 }
665 }
666
667 chg_type = max77843_muic_get_cable_type(info,
668 MAX77843_CABLE_GROUP_CHG, &attached);
669 if (attached && chg_type != MAX77843_MUIC_CHG_NONE) {
670 ret = max77843_muic_chg_handler(info);
671 if (ret < 0) {
672 dev_err(info->dev, "Cannot detect charger accessory\n");
673 goto err_cable_wq;
674 }
675 }
676
677err_cable_wq:
678 mutex_unlock(&info->mutex);
679}
680
681static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
682 enum max77843_muic_adc_debounce_time time)
683{
684 struct max77843 *max77843 = info->max77843;
685 int ret;
686
687 switch (time) {
688 case MAX77843_DEBOUNCE_TIME_5MS:
689 case MAX77843_DEBOUNCE_TIME_10MS:
690 case MAX77843_DEBOUNCE_TIME_25MS:
691 case MAX77843_DEBOUNCE_TIME_38_62MS:
692 ret = regmap_update_bits(max77843->regmap_muic,
693 MAX77843_MUIC_REG_CONTROL4,
694 MAX77843_MUIC_CONTROL4_ADCDBSET_MASK,
695 time << CONTROL4_ADCDBSET_SHIFT);
696 if (ret < 0) {
697 dev_err(info->dev, "Cannot write MUIC regmap\n");
698 return ret;
699 }
700 break;
701 default:
702 dev_err(info->dev, "Invalid ADC debounce time\n");
703 return -EINVAL;
704 }
705
706 return 0;
707}
708
709static int max77843_init_muic_regmap(struct max77843 *max77843)
710{
711 int ret;
712
713 max77843->i2c_muic = i2c_new_dummy(max77843->i2c->adapter,
714 I2C_ADDR_MUIC);
715 if (!max77843->i2c_muic) {
716 dev_err(&max77843->i2c->dev,
717 "Cannot allocate I2C device for MUIC\n");
718 return -ENOMEM;
719 }
720
721 i2c_set_clientdata(max77843->i2c_muic, max77843);
722
723 max77843->regmap_muic = devm_regmap_init_i2c(max77843->i2c_muic,
724 &max77843_muic_regmap_config);
725 if (IS_ERR(max77843->regmap_muic)) {
726 ret = PTR_ERR(max77843->regmap_muic);
727 goto err_muic_i2c;
728 }
729
730 ret = regmap_add_irq_chip(max77843->regmap_muic, max77843->irq,
731 IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
732 0, &max77843_muic_irq_chip, &max77843->irq_data_muic);
733 if (ret < 0) {
734 dev_err(&max77843->i2c->dev, "Cannot add MUIC IRQ chip\n");
735 goto err_muic_i2c;
736 }
737
738 return 0;
739
740err_muic_i2c:
741 i2c_unregister_device(max77843->i2c_muic);
742
743 return ret;
744}
745
746static int max77843_muic_probe(struct platform_device *pdev)
747{
748 struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
749 struct max77843_muic_info *info;
750 unsigned int id;
751 int i, ret;
752
753 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
754 if (!info)
755 return -ENOMEM;
756
757 info->dev = &pdev->dev;
758 info->max77843 = max77843;
759
760 platform_set_drvdata(pdev, info);
761 mutex_init(&info->mutex);
762
763 /* Initialize i2c and regmap */
764 ret = max77843_init_muic_regmap(max77843);
765 if (ret) {
766 dev_err(&pdev->dev, "Failed to init MUIC regmap\n");
767 return ret;
768 }
769
770 /* Turn off auto detection configuration */
771 ret = regmap_update_bits(max77843->regmap_muic,
772 MAX77843_MUIC_REG_CONTROL4,
773 MAX77843_MUIC_CONTROL4_USBAUTO_MASK |
774 MAX77843_MUIC_CONTROL4_FCTAUTO_MASK,
775 CONTROL4_AUTO_DISABLE);
776
777 /* Initialize extcon device */
778 info->edev = devm_extcon_dev_allocate(&pdev->dev,
779 max77843_extcon_cable);
780 if (IS_ERR(info->edev)) {
781 dev_err(&pdev->dev, "Failed to allocate memory for extcon\n");
782 ret = -ENODEV;
783 goto err_muic_irq;
784 }
785
786 ret = devm_extcon_dev_register(&pdev->dev, info->edev);
787 if (ret) {
788 dev_err(&pdev->dev, "Failed to register extcon device\n");
789 goto err_muic_irq;
790 }
791
792 /* Set ADC debounce time */
793 max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS);
794
795 /* Set initial path for UART */
796 max77843_muic_set_path(info, CONTROL1_SW_UART, true);
797
798 /* Check revision number of MUIC device */
799 ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id);
800 if (ret < 0) {
801 dev_err(&pdev->dev, "Failed to read revision number\n");
802 goto err_muic_irq;
803 }
804 dev_info(info->dev, "MUIC device ID : 0x%x\n", id);
805
806 /* Support virtual irq domain for max77843 MUIC device */
807 INIT_WORK(&info->irq_work, max77843_muic_irq_work);
808
809 for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) {
810 struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i];
811 unsigned int virq = 0;
812
813 virq = regmap_irq_get_virq(max77843->irq_data_muic,
814 muic_irq->irq);
815 if (virq <= 0) {
816 ret = -EINVAL;
817 goto err_muic_irq;
818 }
819 muic_irq->virq = virq;
820
821 ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
822 max77843_muic_irq_handler, IRQF_NO_SUSPEND,
823 muic_irq->name, info);
824 if (ret) {
825 dev_err(&pdev->dev,
826 "Failed to request irq (IRQ: %d, error: %d)\n",
827 muic_irq->irq, ret);
828 goto err_muic_irq;
829 }
830 }
831
 832	/* Detect accessory once platform initialization has completed */
833 INIT_DELAYED_WORK(&info->wq_detcable, max77843_muic_detect_cable_wq);
834 queue_delayed_work(system_power_efficient_wq,
835 &info->wq_detcable, msecs_to_jiffies(DELAY_MS_DEFAULT));
836
837 return 0;
838
839err_muic_irq:
840 regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic);
841 i2c_unregister_device(max77843->i2c_muic);
842
843 return ret;
844}
845
846static int max77843_muic_remove(struct platform_device *pdev)
847{
848 struct max77843_muic_info *info = platform_get_drvdata(pdev);
849 struct max77843 *max77843 = info->max77843;
850
851 cancel_work_sync(&info->irq_work);
852 regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic);
853 i2c_unregister_device(max77843->i2c_muic);
854
855 return 0;
856}
857
858static const struct platform_device_id max77843_muic_id[] = {
859 { "max77843-muic", },
860 { /* sentinel */ },
861};
862MODULE_DEVICE_TABLE(platform, max77843_muic_id);
863
864static struct platform_driver max77843_muic_driver = {
865 .driver = {
866 .name = "max77843-muic",
867 },
868 .probe = max77843_muic_probe,
869 .remove = max77843_muic_remove,
870 .id_table = max77843_muic_id,
871};
872
873static int __init max77843_muic_init(void)
874{
875 return platform_driver_register(&max77843_muic_driver);
876}
877subsys_initcall(max77843_muic_init);
878
879MODULE_DESCRIPTION("Maxim MAX77843 Extcon driver");
880MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
881MODULE_LICENSE("GPL");
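
For illustration only: a minimal sketch of how another in-kernel driver might consume the cable names this MUIC driver exposes, using the extcon_register_interest() API touched elsewhere in this series. The extcon device name "max77843-muic" and the module wiring are assumptions, not part of the patch.

#include <linux/extcon.h>
#include <linux/module.h>
#include <linux/notifier.h>

static struct extcon_specific_cable_nb usb_cable_nb;

/* Called whenever the "USB" cable state changes; state is 0 or 1. */
static int usb_cable_notifier(struct notifier_block *nb,
			      unsigned long state, void *ptr)
{
	pr_info("max77843 USB cable %s\n", state ? "attached" : "detached");
	return NOTIFY_OK;
}

static struct notifier_block usb_cable_notify = {
	.notifier_call = usb_cable_notifier,
};

static int __init usb_cable_watch_init(void)
{
	/* "max77843-muic" is assumed to be the extcon device name here. */
	return extcon_register_interest(&usb_cable_nb, "max77843-muic",
					"USB", &usb_cable_notify);
}
module_init(usb_cable_watch_init);

static void __exit usb_cable_watch_exit(void)
{
	extcon_unregister_interest(&usb_cable_nb);
}
module_exit(usb_cable_watch_exit);

MODULE_LICENSE("GPL");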
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index fc1678fa95c4..5774e56c6422 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -579,8 +579,6 @@ static void max8997_muic_irq_work(struct work_struct *work)
579 dev_err(info->dev, "failed to handle MUIC interrupt\n"); 579 dev_err(info->dev, "failed to handle MUIC interrupt\n");
580 580
581 mutex_unlock(&info->mutex); 581 mutex_unlock(&info->mutex);
582
583 return;
584} 582}
585 583
586static irqreturn_t max8997_muic_irq_handler(int irq, void *data) 584static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
@@ -689,8 +687,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
689 muic_irq->name, info); 687 muic_irq->name, info);
690 if (ret) { 688 if (ret) {
691 dev_err(&pdev->dev, 689 dev_err(&pdev->dev,
692 "failed: irq request (IRQ: %d," 690 "failed: irq request (IRQ: %d, error :%d)\n",
693 " error :%d)\n",
694 muic_irq->irq, ret); 691 muic_irq->irq, ret);
695 goto err_irq; 692 goto err_irq;
696 } 693 }
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index a784b2d5ee72..9ccd5af89d1c 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -582,10 +582,8 @@ static int rt8973a_muic_i2c_probe(struct i2c_client *i2c,
582 return -EINVAL; 582 return -EINVAL;
583 583
584 info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL); 584 info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
585 if (!info) { 585 if (!info)
586 dev_err(&i2c->dev, "failed to allocate memory\n");
587 return -ENOMEM; 586 return -ENOMEM;
588 }
589 i2c_set_clientdata(i2c, info); 587 i2c_set_clientdata(i2c, info);
590 588
591 info->dev = &i2c->dev; 589 info->dev = &i2c->dev;
@@ -681,7 +679,7 @@ static int rt8973a_muic_i2c_remove(struct i2c_client *i2c)
681 return 0; 679 return 0;
682} 680}
683 681
684static struct of_device_id rt8973a_dt_match[] = { 682static const struct of_device_id rt8973a_dt_match[] = {
685 { .compatible = "richtek,rt8973a-muic" }, 683 { .compatible = "richtek,rt8973a-muic" },
686 { }, 684 { },
687}; 685};
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index b0f7bd82af90..2f93cf307852 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -359,8 +359,8 @@ static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
359 break; 359 break;
360 default: 360 default:
361 dev_dbg(info->dev, 361 dev_dbg(info->dev,
362 "cannot identify the cable type: adc(0x%x) " 362 "cannot identify the cable type: adc(0x%x)\n",
363 "dev_type1(0x%x)\n", adc, dev_type1); 363 adc);
364 return -EINVAL; 364 return -EINVAL;
365 }; 365 };
366 break; 366 break;
@@ -659,7 +659,7 @@ static int sm5502_muic_i2c_remove(struct i2c_client *i2c)
659 return 0; 659 return 0;
660} 660}
661 661
662static struct of_device_id sm5502_dt_match[] = { 662static const struct of_device_id sm5502_dt_match[] = {
663 { .compatible = "siliconmitus,sm5502-muic" }, 663 { .compatible = "siliconmitus,sm5502-muic" },
664 { }, 664 { },
665}; 665};
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
new file mode 100644
index 000000000000..de67fce18984
--- /dev/null
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -0,0 +1,237 @@
1/**
2 * drivers/extcon/extcon-usb-gpio.c - USB GPIO extcon driver
3 *
4 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
5 * Author: Roger Quadros <rogerq@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/extcon.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/irq.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/of_gpio.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26#include <linux/workqueue.h>
27
28#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
29
30struct usb_extcon_info {
31 struct device *dev;
32 struct extcon_dev *edev;
33
34 struct gpio_desc *id_gpiod;
35 int id_irq;
36
37 unsigned long debounce_jiffies;
38 struct delayed_work wq_detcable;
39};
40
41/* List of detectable cables */
42enum {
43 EXTCON_CABLE_USB = 0,
44 EXTCON_CABLE_USB_HOST,
45
46 EXTCON_CABLE_END,
47};
48
49static const char *usb_extcon_cable[] = {
50 [EXTCON_CABLE_USB] = "USB",
51 [EXTCON_CABLE_USB_HOST] = "USB-HOST",
52 NULL,
53};
54
55static void usb_extcon_detect_cable(struct work_struct *work)
56{
57 int id;
58 struct usb_extcon_info *info = container_of(to_delayed_work(work),
59 struct usb_extcon_info,
60 wq_detcable);
61
62 /* check ID and update cable state */
63 id = gpiod_get_value_cansleep(info->id_gpiod);
64 if (id) {
65 /*
66 * ID = 1 means USB HOST cable detached.
 67	 * As we don't have an event for USB peripheral cable attach,
 68	 * we simulate a USB peripheral attach here.
69 */
70 extcon_set_cable_state(info->edev,
71 usb_extcon_cable[EXTCON_CABLE_USB_HOST],
72 false);
73 extcon_set_cable_state(info->edev,
74 usb_extcon_cable[EXTCON_CABLE_USB],
75 true);
76 } else {
77 /*
78 * ID = 0 means USB HOST cable attached.
 79	 * As we don't have an event for USB peripheral cable detach,
 80	 * we simulate a USB peripheral detach here.
81 */
82 extcon_set_cable_state(info->edev,
83 usb_extcon_cable[EXTCON_CABLE_USB],
84 false);
85 extcon_set_cable_state(info->edev,
86 usb_extcon_cable[EXTCON_CABLE_USB_HOST],
87 true);
88 }
89}
90
91static irqreturn_t usb_irq_handler(int irq, void *dev_id)
92{
93 struct usb_extcon_info *info = dev_id;
94
95 queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
96 info->debounce_jiffies);
97
98 return IRQ_HANDLED;
99}
100
101static int usb_extcon_probe(struct platform_device *pdev)
102{
103 struct device *dev = &pdev->dev;
104 struct device_node *np = dev->of_node;
105 struct usb_extcon_info *info;
106 int ret;
107
108 if (!np)
109 return -EINVAL;
110
111 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
112 if (!info)
113 return -ENOMEM;
114
115 info->dev = dev;
116 info->id_gpiod = devm_gpiod_get(&pdev->dev, "id");
117 if (IS_ERR(info->id_gpiod)) {
118 dev_err(dev, "failed to get ID GPIO\n");
119 return PTR_ERR(info->id_gpiod);
120 }
121
122 ret = gpiod_set_debounce(info->id_gpiod,
123 USB_GPIO_DEBOUNCE_MS * 1000);
124 if (ret < 0)
125 info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS);
126
127 INIT_DELAYED_WORK(&info->wq_detcable, usb_extcon_detect_cable);
128
129 info->id_irq = gpiod_to_irq(info->id_gpiod);
130 if (info->id_irq < 0) {
131 dev_err(dev, "failed to get ID IRQ\n");
132 return info->id_irq;
133 }
134
135 ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
136 usb_irq_handler,
137 IRQF_TRIGGER_RISING |
138 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
139 pdev->name, info);
140 if (ret < 0) {
141 dev_err(dev, "failed to request handler for ID IRQ\n");
142 return ret;
143 }
144
145 info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
146 if (IS_ERR(info->edev)) {
147 dev_err(dev, "failed to allocate extcon device\n");
148 return -ENOMEM;
149 }
150
151 ret = devm_extcon_dev_register(dev, info->edev);
152 if (ret < 0) {
153 dev_err(dev, "failed to register extcon device\n");
154 return ret;
155 }
156
157 platform_set_drvdata(pdev, info);
158 device_init_wakeup(dev, 1);
159
160 /* Perform initial detection */
161 usb_extcon_detect_cable(&info->wq_detcable.work);
162
163 return 0;
164}
165
166static int usb_extcon_remove(struct platform_device *pdev)
167{
168 struct usb_extcon_info *info = platform_get_drvdata(pdev);
169
170 cancel_delayed_work_sync(&info->wq_detcable);
171
172 return 0;
173}
174
175#ifdef CONFIG_PM_SLEEP
176static int usb_extcon_suspend(struct device *dev)
177{
178 struct usb_extcon_info *info = dev_get_drvdata(dev);
179 int ret = 0;
180
181 if (device_may_wakeup(dev)) {
182 ret = enable_irq_wake(info->id_irq);
183 if (ret)
184 return ret;
185 }
186
187 /*
188 * We don't want to process any IRQs after this point
189 * as GPIOs used behind I2C subsystem might not be
190 * accessible until resume completes. So disable IRQ.
191 */
192 disable_irq(info->id_irq);
193
194 return ret;
195}
196
197static int usb_extcon_resume(struct device *dev)
198{
199 struct usb_extcon_info *info = dev_get_drvdata(dev);
200 int ret = 0;
201
202 if (device_may_wakeup(dev)) {
203 ret = disable_irq_wake(info->id_irq);
204 if (ret)
205 return ret;
206 }
207
208 enable_irq(info->id_irq);
209
210 return ret;
211}
212#endif
213
214static SIMPLE_DEV_PM_OPS(usb_extcon_pm_ops,
215 usb_extcon_suspend, usb_extcon_resume);
216
217static const struct of_device_id usb_extcon_dt_match[] = {
218 { .compatible = "linux,extcon-usb-gpio", },
219 { /* sentinel */ }
220};
221MODULE_DEVICE_TABLE(of, usb_extcon_dt_match);
222
223static struct platform_driver usb_extcon_driver = {
224 .probe = usb_extcon_probe,
225 .remove = usb_extcon_remove,
226 .driver = {
227 .name = "extcon-usb-gpio",
228 .pm = &usb_extcon_pm_ops,
229 .of_match_table = usb_extcon_dt_match,
230 },
231};
232
233module_platform_driver(usb_extcon_driver);
234
235MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
236MODULE_DESCRIPTION("USB GPIO extcon driver");
237MODULE_LICENSE("GPL v2");
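
For illustration only: a hedged sketch of a consumer that reads the ID-pin state this driver reports. The "example,usb-glue" compatible string and device names are hypothetical; the lookup assumes the consumer node carries an "extcon" phandle to the extcon-usb-gpio node.

#include <linux/extcon.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int usb_glue_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;
	int host;

	/* Assumes an "extcon" phandle pointing at the extcon-usb-gpio node. */
	edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* Cable names match the usb_extcon_cable[] table above. */
	host = extcon_get_cable_state(edev, "USB-HOST");
	dev_info(&pdev->dev, "starting in %s mode\n",
		 host ? "host" : "peripheral");

	return 0;
}

static const struct of_device_id usb_glue_of_match[] = {
	{ .compatible = "example,usb-glue" },	/* hypothetical */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, usb_glue_of_match);

static struct platform_driver usb_glue_driver = {
	.probe = usb_glue_probe,
	.driver = {
		.name = "example-usb-glue",
		.of_match_table = usb_glue_of_match,
	},
};
module_platform_driver(usb_glue_driver);

MODULE_LICENSE("GPL v2");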
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon.c
index 8319f25b7145..4c9f165e4a04 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon.c
@@ -158,6 +158,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
158 /* Optional callback given by the user */ 158 /* Optional callback given by the user */
159 if (edev->print_name) { 159 if (edev->print_name) {
160 int ret = edev->print_name(edev, buf); 160 int ret = edev->print_name(edev, buf);
161
161 if (ret >= 0) 162 if (ret >= 0)
162 return ret; 163 return ret;
163 } 164 }
@@ -444,6 +445,9 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
444 const char *extcon_name, const char *cable_name, 445 const char *extcon_name, const char *cable_name,
445 struct notifier_block *nb) 446 struct notifier_block *nb)
446{ 447{
448 unsigned long flags;
449 int ret;
450
447 if (!obj || !cable_name || !nb) 451 if (!obj || !cable_name || !nb)
448 return -EINVAL; 452 return -EINVAL;
449 453
@@ -461,8 +465,11 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
461 465
462 obj->internal_nb.notifier_call = _call_per_cable; 466 obj->internal_nb.notifier_call = _call_per_cable;
463 467
464 return raw_notifier_chain_register(&obj->edev->nh, 468 spin_lock_irqsave(&obj->edev->lock, flags);
469 ret = raw_notifier_chain_register(&obj->edev->nh,
465 &obj->internal_nb); 470 &obj->internal_nb);
471 spin_unlock_irqrestore(&obj->edev->lock, flags);
472 return ret;
466 } else { 473 } else {
467 struct class_dev_iter iter; 474 struct class_dev_iter iter;
468 struct extcon_dev *extd; 475 struct extcon_dev *extd;
@@ -495,10 +502,17 @@ EXPORT_SYMBOL_GPL(extcon_register_interest);
495 */ 502 */
496int extcon_unregister_interest(struct extcon_specific_cable_nb *obj) 503int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
497{ 504{
505 unsigned long flags;
506 int ret;
507
498 if (!obj) 508 if (!obj)
499 return -EINVAL; 509 return -EINVAL;
500 510
501 return raw_notifier_chain_unregister(&obj->edev->nh, &obj->internal_nb); 511 spin_lock_irqsave(&obj->edev->lock, flags);
512 ret = raw_notifier_chain_unregister(&obj->edev->nh, &obj->internal_nb);
513 spin_unlock_irqrestore(&obj->edev->lock, flags);
514
515 return ret;
502} 516}
503EXPORT_SYMBOL_GPL(extcon_unregister_interest); 517EXPORT_SYMBOL_GPL(extcon_unregister_interest);
504 518
@@ -515,7 +529,14 @@ EXPORT_SYMBOL_GPL(extcon_unregister_interest);
515int extcon_register_notifier(struct extcon_dev *edev, 529int extcon_register_notifier(struct extcon_dev *edev,
516 struct notifier_block *nb) 530 struct notifier_block *nb)
517{ 531{
518 return raw_notifier_chain_register(&edev->nh, nb); 532 unsigned long flags;
533 int ret;
534
535 spin_lock_irqsave(&edev->lock, flags);
536 ret = raw_notifier_chain_register(&edev->nh, nb);
537 spin_unlock_irqrestore(&edev->lock, flags);
538
539 return ret;
519} 540}
520EXPORT_SYMBOL_GPL(extcon_register_notifier); 541EXPORT_SYMBOL_GPL(extcon_register_notifier);
521 542
@@ -527,7 +548,14 @@ EXPORT_SYMBOL_GPL(extcon_register_notifier);
527int extcon_unregister_notifier(struct extcon_dev *edev, 548int extcon_unregister_notifier(struct extcon_dev *edev,
528 struct notifier_block *nb) 549 struct notifier_block *nb)
529{ 550{
530 return raw_notifier_chain_unregister(&edev->nh, nb); 551 unsigned long flags;
552 int ret;
553
554 spin_lock_irqsave(&edev->lock, flags);
555 ret = raw_notifier_chain_unregister(&edev->nh, nb);
556 spin_unlock_irqrestore(&edev->lock, flags);
557
558 return ret;
531} 559}
532EXPORT_SYMBOL_GPL(extcon_unregister_notifier); 560EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
533 561
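
For illustration only: a minimal sketch of the notifier registration path that the spinlock added above now serialises. How the struct extcon_dev pointer is obtained is left to the caller; any edev handed out by the framework would do.

#include <linux/extcon.h>
#include <linux/notifier.h>

static int any_cable_notifier(struct notifier_block *nb,
			      unsigned long state, void *ptr)
{
	/* state carries the device's updated cable state value. */
	return NOTIFY_OK;
}

static struct notifier_block any_cable_nb = {
	.notifier_call = any_cable_notifier,
};

static int watch_extcon(struct extcon_dev *edev)
{
	/* Chain registration now happens under edev->lock. */
	return extcon_register_notifier(edev, &any_cable_nb);
}

static void unwatch_extcon(struct extcon_dev *edev)
{
	extcon_unregister_notifier(edev, &any_cable_nb);
}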
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 2978f5ee8d2a..54da66dc7d16 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -71,7 +71,8 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
71 struct vmbus_channel_msginfo *open_info = NULL; 71 struct vmbus_channel_msginfo *open_info = NULL;
72 void *in, *out; 72 void *in, *out;
73 unsigned long flags; 73 unsigned long flags;
74 int ret, t, err = 0; 74 int ret, err = 0;
75 unsigned long t;
75 76
76 spin_lock_irqsave(&newchannel->lock, flags); 77 spin_lock_irqsave(&newchannel->lock, flags);
77 if (newchannel->state == CHANNEL_OPEN_STATE) { 78 if (newchannel->state == CHANNEL_OPEN_STATE) {
@@ -89,9 +90,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
89 out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 90 out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
90 get_order(send_ringbuffer_size + recv_ringbuffer_size)); 91 get_order(send_ringbuffer_size + recv_ringbuffer_size));
91 92
92 if (!out) 93 if (!out) {
93 return -ENOMEM; 94 err = -ENOMEM;
94 95 goto error0;
96 }
95 97
96 in = (void *)((unsigned long)out + send_ringbuffer_size); 98 in = (void *)((unsigned long)out + send_ringbuffer_size);
97 99
@@ -135,7 +137,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
135 GFP_KERNEL); 137 GFP_KERNEL);
136 if (!open_info) { 138 if (!open_info) {
137 err = -ENOMEM; 139 err = -ENOMEM;
138 goto error0; 140 goto error_gpadl;
139 } 141 }
140 142
141 init_completion(&open_info->waitevent); 143 init_completion(&open_info->waitevent);
@@ -151,7 +153,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
151 153
152 if (userdatalen > MAX_USER_DEFINED_BYTES) { 154 if (userdatalen > MAX_USER_DEFINED_BYTES) {
153 err = -EINVAL; 155 err = -EINVAL;
154 goto error0; 156 goto error_gpadl;
155 } 157 }
156 158
157 if (userdatalen) 159 if (userdatalen)
@@ -195,10 +197,14 @@ error1:
195 list_del(&open_info->msglistentry); 197 list_del(&open_info->msglistentry);
196 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 198 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
197 199
200error_gpadl:
201 vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
202
198error0: 203error0:
199 free_pages((unsigned long)out, 204 free_pages((unsigned long)out,
200 get_order(send_ringbuffer_size + recv_ringbuffer_size)); 205 get_order(send_ringbuffer_size + recv_ringbuffer_size));
201 kfree(open_info); 206 kfree(open_info);
207 newchannel->state = CHANNEL_OPEN_STATE;
202 return err; 208 return err;
203} 209}
204EXPORT_SYMBOL_GPL(vmbus_open); 210EXPORT_SYMBOL_GPL(vmbus_open);
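
For illustration only: the typical shape of a vmbus_open() call whose failure unwinding the hunks above rework (the new error_gpadl label and the CHANNEL_OPEN_STATE reset). The ring size and callback are placeholders.

#include <linux/hyperv.h>

#define SAMPLE_RING_SIZE	(PAGE_SIZE * 4)

static void sample_onchannelcallback(void *context)
{
	/* Drain the inbound ring buffer for this channel here. */
}

static int sample_connect(struct hv_device *dev)
{
	/* On failure vmbus_open() now tears down the GPADL and restores
	 * CHANNEL_OPEN_STATE, so the channel is left in a retryable state. */
	return vmbus_open(dev->channel, SAMPLE_RING_SIZE, SAMPLE_RING_SIZE,
			  NULL, 0, sample_onchannelcallback, dev);
}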
@@ -534,6 +540,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
534 free_pages((unsigned long)channel->ringbuffer_pages, 540 free_pages((unsigned long)channel->ringbuffer_pages,
535 get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); 541 get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
536 542
543 /*
544 * If the channel has been rescinded; process device removal.
545 */
546 if (channel->rescind)
547 hv_process_channel_removal(channel,
548 channel->offermsg.child_relid);
537 return ret; 549 return ret;
538} 550}
539 551
@@ -569,23 +581,9 @@ void vmbus_close(struct vmbus_channel *channel)
569} 581}
570EXPORT_SYMBOL_GPL(vmbus_close); 582EXPORT_SYMBOL_GPL(vmbus_close);
571 583
572/** 584int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
573 * vmbus_sendpacket() - Send the specified buffer on the given channel
574 * @channel: Pointer to vmbus_channel structure.
575 * @buffer: Pointer to the buffer you want to receive the data into.
576 * @bufferlen: Maximum size of what the the buffer will hold
577 * @requestid: Identifier of the request
578 * @type: Type of packet that is being send e.g. negotiate, time
579 * packet etc.
580 *
581 * Sends data in @buffer directly to hyper-v via the vmbus
582 * This will send the data unparsed to hyper-v.
583 *
584 * Mainly used by Hyper-V drivers.
585 */
586int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
587 u32 bufferlen, u64 requestid, 585 u32 bufferlen, u64 requestid,
588 enum vmbus_packet_type type, u32 flags) 586 enum vmbus_packet_type type, u32 flags, bool kick_q)
589{ 587{
590 struct vmpacket_descriptor desc; 588 struct vmpacket_descriptor desc;
591 u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen; 589 u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
@@ -613,21 +611,61 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
613 611
614 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); 612 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
615 613
616 if (ret == 0 && signal) 614 /*
615 * Signalling the host is conditional on many factors:
616 * 1. The ring state changed from being empty to non-empty.
617 * This is tracked by the variable "signal".
618 * 2. The variable kick_q tracks if more data will be placed
619 * on the ring. We will not signal if more data is
620 * to be placed.
621 *
622 * If we cannot write to the ring-buffer; signal the host
623 * even if we may not have written anything. This is a rare
624 * enough condition that it should not matter.
625 */
626 if (((ret == 0) && kick_q && signal) || (ret))
617 vmbus_setevent(channel); 627 vmbus_setevent(channel);
618 628
619 return ret; 629 return ret;
620} 630}
631EXPORT_SYMBOL(vmbus_sendpacket_ctl);
632
633/**
634 * vmbus_sendpacket() - Send the specified buffer on the given channel
635 * @channel: Pointer to vmbus_channel structure.
 636 * @buffer: Pointer to the buffer containing the data to send.
 637 * @bufferlen: Maximum size of what the buffer will hold
638 * @requestid: Identifier of the request
 639 * @type: Type of packet that is being sent e.g. negotiate, time
640 * packet etc.
641 *
642 * Sends data in @buffer directly to hyper-v via the vmbus
643 * This will send the data unparsed to hyper-v.
644 *
645 * Mainly used by Hyper-V drivers.
646 */
647int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
648 u32 bufferlen, u64 requestid,
649 enum vmbus_packet_type type, u32 flags)
650{
651 return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
652 type, flags, true);
653}
621EXPORT_SYMBOL(vmbus_sendpacket); 654EXPORT_SYMBOL(vmbus_sendpacket);
622 655
623/* 656/*
624 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer 657 * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
625 * packets using a GPADL Direct packet type. 658 * packets using a GPADL Direct packet type. This interface allows you
659 * to control notifying the host. This will be useful for sending
660 * batched data. Also the sender can control the send flags
661 * explicitly.
626 */ 662 */
627int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, 663int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
628 struct hv_page_buffer pagebuffers[], 664 struct hv_page_buffer pagebuffers[],
629 u32 pagecount, void *buffer, u32 bufferlen, 665 u32 pagecount, void *buffer, u32 bufferlen,
630 u64 requestid) 666 u64 requestid,
667 u32 flags,
668 bool kick_q)
631{ 669{
632 int ret; 670 int ret;
633 int i; 671 int i;
@@ -655,7 +693,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
655 693
656 /* Setup the descriptor */ 694 /* Setup the descriptor */
657 desc.type = VM_PKT_DATA_USING_GPA_DIRECT; 695 desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
658 desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; 696 desc.flags = flags;
659	desc.dataoffset8 = descsize >> 3; /* in 8-byte granularity */	697	desc.dataoffset8 = descsize >> 3; /* in 8-byte granularity */
660 desc.length8 = (u16)(packetlen_aligned >> 3); 698 desc.length8 = (u16)(packetlen_aligned >> 3);
661 desc.transactionid = requestid; 699 desc.transactionid = requestid;
@@ -676,11 +714,40 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
676 714
677 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); 715 ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
678 716
679 if (ret == 0 && signal) 717 /*
718 * Signalling the host is conditional on many factors:
719 * 1. The ring state changed from being empty to non-empty.
720 * This is tracked by the variable "signal".
721 * 2. The variable kick_q tracks if more data will be placed
722 * on the ring. We will not signal if more data is
723 * to be placed.
724 *
725 * If we cannot write to the ring-buffer; signal the host
726 * even if we may not have written anything. This is a rare
727 * enough condition that it should not matter.
728 */
729 if (((ret == 0) && kick_q && signal) || (ret))
680 vmbus_setevent(channel); 730 vmbus_setevent(channel);
681 731
682 return ret; 732 return ret;
683} 733}
734EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
735
736/*
737 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
738 * packets using a GPADL Direct packet type.
739 */
740int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
741 struct hv_page_buffer pagebuffers[],
742 u32 pagecount, void *buffer, u32 bufferlen,
743 u64 requestid)
744{
745 u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
746 return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
747 buffer, bufferlen, requestid,
748 flags, true);
749
750}
684EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer); 751EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
685 752
686/* 753/*
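
For illustration only: a hedged sketch of the batching pattern the new kick_q parameter enables — only the final packet in a burst asks vmbus_sendpacket_ctl() to signal the host, per the signalling rules documented above. The buffer/request-id handling is a placeholder.

#include <linux/hyperv.h>

static int sample_send_batch(struct vmbus_channel *channel,
			     void **bufs, u32 *lens, int count)
{
	bool last;
	int i, ret;

	for (i = 0; i < count; i++) {
		last = (i == count - 1);
		ret = vmbus_sendpacket_ctl(channel, bufs[i], lens[i],
				(u64)(unsigned long)bufs[i],
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
				last /* kick_q: signal only on the last packet */);
		if (ret)
			return ret;
	}

	return 0;
}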
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 3736f71bdec5..0eeb1b3bc048 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -32,12 +32,6 @@
32 32
33#include "hyperv_vmbus.h" 33#include "hyperv_vmbus.h"
34 34
35struct vmbus_channel_message_table_entry {
36 enum vmbus_channel_message_type message_type;
37 void (*message_handler)(struct vmbus_channel_message_header *msg);
38};
39
40
41/** 35/**
42 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message 36 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
43 * @icmsghdrp: Pointer to msg header structure 37 * @icmsghdrp: Pointer to msg header structure
@@ -139,54 +133,29 @@ EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
139 */ 133 */
140static struct vmbus_channel *alloc_channel(void) 134static struct vmbus_channel *alloc_channel(void)
141{ 135{
136 static atomic_t chan_num = ATOMIC_INIT(0);
142 struct vmbus_channel *channel; 137 struct vmbus_channel *channel;
143 138
144 channel = kzalloc(sizeof(*channel), GFP_ATOMIC); 139 channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
145 if (!channel) 140 if (!channel)
146 return NULL; 141 return NULL;
147 142
143 channel->id = atomic_inc_return(&chan_num);
148 spin_lock_init(&channel->inbound_lock); 144 spin_lock_init(&channel->inbound_lock);
149 spin_lock_init(&channel->lock); 145 spin_lock_init(&channel->lock);
150 146
151 INIT_LIST_HEAD(&channel->sc_list); 147 INIT_LIST_HEAD(&channel->sc_list);
152 INIT_LIST_HEAD(&channel->percpu_list); 148 INIT_LIST_HEAD(&channel->percpu_list);
153 149
154 channel->controlwq = create_workqueue("hv_vmbus_ctl");
155 if (!channel->controlwq) {
156 kfree(channel);
157 return NULL;
158 }
159
160 return channel; 150 return channel;
161} 151}
162 152
163/* 153/*
164 * release_hannel - Release the vmbus channel object itself
165 */
166static void release_channel(struct work_struct *work)
167{
168 struct vmbus_channel *channel = container_of(work,
169 struct vmbus_channel,
170 work);
171
172 destroy_workqueue(channel->controlwq);
173
174 kfree(channel);
175}
176
177/*
178 * free_channel - Release the resources used by the vmbus channel object 154 * free_channel - Release the resources used by the vmbus channel object
179 */ 155 */
180static void free_channel(struct vmbus_channel *channel) 156static void free_channel(struct vmbus_channel *channel)
181{ 157{
182 158 kfree(channel);
183 /*
184 * We have to release the channel's workqueue/thread in the vmbus's
185 * workqueue/thread context
186 * ie we can't destroy ourselves.
187 */
188 INIT_WORK(&channel->work, release_channel);
189 queue_work(vmbus_connection.work_queue, &channel->work);
190} 159}
191 160
192static void percpu_channel_enq(void *arg) 161static void percpu_channel_enq(void *arg)
@@ -204,33 +173,21 @@ static void percpu_channel_deq(void *arg)
204 list_del(&channel->percpu_list); 173 list_del(&channel->percpu_list);
205} 174}
206 175
207/* 176
208 * vmbus_process_rescind_offer - 177void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
209 * Rescind the offer by initiating a device removal
210 */
211static void vmbus_process_rescind_offer(struct work_struct *work)
212{ 178{
213 struct vmbus_channel *channel = container_of(work, 179 struct vmbus_channel_relid_released msg;
214 struct vmbus_channel,
215 work);
216 unsigned long flags; 180 unsigned long flags;
217 struct vmbus_channel *primary_channel; 181 struct vmbus_channel *primary_channel;
218 struct vmbus_channel_relid_released msg;
219 struct device *dev;
220
221 if (channel->device_obj) {
222 dev = get_device(&channel->device_obj->device);
223 if (dev) {
224 vmbus_device_unregister(channel->device_obj);
225 put_device(dev);
226 }
227 }
228 182
229 memset(&msg, 0, sizeof(struct vmbus_channel_relid_released)); 183 memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
230 msg.child_relid = channel->offermsg.child_relid; 184 msg.child_relid = relid;
231 msg.header.msgtype = CHANNELMSG_RELID_RELEASED; 185 msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
232 vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released)); 186 vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
233 187
188 if (channel == NULL)
189 return;
190
234 if (channel->target_cpu != get_cpu()) { 191 if (channel->target_cpu != get_cpu()) {
235 put_cpu(); 192 put_cpu();
236 smp_call_function_single(channel->target_cpu, 193 smp_call_function_single(channel->target_cpu,
@@ -259,7 +216,6 @@ void vmbus_free_channels(void)
259 216
260 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { 217 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
261 vmbus_device_unregister(channel->device_obj); 218 vmbus_device_unregister(channel->device_obj);
262 kfree(channel->device_obj);
263 free_channel(channel); 219 free_channel(channel);
264 } 220 }
265} 221}
@@ -268,15 +224,11 @@ void vmbus_free_channels(void)
268 * vmbus_process_offer - Process the offer by creating a channel/device 224 * vmbus_process_offer - Process the offer by creating a channel/device
269 * associated with this offer 225 * associated with this offer
270 */ 226 */
271static void vmbus_process_offer(struct work_struct *work) 227static void vmbus_process_offer(struct vmbus_channel *newchannel)
272{ 228{
273 struct vmbus_channel *newchannel = container_of(work,
274 struct vmbus_channel,
275 work);
276 struct vmbus_channel *channel; 229 struct vmbus_channel *channel;
277 bool fnew = true; 230 bool fnew = true;
278 bool enq = false; 231 bool enq = false;
279 int ret;
280 unsigned long flags; 232 unsigned long flags;
281 233
282 /* Make sure this is a new offer */ 234 /* Make sure this is a new offer */
@@ -335,10 +287,11 @@ static void vmbus_process_offer(struct work_struct *work)
335 } 287 }
336 288
337 newchannel->state = CHANNEL_OPEN_STATE; 289 newchannel->state = CHANNEL_OPEN_STATE;
290 channel->num_sc++;
338 if (channel->sc_creation_callback != NULL) 291 if (channel->sc_creation_callback != NULL)
339 channel->sc_creation_callback(newchannel); 292 channel->sc_creation_callback(newchannel);
340 293
341 goto done_init_rescind; 294 return;
342 } 295 }
343 296
344 goto err_free_chan; 297 goto err_free_chan;
@@ -361,33 +314,35 @@ static void vmbus_process_offer(struct work_struct *work)
361 &newchannel->offermsg.offer.if_instance, 314 &newchannel->offermsg.offer.if_instance,
362 newchannel); 315 newchannel);
363 if (!newchannel->device_obj) 316 if (!newchannel->device_obj)
364 goto err_free_chan; 317 goto err_deq_chan;
365 318
366 /* 319 /*
367 * Add the new device to the bus. This will kick off device-driver 320 * Add the new device to the bus. This will kick off device-driver
368 * binding which eventually invokes the device driver's AddDevice() 321 * binding which eventually invokes the device driver's AddDevice()
369 * method. 322 * method.
370 */ 323 */
371 ret = vmbus_device_register(newchannel->device_obj); 324 if (vmbus_device_register(newchannel->device_obj) != 0) {
372 if (ret != 0) {
373 pr_err("unable to add child device object (relid %d)\n", 325 pr_err("unable to add child device object (relid %d)\n",
374 newchannel->offermsg.child_relid); 326 newchannel->offermsg.child_relid);
375
376 spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
377 list_del(&newchannel->listentry);
378 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
379 kfree(newchannel->device_obj); 327 kfree(newchannel->device_obj);
380 goto err_free_chan; 328 goto err_deq_chan;
381 } 329 }
382done_init_rescind:
383 spin_lock_irqsave(&newchannel->lock, flags);
384 /* The next possible work is rescind handling */
385 INIT_WORK(&newchannel->work, vmbus_process_rescind_offer);
386 /* Check if rescind offer was already received */
387 if (newchannel->rescind)
388 queue_work(newchannel->controlwq, &newchannel->work);
389 spin_unlock_irqrestore(&newchannel->lock, flags);
390 return; 330 return;
331
332err_deq_chan:
333 spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
334 list_del(&newchannel->listentry);
335 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
336
337 if (newchannel->target_cpu != get_cpu()) {
338 put_cpu();
339 smp_call_function_single(newchannel->target_cpu,
340 percpu_channel_deq, newchannel, true);
341 } else {
342 percpu_channel_deq(newchannel);
343 put_cpu();
344 }
345
391err_free_chan: 346err_free_chan:
392 free_channel(newchannel); 347 free_channel(newchannel);
393} 348}
@@ -411,6 +366,8 @@ static const struct hv_vmbus_device_id hp_devs[] = {
411 { HV_SCSI_GUID, }, 366 { HV_SCSI_GUID, },
412 /* Network */ 367 /* Network */
413 { HV_NIC_GUID, }, 368 { HV_NIC_GUID, },
369 /* NetworkDirect Guest RDMA */
370 { HV_ND_GUID, },
414}; 371};
415 372
416 373
@@ -511,8 +468,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
511 newchannel->monitor_grp = (u8)offer->monitorid / 32; 468 newchannel->monitor_grp = (u8)offer->monitorid / 32;
512 newchannel->monitor_bit = (u8)offer->monitorid % 32; 469 newchannel->monitor_bit = (u8)offer->monitorid % 32;
513 470
514 INIT_WORK(&newchannel->work, vmbus_process_offer); 471 vmbus_process_offer(newchannel);
515 queue_work(newchannel->controlwq, &newchannel->work);
516} 472}
517 473
518/* 474/*
@@ -525,28 +481,34 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
525 struct vmbus_channel_rescind_offer *rescind; 481 struct vmbus_channel_rescind_offer *rescind;
526 struct vmbus_channel *channel; 482 struct vmbus_channel *channel;
527 unsigned long flags; 483 unsigned long flags;
484 struct device *dev;
528 485
529 rescind = (struct vmbus_channel_rescind_offer *)hdr; 486 rescind = (struct vmbus_channel_rescind_offer *)hdr;
530 channel = relid2channel(rescind->child_relid); 487 channel = relid2channel(rescind->child_relid);
531 488
532 if (channel == NULL) 489 if (channel == NULL) {
533 /* Just return here, no channel found */ 490 hv_process_channel_removal(NULL, rescind->child_relid);
534 return; 491 return;
492 }
535 493
536 spin_lock_irqsave(&channel->lock, flags); 494 spin_lock_irqsave(&channel->lock, flags);
537 channel->rescind = true; 495 channel->rescind = true;
538 /*
539 * channel->work.func != vmbus_process_rescind_offer means we are still
540 * processing offer request and the rescind offer processing should be
541 * postponed. It will be done at the very end of vmbus_process_offer()
542 * as rescind flag is being checked there.
543 */
544 if (channel->work.func == vmbus_process_rescind_offer)
545 /* work is initialized for vmbus_process_rescind_offer() from
546 * vmbus_process_offer() where the channel got created */
547 queue_work(channel->controlwq, &channel->work);
548
549 spin_unlock_irqrestore(&channel->lock, flags); 496 spin_unlock_irqrestore(&channel->lock, flags);
497
498 if (channel->device_obj) {
499 /*
500 * We will have to unregister this device from the
501 * driver core.
502 */
503 dev = get_device(&channel->device_obj->device);
504 if (dev) {
505 vmbus_device_unregister(channel->device_obj);
506 put_device(dev);
507 }
508 } else {
509 hv_process_channel_removal(channel,
510 channel->offermsg.child_relid);
511 }
550} 512}
551 513
552/* 514/*
@@ -731,25 +693,25 @@ static void vmbus_onversion_response(
731} 693}
732 694
733/* Channel message dispatch table */ 695/* Channel message dispatch table */
734static struct vmbus_channel_message_table_entry 696struct vmbus_channel_message_table_entry
735 channel_message_table[CHANNELMSG_COUNT] = { 697 channel_message_table[CHANNELMSG_COUNT] = {
736 {CHANNELMSG_INVALID, NULL}, 698 {CHANNELMSG_INVALID, 0, NULL},
737 {CHANNELMSG_OFFERCHANNEL, vmbus_onoffer}, 699 {CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer},
738 {CHANNELMSG_RESCIND_CHANNELOFFER, vmbus_onoffer_rescind}, 700 {CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind},
739 {CHANNELMSG_REQUESTOFFERS, NULL}, 701 {CHANNELMSG_REQUESTOFFERS, 0, NULL},
740 {CHANNELMSG_ALLOFFERS_DELIVERED, vmbus_onoffers_delivered}, 702 {CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered},
741 {CHANNELMSG_OPENCHANNEL, NULL}, 703 {CHANNELMSG_OPENCHANNEL, 0, NULL},
742 {CHANNELMSG_OPENCHANNEL_RESULT, vmbus_onopen_result}, 704 {CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result},
743 {CHANNELMSG_CLOSECHANNEL, NULL}, 705 {CHANNELMSG_CLOSECHANNEL, 0, NULL},
744 {CHANNELMSG_GPADL_HEADER, NULL}, 706 {CHANNELMSG_GPADL_HEADER, 0, NULL},
745 {CHANNELMSG_GPADL_BODY, NULL}, 707 {CHANNELMSG_GPADL_BODY, 0, NULL},
746 {CHANNELMSG_GPADL_CREATED, vmbus_ongpadl_created}, 708 {CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created},
747 {CHANNELMSG_GPADL_TEARDOWN, NULL}, 709 {CHANNELMSG_GPADL_TEARDOWN, 0, NULL},
748 {CHANNELMSG_GPADL_TORNDOWN, vmbus_ongpadl_torndown}, 710 {CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown},
749 {CHANNELMSG_RELID_RELEASED, NULL}, 711 {CHANNELMSG_RELID_RELEASED, 0, NULL},
750 {CHANNELMSG_INITIATE_CONTACT, NULL}, 712 {CHANNELMSG_INITIATE_CONTACT, 0, NULL},
751 {CHANNELMSG_VERSION_RESPONSE, vmbus_onversion_response}, 713 {CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response},
752 {CHANNELMSG_UNLOAD, NULL}, 714 {CHANNELMSG_UNLOAD, 0, NULL},
753}; 715};
754 716
755/* 717/*
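The new middle column is the handler_type field added to struct vmbus_channel_message_table_entry later in this series of hunks (VMHT_BLOCKING = 0, VMHT_NON_BLOCKING = 1); vmbus_on_msg_dpc() uses it to call non-blocking handlers inline and push blocking ones onto a work queue. A minimal user-space sketch of the same table-driven dispatch, with illustrative message names and a queue_deferred() stand-in for queue_work():

#include <stdio.h>

enum handler_type { BLOCKING = 0, NON_BLOCKING = 1 };

struct msg { int type; };

struct table_entry {
    int type;
    enum handler_type handler_type;
    void (*handler)(const struct msg *msg);
};

static void on_offer(const struct msg *msg)   { printf("offer handled (deferred)\n"); }
static void on_version(const struct msg *msg) { printf("version handled inline\n"); }

/* Stand-in for queue_work(): a real driver would defer to process context. */
static void queue_deferred(void (*handler)(const struct msg *), const struct msg *msg)
{
    handler(msg);
}

static const struct table_entry table[] = {
    { 0, BLOCKING,     NULL },       /* INVALID          */
    { 1, BLOCKING,     on_offer },   /* OFFERCHANNEL     */
    { 2, NON_BLOCKING, on_version }, /* VERSION_RESPONSE */
};

static void dispatch(const struct msg *msg)
{
    if (msg->type < 0 || msg->type >= (int)(sizeof(table) / sizeof(table[0])))
        return;                          /* unknown msgtype: drop it */
    const struct table_entry *e = &table[msg->type];
    if (!e->handler)
        return;
    if (e->handler_type == NON_BLOCKING)
        e->handler(msg);                 /* safe to run in interrupt context */
    else
        queue_deferred(e->handler, msg); /* may sleep: run later */
}

int main(void)
{
    struct msg offer = { 1 }, version = { 2 };
    dispatch(&offer);
    dispatch(&version);
    return 0;
}

Keeping the sleep/no-sleep decision in the table lets the dispatcher stay free of per-message special cases.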
@@ -787,7 +749,7 @@ int vmbus_request_offers(void)
787{ 749{
788 struct vmbus_channel_message_header *msg; 750 struct vmbus_channel_message_header *msg;
789 struct vmbus_channel_msginfo *msginfo; 751 struct vmbus_channel_msginfo *msginfo;
790 int ret, t; 752 int ret;
791 753
792 msginfo = kmalloc(sizeof(*msginfo) + 754 msginfo = kmalloc(sizeof(*msginfo) +
793 sizeof(struct vmbus_channel_message_header), 755 sizeof(struct vmbus_channel_message_header),
@@ -795,8 +757,6 @@ int vmbus_request_offers(void)
795 if (!msginfo) 757 if (!msginfo)
796 return -ENOMEM; 758 return -ENOMEM;
797 759
798 init_completion(&msginfo->waitevent);
799
800 msg = (struct vmbus_channel_message_header *)msginfo->msg; 760 msg = (struct vmbus_channel_message_header *)msginfo->msg;
801 761
802 msg->msgtype = CHANNELMSG_REQUESTOFFERS; 762 msg->msgtype = CHANNELMSG_REQUESTOFFERS;
@@ -810,14 +770,6 @@ int vmbus_request_offers(void)
810 goto cleanup; 770 goto cleanup;
811 } 771 }
812 772
813 t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
814 if (t == 0) {
815 ret = -ETIMEDOUT;
816 goto cleanup;
817 }
818
819
820
821cleanup: 773cleanup:
822 kfree(msginfo); 774 kfree(msginfo);
823 775
@@ -826,9 +778,8 @@ cleanup:
826 778
827/* 779/*
828 * Retrieve the (sub) channel on which to send an outgoing request. 780 * Retrieve the (sub) channel on which to send an outgoing request.
829 * When a primary channel has multiple sub-channels, we choose a 781 * When a primary channel has multiple sub-channels, we try to
830 * channel whose VCPU binding is closest to the VCPU on which 782 * distribute the load equally amongst all available channels.
831 * this call is being made.
832 */ 783 */
833struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary) 784struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
834{ 785{
@@ -836,11 +787,19 @@ struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
836 int cur_cpu; 787 int cur_cpu;
837 struct vmbus_channel *cur_channel; 788 struct vmbus_channel *cur_channel;
838 struct vmbus_channel *outgoing_channel = primary; 789 struct vmbus_channel *outgoing_channel = primary;
839 int cpu_distance, new_cpu_distance; 790 int next_channel;
791 int i = 1;
840 792
841 if (list_empty(&primary->sc_list)) 793 if (list_empty(&primary->sc_list))
842 return outgoing_channel; 794 return outgoing_channel;
843 795
796 next_channel = primary->next_oc++;
797
798 if (next_channel > (primary->num_sc)) {
799 primary->next_oc = 0;
800 return outgoing_channel;
801 }
802
844 cur_cpu = hv_context.vp_index[get_cpu()]; 803 cur_cpu = hv_context.vp_index[get_cpu()];
845 put_cpu(); 804 put_cpu();
846 list_for_each_safe(cur, tmp, &primary->sc_list) { 805 list_for_each_safe(cur, tmp, &primary->sc_list) {
@@ -851,18 +810,10 @@ struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
851 if (cur_channel->target_vp == cur_cpu) 810 if (cur_channel->target_vp == cur_cpu)
852 return cur_channel; 811 return cur_channel;
853 812
854 cpu_distance = ((outgoing_channel->target_vp > cur_cpu) ? 813 if (i == next_channel)
855 (outgoing_channel->target_vp - cur_cpu) : 814 return cur_channel;
856 (cur_cpu - outgoing_channel->target_vp));
857
858 new_cpu_distance = ((cur_channel->target_vp > cur_cpu) ?
859 (cur_channel->target_vp - cur_cpu) :
860 (cur_cpu - cur_channel->target_vp));
861
862 if (cpu_distance < new_cpu_distance)
863 continue;
864 815
865 outgoing_channel = cur_channel; 816 i++;
866 } 817 }
867 818
868 return outgoing_channel; 819 return outgoing_channel;
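The rewritten vmbus_get_outgoing_channel() drops the closest-VCPU heuristic for a round-robin cursor (next_oc) over the sub-channel list, still short-circuiting when a sub-channel is already bound to the calling CPU. A stand-alone sketch of that selection policy over an array (names and the array layout are illustrative):

#include <stddef.h>

struct channel { int target_cpu; };

struct primary {
    struct channel self;
    struct channel *sub;    /* sub-channels, may be empty */
    size_t num_sub;
    size_t next_oc;         /* round-robin cursor */
};

static struct channel *pick_outgoing(struct primary *p, int cur_cpu)
{
    if (p->num_sub == 0)
        return &p->self;

    size_t next = p->next_oc++;
    if (next > p->num_sub) {            /* cursor wrapped: reset, use primary */
        p->next_oc = 0;
        return &p->self;
    }

    for (size_t i = 1; i <= p->num_sub; i++) {
        struct channel *c = &p->sub[i - 1];
        if (c->target_cpu == cur_cpu)   /* prefer a channel on this CPU */
            return c;
        if (i == next)                  /* otherwise take the round-robin pick */
            return c;
    }
    return &p->self;
}

int main(void)
{
    struct channel subs[3] = { {1}, {2}, {3} };
    struct primary p = { {0}, subs, 3, 0 };
    for (int i = 0; i < 6; i++)
        pick_outgoing(&p, 5);           /* no CPU match, exercises the rotation */
    return 0;
}

Note that the primary channel stays in the rotation: a cursor value of 0 (and the wrap case) falls back to it.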
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index a63a795300b9..b27220a425f4 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -216,10 +216,21 @@ int vmbus_connect(void)
216 216
217cleanup: 217cleanup:
218 pr_err("Unable to connect to host\n"); 218 pr_err("Unable to connect to host\n");
219
219 vmbus_connection.conn_state = DISCONNECTED; 220 vmbus_connection.conn_state = DISCONNECTED;
221 vmbus_disconnect();
222
223 kfree(msginfo);
220 224
221 if (vmbus_connection.work_queue) 225 return ret;
226}
227
228void vmbus_disconnect(void)
229{
230 if (vmbus_connection.work_queue) {
231 drain_workqueue(vmbus_connection.work_queue);
222 destroy_workqueue(vmbus_connection.work_queue); 232 destroy_workqueue(vmbus_connection.work_queue);
233 }
223 234
224 if (vmbus_connection.int_page) { 235 if (vmbus_connection.int_page) {
225 free_pages((unsigned long)vmbus_connection.int_page, 0); 236 free_pages((unsigned long)vmbus_connection.int_page, 0);
@@ -230,10 +241,6 @@ cleanup:
230 free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0); 241 free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0);
231 vmbus_connection.monitor_pages[0] = NULL; 242 vmbus_connection.monitor_pages[0] = NULL;
232 vmbus_connection.monitor_pages[1] = NULL; 243 vmbus_connection.monitor_pages[1] = NULL;
233
234 kfree(msginfo);
235
236 return ret;
237} 244}
238 245
239/* 246/*
@@ -311,10 +318,8 @@ static void process_chn_event(u32 relid)
311 */ 318 */
312 channel = pcpu_relid2channel(relid); 319 channel = pcpu_relid2channel(relid);
313 320
314 if (!channel) { 321 if (!channel)
315 pr_err("channel not found for relid - %u\n", relid);
316 return; 322 return;
317 }
318 323
319 /* 324 /*
320 * A channel once created is persistent even when there 325 * A channel once created is persistent even when there
@@ -349,10 +354,7 @@ static void process_chn_event(u32 relid)
349 else 354 else
350 bytes_to_read = 0; 355 bytes_to_read = 0;
351 } while (read_state && (bytes_to_read != 0)); 356 } while (read_state && (bytes_to_read != 0));
352 } else {
353 pr_err("no channel callback for relid - %u\n", relid);
354 } 357 }
355
356} 358}
357 359
358/* 360/*
@@ -420,6 +422,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
420 union hv_connection_id conn_id; 422 union hv_connection_id conn_id;
421 int ret = 0; 423 int ret = 0;
422 int retries = 0; 424 int retries = 0;
425 u32 msec = 1;
423 426
424 conn_id.asu32 = 0; 427 conn_id.asu32 = 0;
425 conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID; 428 conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
@@ -429,13 +432,20 @@ int vmbus_post_msg(void *buffer, size_t buflen)
429 * insufficient resources. Retry the operation a couple of 432 * insufficient resources. Retry the operation a couple of
430 * times before giving up. 433 * times before giving up.
431 */ 434 */
432 while (retries < 10) { 435 while (retries < 20) {
433 ret = hv_post_message(conn_id, 1, buffer, buflen); 436 ret = hv_post_message(conn_id, 1, buffer, buflen);
434 437
435 switch (ret) { 438 switch (ret) {
439 case HV_STATUS_INVALID_CONNECTION_ID:
440 /*
441 * We could get this if we send messages too
442 * frequently.
443 */
444 ret = -EAGAIN;
445 break;
446 case HV_STATUS_INSUFFICIENT_MEMORY:
436 case HV_STATUS_INSUFFICIENT_BUFFERS: 447 case HV_STATUS_INSUFFICIENT_BUFFERS:
437 ret = -ENOMEM; 448 ret = -ENOMEM;
438 case -ENOMEM:
439 break; 449 break;
440 case HV_STATUS_SUCCESS: 450 case HV_STATUS_SUCCESS:
441 return ret; 451 return ret;
@@ -445,7 +455,9 @@ int vmbus_post_msg(void *buffer, size_t buflen)
445 } 455 }
446 456
447 retries++; 457 retries++;
448 msleep(100); 458 msleep(msec);
459 if (msec < 2048)
460 msec *= 2;
449 } 461 }
450 return ret; 462 return ret;
451} 463}
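The retry loop now backs off exponentially, 1 ms doubling up to 2048 ms, over 20 attempts instead of ten fixed 100 ms sleeps, and HV_STATUS_INVALID_CONNECTION_ID is mapped to -EAGAIN so over-frequent posts are retried as well. The backoff pattern in isolation (post_once() is a placeholder that fails a few times before succeeding):

#include <errno.h>
#include <unistd.h>

/* Placeholder for the real operation; returns 0 on success,
 * -EAGAIN/-ENOMEM on transient failure. */
static int post_once(void)
{
    static int attempts;
    return (++attempts < 4) ? -EAGAIN : 0;
}

static int post_with_backoff(void)
{
    unsigned int msec = 1;
    int ret = -EAGAIN;

    for (int retries = 0; retries < 20; retries++) {
        ret = post_once();
        if (ret == 0)
            return 0;
        if (ret != -EAGAIN && ret != -ENOMEM)
            return ret;              /* not a transient error */

        usleep(msec * 1000);         /* back off before retrying */
        if (msec < 2048)
            msec *= 2;               /* 1, 2, 4, ... capped at 2048 ms */
    }
    return ret;
}

int main(void)
{
    return post_with_backoff();
}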
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 50e51a51ff8b..d3943bceecc3 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -312,7 +312,11 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
312 dev->features = CLOCK_EVT_FEAT_ONESHOT; 312 dev->features = CLOCK_EVT_FEAT_ONESHOT;
313 dev->cpumask = cpumask_of(cpu); 313 dev->cpumask = cpumask_of(cpu);
314 dev->rating = 1000; 314 dev->rating = 1000;
315 dev->owner = THIS_MODULE; 315 /*
316 * Deliberately avoid setting dev->owner = THIS_MODULE, as doing so will
317 * result in clockevents_config_and_register() taking additional
318 * references to the hv_vmbus module making it impossible to unload.
319 */
316 320
317 dev->set_mode = hv_ce_setmode; 321 dev->set_mode = hv_ce_setmode;
318 dev->set_next_event = hv_ce_set_next_event; 322 dev->set_next_event = hv_ce_set_next_event;
@@ -470,6 +474,20 @@ void hv_synic_init(void *arg)
470} 474}
471 475
472/* 476/*
477 * hv_synic_clockevents_cleanup - Cleanup clockevent devices
478 */
479void hv_synic_clockevents_cleanup(void)
480{
481 int cpu;
482
483 if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
484 return;
485
486 for_each_online_cpu(cpu)
487 clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
488}
489
490/*
473 * hv_synic_cleanup - Cleanup routine for hv_synic_init(). 491 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
474 */ 492 */
475void hv_synic_cleanup(void *arg) 493void hv_synic_cleanup(void *arg)
@@ -477,11 +495,17 @@ void hv_synic_cleanup(void *arg)
477 union hv_synic_sint shared_sint; 495 union hv_synic_sint shared_sint;
478 union hv_synic_simp simp; 496 union hv_synic_simp simp;
479 union hv_synic_siefp siefp; 497 union hv_synic_siefp siefp;
498 union hv_synic_scontrol sctrl;
480 int cpu = smp_processor_id(); 499 int cpu = smp_processor_id();
481 500
482 if (!hv_context.synic_initialized) 501 if (!hv_context.synic_initialized)
483 return; 502 return;
484 503
504 /* Turn off clockevent device */
505 if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
506 hv_ce_setmode(CLOCK_EVT_MODE_SHUTDOWN,
507 hv_context.clk_evt[cpu]);
508
485 rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 509 rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
486 510
487 shared_sint.masked = 1; 511 shared_sint.masked = 1;
@@ -502,6 +526,10 @@ void hv_synic_cleanup(void *arg)
502 526
503 wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64); 527 wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
504 528
505 free_page((unsigned long)hv_context.synic_message_page[cpu]); 529 /* Disable the global synic bit */
506 free_page((unsigned long)hv_context.synic_event_page[cpu]); 530 rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
531 sctrl.enable = 0;
532 wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
533
534 hv_synic_free_cpu(cpu);
507} 535}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index ff169386b2c7..cb5b7dc9797f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -428,14 +428,13 @@ struct dm_info_msg {
428 * currently hot added. We hot add in multiples of 128M 428 * currently hot added. We hot add in multiples of 128M
429 * chunks; it is possible that we may not be able to bring 429 * chunks; it is possible that we may not be able to bring
430 * online all the pages in the region. The range 430 * online all the pages in the region. The range
431 * covered_start_pfn : covered_end_pfn defines the pages that can 431 * covered_end_pfn defines the pages that can
432 * be brought online. 432 * be brought online.
433 */ 433 */
434 434
435struct hv_hotadd_state { 435struct hv_hotadd_state {
436 struct list_head list; 436 struct list_head list;
437 unsigned long start_pfn; 437 unsigned long start_pfn;
438 unsigned long covered_start_pfn;
439 unsigned long covered_end_pfn; 438 unsigned long covered_end_pfn;
440 unsigned long ha_end_pfn; 439 unsigned long ha_end_pfn;
441 unsigned long end_pfn; 440 unsigned long end_pfn;
@@ -503,6 +502,8 @@ struct hv_dynmem_device {
503 * Number of pages we have currently ballooned out. 502 * Number of pages we have currently ballooned out.
504 */ 503 */
505 unsigned int num_pages_ballooned; 504 unsigned int num_pages_ballooned;
505 unsigned int num_pages_onlined;
506 unsigned int num_pages_added;
506 507
507 /* 508 /*
508 * State to manage the ballooning (up) operation. 509 * State to manage the ballooning (up) operation.
@@ -534,7 +535,6 @@ struct hv_dynmem_device {
534 struct task_struct *thread; 535 struct task_struct *thread;
535 536
536 struct mutex ha_region_mutex; 537 struct mutex ha_region_mutex;
537 struct completion waiter_event;
538 538
539 /* 539 /*
540 * A list of hot-add regions. 540 * A list of hot-add regions.
@@ -554,46 +554,32 @@ static struct hv_dynmem_device dm_device;
554static void post_status(struct hv_dynmem_device *dm); 554static void post_status(struct hv_dynmem_device *dm);
555 555
556#ifdef CONFIG_MEMORY_HOTPLUG 556#ifdef CONFIG_MEMORY_HOTPLUG
557static void acquire_region_mutex(bool trylock)
558{
559 if (trylock) {
560 reinit_completion(&dm_device.waiter_event);
561 while (!mutex_trylock(&dm_device.ha_region_mutex))
562 wait_for_completion(&dm_device.waiter_event);
563 } else {
564 mutex_lock(&dm_device.ha_region_mutex);
565 }
566}
567
568static void release_region_mutex(bool trylock)
569{
570 if (trylock) {
571 mutex_unlock(&dm_device.ha_region_mutex);
572 } else {
573 mutex_unlock(&dm_device.ha_region_mutex);
574 complete(&dm_device.waiter_event);
575 }
576}
577
578static int hv_memory_notifier(struct notifier_block *nb, unsigned long val, 557static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
579 void *v) 558 void *v)
580{ 559{
560 struct memory_notify *mem = (struct memory_notify *)v;
561
581 switch (val) { 562 switch (val) {
582 case MEM_GOING_ONLINE: 563 case MEM_GOING_ONLINE:
583 acquire_region_mutex(true); 564 mutex_lock(&dm_device.ha_region_mutex);
584 break; 565 break;
585 566
586 case MEM_ONLINE: 567 case MEM_ONLINE:
568 dm_device.num_pages_onlined += mem->nr_pages;
587 case MEM_CANCEL_ONLINE: 569 case MEM_CANCEL_ONLINE:
588 release_region_mutex(true); 570 mutex_unlock(&dm_device.ha_region_mutex);
589 if (dm_device.ha_waiting) { 571 if (dm_device.ha_waiting) {
590 dm_device.ha_waiting = false; 572 dm_device.ha_waiting = false;
591 complete(&dm_device.ol_waitevent); 573 complete(&dm_device.ol_waitevent);
592 } 574 }
593 break; 575 break;
594 576
595 case MEM_GOING_OFFLINE:
596 case MEM_OFFLINE: 577 case MEM_OFFLINE:
578 mutex_lock(&dm_device.ha_region_mutex);
579 dm_device.num_pages_onlined -= mem->nr_pages;
580 mutex_unlock(&dm_device.ha_region_mutex);
581 break;
582 case MEM_GOING_OFFLINE:
597 case MEM_CANCEL_OFFLINE: 583 case MEM_CANCEL_OFFLINE:
598 break; 584 break;
599 } 585 }
@@ -646,7 +632,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
646 init_completion(&dm_device.ol_waitevent); 632 init_completion(&dm_device.ol_waitevent);
647 dm_device.ha_waiting = true; 633 dm_device.ha_waiting = true;
648 634
649 release_region_mutex(false); 635 mutex_unlock(&dm_device.ha_region_mutex);
650 nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn)); 636 nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
651 ret = add_memory(nid, PFN_PHYS((start_pfn)), 637 ret = add_memory(nid, PFN_PHYS((start_pfn)),
652 (HA_CHUNK << PAGE_SHIFT)); 638 (HA_CHUNK << PAGE_SHIFT));
@@ -665,6 +651,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
665 } 651 }
666 has->ha_end_pfn -= HA_CHUNK; 652 has->ha_end_pfn -= HA_CHUNK;
667 has->covered_end_pfn -= processed_pfn; 653 has->covered_end_pfn -= processed_pfn;
654 mutex_lock(&dm_device.ha_region_mutex);
668 break; 655 break;
669 } 656 }
670 657
@@ -675,7 +662,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
675 * have not been "onlined" within the allowed time. 662 * have not been "onlined" within the allowed time.
676 */ 663 */
677 wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); 664 wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
678 acquire_region_mutex(false); 665 mutex_lock(&dm_device.ha_region_mutex);
679 post_status(&dm_device); 666 post_status(&dm_device);
680 } 667 }
681 668
@@ -691,8 +678,7 @@ static void hv_online_page(struct page *pg)
691 678
692 list_for_each(cur, &dm_device.ha_region_list) { 679 list_for_each(cur, &dm_device.ha_region_list) {
693 has = list_entry(cur, struct hv_hotadd_state, list); 680 has = list_entry(cur, struct hv_hotadd_state, list);
694 cur_start_pgp = (unsigned long) 681 cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
695 pfn_to_page(has->covered_start_pfn);
696 cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn); 682 cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
697 683
698 if (((unsigned long)pg >= cur_start_pgp) && 684 if (((unsigned long)pg >= cur_start_pgp) &&
@@ -704,7 +690,6 @@ static void hv_online_page(struct page *pg)
704 __online_page_set_limits(pg); 690 __online_page_set_limits(pg);
705 __online_page_increment_counters(pg); 691 __online_page_increment_counters(pg);
706 __online_page_free(pg); 692 __online_page_free(pg);
707 has->covered_start_pfn++;
708 } 693 }
709 } 694 }
710} 695}
@@ -748,10 +733,9 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
748 * is, update it. 733 * is, update it.
749 */ 734 */
750 735
751 if (has->covered_end_pfn != start_pfn) { 736 if (has->covered_end_pfn != start_pfn)
752 has->covered_end_pfn = start_pfn; 737 has->covered_end_pfn = start_pfn;
753 has->covered_start_pfn = start_pfn; 738
754 }
755 return true; 739 return true;
756 740
757 } 741 }
@@ -794,9 +778,18 @@ static unsigned long handle_pg_range(unsigned long pg_start,
794 pgs_ol = has->ha_end_pfn - start_pfn; 778 pgs_ol = has->ha_end_pfn - start_pfn;
795 if (pgs_ol > pfn_cnt) 779 if (pgs_ol > pfn_cnt)
796 pgs_ol = pfn_cnt; 780 pgs_ol = pfn_cnt;
797 hv_bring_pgs_online(start_pfn, pgs_ol); 781
782 /*
783 * Check if the corresponding memory block is already
784 * online by checking its last previously backed page.
785 * In case it is, we need to bring the rest (which was not
786 * backed previously) online too.
787 */
788 if (start_pfn > has->start_pfn &&
789 !PageReserved(pfn_to_page(start_pfn - 1)))
790 hv_bring_pgs_online(start_pfn, pgs_ol);
791
798 has->covered_end_pfn += pgs_ol; 792 has->covered_end_pfn += pgs_ol;
799 has->covered_start_pfn += pgs_ol;
800 pfn_cnt -= pgs_ol; 793 pfn_cnt -= pgs_ol;
801 } 794 }
802 795
@@ -857,7 +850,6 @@ static unsigned long process_hot_add(unsigned long pg_start,
857 list_add_tail(&ha_region->list, &dm_device.ha_region_list); 850 list_add_tail(&ha_region->list, &dm_device.ha_region_list);
858 ha_region->start_pfn = rg_start; 851 ha_region->start_pfn = rg_start;
859 ha_region->ha_end_pfn = rg_start; 852 ha_region->ha_end_pfn = rg_start;
860 ha_region->covered_start_pfn = pg_start;
861 ha_region->covered_end_pfn = pg_start; 853 ha_region->covered_end_pfn = pg_start;
862 ha_region->end_pfn = rg_start + rg_size; 854 ha_region->end_pfn = rg_start + rg_size;
863 } 855 }
@@ -886,7 +878,7 @@ static void hot_add_req(struct work_struct *dummy)
886 resp.hdr.size = sizeof(struct dm_hot_add_response); 878 resp.hdr.size = sizeof(struct dm_hot_add_response);
887 879
888#ifdef CONFIG_MEMORY_HOTPLUG 880#ifdef CONFIG_MEMORY_HOTPLUG
889 acquire_region_mutex(false); 881 mutex_lock(&dm_device.ha_region_mutex);
890 pg_start = dm->ha_wrk.ha_page_range.finfo.start_page; 882 pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
891 pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt; 883 pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
892 884
@@ -918,7 +910,9 @@ static void hot_add_req(struct work_struct *dummy)
918 if (do_hot_add) 910 if (do_hot_add)
919 resp.page_count = process_hot_add(pg_start, pfn_cnt, 911 resp.page_count = process_hot_add(pg_start, pfn_cnt,
920 rg_start, rg_sz); 912 rg_start, rg_sz);
921 release_region_mutex(false); 913
914 dm->num_pages_added += resp.page_count;
915 mutex_unlock(&dm_device.ha_region_mutex);
922#endif 916#endif
923 /* 917 /*
924 * The result field of the response structure has the 918 * The result field of the response structure has the
@@ -982,8 +976,8 @@ static unsigned long compute_balloon_floor(void)
982 * 128 72 (1/2) 976 * 128 72 (1/2)
983 * 512 168 (1/4) 977 * 512 168 (1/4)
984 * 2048 360 (1/8) 978 * 2048 360 (1/8)
985 * 8192 768 (1/16) 979 * 8192 744 (1/16)
986 * 32768 1536 (1/32) 980 * 32768 1512 (1/32)
987 */ 981 */
988 if (totalram_pages < MB2PAGES(128)) 982 if (totalram_pages < MB2PAGES(128))
989 min_pages = MB2PAGES(8) + (totalram_pages >> 1); 983 min_pages = MB2PAGES(8) + (totalram_pages >> 1);
@@ -992,9 +986,9 @@ static unsigned long compute_balloon_floor(void)
992 else if (totalram_pages < MB2PAGES(2048)) 986 else if (totalram_pages < MB2PAGES(2048))
993 min_pages = MB2PAGES(104) + (totalram_pages >> 3); 987 min_pages = MB2PAGES(104) + (totalram_pages >> 3);
994 else if (totalram_pages < MB2PAGES(8192)) 988 else if (totalram_pages < MB2PAGES(8192))
995 min_pages = MB2PAGES(256) + (totalram_pages >> 4); 989 min_pages = MB2PAGES(232) + (totalram_pages >> 4);
996 else 990 else
997 min_pages = MB2PAGES(512) + (totalram_pages >> 5); 991 min_pages = MB2PAGES(488) + (totalram_pages >> 5);
998#undef MB2PAGES 992#undef MB2PAGES
999 return min_pages; 993 return min_pages;
1000} 994}
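With the new constants the floor comes out slightly lower for large guests; the comment table now reads 744 MB at 8 GB and 1512 MB at 32 GB. The same piecewise formula evaluated in MB (the 40 MB constant for the 128-512 MB branch sits between the two hunks above and is assumed here; the other constants appear in the diff):

#include <stdio.h>

/* Floor (in MB) below which the guest refuses to balloon, as a function of
 * total RAM in MB; mirrors the updated constants in the hunks above. */
static unsigned long balloon_floor_mb(unsigned long total_mb)
{
    if (total_mb < 128)
        return 8 + total_mb / 2;
    else if (total_mb < 512)
        return 40 + total_mb / 4;    /* assumed: branch not shown in this diff */
    else if (total_mb < 2048)
        return 104 + total_mb / 8;
    else if (total_mb < 8192)
        return 232 + total_mb / 16;
    else
        return 488 + total_mb / 32;
}

int main(void)
{
    unsigned long sizes[] = { 128, 512, 2048, 8192, 32768 };
    for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
        printf("%5lu MB -> floor %lu MB\n", sizes[i], balloon_floor_mb(sizes[i]));
    return 0;    /* prints 72, 168, 360, 744, 1512 */
}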
@@ -1031,17 +1025,21 @@ static void post_status(struct hv_dynmem_device *dm)
1031 status.hdr.trans_id = atomic_inc_return(&trans_id); 1025 status.hdr.trans_id = atomic_inc_return(&trans_id);
1032 1026
1033 /* 1027 /*
1034 * The host expects the guest to report free memory. 1028 * The host expects the guest to report free and committed memory.
1035 * Further, the host expects the pressure information to 1029 * Furthermore, the host expects the pressure information to include
1036 * include the ballooned out pages. 1030 * the ballooned out pages. For a given amount of memory that we are
1037 * For a given amount of memory that we are managing, we 1031 * managing we need to compute a floor below which we should not
1038 * need to compute a floor below which we should not balloon. 1032 * balloon. Compute this and add it to the pressure report.
1039 * Compute this and add it to the pressure report. 1033 * We also need to report all offline pages (num_pages_added -
1034 * num_pages_onlined) as committed to the host, otherwise it can try
1035 * asking us to balloon them out.
1040 */ 1036 */
1041 status.num_avail = val.freeram; 1037 status.num_avail = val.freeram;
1042 status.num_committed = vm_memory_committed() + 1038 status.num_committed = vm_memory_committed() +
1043 dm->num_pages_ballooned + 1039 dm->num_pages_ballooned +
1044 compute_balloon_floor(); 1040 (dm->num_pages_added > dm->num_pages_onlined ?
1041 dm->num_pages_added - dm->num_pages_onlined : 0) +
1042 compute_balloon_floor();
1045 1043
1046 /* 1044 /*
1047 * If our transaction ID is no longer current, just don't 1045 * If our transaction ID is no longer current, just don't
@@ -1083,11 +1081,12 @@ static void free_balloon_pages(struct hv_dynmem_device *dm,
1083 1081
1084 1082
1085 1083
1086static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages, 1084static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
1087 struct dm_balloon_response *bl_resp, int alloc_unit, 1085 unsigned int num_pages,
1088 bool *alloc_error) 1086 struct dm_balloon_response *bl_resp,
1087 int alloc_unit)
1089{ 1088{
1090 int i = 0; 1089 unsigned int i = 0;
1091 struct page *pg; 1090 struct page *pg;
1092 1091
1093 if (num_pages < alloc_unit) 1092 if (num_pages < alloc_unit)
@@ -1106,11 +1105,8 @@ static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
1106 __GFP_NOMEMALLOC | __GFP_NOWARN, 1105 __GFP_NOMEMALLOC | __GFP_NOWARN,
1107 get_order(alloc_unit << PAGE_SHIFT)); 1106 get_order(alloc_unit << PAGE_SHIFT));
1108 1107
1109 if (!pg) { 1108 if (!pg)
1110 *alloc_error = true;
1111 return i * alloc_unit; 1109 return i * alloc_unit;
1112 }
1113
1114 1110
1115 dm->num_pages_ballooned += alloc_unit; 1111 dm->num_pages_ballooned += alloc_unit;
1116 1112
@@ -1137,14 +1133,15 @@ static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
1137 1133
1138static void balloon_up(struct work_struct *dummy) 1134static void balloon_up(struct work_struct *dummy)
1139{ 1135{
1140 int num_pages = dm_device.balloon_wrk.num_pages; 1136 unsigned int num_pages = dm_device.balloon_wrk.num_pages;
1141 int num_ballooned = 0; 1137 unsigned int num_ballooned = 0;
1142 struct dm_balloon_response *bl_resp; 1138 struct dm_balloon_response *bl_resp;
1143 int alloc_unit; 1139 int alloc_unit;
1144 int ret; 1140 int ret;
1145 bool alloc_error;
1146 bool done = false; 1141 bool done = false;
1147 int i; 1142 int i;
1143 struct sysinfo val;
1144 unsigned long floor;
1148 1145
1149 /* The host balloons pages in 2M granularity. */ 1146 /* The host balloons pages in 2M granularity. */
1150 WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0); 1147 WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
@@ -1155,6 +1152,15 @@ static void balloon_up(struct work_struct *dummy)
1155 */ 1152 */
1156 alloc_unit = 512; 1153 alloc_unit = 512;
1157 1154
1155 si_meminfo(&val);
1156 floor = compute_balloon_floor();
1157
1158 /* Refuse to balloon below the floor, keep the 2M granularity. */
1159 if (val.freeram < num_pages || val.freeram - num_pages < floor) {
1160 num_pages = val.freeram > floor ? (val.freeram - floor) : 0;
1161 num_pages -= num_pages % PAGES_IN_2M;
1162 }
1163
1158 while (!done) { 1164 while (!done) {
1159 bl_resp = (struct dm_balloon_response *)send_buffer; 1165 bl_resp = (struct dm_balloon_response *)send_buffer;
1160 memset(send_buffer, 0, PAGE_SIZE); 1166 memset(send_buffer, 0, PAGE_SIZE);
@@ -1164,18 +1170,15 @@ static void balloon_up(struct work_struct *dummy)
1164 1170
1165 1171
1166 num_pages -= num_ballooned; 1172 num_pages -= num_ballooned;
1167 alloc_error = false;
1168 num_ballooned = alloc_balloon_pages(&dm_device, num_pages, 1173 num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1169 bl_resp, alloc_unit, 1174 bl_resp, alloc_unit);
1170 &alloc_error);
1171 1175
1172 if (alloc_unit != 1 && num_ballooned == 0) { 1176 if (alloc_unit != 1 && num_ballooned == 0) {
1173 alloc_unit = 1; 1177 alloc_unit = 1;
1174 continue; 1178 continue;
1175 } 1179 }
1176 1180
1177 if ((alloc_unit == 1 && alloc_error) || 1181 if (num_ballooned == 0 || num_ballooned == num_pages) {
1178 (num_ballooned == num_pages)) {
1179 bl_resp->more_pages = 0; 1182 bl_resp->more_pages = 0;
1180 done = true; 1183 done = true;
1181 dm_device.state = DM_INITIALIZED; 1184 dm_device.state = DM_INITIALIZED;
@@ -1414,7 +1417,8 @@ static void balloon_onchannelcallback(void *context)
1414static int balloon_probe(struct hv_device *dev, 1417static int balloon_probe(struct hv_device *dev,
1415 const struct hv_vmbus_device_id *dev_id) 1418 const struct hv_vmbus_device_id *dev_id)
1416{ 1419{
1417 int ret, t; 1420 int ret;
1421 unsigned long t;
1418 struct dm_version_request version_req; 1422 struct dm_version_request version_req;
1419 struct dm_capabilities cap_msg; 1423 struct dm_capabilities cap_msg;
1420 1424
@@ -1439,7 +1443,6 @@ static int balloon_probe(struct hv_device *dev,
1439 dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7; 1443 dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
1440 init_completion(&dm_device.host_event); 1444 init_completion(&dm_device.host_event);
1441 init_completion(&dm_device.config_event); 1445 init_completion(&dm_device.config_event);
1442 init_completion(&dm_device.waiter_event);
1443 INIT_LIST_HEAD(&dm_device.ha_region_list); 1446 INIT_LIST_HEAD(&dm_device.ha_region_list);
1444 mutex_init(&dm_device.ha_region_mutex); 1447 mutex_init(&dm_device.ha_region_mutex);
1445 INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up); 1448 INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 3b9c9ef0deb8..7994ec2e4151 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -340,12 +340,8 @@ static int util_probe(struct hv_device *dev,
340 340
341 set_channel_read_state(dev->channel, false); 341 set_channel_read_state(dev->channel, false);
342 342
343 ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
344 srv->util_cb, dev->channel);
345 if (ret)
346 goto error;
347
348 hv_set_drvdata(dev, srv); 343 hv_set_drvdata(dev, srv);
344
349 /* 345 /*
350 * Based on the host; initialize the framework and 346 * Based on the host; initialize the framework and
351 * service version numbers we will negotiate. 347 * service version numbers we will negotiate.
@@ -365,6 +361,11 @@ static int util_probe(struct hv_device *dev,
365 hb_srv_version = HB_VERSION; 361 hb_srv_version = HB_VERSION;
366 } 362 }
367 363
364 ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
365 srv->util_cb, dev->channel);
366 if (ret)
367 goto error;
368
368 return 0; 369 return 0;
369 370
370error: 371error:
@@ -379,9 +380,9 @@ static int util_remove(struct hv_device *dev)
379{ 380{
380 struct hv_util_service *srv = hv_get_drvdata(dev); 381 struct hv_util_service *srv = hv_get_drvdata(dev);
381 382
382 vmbus_close(dev->channel);
383 if (srv->util_deinit) 383 if (srv->util_deinit)
384 srv->util_deinit(); 384 srv->util_deinit();
385 vmbus_close(dev->channel);
385 kfree(srv->recv_buffer); 386 kfree(srv->recv_buffer);
386 387
387 return 0; 388 return 0;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 44b1c9424712..887287ad411f 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -49,6 +49,17 @@ enum hv_cpuid_function {
49 HVCPUID_IMPLEMENTATION_LIMITS = 0x40000005, 49 HVCPUID_IMPLEMENTATION_LIMITS = 0x40000005,
50}; 50};
51 51
52#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE 0x400
53
54#define HV_X64_MSR_CRASH_P0 0x40000100
55#define HV_X64_MSR_CRASH_P1 0x40000101
56#define HV_X64_MSR_CRASH_P2 0x40000102
57#define HV_X64_MSR_CRASH_P3 0x40000103
58#define HV_X64_MSR_CRASH_P4 0x40000104
59#define HV_X64_MSR_CRASH_CTL 0x40000105
60
61#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
62
52/* Define version of the synthetic interrupt controller. */ 63/* Define version of the synthetic interrupt controller. */
53#define HV_SYNIC_VERSION (1) 64#define HV_SYNIC_VERSION (1)
54 65
@@ -572,6 +583,8 @@ extern void hv_synic_init(void *irqarg);
572 583
573extern void hv_synic_cleanup(void *arg); 584extern void hv_synic_cleanup(void *arg);
574 585
586extern void hv_synic_clockevents_cleanup(void);
587
575/* 588/*
576 * Host version information. 589 * Host version information.
577 */ 590 */
@@ -672,6 +685,23 @@ struct vmbus_msginfo {
672 685
673extern struct vmbus_connection vmbus_connection; 686extern struct vmbus_connection vmbus_connection;
674 687
688enum vmbus_message_handler_type {
689 /* The related handler can sleep. */
690 VMHT_BLOCKING = 0,
691
692 /* The related handler must NOT sleep. */
693 VMHT_NON_BLOCKING = 1,
694};
695
696struct vmbus_channel_message_table_entry {
697 enum vmbus_channel_message_type message_type;
698 enum vmbus_message_handler_type handler_type;
699 void (*message_handler)(struct vmbus_channel_message_header *msg);
700};
701
702extern struct vmbus_channel_message_table_entry
703 channel_message_table[CHANNELMSG_COUNT];
704
675/* General vmbus interface */ 705/* General vmbus interface */
676 706
677struct hv_device *vmbus_device_create(const uuid_le *type, 707struct hv_device *vmbus_device_create(const uuid_le *type,
@@ -692,6 +722,7 @@ void vmbus_free_channels(void);
692/* Connection interface */ 722/* Connection interface */
693 723
694int vmbus_connect(void); 724int vmbus_connect(void);
725void vmbus_disconnect(void);
695 726
696int vmbus_post_msg(void *buffer, size_t buflen); 727int vmbus_post_msg(void *buffer, size_t buflen);
697 728
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index f518b8d7a5b5..c85235e9f245 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,9 +33,12 @@
33#include <linux/hyperv.h> 33#include <linux/hyperv.h>
34#include <linux/kernel_stat.h> 34#include <linux/kernel_stat.h>
35#include <linux/clockchips.h> 35#include <linux/clockchips.h>
36#include <linux/cpu.h>
36#include <asm/hyperv.h> 37#include <asm/hyperv.h>
37#include <asm/hypervisor.h> 38#include <asm/hypervisor.h>
38#include <asm/mshyperv.h> 39#include <asm/mshyperv.h>
40#include <linux/notifier.h>
41#include <linux/ptrace.h>
39#include "hyperv_vmbus.h" 42#include "hyperv_vmbus.h"
40 43
41static struct acpi_device *hv_acpi_dev; 44static struct acpi_device *hv_acpi_dev;
@@ -44,6 +47,31 @@ static struct tasklet_struct msg_dpc;
44static struct completion probe_event; 47static struct completion probe_event;
45static int irq; 48static int irq;
46 49
50
51static int hyperv_panic_event(struct notifier_block *nb,
52 unsigned long event, void *ptr)
53{
54 struct pt_regs *regs;
55
56 regs = current_pt_regs();
57
58 wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
59 wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
60 wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
61 wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
62 wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);
63
64 /*
65 * Let Hyper-V know there is crash data available
66 */
67 wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
68 return NOTIFY_DONE;
69}
70
71static struct notifier_block hyperv_panic_block = {
72 .notifier_call = hyperv_panic_event,
73};
74
47struct resource hyperv_mmio = { 75struct resource hyperv_mmio = {
48 .name = "hyperv mmio", 76 .name = "hyperv mmio",
49 .flags = IORESOURCE_MEM, 77 .flags = IORESOURCE_MEM,
@@ -507,14 +535,26 @@ static int vmbus_probe(struct device *child_device)
507 */ 535 */
508static int vmbus_remove(struct device *child_device) 536static int vmbus_remove(struct device *child_device)
509{ 537{
510 struct hv_driver *drv = drv_to_hv_drv(child_device->driver); 538 struct hv_driver *drv;
511 struct hv_device *dev = device_to_hv_device(child_device); 539 struct hv_device *dev = device_to_hv_device(child_device);
512 540 u32 relid = dev->channel->offermsg.child_relid;
513 if (drv->remove) 541
514 drv->remove(dev); 542 if (child_device->driver) {
515 else 543 drv = drv_to_hv_drv(child_device->driver);
516 pr_err("remove not set for driver %s\n", 544 if (drv->remove)
517 dev_name(child_device)); 545 drv->remove(dev);
546 else {
547 hv_process_channel_removal(dev->channel, relid);
548 pr_err("remove not set for driver %s\n",
549 dev_name(child_device));
550 }
551 } else {
552 /*
553 * We don't have a driver for this device; deal with the
554 * rescind message by removing the channel.
555 */
556 hv_process_channel_removal(dev->channel, relid);
557 }
518 558
519 return 0; 559 return 0;
520} 560}
@@ -573,6 +613,10 @@ static void vmbus_onmessage_work(struct work_struct *work)
573{ 613{
574 struct onmessage_work_context *ctx; 614 struct onmessage_work_context *ctx;
575 615
616 /* Do not process messages if we're in DISCONNECTED state */
617 if (vmbus_connection.conn_state == DISCONNECTED)
618 return;
619
576 ctx = container_of(work, struct onmessage_work_context, 620 ctx = container_of(work, struct onmessage_work_context,
577 work); 621 work);
578 vmbus_onmessage(&ctx->msg); 622 vmbus_onmessage(&ctx->msg);
@@ -613,21 +657,36 @@ static void vmbus_on_msg_dpc(unsigned long data)
613 void *page_addr = hv_context.synic_message_page[cpu]; 657 void *page_addr = hv_context.synic_message_page[cpu];
614 struct hv_message *msg = (struct hv_message *)page_addr + 658 struct hv_message *msg = (struct hv_message *)page_addr +
615 VMBUS_MESSAGE_SINT; 659 VMBUS_MESSAGE_SINT;
660 struct vmbus_channel_message_header *hdr;
661 struct vmbus_channel_message_table_entry *entry;
616 struct onmessage_work_context *ctx; 662 struct onmessage_work_context *ctx;
617 663
618 while (1) { 664 while (1) {
619 if (msg->header.message_type == HVMSG_NONE) { 665 if (msg->header.message_type == HVMSG_NONE)
620 /* no msg */ 666 /* no msg */
621 break; 667 break;
622 } else { 668
669 hdr = (struct vmbus_channel_message_header *)msg->u.payload;
670
671 if (hdr->msgtype >= CHANNELMSG_COUNT) {
672 WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
673 goto msg_handled;
674 }
675
676 entry = &channel_message_table[hdr->msgtype];
677 if (entry->handler_type == VMHT_BLOCKING) {
623 ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); 678 ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
624 if (ctx == NULL) 679 if (ctx == NULL)
625 continue; 680 continue;
681
626 INIT_WORK(&ctx->work, vmbus_onmessage_work); 682 INIT_WORK(&ctx->work, vmbus_onmessage_work);
627 memcpy(&ctx->msg, msg, sizeof(*msg)); 683 memcpy(&ctx->msg, msg, sizeof(*msg));
684
628 queue_work(vmbus_connection.work_queue, &ctx->work); 685 queue_work(vmbus_connection.work_queue, &ctx->work);
629 } 686 } else
687 entry->message_handler(hdr);
630 688
689msg_handled:
631 msg->header.message_type = HVMSG_NONE; 690 msg->header.message_type = HVMSG_NONE;
632 691
633 /* 692 /*
@@ -704,6 +763,39 @@ static void vmbus_isr(void)
704 } 763 }
705} 764}
706 765
766#ifdef CONFIG_HOTPLUG_CPU
767static int hyperv_cpu_disable(void)
768{
769 return -ENOSYS;
770}
771
772static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
773{
774 static void *previous_cpu_disable;
775
776 /*
777 * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
778 * ...) is not supported at this moment as channel interrupts are
779 * distributed across all of them.
780 */
781
782 if ((vmbus_proto_version == VERSION_WS2008) ||
783 (vmbus_proto_version == VERSION_WIN7))
784 return;
785
786 if (vmbus_loaded) {
787 previous_cpu_disable = smp_ops.cpu_disable;
788 smp_ops.cpu_disable = hyperv_cpu_disable;
789 pr_notice("CPU offlining is not supported by hypervisor\n");
790 } else if (previous_cpu_disable)
791 smp_ops.cpu_disable = previous_cpu_disable;
792}
793#else
794static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
795{
796}
797#endif
798
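hv_cpu_hotplug_quirk() saves the current smp_ops.cpu_disable pointer, substitutes a stub that returns -ENOSYS while vmbus is loaded, and restores the saved pointer on unload. The save/override/restore pattern in a self-contained form (the ops struct and names are made up for the example):

#include <errno.h>
#include <stdio.h>

struct ops {
    int (*cpu_disable)(void);
};

static int real_cpu_disable(void)   { return 0; }
static int refuse_cpu_disable(void) { return -ENOSYS; }

static struct ops ops = { .cpu_disable = real_cpu_disable };

static void apply_quirk(int loaded)
{
    static int (*previous)(void);

    if (loaded) {
        previous = ops.cpu_disable;        /* save the original handler */
        ops.cpu_disable = refuse_cpu_disable;
    } else if (previous) {
        ops.cpu_disable = previous;        /* restore it on unload */
    }
}

int main(void)
{
    apply_quirk(1);
    printf("while loaded: %d\n", ops.cpu_disable());  /* -ENOSYS */
    apply_quirk(0);
    printf("after unload: %d\n", ops.cpu_disable());  /* 0 */
    return 0;
}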
707/* 799/*
708 * vmbus_bus_init -Main vmbus driver initialization routine. 800 * vmbus_bus_init -Main vmbus driver initialization routine.
709 * 801 *
@@ -744,6 +836,16 @@ static int vmbus_bus_init(int irq)
744 if (ret) 836 if (ret)
745 goto err_alloc; 837 goto err_alloc;
746 838
839 hv_cpu_hotplug_quirk(true);
840
841 /*
842 * Only register if the crash MSRs are available
843 */
844 if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
845 atomic_notifier_chain_register(&panic_notifier_list,
846 &hyperv_panic_block);
847 }
848
747 vmbus_request_offers(); 849 vmbus_request_offers();
748 850
749 return 0; 851 return 0;
@@ -840,10 +942,8 @@ int vmbus_device_register(struct hv_device *child_device_obj)
840{ 942{
841 int ret = 0; 943 int ret = 0;
842 944
843 static atomic_t device_num = ATOMIC_INIT(0); 945 dev_set_name(&child_device_obj->device, "vmbus_%d",
844 946 child_device_obj->channel->id);
845 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
846 atomic_inc_return(&device_num));
847 947
848 child_device_obj->device.bus = &hv_bus; 948 child_device_obj->device.bus = &hv_bus;
849 child_device_obj->device.parent = &hv_acpi_dev->dev; 949 child_device_obj->device.parent = &hv_acpi_dev->dev;
@@ -992,11 +1092,19 @@ cleanup:
992 1092
993static void __exit vmbus_exit(void) 1093static void __exit vmbus_exit(void)
994{ 1094{
1095 int cpu;
1096
1097 vmbus_connection.conn_state = DISCONNECTED;
1098 hv_synic_clockevents_cleanup();
995 hv_remove_vmbus_irq(); 1099 hv_remove_vmbus_irq();
996 vmbus_free_channels(); 1100 vmbus_free_channels();
997 bus_unregister(&hv_bus); 1101 bus_unregister(&hv_bus);
998 hv_cleanup(); 1102 hv_cleanup();
1103 for_each_online_cpu(cpu)
1104 smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
999 acpi_bus_unregister_driver(&vmbus_acpi_driver); 1105 acpi_bus_unregister_driver(&vmbus_acpi_driver);
1106 hv_cpu_hotplug_quirk(false);
1107 vmbus_disconnect();
1000} 1108}
1001 1109
1002 1110
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
new file mode 100644
index 000000000000..fc1f1ae7a49d
--- /dev/null
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -0,0 +1,61 @@
1#
2# Coresight configuration
3#
4menuconfig CORESIGHT
5 bool "CoreSight Tracing Support"
6 select ARM_AMBA
7 help
8 This framework provides a kernel interface for the CoreSight debug
9 and trace drivers to register themselves with. It's intended to build
10 a topological view of the CoreSight components based on a DT
11 specification and configure the right series of components when a
12 trace source gets enabled.
13
14if CORESIGHT
15config CORESIGHT_LINKS_AND_SINKS
16 bool "CoreSight Link and Sink drivers"
17 help
18 This enables support for CoreSight link and sink drivers that are
19 responsible for transporting and collecting the trace data
20 respectively. Links and sinks are dynamically aggregated with a trace
21 entity at run time to form a complete trace path.
22
23config CORESIGHT_LINK_AND_SINK_TMC
24 bool "Coresight generic TMC driver"
25 depends on CORESIGHT_LINKS_AND_SINKS
26 help
27 This enables support for the Trace Memory Controller driver.
28 Depending on its configuration the device can act as a link (embedded
29 trace router - ETR) or sink (embedded trace FIFO). The driver
30 complies with the generic implementation of the component without
31 special enhancement or added features.
32
33config CORESIGHT_SINK_TPIU
34 bool "Coresight generic TPIU driver"
35 depends on CORESIGHT_LINKS_AND_SINKS
36 help
37 This enables support for the Trace Port Interface Unit driver,
38 responsible for bridging the gap between the on-chip coresight
39 components and a trace port collection engine. The trace port is
40 typically connected to an external host, for use cases where more
41 traces are captured than the on-board coresight memory can
42 handle.
43
44config CORESIGHT_SINK_ETBV10
45 bool "Coresight ETBv1.0 driver"
46 depends on CORESIGHT_LINKS_AND_SINKS
47 help
48 This enables support for the Embedded Trace Buffer version 1.0 driver
49 that complies with the generic implementation of the component without
50 special enhancement or added features.
51
52config CORESIGHT_SOURCE_ETM3X
53 bool "CoreSight Embedded Trace Macrocell 3.x driver"
54 depends on !ARM64
55 select CORESIGHT_LINKS_AND_SINKS
56 help
57 This driver provides support for processor ETM3.x and PTM1.x modules,
58 which allows tracing the instructions that a processor is executing.
59 This is primarily useful for instruction level tracing. Depending on
60 the ETM version, data tracing may also be available.
61endif
diff --git a/drivers/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 4b4bec890ef5..4b4bec890ef5 100644
--- a/drivers/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
diff --git a/drivers/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index c9acd406f0d0..40049869aecd 100644
--- a/drivers/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -313,8 +313,8 @@ static ssize_t etb_read(struct file *file, char __user *data,
313 313
314 *ppos += len; 314 *ppos += len;
315 315
316 dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n", 316 dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
317 __func__, len, (int) (depth * 4 - *ppos)); 317 __func__, len, (int)(depth * 4 - *ppos));
318 return len; 318 return len;
319} 319}
320 320
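The dev_dbg() fix matters because len is a size_t: %d mismatches the argument type on 64-bit builds and triggers -Wformat warnings, while %zu is the correct conversion. A trivial illustration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t len = 4096;
    long long left = 16384 - 4096;

    /* %zu is the portable conversion for size_t; casting to a known type
     * (as the driver does with (int)) is the other correct option. */
    printf("%zu bytes copied, %d bytes left\n", len, (int)left);
    return 0;
}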
diff --git a/drivers/coresight/coresight-etm-cp14.c b/drivers/hwtracing/coresight/coresight-etm-cp14.c
index 12a220682117..12a220682117 100644
--- a/drivers/coresight/coresight-etm-cp14.c
+++ b/drivers/hwtracing/coresight/coresight-etm-cp14.c
diff --git a/drivers/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 501c5fac8a45..501c5fac8a45 100644
--- a/drivers/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
diff --git a/drivers/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index c965f5724abd..c965f5724abd 100644
--- a/drivers/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
diff --git a/drivers/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 3db36f70b666..3db36f70b666 100644
--- a/drivers/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
diff --git a/drivers/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 62fcd98cc7cf..62fcd98cc7cf 100644
--- a/drivers/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
diff --git a/drivers/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index cdf05537d574..75b9abd804e6 100644
--- a/drivers/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -107,7 +107,7 @@ static int replicator_remove(struct platform_device *pdev)
107 return 0; 107 return 0;
108} 108}
109 109
110static struct of_device_id replicator_match[] = { 110static const struct of_device_id replicator_match[] = {
111 {.compatible = "arm,coresight-replicator"}, 111 {.compatible = "arm,coresight-replicator"},
112 {} 112 {}
113}; 113};
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 3ff232f9ddf7..7147f3dd363c 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -533,8 +533,8 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
533 533
534 *ppos += len; 534 *ppos += len;
535 535
536 dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n", 536 dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
537 __func__, len, (int) (drvdata->size - *ppos)); 537 __func__, len, (int)(drvdata->size - *ppos));
538 return len; 538 return len;
539} 539}
540 540
@@ -565,6 +565,59 @@ static const struct file_operations tmc_fops = {
565 .llseek = no_llseek, 565 .llseek = no_llseek,
566}; 566};
567 567
568static ssize_t status_show(struct device *dev,
569 struct device_attribute *attr, char *buf)
570{
571 int ret;
572 unsigned long flags;
573 u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
574 u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
575 u32 devid;
576 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
577
578 ret = clk_prepare_enable(drvdata->clk);
579 if (ret)
580 goto out;
581
582 spin_lock_irqsave(&drvdata->spinlock, flags);
583 CS_UNLOCK(drvdata->base);
584
585 tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
586 tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
587 tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
588 tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
589 tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
590 tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
591 tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
592 tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
593 tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
594 tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
595 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
596
597 CS_LOCK(drvdata->base);
598 spin_unlock_irqrestore(&drvdata->spinlock, flags);
599
600 clk_disable_unprepare(drvdata->clk);
601
602 return sprintf(buf,
603 "Depth:\t\t0x%x\n"
604 "Status:\t\t0x%x\n"
605 "RAM read ptr:\t0x%x\n"
606 "RAM wrt ptr:\t0x%x\n"
607 "Trigger cnt:\t0x%x\n"
608 "Control:\t0x%x\n"
609 "Flush status:\t0x%x\n"
610 "Flush ctrl:\t0x%x\n"
611 "Mode:\t\t0x%x\n"
612 "PSCR:\t\t0x%x\n"
613 "DEVID:\t\t0x%x\n",
614 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
615 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
616out:
617 return -EINVAL;
618}
619static DEVICE_ATTR_RO(status);
620
568static ssize_t trigger_cntr_show(struct device *dev, 621static ssize_t trigger_cntr_show(struct device *dev,
569 struct device_attribute *attr, char *buf) 622 struct device_attribute *attr, char *buf)
570{ 623{
@@ -593,18 +646,21 @@ static DEVICE_ATTR_RW(trigger_cntr);
593 646
594static struct attribute *coresight_etb_attrs[] = { 647static struct attribute *coresight_etb_attrs[] = {
595 &dev_attr_trigger_cntr.attr, 648 &dev_attr_trigger_cntr.attr,
649 &dev_attr_status.attr,
596 NULL, 650 NULL,
597}; 651};
598ATTRIBUTE_GROUPS(coresight_etb); 652ATTRIBUTE_GROUPS(coresight_etb);
599 653
600static struct attribute *coresight_etr_attrs[] = { 654static struct attribute *coresight_etr_attrs[] = {
601 &dev_attr_trigger_cntr.attr, 655 &dev_attr_trigger_cntr.attr,
656 &dev_attr_status.attr,
602 NULL, 657 NULL,
603}; 658};
604ATTRIBUTE_GROUPS(coresight_etr); 659ATTRIBUTE_GROUPS(coresight_etr);
605 660
606static struct attribute *coresight_etf_attrs[] = { 661static struct attribute *coresight_etf_attrs[] = {
607 &dev_attr_trigger_cntr.attr, 662 &dev_attr_trigger_cntr.attr,
663 &dev_attr_status.attr,
608 NULL, 664 NULL,
609}; 665};
610ATTRIBUTE_GROUPS(coresight_etf); 666ATTRIBUTE_GROUPS(coresight_etf);
diff --git a/drivers/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 3b33af2416bb..3b33af2416bb 100644
--- a/drivers/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
diff --git a/drivers/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index c5def9382357..894531d315b8 100644
--- a/drivers/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -305,7 +305,9 @@ static int coresight_build_paths(struct coresight_device *csdev,
305 305
306 list_add(&csdev->path_link, path); 306 list_add(&csdev->path_link, path);
307 307
308 if (csdev->type == CORESIGHT_DEV_TYPE_SINK && csdev->activated) { 308 if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
309 csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
310 csdev->activated) {
309 if (enable) 311 if (enable)
310 ret = coresight_enable_path(path); 312 ret = coresight_enable_path(path);
311 else 313 else
diff --git a/drivers/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 6f75e9d5b6fb..35e51ce93a5c 100644
--- a/drivers/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -22,6 +22,7 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/amba/bus.h> 23#include <linux/amba/bus.h>
24#include <linux/coresight.h> 24#include <linux/coresight.h>
25#include <linux/cpumask.h>
25#include <asm/smp_plat.h> 26#include <asm/smp_plat.h>
26 27
27 28
@@ -104,7 +105,7 @@ static int of_coresight_alloc_memory(struct device *dev,
104struct coresight_platform_data *of_get_coresight_platform_data( 105struct coresight_platform_data *of_get_coresight_platform_data(
105 struct device *dev, struct device_node *node) 106 struct device *dev, struct device_node *node)
106{ 107{
107 int i = 0, ret = 0; 108 int i = 0, ret = 0, cpu;
108 struct coresight_platform_data *pdata; 109 struct coresight_platform_data *pdata;
109 struct of_endpoint endpoint, rendpoint; 110 struct of_endpoint endpoint, rendpoint;
110 struct device *rdev; 111 struct device *rdev;
@@ -178,17 +179,10 @@ struct coresight_platform_data *of_get_coresight_platform_data(
178 /* Affinity defaults to CPU0 */ 179 /* Affinity defaults to CPU0 */
179 pdata->cpu = 0; 180 pdata->cpu = 0;
180 dn = of_parse_phandle(node, "cpu", 0); 181 dn = of_parse_phandle(node, "cpu", 0);
181 if (dn) { 182 for (cpu = 0; dn && cpu < nr_cpu_ids; cpu++) {
182 const u32 *cell; 183 if (dn == of_get_cpu_node(cpu, NULL)) {
183 int len, index; 184 pdata->cpu = cpu;
184 u64 hwid; 185 break;
185
186 cell = of_get_property(dn, "reg", &len);
187 if (cell) {
188 hwid = of_read_number(cell, of_n_addr_cells(dn));
189 index = get_logical_index(hwid);
190 if (index != -EINVAL)
191 pdata->cpu = index;
192 } 186 }
193 } 187 }
194 188
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 0af7361e377f..de36237d7c6b 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -56,9 +56,9 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
56 56
57 res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE, 57 res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE,
58 KBUILD_MODNAME); 58 KBUILD_MODNAME);
59 if (IS_ERR(res)) { 59 if (!res) {
60 dev_err(&pdev->dev, "Failed to request PCI memory\n"); 60 dev_err(&pdev->dev, "Failed to request PCI memory\n");
61 ret = PTR_ERR(res); 61 ret = -EBUSY;
62 goto out_disable; 62 goto out_disable;
63 } 63 }
64 64
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 191383d8c94d..868036f70f8f 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -83,6 +83,15 @@ config FSL_IFC
83 bool 83 bool
84 depends on FSL_SOC 84 depends on FSL_SOC
85 85
86config JZ4780_NEMC
87 bool "Ingenic JZ4780 SoC NEMC driver"
88 default y
89 depends on MACH_JZ4780
90 help
91 This driver is for the NAND/External Memory Controller (NEMC) in
92 the Ingenic JZ4780. This controller is used to handle external
93 memory devices such as NAND and SRAM.
94
86source "drivers/memory/tegra/Kconfig" 95source "drivers/memory/tegra/Kconfig"
87 96
88endif 97endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 6b6548124473..b670441e3cdf 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -13,5 +13,6 @@ obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o
13obj-$(CONFIG_FSL_IFC) += fsl_ifc.o 13obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
14obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o 14obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
15obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o 15obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
16obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
16 17
17obj-$(CONFIG_TEGRA_MC) += tegra/ 18obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
new file mode 100644
index 000000000000..919d1925acb9
--- /dev/null
+++ b/drivers/memory/jz4780-nemc.c
@@ -0,0 +1,391 @@
1/*
2 * JZ4780 NAND/external memory controller (NEMC)
3 *
4 * Copyright (c) 2015 Imagination Technologies
5 * Author: Alex Smith <alex@alex-smith.me.uk>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation.
10 */
11
12#include <linux/clk.h>
13#include <linux/init.h>
14#include <linux/math64.h>
15#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/of_device.h>
18#include <linux/of_platform.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21#include <linux/spinlock.h>
22
23#include <linux/jz4780-nemc.h>
24
25#define NEMC_SMCRn(n) (0x14 + (((n) - 1) * 4))
26#define NEMC_NFCSR 0x50
27
28#define NEMC_SMCR_SMT BIT(0)
29#define NEMC_SMCR_BW_SHIFT 6
30#define NEMC_SMCR_BW_MASK (0x3 << NEMC_SMCR_BW_SHIFT)
31#define NEMC_SMCR_BW_8 (0 << 6)
32#define NEMC_SMCR_TAS_SHIFT 8
33#define NEMC_SMCR_TAS_MASK (0xf << NEMC_SMCR_TAS_SHIFT)
34#define NEMC_SMCR_TAH_SHIFT 12
35#define NEMC_SMCR_TAH_MASK (0xf << NEMC_SMCR_TAH_SHIFT)
36#define NEMC_SMCR_TBP_SHIFT 16
37#define NEMC_SMCR_TBP_MASK (0xf << NEMC_SMCR_TBP_SHIFT)
38#define NEMC_SMCR_TAW_SHIFT 20
39#define NEMC_SMCR_TAW_MASK (0xf << NEMC_SMCR_TAW_SHIFT)
40#define NEMC_SMCR_TSTRV_SHIFT 24
41#define NEMC_SMCR_TSTRV_MASK (0x3f << NEMC_SMCR_TSTRV_SHIFT)
42
43#define NEMC_NFCSR_NFEn(n) BIT(((n) - 1) << 1)
44#define NEMC_NFCSR_NFCEn(n) BIT((((n) - 1) << 1) + 1)
45#define NEMC_NFCSR_TNFEn(n) BIT(16 + (n) - 1)
46
47struct jz4780_nemc {
48 spinlock_t lock;
49 struct device *dev;
50 void __iomem *base;
51 struct clk *clk;
52 uint32_t clk_period;
53 unsigned long banks_present;
54};
55
56/**
57 * jz4780_nemc_num_banks() - count the number of banks referenced by a device
58 * @dev: device to count banks for, must be a child of the NEMC.
59 *
60 * Return: The number of unique NEMC banks referred to by the specified NEMC
61 * child device. Unique here means that a device that references the same bank
62 * multiple times in its "reg" property will only count once.
63 */
64unsigned int jz4780_nemc_num_banks(struct device *dev)
65{
66 const __be32 *prop;
67 unsigned int bank, count = 0;
68 unsigned long referenced = 0;
69 int i = 0;
70
71 while ((prop = of_get_address(dev->of_node, i++, NULL, NULL))) {
72 bank = of_read_number(prop, 1);
73 if (!(referenced & BIT(bank))) {
74 referenced |= BIT(bank);
75 count++;
76 }
77 }
78
79 return count;
80}
81EXPORT_SYMBOL(jz4780_nemc_num_banks);
82
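jz4780_nemc_num_banks() deduplicates bank numbers with a bitmask, so a child that names the same bank twice in its "reg" property is counted once. The same idea in isolation (bank numbers are assumed to be smaller than the width of unsigned long):

#include <stdio.h>

static unsigned int count_unique(const unsigned int *banks, unsigned int n)
{
    unsigned long referenced = 0;   /* one bit per bank already seen */
    unsigned int count = 0;

    for (unsigned int i = 0; i < n; i++) {
        unsigned long bit = 1UL << banks[i];
        if (!(referenced & bit)) {
            referenced |= bit;
            count++;
        }
    }
    return count;
}

int main(void)
{
    unsigned int banks[] = { 1, 1, 2, 5, 2 };
    printf("%u\n", count_unique(banks, 5));   /* prints 3 */
    return 0;
}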
83/**
84 * jz4780_nemc_set_type() - set the type of device connected to a bank
85 * @dev: child device of the NEMC.
86 * @bank: bank number to configure.
87 * @type: type of device connected to the bank.
88 */
89void jz4780_nemc_set_type(struct device *dev, unsigned int bank,
90 enum jz4780_nemc_bank_type type)
91{
92 struct jz4780_nemc *nemc = dev_get_drvdata(dev->parent);
93 uint32_t nfcsr;
94
95 nfcsr = readl(nemc->base + NEMC_NFCSR);
96
97 /* TODO: Support toggle NAND devices. */
98 switch (type) {
99 case JZ4780_NEMC_BANK_SRAM:
100 nfcsr &= ~(NEMC_NFCSR_TNFEn(bank) | NEMC_NFCSR_NFEn(bank));
101 break;
102 case JZ4780_NEMC_BANK_NAND:
103 nfcsr &= ~NEMC_NFCSR_TNFEn(bank);
104 nfcsr |= NEMC_NFCSR_NFEn(bank);
105 break;
106 }
107
108 writel(nfcsr, nemc->base + NEMC_NFCSR);
109}
110EXPORT_SYMBOL(jz4780_nemc_set_type);
111
112/**
113 * jz4780_nemc_assert() - (de-)assert a NAND device's chip enable pin
114 * @dev: child device of the NEMC.
115 * @bank: bank number of device.
116 * @assert: whether the chip enable pin should be asserted.
117 *
118 * (De-)asserts the chip enable pin for the NAND device connected to the
119 * specified bank.
120 */
121void jz4780_nemc_assert(struct device *dev, unsigned int bank, bool assert)
122{
123 struct jz4780_nemc *nemc = dev_get_drvdata(dev->parent);
124 uint32_t nfcsr;
125
126 nfcsr = readl(nemc->base + NEMC_NFCSR);
127
128 if (assert)
129 nfcsr |= NEMC_NFCSR_NFCEn(bank);
130 else
131 nfcsr &= ~NEMC_NFCSR_NFCEn(bank);
132
133 writel(nfcsr, nemc->base + NEMC_NFCSR);
134}
135EXPORT_SYMBOL(jz4780_nemc_assert);
136
137static uint32_t jz4780_nemc_clk_period(struct jz4780_nemc *nemc)
138{
139 unsigned long rate;
140
141 rate = clk_get_rate(nemc->clk);
142 if (!rate)
143 return 0;
144
145 /* Return in picoseconds. */
146 return div64_ul(1000000000000ull, rate);
147}
148
149static uint32_t jz4780_nemc_ns_to_cycles(struct jz4780_nemc *nemc, uint32_t ns)
150{
151 return ((ns * 1000) + nemc->clk_period - 1) / nemc->clk_period;
152}
153
154static bool jz4780_nemc_configure_bank(struct jz4780_nemc *nemc,
155 unsigned int bank,
156 struct device_node *node)
157{
158 uint32_t smcr, val, cycles;
159
160 /*
161 * Conversion of tBP and tAW cycle counts to values supported by the
162 * hardware (round up to the next supported value).
163 */
164 static const uint32_t convert_tBP_tAW[] = {
165 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
166
167 /* 11 - 12 -> 12 cycles */
168 11, 11,
169
170 /* 13 - 15 -> 15 cycles */
171 12, 12, 12,
172
173 /* 16 - 20 -> 20 cycles */
174 13, 13, 13, 13, 13,
175
176 /* 21 - 25 -> 25 cycles */
177 14, 14, 14, 14, 14,
178
179 /* 26 - 31 -> 31 cycles */
180 15, 15, 15, 15, 15, 15
181 };
182
183 smcr = readl(nemc->base + NEMC_SMCRn(bank));
184 smcr &= ~NEMC_SMCR_SMT;
185
186 if (!of_property_read_u32(node, "ingenic,nemc-bus-width", &val)) {
187 smcr &= ~NEMC_SMCR_BW_MASK;
188 switch (val) {
189 case 8:
190 smcr |= NEMC_SMCR_BW_8;
191 break;
192 default:
193 /*
194 * Earlier SoCs support a 16 bit bus width (the 4780
 195 * does not); until those are properly supported, error out.
196 */
197 dev_err(nemc->dev, "unsupported bus width: %u\n", val);
198 return false;
199 }
200 }
201
202 if (of_property_read_u32(node, "ingenic,nemc-tAS", &val) == 0) {
203 smcr &= ~NEMC_SMCR_TAS_MASK;
204 cycles = jz4780_nemc_ns_to_cycles(nemc, val);
205 if (cycles > 15) {
206 dev_err(nemc->dev, "tAS %u is too high (%u cycles)\n",
207 val, cycles);
208 return false;
209 }
210
211 smcr |= cycles << NEMC_SMCR_TAS_SHIFT;
212 }
213
214 if (of_property_read_u32(node, "ingenic,nemc-tAH", &val) == 0) {
215 smcr &= ~NEMC_SMCR_TAH_MASK;
216 cycles = jz4780_nemc_ns_to_cycles(nemc, val);
217 if (cycles > 15) {
218 dev_err(nemc->dev, "tAH %u is too high (%u cycles)\n",
219 val, cycles);
220 return false;
221 }
222
223 smcr |= cycles << NEMC_SMCR_TAH_SHIFT;
224 }
225
226 if (of_property_read_u32(node, "ingenic,nemc-tBP", &val) == 0) {
227 smcr &= ~NEMC_SMCR_TBP_MASK;
228 cycles = jz4780_nemc_ns_to_cycles(nemc, val);
229 if (cycles > 31) {
230 dev_err(nemc->dev, "tBP %u is too high (%u cycles)\n",
231 val, cycles);
232 return false;
233 }
234
235 smcr |= convert_tBP_tAW[cycles] << NEMC_SMCR_TBP_SHIFT;
236 }
237
238 if (of_property_read_u32(node, "ingenic,nemc-tAW", &val) == 0) {
239 smcr &= ~NEMC_SMCR_TAW_MASK;
240 cycles = jz4780_nemc_ns_to_cycles(nemc, val);
241 if (cycles > 31) {
242 dev_err(nemc->dev, "tAW %u is too high (%u cycles)\n",
243 val, cycles);
244 return false;
245 }
246
247 smcr |= convert_tBP_tAW[cycles] << NEMC_SMCR_TAW_SHIFT;
248 }
249
250 if (of_property_read_u32(node, "ingenic,nemc-tSTRV", &val) == 0) {
251 smcr &= ~NEMC_SMCR_TSTRV_MASK;
252 cycles = jz4780_nemc_ns_to_cycles(nemc, val);
253 if (cycles > 63) {
254 dev_err(nemc->dev, "tSTRV %u is too high (%u cycles)\n",
255 val, cycles);
256 return false;
257 }
258
259 smcr |= cycles << NEMC_SMCR_TSTRV_SHIFT;
260 }
261
262 writel(smcr, nemc->base + NEMC_SMCRn(bank));
263 return true;
264}
265
266static int jz4780_nemc_probe(struct platform_device *pdev)
267{
268 struct device *dev = &pdev->dev;
269 struct jz4780_nemc *nemc;
270 struct resource *res;
271 struct device_node *child;
272 const __be32 *prop;
273 unsigned int bank;
274 unsigned long referenced;
275 int i, ret;
276
277 nemc = devm_kzalloc(dev, sizeof(*nemc), GFP_KERNEL);
278 if (!nemc)
279 return -ENOMEM;
280
281 spin_lock_init(&nemc->lock);
282 nemc->dev = dev;
283
284 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
285 nemc->base = devm_ioremap_resource(dev, res);
286 if (IS_ERR(nemc->base)) {
287 dev_err(dev, "failed to get I/O memory\n");
288 return PTR_ERR(nemc->base);
289 }
290
291 writel(0, nemc->base + NEMC_NFCSR);
292
293 nemc->clk = devm_clk_get(dev, NULL);
294 if (IS_ERR(nemc->clk)) {
295 dev_err(dev, "failed to get clock\n");
296 return PTR_ERR(nemc->clk);
297 }
298
299 ret = clk_prepare_enable(nemc->clk);
300 if (ret) {
301 dev_err(dev, "failed to enable clock: %d\n", ret);
302 return ret;
303 }
304
305 nemc->clk_period = jz4780_nemc_clk_period(nemc);
306 if (!nemc->clk_period) {
307 dev_err(dev, "failed to calculate clock period\n");
308 clk_disable_unprepare(nemc->clk);
309 return -EINVAL;
310 }
311
312 /*
313 * Iterate over child devices, check that they do not conflict with
 314 * each other, and register platform devices for them. If a child device
315 * has invalid properties, it is ignored and no platform device is
316 * registered for it.
317 */
318 for_each_child_of_node(nemc->dev->of_node, child) {
319 referenced = 0;
320 i = 0;
321 while ((prop = of_get_address(child, i++, NULL, NULL))) {
322 bank = of_read_number(prop, 1);
323 if (bank < 1 || bank >= JZ4780_NEMC_NUM_BANKS) {
324 dev_err(nemc->dev,
325 "%s requests invalid bank %u\n",
326 child->full_name, bank);
327
328 /* Will continue the outer loop below. */
329 referenced = 0;
330 break;
331 }
332
333 referenced |= BIT(bank);
334 }
335
336 if (!referenced) {
337 dev_err(nemc->dev, "%s has no addresses\n",
338 child->full_name);
339 continue;
340 } else if (nemc->banks_present & referenced) {
341 dev_err(nemc->dev, "%s conflicts with another node\n",
342 child->full_name);
343 continue;
344 }
345
346 /* Configure bank parameters. */
347 for_each_set_bit(bank, &referenced, JZ4780_NEMC_NUM_BANKS) {
348 if (!jz4780_nemc_configure_bank(nemc, bank, child)) {
349 referenced = 0;
350 break;
351 }
352 }
353
354 if (referenced) {
355 if (of_platform_device_create(child, NULL, nemc->dev))
356 nemc->banks_present |= referenced;
357 }
358 }
359
360 platform_set_drvdata(pdev, nemc);
361 dev_info(dev, "JZ4780 NEMC initialised\n");
362 return 0;
363}
364
365static int jz4780_nemc_remove(struct platform_device *pdev)
366{
367 struct jz4780_nemc *nemc = platform_get_drvdata(pdev);
368
369 clk_disable_unprepare(nemc->clk);
370 return 0;
371}
372
373static const struct of_device_id jz4780_nemc_dt_match[] = {
374 { .compatible = "ingenic,jz4780-nemc" },
375 {},
376};
377
378static struct platform_driver jz4780_nemc_driver = {
379 .probe = jz4780_nemc_probe,
380 .remove = jz4780_nemc_remove,
381 .driver = {
382 .name = "jz4780-nemc",
383 .of_match_table = of_match_ptr(jz4780_nemc_dt_match),
384 },
385};
386
387static int __init jz4780_nemc_init(void)
388{
389 return platform_driver_register(&jz4780_nemc_driver);
390}
391subsys_initcall(jz4780_nemc_init);
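
For context (not part of the commit): a minimal sketch of how a child driver sitting behind the NEMC might use the helpers exported above. The probe flow, driver name and fixed bank number are hypothetical; only jz4780_nemc_num_banks(), jz4780_nemc_set_type(), jz4780_nemc_assert() and <linux/jz4780-nemc.h> come from this file.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/jz4780-nemc.h>

static int example_nand_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/* Count the banks listed in this child's "reg" property. */
	if (!jz4780_nemc_num_banks(dev))
		return -ENODEV;

	/* In a real driver the bank number comes from the "reg" entry. */
	jz4780_nemc_set_type(dev, 1, JZ4780_NEMC_BANK_NAND);

	/* Toggle chip enable around an access. */
	jz4780_nemc_assert(dev, 1, true);
	/* ... issue NAND commands here ... */
	jz4780_nemc_assert(dev, 1, false);

	return 0;
}
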
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 4c4a59b25537..7f90ce5a569a 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -230,6 +230,8 @@ static const struct i2c_device_id bh1780_id[] = {
230 { }, 230 { },
231}; 231};
232 232
233MODULE_DEVICE_TABLE(i2c, bh1780_id);
234
233#ifdef CONFIG_OF 235#ifdef CONFIG_OF
234static const struct of_device_id of_bh1780_match[] = { 236static const struct of_device_id of_bh1780_match[] = {
235 { .compatible = "rohm,bh1780gli", }, 237 { .compatible = "rohm,bh1780gli", },
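
The line added above is the standard autoload hook: MODULE_DEVICE_TABLE(i2c, ...) emits a module alias for each table entry so udev/modprobe can load the driver when a matching I2C device is instantiated. A generic sketch of the pattern (names are illustrative, not from bh1780gli):

#include <linux/module.h>
#include <linux/i2c.h>

static const struct i2c_device_id example_id[] = {
	{ "example-sensor", 0 },
	{ }
};
/* Emits modalias information so userspace can autoload this module. */
MODULE_DEVICE_TABLE(i2c, example_id);
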
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 06166ac000e0..0b1bd85e4ae6 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -479,6 +479,7 @@ static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
479static noinline int fpga_program_cpu(struct fpga_dev *priv) 479static noinline int fpga_program_cpu(struct fpga_dev *priv)
480{ 480{
481 int ret; 481 int ret;
482 unsigned long timeout;
482 483
483 /* Disable the programmer */ 484 /* Disable the programmer */
484 fpga_programmer_disable(priv); 485 fpga_programmer_disable(priv);
@@ -497,8 +498,8 @@ static noinline int fpga_program_cpu(struct fpga_dev *priv)
497 goto out_disable_controller; 498 goto out_disable_controller;
498 499
499 /* Wait for the interrupt handler to signal that programming finished */ 500 /* Wait for the interrupt handler to signal that programming finished */
500 ret = wait_for_completion_timeout(&priv->completion, 2 * HZ); 501 timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
501 if (!ret) { 502 if (!timeout) {
502 dev_err(priv->dev, "Timed out waiting for completion\n"); 503 dev_err(priv->dev, "Timed out waiting for completion\n");
503 ret = -ETIMEDOUT; 504 ret = -ETIMEDOUT;
504 goto out_disable_controller; 505 goto out_disable_controller;
@@ -536,6 +537,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
536 struct sg_table table; 537 struct sg_table table;
537 dma_cookie_t cookie; 538 dma_cookie_t cookie;
538 int ret, i; 539 int ret, i;
540 unsigned long timeout;
539 541
540 /* Disable the programmer */ 542 /* Disable the programmer */
541 fpga_programmer_disable(priv); 543 fpga_programmer_disable(priv);
@@ -623,8 +625,8 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
623 dev_dbg(priv->dev, "enabled the controller\n"); 625 dev_dbg(priv->dev, "enabled the controller\n");
624 626
625 /* Wait for the interrupt handler to signal that programming finished */ 627 /* Wait for the interrupt handler to signal that programming finished */
626 ret = wait_for_completion_timeout(&priv->completion, 2 * HZ); 628 timeout = wait_for_completion_timeout(&priv->completion, 2 * HZ);
627 if (!ret) { 629 if (!timeout) {
628 dev_err(priv->dev, "Timed out waiting for completion\n"); 630 dev_err(priv->dev, "Timed out waiting for completion\n");
629 ret = -ETIMEDOUT; 631 ret = -ETIMEDOUT;
630 goto out_disable_controller; 632 goto out_disable_controller;
@@ -1142,7 +1144,7 @@ out_return:
1142 return ret; 1144 return ret;
1143} 1145}
1144 1146
1145static struct of_device_id fpga_of_match[] = { 1147static const struct of_device_id fpga_of_match[] = {
1146 { .compatible = "carma,fpga-programmer", }, 1148 { .compatible = "carma,fpga-programmer", },
1147 {}, 1149 {},
1148}; 1150};
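
The new variable is needed because wait_for_completion_timeout() returns an unsigned long: 0 on timeout, otherwise the jiffies remaining. Keeping the result in the signed "ret" used for error codes conflates the two meanings; roughly, the intended pattern is:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Illustrative helper, not part of the carma driver. */
static int example_wait(struct completion *done)
{
	unsigned long timeout;

	timeout = wait_for_completion_timeout(done, 2 * HZ);
	if (!timeout)		/* 0 means the wait timed out */
		return -ETIMEDOUT;

	return 0;		/* otherwise: jiffies left before the deadline */
}
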
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 68cdfe151bdb..5aba3fd789de 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -1486,7 +1486,7 @@ static int data_of_remove(struct platform_device *op)
1486 return 0; 1486 return 0;
1487} 1487}
1488 1488
1489static struct of_device_id data_of_match[] = { 1489static const struct of_device_id data_of_match[] = {
1490 { .compatible = "carma,carma-fpga", }, 1490 { .compatible = "carma,carma-fpga", },
1491 {}, 1491 {},
1492}; 1492};
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 3ef4627f9cb1..4739689d23ad 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -950,6 +950,7 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
950 struct lis3lv02d_platform_data *pdata; 950 struct lis3lv02d_platform_data *pdata;
951 struct device_node *np = lis3->of_node; 951 struct device_node *np = lis3->of_node;
952 u32 val; 952 u32 val;
953 s32 sval;
953 954
954 if (!lis3->of_node) 955 if (!lis3->of_node)
955 return 0; 956 return 0;
@@ -1031,6 +1032,23 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
1031 pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO; 1032 pdata->wakeup_flags |= LIS3_WAKEUP_Z_LO;
1032 if (of_get_property(np, "st,wakeup-z-hi", NULL)) 1033 if (of_get_property(np, "st,wakeup-z-hi", NULL))
1033 pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI; 1034 pdata->wakeup_flags |= LIS3_WAKEUP_Z_HI;
1035 if (of_get_property(np, "st,wakeup-threshold", &val))
1036 pdata->wakeup_thresh = val;
1037
1038 if (of_get_property(np, "st,wakeup2-x-lo", NULL))
1039 pdata->wakeup_flags2 |= LIS3_WAKEUP_X_LO;
1040 if (of_get_property(np, "st,wakeup2-x-hi", NULL))
1041 pdata->wakeup_flags2 |= LIS3_WAKEUP_X_HI;
1042 if (of_get_property(np, "st,wakeup2-y-lo", NULL))
1043 pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_LO;
1044 if (of_get_property(np, "st,wakeup2-y-hi", NULL))
1045 pdata->wakeup_flags2 |= LIS3_WAKEUP_Y_HI;
1046 if (of_get_property(np, "st,wakeup2-z-lo", NULL))
1047 pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_LO;
1048 if (of_get_property(np, "st,wakeup2-z-hi", NULL))
1049 pdata->wakeup_flags2 |= LIS3_WAKEUP_Z_HI;
1050 if (of_get_property(np, "st,wakeup2-threshold", &val))
1051 pdata->wakeup_thresh2 = val;
1034 1052
1035 if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) { 1053 if (!of_property_read_u32(np, "st,highpass-cutoff-hz", &val)) {
1036 switch (val) { 1054 switch (val) {
@@ -1054,29 +1072,29 @@ int lis3lv02d_init_dt(struct lis3lv02d *lis3)
1054 if (of_get_property(np, "st,hipass2-disable", NULL)) 1072 if (of_get_property(np, "st,hipass2-disable", NULL))
1055 pdata->hipass_ctrl |= LIS3_HIPASS2_DISABLE; 1073 pdata->hipass_ctrl |= LIS3_HIPASS2_DISABLE;
1056 1074
1057 if (of_get_property(np, "st,axis-x", &val)) 1075 if (of_property_read_s32(np, "st,axis-x", &sval) == 0)
1058 pdata->axis_x = val; 1076 pdata->axis_x = sval;
1059 if (of_get_property(np, "st,axis-y", &val)) 1077 if (of_property_read_s32(np, "st,axis-y", &sval) == 0)
1060 pdata->axis_y = val; 1078 pdata->axis_y = sval;
1061 if (of_get_property(np, "st,axis-z", &val)) 1079 if (of_property_read_s32(np, "st,axis-z", &sval) == 0)
1062 pdata->axis_z = val; 1080 pdata->axis_z = sval;
1063 1081
1064 if (of_get_property(np, "st,default-rate", NULL)) 1082 if (of_get_property(np, "st,default-rate", NULL))
1065 pdata->default_rate = val; 1083 pdata->default_rate = val;
1066 1084
1067 if (of_get_property(np, "st,min-limit-x", &val)) 1085 if (of_property_read_s32(np, "st,min-limit-x", &sval) == 0)
1068 pdata->st_min_limits[0] = val; 1086 pdata->st_min_limits[0] = sval;
1069 if (of_get_property(np, "st,min-limit-y", &val)) 1087 if (of_property_read_s32(np, "st,min-limit-y", &sval) == 0)
1070 pdata->st_min_limits[1] = val; 1088 pdata->st_min_limits[1] = sval;
1071 if (of_get_property(np, "st,min-limit-z", &val)) 1089 if (of_property_read_s32(np, "st,min-limit-z", &sval) == 0)
1072 pdata->st_min_limits[2] = val; 1090 pdata->st_min_limits[2] = sval;
1073 1091
1074 if (of_get_property(np, "st,max-limit-x", &val)) 1092 if (of_property_read_s32(np, "st,max-limit-x", &sval) == 0)
1075 pdata->st_max_limits[0] = val; 1093 pdata->st_max_limits[0] = sval;
1076 if (of_get_property(np, "st,max-limit-y", &val)) 1094 if (of_property_read_s32(np, "st,max-limit-y", &sval) == 0)
1077 pdata->st_max_limits[1] = val; 1095 pdata->st_max_limits[1] = sval;
1078 if (of_get_property(np, "st,max-limit-z", &val)) 1096 if (of_property_read_s32(np, "st,max-limit-z", &sval) == 0)
1079 pdata->st_max_limits[2] = val; 1097 pdata->st_max_limits[2] = sval;
1080 1098
1081 1099
1082 lis3->pdata = pdata; 1100 lis3->pdata = pdata;
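
The switch to of_property_read_s32() above is more than style: of_get_property() returns a pointer to the raw big-endian property data and its third argument is a length out-parameter, so the old "of_get_property(np, ..., &val)" calls stored the property length in val rather than its value. of_property_read_s32()/of_property_read_u32() do the endian conversion and return 0 only on success. A minimal sketch (the helper name is made up, the property name is from the hunk above):

#include <linux/errno.h>
#include <linux/of.h>

static int example_read_axis(struct device_node *np, int *axis)
{
	s32 sval;

	/* Fills sval with the converted value; non-zero means "not set". */
	if (of_property_read_s32(np, "st,axis-x", &sval))
		return -EINVAL;

	*axis = sval;
	return 0;
}
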
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 63fe096d4462..e3e7f1dc27ba 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -106,7 +106,7 @@ static union axis_conversion lis3lv02d_axis_map =
106 { .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } }; 106 { .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } };
107 107
108#ifdef CONFIG_OF 108#ifdef CONFIG_OF
109static struct of_device_id lis3lv02d_i2c_dt_ids[] = { 109static const struct of_device_id lis3lv02d_i2c_dt_ids[] = {
110 { .compatible = "st,lis3lv02d" }, 110 { .compatible = "st,lis3lv02d" },
111 {} 111 {}
112}; 112};
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index bd06d0cfac45..b2f6e1651ac9 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -61,7 +61,7 @@ static union axis_conversion lis3lv02d_axis_normal =
61 { .as_array = { 1, 2, 3 } }; 61 { .as_array = { 1, 2, 3 } };
62 62
63#ifdef CONFIG_OF 63#ifdef CONFIG_OF
64static struct of_device_id lis302dl_spi_dt_ids[] = { 64static const struct of_device_id lis302dl_spi_dt_ids[] = {
65 { .compatible = "st,lis302dl-spi" }, 65 { .compatible = "st,lis302dl-spi" },
66 {} 66 {}
67}; 67};
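
The of_device_id constifications in the carma and lis3lv02d hunks work because match tables are only ever read through const pointers (of_match_table, of_match_device()), so they can live in rodata. The usual shape, with illustrative names:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

/* Read-only match table; const lets the compiler place it in rodata. */
static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-device" },
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);
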
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 8ebc6cda1373..518914a82b83 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -21,3 +21,6 @@ mei-me-objs += hw-me.o
21obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o 21obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
22mei-txe-objs := pci-txe.o 22mei-txe-objs := pci-txe.o
23mei-txe-objs += hw-txe.o 23mei-txe-objs += hw-txe.o
24
25mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
26CFLAGS_mei-trace.o = -I$(src)
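
The -I$(src) added above is the usual arrangement for a dedicated tracepoint unit: the C file that defines CREATE_TRACE_POINTS includes a trace header which names itself via TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE, and trace/define_trace.h then re-includes that header by name, so the driver's source directory must be on the include path. A generic sketch of the pattern (file and event names are illustrative, not taken from mei-trace.c):

/* example-trace.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

#if !defined(_EXAMPLE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _EXAMPLE_TRACE_H_

#include <linux/tracepoint.h>

TRACE_EVENT(example_event,
	TP_PROTO(unsigned int val),
	TP_ARGS(val),
	TP_STRUCT__entry(__field(unsigned int, val)),
	TP_fast_assign(__entry->val = val;),
	TP_printk("val=%u", __entry->val)
);

#endif /* _EXAMPLE_TRACE_H_ */

/* Must stay outside the include guard; this is what needs -I$(src). */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE example-trace
#include <trace/define_trace.h>

/* example-trace.c */
#define CREATE_TRACE_POINTS
#include "example-trace.h"
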
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 40ea639fa413..d2cd53e3fac3 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -48,10 +48,7 @@ void mei_amthif_reset_params(struct mei_device *dev)
48{ 48{
49 /* reset iamthif parameters. */ 49 /* reset iamthif parameters. */
50 dev->iamthif_current_cb = NULL; 50 dev->iamthif_current_cb = NULL;
51 dev->iamthif_msg_buf_size = 0;
52 dev->iamthif_msg_buf_index = 0;
53 dev->iamthif_canceled = false; 51 dev->iamthif_canceled = false;
54 dev->iamthif_ioctl = false;
55 dev->iamthif_state = MEI_IAMTHIF_IDLE; 52 dev->iamthif_state = MEI_IAMTHIF_IDLE;
56 dev->iamthif_timer = 0; 53 dev->iamthif_timer = 0;
57 dev->iamthif_stall_timer = 0; 54 dev->iamthif_stall_timer = 0;
@@ -69,7 +66,6 @@ int mei_amthif_host_init(struct mei_device *dev)
69{ 66{
70 struct mei_cl *cl = &dev->iamthif_cl; 67 struct mei_cl *cl = &dev->iamthif_cl;
71 struct mei_me_client *me_cl; 68 struct mei_me_client *me_cl;
72 unsigned char *msg_buf;
73 int ret; 69 int ret;
74 70
75 dev->iamthif_state = MEI_IAMTHIF_IDLE; 71 dev->iamthif_state = MEI_IAMTHIF_IDLE;
@@ -90,18 +86,6 @@ int mei_amthif_host_init(struct mei_device *dev)
90 dev->iamthif_mtu = me_cl->props.max_msg_length; 86 dev->iamthif_mtu = me_cl->props.max_msg_length;
91 dev_dbg(dev->dev, "IAMTHIF_MTU = %d\n", dev->iamthif_mtu); 87 dev_dbg(dev->dev, "IAMTHIF_MTU = %d\n", dev->iamthif_mtu);
92 88
93 kfree(dev->iamthif_msg_buf);
94 dev->iamthif_msg_buf = NULL;
95
96 /* allocate storage for ME message buffer */
97 msg_buf = kcalloc(dev->iamthif_mtu,
98 sizeof(unsigned char), GFP_KERNEL);
99 if (!msg_buf) {
100 ret = -ENOMEM;
101 goto out;
102 }
103
104 dev->iamthif_msg_buf = msg_buf;
105 89
106 ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); 90 ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
107 if (ret < 0) { 91 if (ret < 0) {
@@ -194,30 +178,33 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
194 dev_dbg(dev->dev, "woke up from sleep\n"); 178 dev_dbg(dev->dev, "woke up from sleep\n");
195 } 179 }
196 180
181 if (cb->status) {
182 rets = cb->status;
183 dev_dbg(dev->dev, "read operation failed %d\n", rets);
184 goto free;
185 }
197 186
198 dev_dbg(dev->dev, "Got amthif data\n"); 187 dev_dbg(dev->dev, "Got amthif data\n");
199 dev->iamthif_timer = 0; 188 dev->iamthif_timer = 0;
200 189
201 if (cb) { 190 timeout = cb->read_time +
202 timeout = cb->read_time + 191 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
203 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); 192 dev_dbg(dev->dev, "amthif timeout = %lud\n",
204 dev_dbg(dev->dev, "amthif timeout = %lud\n", 193 timeout);
205 timeout); 194
206 195 if (time_after(jiffies, timeout)) {
207 if (time_after(jiffies, timeout)) { 196 dev_dbg(dev->dev, "amthif Time out\n");
208 dev_dbg(dev->dev, "amthif Time out\n"); 197 /* 15 sec for the message has expired */
209 /* 15 sec for the message has expired */ 198 list_del_init(&cb->list);
210 list_del(&cb->list); 199 rets = -ETIME;
211 rets = -ETIME; 200 goto free;
212 goto free;
213 }
214 } 201 }
215 /* if the whole message will fit remove it from the list */ 202 /* if the whole message will fit remove it from the list */
216 if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset)) 203 if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
217 list_del(&cb->list); 204 list_del_init(&cb->list);
218 else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { 205 else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
219 /* end of the message has been reached */ 206 /* end of the message has been reached */
220 list_del(&cb->list); 207 list_del_init(&cb->list);
221 rets = 0; 208 rets = 0;
222 goto free; 209 goto free;
223 } 210 }
@@ -225,15 +212,15 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
225 * remove message from deletion list 212 * remove message from deletion list
226 */ 213 */
227 214
228 dev_dbg(dev->dev, "amthif cb->response_buffer size - %d\n", 215 dev_dbg(dev->dev, "amthif cb->buf size - %d\n",
229 cb->response_buffer.size); 216 cb->buf.size);
230 dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); 217 dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
231 218
232 /* length is being truncated to PAGE_SIZE, however, 219 /* length is being truncated to PAGE_SIZE, however,
233 * the buf_idx may point beyond */ 220 * the buf_idx may point beyond */
234 length = min_t(size_t, length, (cb->buf_idx - *offset)); 221 length = min_t(size_t, length, (cb->buf_idx - *offset));
235 222
236 if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { 223 if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
237 dev_dbg(dev->dev, "failed to copy data to userland\n"); 224 dev_dbg(dev->dev, "failed to copy data to userland\n");
238 rets = -EFAULT; 225 rets = -EFAULT;
239 } else { 226 } else {
@@ -252,126 +239,88 @@ out:
252} 239}
253 240
254/** 241/**
255 * mei_amthif_send_cmd - send amthif command to the ME 242 * mei_amthif_read_start - queue message for sending read credential
256 * 243 *
257 * @dev: the device structure 244 * @cl: host client
258 * @cb: mei call back struct 245 * @file: file pointer of message recipient
259 * 246 *
260 * Return: 0 on success, <0 on failure. 247 * Return: 0 on success, <0 on failure.
261 *
262 */ 248 */
263static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) 249static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
264{ 250{
265 struct mei_msg_hdr mei_hdr; 251 struct mei_device *dev = cl->dev;
266 struct mei_cl *cl; 252 struct mei_cl_cb *cb;
267 int ret; 253 size_t length = dev->iamthif_mtu;
268 254 int rets;
269 if (!dev || !cb)
270 return -ENODEV;
271 255
272 dev_dbg(dev->dev, "write data to amthif client.\n"); 256 cb = mei_io_cb_init(cl, MEI_FOP_READ, file);
257 if (!cb) {
258 rets = -ENOMEM;
259 goto err;
260 }
273 261
274 dev->iamthif_state = MEI_IAMTHIF_WRITING; 262 rets = mei_io_cb_alloc_buf(cb, length);
275 dev->iamthif_current_cb = cb; 263 if (rets)
276 dev->iamthif_file_object = cb->file_object; 264 goto err;
277 dev->iamthif_canceled = false;
278 dev->iamthif_ioctl = true;
279 dev->iamthif_msg_buf_size = cb->request_buffer.size;
280 memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
281 cb->request_buffer.size);
282 cl = &dev->iamthif_cl;
283 265
284 ret = mei_cl_flow_ctrl_creds(cl); 266 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
285 if (ret < 0)
286 return ret;
287 267
288 if (ret && mei_hbuf_acquire(dev)) { 268 dev->iamthif_state = MEI_IAMTHIF_READING;
289 ret = 0; 269 dev->iamthif_file_object = cb->file_object;
290 if (cb->request_buffer.size > mei_hbuf_max_len(dev)) { 270 dev->iamthif_current_cb = cb;
291 mei_hdr.length = mei_hbuf_max_len(dev);
292 mei_hdr.msg_complete = 0;
293 } else {
294 mei_hdr.length = cb->request_buffer.size;
295 mei_hdr.msg_complete = 1;
296 }
297 271
298 mei_hdr.host_addr = cl->host_client_id;
299 mei_hdr.me_addr = cl->me_client_id;
300 mei_hdr.reserved = 0;
301 mei_hdr.internal = 0;
302 dev->iamthif_msg_buf_index += mei_hdr.length;
303 ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf);
304 if (ret)
305 return ret;
306
307 if (mei_hdr.msg_complete) {
308 if (mei_cl_flow_ctrl_reduce(cl))
309 return -EIO;
310 dev->iamthif_flow_control_pending = true;
311 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
312 dev_dbg(dev->dev, "add amthif cb to write waiting list\n");
313 dev->iamthif_current_cb = cb;
314 dev->iamthif_file_object = cb->file_object;
315 list_add_tail(&cb->list, &dev->write_waiting_list.list);
316 } else {
317 dev_dbg(dev->dev, "message does not complete, so add amthif cb to write list.\n");
318 list_add_tail(&cb->list, &dev->write_list.list);
319 }
320 } else {
321 list_add_tail(&cb->list, &dev->write_list.list);
322 }
323 return 0; 272 return 0;
273err:
274 mei_io_cb_free(cb);
275 return rets;
324} 276}
325 277
326/** 278/**
327 * mei_amthif_write - write amthif data to amthif client 279 * mei_amthif_send_cmd - send amthif command to the ME
328 * 280 *
329 * @dev: the device structure 281 * @cl: the host client
330 * @cb: mei call back struct 282 * @cb: mei call back struct
331 * 283 *
332 * Return: 0 on success, <0 on failure. 284 * Return: 0 on success, <0 on failure.
333 *
334 */ 285 */
335int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb) 286static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
336{ 287{
288 struct mei_device *dev;
337 int ret; 289 int ret;
338 290
339 if (!dev || !cb) 291 if (!cl->dev || !cb)
340 return -ENODEV; 292 return -ENODEV;
341 293
342 ret = mei_io_cb_alloc_resp_buf(cb, dev->iamthif_mtu); 294 dev = cl->dev;
343 if (ret) 295
296 dev->iamthif_state = MEI_IAMTHIF_WRITING;
297 dev->iamthif_current_cb = cb;
298 dev->iamthif_file_object = cb->file_object;
299 dev->iamthif_canceled = false;
300
301 ret = mei_cl_write(cl, cb, false);
302 if (ret < 0)
344 return ret; 303 return ret;
345 304
346 cb->fop_type = MEI_FOP_WRITE; 305 if (cb->completed)
306 cb->status = mei_amthif_read_start(cl, cb->file_object);
347 307
348 if (!list_empty(&dev->amthif_cmd_list.list) || 308 return 0;
349 dev->iamthif_state != MEI_IAMTHIF_IDLE) {
350 dev_dbg(dev->dev,
351 "amthif state = %d\n", dev->iamthif_state);
352 dev_dbg(dev->dev, "AMTHIF: add cb to the wait list\n");
353 list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
354 return 0;
355 }
356 return mei_amthif_send_cmd(dev, cb);
357} 309}
310
358/** 311/**
359 * mei_amthif_run_next_cmd - send next amt command from queue 312 * mei_amthif_run_next_cmd - send next amt command from queue
360 * 313 *
361 * @dev: the device structure 314 * @dev: the device structure
315 *
316 * Return: 0 on success, <0 on failure.
362 */ 317 */
363void mei_amthif_run_next_cmd(struct mei_device *dev) 318int mei_amthif_run_next_cmd(struct mei_device *dev)
364{ 319{
320 struct mei_cl *cl = &dev->iamthif_cl;
365 struct mei_cl_cb *cb; 321 struct mei_cl_cb *cb;
366 int ret;
367
368 if (!dev)
369 return;
370 322
371 dev->iamthif_msg_buf_size = 0;
372 dev->iamthif_msg_buf_index = 0;
373 dev->iamthif_canceled = false; 323 dev->iamthif_canceled = false;
374 dev->iamthif_ioctl = true;
375 dev->iamthif_state = MEI_IAMTHIF_IDLE; 324 dev->iamthif_state = MEI_IAMTHIF_IDLE;
376 dev->iamthif_timer = 0; 325 dev->iamthif_timer = 0;
377 dev->iamthif_file_object = NULL; 326 dev->iamthif_file_object = NULL;
@@ -381,13 +330,48 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
381 cb = list_first_entry_or_null(&dev->amthif_cmd_list.list, 330 cb = list_first_entry_or_null(&dev->amthif_cmd_list.list,
382 typeof(*cb), list); 331 typeof(*cb), list);
383 if (!cb) 332 if (!cb)
384 return; 333 return 0;
385 list_del(&cb->list); 334
386 ret = mei_amthif_send_cmd(dev, cb); 335 list_del_init(&cb->list);
387 if (ret) 336 return mei_amthif_send_cmd(cl, cb);
388 dev_warn(dev->dev, "amthif write failed status = %d\n", ret);
389} 337}
390 338
339/**
340 * mei_amthif_write - write amthif data to amthif client
341 *
342 * @cl: host client
343 * @cb: mei call back struct
344 *
345 * Return: 0 on success, <0 on failure.
346 */
347int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
348{
349
350 struct mei_device *dev;
351
352 if (WARN_ON(!cl || !cl->dev))
353 return -ENODEV;
354
355 if (WARN_ON(!cb))
356 return -EINVAL;
357
358 dev = cl->dev;
359
360 list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
361 return mei_amthif_run_next_cmd(dev);
362}
363
364/**
365 * mei_amthif_poll - the amthif poll function
366 *
367 * @dev: the device structure
368 * @file: pointer to file structure
369 * @wait: pointer to poll_table structure
370 *
371 * Return: poll mask
372 *
373 * Locking: called under "dev->device_lock" lock
374 */
391 375
392unsigned int mei_amthif_poll(struct mei_device *dev, 376unsigned int mei_amthif_poll(struct mei_device *dev,
393 struct file *file, poll_table *wait) 377 struct file *file, poll_table *wait)
@@ -396,19 +380,12 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
396 380
397 poll_wait(file, &dev->iamthif_cl.wait, wait); 381 poll_wait(file, &dev->iamthif_cl.wait, wait);
398 382
399 mutex_lock(&dev->device_lock); 383 if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
400 if (!mei_cl_is_connected(&dev->iamthif_cl)) { 384 dev->iamthif_file_object == file) {
401
402 mask = POLLERR;
403
404 } else if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
405 dev->iamthif_file_object == file) {
406 385
407 mask |= (POLLIN | POLLRDNORM); 386 mask |= POLLIN | POLLRDNORM;
408 dev_dbg(dev->dev, "run next amthif cb\n");
409 mei_amthif_run_next_cmd(dev); 387 mei_amthif_run_next_cmd(dev);
410 } 388 }
411 mutex_unlock(&dev->device_lock);
412 389
413 return mask; 390 return mask;
414} 391}
@@ -427,71 +404,14 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
427int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 404int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
428 struct mei_cl_cb *cmpl_list) 405 struct mei_cl_cb *cmpl_list)
429{ 406{
430 struct mei_device *dev = cl->dev; 407 int ret;
431 struct mei_msg_hdr mei_hdr;
432 size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
433 u32 msg_slots = mei_data2slots(len);
434 int slots;
435 int rets;
436
437 rets = mei_cl_flow_ctrl_creds(cl);
438 if (rets < 0)
439 return rets;
440
441 if (rets == 0) {
442 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
443 return 0;
444 }
445
446 mei_hdr.host_addr = cl->host_client_id;
447 mei_hdr.me_addr = cl->me_client_id;
448 mei_hdr.reserved = 0;
449 mei_hdr.internal = 0;
450
451 slots = mei_hbuf_empty_slots(dev);
452
453 if (slots >= msg_slots) {
454 mei_hdr.length = len;
455 mei_hdr.msg_complete = 1;
456 /* Split the message only if we can write the whole host buffer */
457 } else if (slots == dev->hbuf_depth) {
458 msg_slots = slots;
459 len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
460 mei_hdr.length = len;
461 mei_hdr.msg_complete = 0;
462 } else {
463 /* wait for next time the host buffer is empty */
464 return 0;
465 }
466
467 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
468
469 rets = mei_write_message(dev, &mei_hdr,
470 dev->iamthif_msg_buf + dev->iamthif_msg_buf_index);
471 if (rets) {
472 dev->iamthif_state = MEI_IAMTHIF_IDLE;
473 cl->status = rets;
474 list_del(&cb->list);
475 return rets;
476 }
477
478 if (mei_cl_flow_ctrl_reduce(cl))
479 return -EIO;
480
481 dev->iamthif_msg_buf_index += mei_hdr.length;
482 cl->status = 0;
483
484 if (mei_hdr.msg_complete) {
485 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
486 dev->iamthif_flow_control_pending = true;
487
488 /* save iamthif cb sent to amthif client */
489 cb->buf_idx = dev->iamthif_msg_buf_index;
490 dev->iamthif_current_cb = cb;
491 408
492 list_move_tail(&cb->list, &dev->write_waiting_list.list); 409 ret = mei_cl_irq_write(cl, cb, cmpl_list);
493 } 410 if (ret)
411 return ret;
494 412
413 if (cb->completed)
414 cb->status = mei_amthif_read_start(cl, cb->file_object);
495 415
496 return 0; 416 return 0;
497} 417}
@@ -500,83 +420,35 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
500 * mei_amthif_irq_read_msg - read routine after ISR to 420 * mei_amthif_irq_read_msg - read routine after ISR to
501 * handle the read amthif message 421 * handle the read amthif message
502 * 422 *
503 * @dev: the device structure 423 * @cl: mei client
504 * @mei_hdr: header of amthif message 424 * @mei_hdr: header of amthif message
505 * @complete_list: An instance of our list structure 425 * @cmpl_list: completed callbacks list
506 * 426 *
507 * Return: 0 on success, <0 on failure. 427 * Return: -ENODEV if cb is NULL 0 otherwise; error message is in cb->status
508 */ 428 */
509int mei_amthif_irq_read_msg(struct mei_device *dev, 429int mei_amthif_irq_read_msg(struct mei_cl *cl,
510 struct mei_msg_hdr *mei_hdr, 430 struct mei_msg_hdr *mei_hdr,
511 struct mei_cl_cb *complete_list) 431 struct mei_cl_cb *cmpl_list)
512{ 432{
513 struct mei_cl_cb *cb; 433 struct mei_device *dev;
514 unsigned char *buffer; 434 int ret;
515
516 BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
517 BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
518 435
519 buffer = dev->iamthif_msg_buf + dev->iamthif_msg_buf_index; 436 dev = cl->dev;
520 BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
521 437
522 mei_read_slots(dev, buffer, mei_hdr->length); 438 if (dev->iamthif_state != MEI_IAMTHIF_READING)
439 return 0;
523 440
524 dev->iamthif_msg_buf_index += mei_hdr->length; 441 ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
442 if (ret)
443 return ret;
525 444
526 if (!mei_hdr->msg_complete) 445 if (!mei_hdr->msg_complete)
527 return 0; 446 return 0;
528 447
529 dev_dbg(dev->dev, "amthif_message_buffer_index =%d\n",
530 mei_hdr->length);
531
532 dev_dbg(dev->dev, "completed amthif read.\n "); 448 dev_dbg(dev->dev, "completed amthif read.\n ");
533 if (!dev->iamthif_current_cb)
534 return -ENODEV;
535
536 cb = dev->iamthif_current_cb;
537 dev->iamthif_current_cb = NULL; 449 dev->iamthif_current_cb = NULL;
538
539 dev->iamthif_stall_timer = 0; 450 dev->iamthif_stall_timer = 0;
540 cb->buf_idx = dev->iamthif_msg_buf_index;
541 cb->read_time = jiffies;
542 if (dev->iamthif_ioctl) {
543 /* found the iamthif cb */
544 dev_dbg(dev->dev, "complete the amthif read cb.\n ");
545 dev_dbg(dev->dev, "add the amthif read cb to complete.\n ");
546 list_add_tail(&cb->list, &complete_list->list);
547 }
548 return 0;
549}
550
551/**
552 * mei_amthif_irq_read - prepares to read amthif data.
553 *
554 * @dev: the device structure.
555 * @slots: free slots.
556 *
557 * Return: 0, OK; otherwise, error.
558 */
559int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
560{
561 u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
562
563 if (*slots < msg_slots)
564 return -EMSGSIZE;
565
566 *slots -= msg_slots;
567
568 if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
569 dev_dbg(dev->dev, "iamthif flow control failed\n");
570 return -EIO;
571 }
572 451
573 dev_dbg(dev->dev, "iamthif flow control success\n");
574 dev->iamthif_state = MEI_IAMTHIF_READING;
575 dev->iamthif_flow_control_pending = false;
576 dev->iamthif_msg_buf_index = 0;
577 dev->iamthif_msg_buf_size = 0;
578 dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
579 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
580 return 0; 452 return 0;
581} 453}
582 454
@@ -588,17 +460,30 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
588 */ 460 */
589void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) 461void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
590{ 462{
463
464 if (cb->fop_type == MEI_FOP_WRITE) {
465 if (!cb->status) {
466 dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
467 mei_io_cb_free(cb);
468 return;
469 }
470 /*
471 * in case of error enqueue the write cb to complete read list
472 * so it can be propagated to the reader
473 */
474 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
475 wake_up_interruptible(&dev->iamthif_cl.wait);
476 return;
477 }
478
591 if (dev->iamthif_canceled != 1) { 479 if (dev->iamthif_canceled != 1) {
592 dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE; 480 dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
593 dev->iamthif_stall_timer = 0; 481 dev->iamthif_stall_timer = 0;
594 memcpy(cb->response_buffer.data,
595 dev->iamthif_msg_buf,
596 dev->iamthif_msg_buf_index);
597 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); 482 list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
598 dev_dbg(dev->dev, "amthif read completed\n"); 483 dev_dbg(dev->dev, "amthif read completed\n");
599 dev->iamthif_timer = jiffies; 484 dev->iamthif_timer = jiffies;
600 dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n", 485 dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
601 dev->iamthif_timer); 486 dev->iamthif_timer);
602 } else { 487 } else {
603 mei_amthif_run_next_cmd(dev); 488 mei_amthif_run_next_cmd(dev);
604 } 489 }
@@ -623,26 +508,22 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
623static bool mei_clear_list(struct mei_device *dev, 508static bool mei_clear_list(struct mei_device *dev,
624 const struct file *file, struct list_head *mei_cb_list) 509 const struct file *file, struct list_head *mei_cb_list)
625{ 510{
626 struct mei_cl_cb *cb_pos = NULL; 511 struct mei_cl *cl = &dev->iamthif_cl;
627 struct mei_cl_cb *cb_next = NULL; 512 struct mei_cl_cb *cb, *next;
628 bool removed = false; 513 bool removed = false;
629 514
630 /* list all list member */ 515 /* list all list member */
631 list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, list) { 516 list_for_each_entry_safe(cb, next, mei_cb_list, list) {
632 /* check if list member associated with a file */ 517 /* check if list member associated with a file */
633 if (file == cb_pos->file_object) { 518 if (file == cb->file_object) {
634 /* remove member from the list */
635 list_del(&cb_pos->list);
636 /* check if cb equal to current iamthif cb */ 519 /* check if cb equal to current iamthif cb */
637 if (dev->iamthif_current_cb == cb_pos) { 520 if (dev->iamthif_current_cb == cb) {
638 dev->iamthif_current_cb = NULL; 521 dev->iamthif_current_cb = NULL;
639 /* send flow control to iamthif client */ 522 /* send flow control to iamthif client */
640 mei_hbm_cl_flow_control_req(dev, 523 mei_hbm_cl_flow_control_req(dev, cl);
641 &dev->iamthif_cl);
642 } 524 }
643 /* free all allocated buffers */ 525 /* free all allocated buffers */
644 mei_io_cb_free(cb_pos); 526 mei_io_cb_free(cb);
645 cb_pos = NULL;
646 removed = true; 527 removed = true;
647 } 528 }
648 } 529 }
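
A side note on the list_del() -> list_del_init() swaps in the hunks above: list_del() leaves the node's pointers poisoned, while list_del_init() re-initialises them, so the callback can later be tested with list_empty() or unlinked again without touching poison values. A tiny illustration with a made-up structure:

#include <linux/list.h>

struct example_cb {
	struct list_head list;
};

static void example_dequeue(struct example_cb *cb)
{
	/*
	 * Leaves cb->list as a valid empty list, so a later flush that
	 * walks a queue and unlinks it again (or checks list_empty())
	 * is harmless.
	 */
	list_del_init(&cb->list);
}
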
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index be767f4db26a..4cf38c39878a 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -238,7 +238,7 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
238 dev = cl->dev; 238 dev = cl->dev;
239 239
240 mutex_lock(&dev->device_lock); 240 mutex_lock(&dev->device_lock);
241 if (cl->state != MEI_FILE_CONNECTED) { 241 if (!mei_cl_is_connected(cl)) {
242 rets = -ENODEV; 242 rets = -ENODEV;
243 goto out; 243 goto out;
244 } 244 }
@@ -255,17 +255,13 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
255 goto out; 255 goto out;
256 } 256 }
257 257
258 cb = mei_io_cb_init(cl, NULL); 258 cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
259 if (!cb) { 259 if (!cb) {
260 rets = -ENOMEM; 260 rets = -ENOMEM;
261 goto out; 261 goto out;
262 } 262 }
263 263
264 rets = mei_io_cb_alloc_req_buf(cb, length); 264 memcpy(cb->buf.data, buf, length);
265 if (rets < 0)
266 goto out;
267
268 memcpy(cb->request_buffer.data, buf, length);
269 265
270 rets = mei_cl_write(cl, cb, blocking); 266 rets = mei_cl_write(cl, cb, blocking);
271 267
@@ -292,20 +288,21 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
292 288
293 mutex_lock(&dev->device_lock); 289 mutex_lock(&dev->device_lock);
294 290
295 if (!cl->read_cb) { 291 cb = mei_cl_read_cb(cl, NULL);
296 rets = mei_cl_read_start(cl, length); 292 if (cb)
297 if (rets < 0) 293 goto copy;
298 goto out;
299 }
300 294
301 if (cl->reading_state != MEI_READ_COMPLETE && 295 rets = mei_cl_read_start(cl, length, NULL);
302 !waitqueue_active(&cl->rx_wait)) { 296 if (rets && rets != -EBUSY)
297 goto out;
298
299 if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
303 300
304 mutex_unlock(&dev->device_lock); 301 mutex_unlock(&dev->device_lock);
305 302
306 if (wait_event_interruptible(cl->rx_wait, 303 if (wait_event_interruptible(cl->rx_wait,
307 cl->reading_state == MEI_READ_COMPLETE || 304 (!list_empty(&cl->rd_completed)) ||
308 mei_cl_is_transitioning(cl))) { 305 (!mei_cl_is_connected(cl)))) {
309 306
310 if (signal_pending(current)) 307 if (signal_pending(current))
311 return -EINTR; 308 return -EINTR;
@@ -313,23 +310,31 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
313 } 310 }
314 311
315 mutex_lock(&dev->device_lock); 312 mutex_lock(&dev->device_lock);
316 }
317 313
318 cb = cl->read_cb; 314 if (!mei_cl_is_connected(cl)) {
315 rets = -EBUSY;
316 goto out;
317 }
318 }
319 319
320 if (cl->reading_state != MEI_READ_COMPLETE) { 320 cb = mei_cl_read_cb(cl, NULL);
321 if (!cb) {
321 rets = 0; 322 rets = 0;
322 goto out; 323 goto out;
323 } 324 }
324 325
326copy:
327 if (cb->status) {
328 rets = cb->status;
329 goto free;
330 }
331
325 r_length = min_t(size_t, length, cb->buf_idx); 332 r_length = min_t(size_t, length, cb->buf_idx);
326 memcpy(buf, cb->response_buffer.data, r_length); 333 memcpy(buf, cb->buf.data, r_length);
327 rets = r_length; 334 rets = r_length;
328 335
336free:
329 mei_io_cb_free(cb); 337 mei_io_cb_free(cb);
330 cl->reading_state = MEI_IDLE;
331 cl->read_cb = NULL;
332
333out: 338out:
334 mutex_unlock(&dev->device_lock); 339 mutex_unlock(&dev->device_lock);
335 340
@@ -386,7 +391,7 @@ static void mei_bus_event_work(struct work_struct *work)
386 device->events = 0; 391 device->events = 0;
387 392
388 /* Prepare for the next read */ 393 /* Prepare for the next read */
389 mei_cl_read_start(device->cl, 0); 394 mei_cl_read_start(device->cl, 0, NULL);
390} 395}
391 396
392int mei_cl_register_event_cb(struct mei_cl_device *device, 397int mei_cl_register_event_cb(struct mei_cl_device *device,
@@ -400,7 +405,7 @@ int mei_cl_register_event_cb(struct mei_cl_device *device,
400 device->event_context = context; 405 device->event_context = context;
401 INIT_WORK(&device->event_work, mei_bus_event_work); 406 INIT_WORK(&device->event_work, mei_bus_event_work);
402 407
403 mei_cl_read_start(device->cl, 0); 408 mei_cl_read_start(device->cl, 0, NULL);
404 409
405 return 0; 410 return 0;
406} 411}
@@ -441,8 +446,8 @@ int mei_cl_enable_device(struct mei_cl_device *device)
441 446
442 mutex_unlock(&dev->device_lock); 447 mutex_unlock(&dev->device_lock);
443 448
444 if (device->event_cb && !cl->read_cb) 449 if (device->event_cb)
445 mei_cl_read_start(device->cl, 0); 450 mei_cl_read_start(device->cl, 0, NULL);
446 451
447 if (!device->ops || !device->ops->enable) 452 if (!device->ops || !device->ops->enable)
448 return 0; 453 return 0;
@@ -462,54 +467,34 @@ int mei_cl_disable_device(struct mei_cl_device *device)
462 467
463 dev = cl->dev; 468 dev = cl->dev;
464 469
470 if (device->ops && device->ops->disable)
471 device->ops->disable(device);
472
473 device->event_cb = NULL;
474
465 mutex_lock(&dev->device_lock); 475 mutex_lock(&dev->device_lock);
466 476
467 if (cl->state != MEI_FILE_CONNECTED) { 477 if (!mei_cl_is_connected(cl)) {
468 mutex_unlock(&dev->device_lock);
469 dev_err(dev->dev, "Already disconnected"); 478 dev_err(dev->dev, "Already disconnected");
470 479 err = 0;
471 return 0; 480 goto out;
472 } 481 }
473 482
474 cl->state = MEI_FILE_DISCONNECTING; 483 cl->state = MEI_FILE_DISCONNECTING;
475 484
476 err = mei_cl_disconnect(cl); 485 err = mei_cl_disconnect(cl);
477 if (err < 0) { 486 if (err < 0) {
478 mutex_unlock(&dev->device_lock); 487 dev_err(dev->dev, "Could not disconnect from the ME client");
479 dev_err(dev->dev, 488 goto out;
480 "Could not disconnect from the ME client");
481
482 return err;
483 } 489 }
484 490
485 /* Flush queues and remove any pending read */ 491 /* Flush queues and remove any pending read */
486 mei_cl_flush_queues(cl); 492 mei_cl_flush_queues(cl, NULL);
487
488 if (cl->read_cb) {
489 struct mei_cl_cb *cb = NULL;
490
491 cb = mei_cl_find_read_cb(cl);
492 /* Remove entry from read list */
493 if (cb)
494 list_del(&cb->list);
495
496 cb = cl->read_cb;
497 cl->read_cb = NULL;
498
499 if (cb) {
500 mei_io_cb_free(cb);
501 cb = NULL;
502 }
503 }
504
505 device->event_cb = NULL;
506 493
494out:
507 mutex_unlock(&dev->device_lock); 495 mutex_unlock(&dev->device_lock);
496 return err;
508 497
509 if (!device->ops || !device->ops->disable)
510 return 0;
511
512 return device->ops->disable(device);
513} 498}
514EXPORT_SYMBOL_GPL(mei_cl_disable_device); 499EXPORT_SYMBOL_GPL(mei_cl_disable_device);
515 500
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index dfbddfe1c7a0..1e99ef6a54a2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -48,14 +48,14 @@ void mei_me_cl_init(struct mei_me_client *me_cl)
48 */ 48 */
49struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl) 49struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
50{ 50{
51 if (me_cl) 51 if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
52 kref_get(&me_cl->refcnt); 52 return me_cl;
53 53
54 return me_cl; 54 return NULL;
55} 55}
56 56
57/** 57/**
58 * mei_me_cl_release - unlink and free me client 58 * mei_me_cl_release - free me client
59 * 59 *
60 * Locking: called under "dev->device_lock" lock 60 * Locking: called under "dev->device_lock" lock
61 * 61 *
@@ -65,9 +65,10 @@ static void mei_me_cl_release(struct kref *ref)
65{ 65{
66 struct mei_me_client *me_cl = 66 struct mei_me_client *me_cl =
67 container_of(ref, struct mei_me_client, refcnt); 67 container_of(ref, struct mei_me_client, refcnt);
68 list_del(&me_cl->list); 68
69 kfree(me_cl); 69 kfree(me_cl);
70} 70}
71
71/** 72/**
72 * mei_me_cl_put - decrease me client refcount and free client if necessary 73 * mei_me_cl_put - decrease me client refcount and free client if necessary
73 * 74 *
@@ -82,51 +83,146 @@ void mei_me_cl_put(struct mei_me_client *me_cl)
82} 83}
83 84
84/** 85/**
85 * mei_me_cl_by_uuid - locate me client by uuid 86 * __mei_me_cl_del - delete me client form the list and decrease
87 * reference counter
88 *
89 * @dev: mei device
90 * @me_cl: me client
91 *
92 * Locking: dev->me_clients_rwsem
93 */
94static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
95{
96 if (!me_cl)
97 return;
98
99 list_del(&me_cl->list);
100 mei_me_cl_put(me_cl);
101}
102
103/**
104 * mei_me_cl_add - add me client to the list
105 *
106 * @dev: mei device
107 * @me_cl: me client
108 */
109void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
110{
111 down_write(&dev->me_clients_rwsem);
112 list_add(&me_cl->list, &dev->me_clients);
113 up_write(&dev->me_clients_rwsem);
114}
115
116/**
117 * __mei_me_cl_by_uuid - locate me client by uuid
86 * increases ref count 118 * increases ref count
87 * 119 *
88 * @dev: mei device 120 * @dev: mei device
89 * @uuid: me client uuid 121 * @uuid: me client uuid
90 * 122 *
91 * Locking: called under "dev->device_lock" lock
92 *
93 * Return: me client or NULL if not found 123 * Return: me client or NULL if not found
124 *
125 * Locking: dev->me_clients_rwsem
94 */ 126 */
95struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev, 127static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
96 const uuid_le *uuid) 128 const uuid_le *uuid)
97{ 129{
98 struct mei_me_client *me_cl; 130 struct mei_me_client *me_cl;
131 const uuid_le *pn;
99 132
100 list_for_each_entry(me_cl, &dev->me_clients, list) 133 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
101 if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0) 134
135 list_for_each_entry(me_cl, &dev->me_clients, list) {
136 pn = &me_cl->props.protocol_name;
137 if (uuid_le_cmp(*uuid, *pn) == 0)
102 return mei_me_cl_get(me_cl); 138 return mei_me_cl_get(me_cl);
139 }
103 140
104 return NULL; 141 return NULL;
105} 142}
106 143
107/** 144/**
145 * mei_me_cl_by_uuid - locate me client by uuid
146 * increases ref count
147 *
148 * @dev: mei device
149 * @uuid: me client uuid
150 *
151 * Return: me client or NULL if not found
152 *
153 * Locking: dev->me_clients_rwsem
154 */
155struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
156 const uuid_le *uuid)
157{
158 struct mei_me_client *me_cl;
159
160 down_read(&dev->me_clients_rwsem);
161 me_cl = __mei_me_cl_by_uuid(dev, uuid);
162 up_read(&dev->me_clients_rwsem);
163
164 return me_cl;
165}
166
167/**
108 * mei_me_cl_by_id - locate me client by client id 168 * mei_me_cl_by_id - locate me client by client id
109 * increases ref count 169 * increases ref count
110 * 170 *
111 * @dev: the device structure 171 * @dev: the device structure
112 * @client_id: me client id 172 * @client_id: me client id
113 * 173 *
114 * Locking: called under "dev->device_lock" lock
115 *
116 * Return: me client or NULL if not found 174 * Return: me client or NULL if not found
175 *
176 * Locking: dev->me_clients_rwsem
117 */ 177 */
118struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id) 178struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
119{ 179{
120 180
181 struct mei_me_client *__me_cl, *me_cl = NULL;
182
183 down_read(&dev->me_clients_rwsem);
184 list_for_each_entry(__me_cl, &dev->me_clients, list) {
185 if (__me_cl->client_id == client_id) {
186 me_cl = mei_me_cl_get(__me_cl);
187 break;
188 }
189 }
190 up_read(&dev->me_clients_rwsem);
191
192 return me_cl;
193}
194
195/**
196 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
197 * increases ref count
198 *
199 * @dev: the device structure
200 * @uuid: me client uuid
201 * @client_id: me client id
202 *
203 * Return: me client or null if not found
204 *
205 * Locking: dev->me_clients_rwsem
206 */
207static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
208 const uuid_le *uuid, u8 client_id)
209{
121 struct mei_me_client *me_cl; 210 struct mei_me_client *me_cl;
211 const uuid_le *pn;
212
213 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
122 214
123 list_for_each_entry(me_cl, &dev->me_clients, list) 215 list_for_each_entry(me_cl, &dev->me_clients, list) {
124 if (me_cl->client_id == client_id) 216 pn = &me_cl->props.protocol_name;
217 if (uuid_le_cmp(*uuid, *pn) == 0 &&
218 me_cl->client_id == client_id)
125 return mei_me_cl_get(me_cl); 219 return mei_me_cl_get(me_cl);
220 }
126 221
127 return NULL; 222 return NULL;
128} 223}
129 224
225
130/** 226/**
131 * mei_me_cl_by_uuid_id - locate me client by client id and uuid 227 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
132 * increases ref count 228 * increases ref count
@@ -135,21 +231,18 @@ struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
135 * @uuid: me client uuid 231 * @uuid: me client uuid
136 * @client_id: me client id 232 * @client_id: me client id
137 * 233 *
138 * Locking: called under "dev->device_lock" lock 234 * Return: me client or null if not found
139 *
140 * Return: me client or NULL if not found
141 */ 235 */
142struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, 236struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
143 const uuid_le *uuid, u8 client_id) 237 const uuid_le *uuid, u8 client_id)
144{ 238{
145 struct mei_me_client *me_cl; 239 struct mei_me_client *me_cl;
146 240
147 list_for_each_entry(me_cl, &dev->me_clients, list) 241 down_read(&dev->me_clients_rwsem);
148 if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0 && 242 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
149 me_cl->client_id == client_id) 243 up_read(&dev->me_clients_rwsem);
150 return mei_me_cl_get(me_cl);
151 244
152 return NULL; 245 return me_cl;
153} 246}
154 247
155/** 248/**
@@ -162,12 +255,14 @@ struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
162 */ 255 */
163void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) 256void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
164{ 257{
165 struct mei_me_client *me_cl, *next; 258 struct mei_me_client *me_cl;
166 259
167 dev_dbg(dev->dev, "remove %pUl\n", uuid); 260 dev_dbg(dev->dev, "remove %pUl\n", uuid);
168 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) 261
169 if (uuid_le_cmp(*uuid, me_cl->props.protocol_name) == 0) 262 down_write(&dev->me_clients_rwsem);
170 mei_me_cl_put(me_cl); 263 me_cl = __mei_me_cl_by_uuid(dev, uuid);
264 __mei_me_cl_del(dev, me_cl);
265 up_write(&dev->me_clients_rwsem);
171} 266}
172 267
173/** 268/**
@@ -181,15 +276,14 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
181 */ 276 */
182void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) 277void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
183{ 278{
184 struct mei_me_client *me_cl, *next; 279 struct mei_me_client *me_cl;
185 const uuid_le *pn;
186 280
187 dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id); 281 dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
188 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) { 282
189 pn = &me_cl->props.protocol_name; 283 down_write(&dev->me_clients_rwsem);
190 if (me_cl->client_id == id && uuid_le_cmp(*uuid, *pn) == 0) 284 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
191 mei_me_cl_put(me_cl); 285 __mei_me_cl_del(dev, me_cl);
192 } 286 up_write(&dev->me_clients_rwsem);
193} 287}
194 288
195/** 289/**
@@ -203,12 +297,12 @@ void mei_me_cl_rm_all(struct mei_device *dev)
203{ 297{
204 struct mei_me_client *me_cl, *next; 298 struct mei_me_client *me_cl, *next;
205 299
300 down_write(&dev->me_clients_rwsem);
206 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) 301 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
207 mei_me_cl_put(me_cl); 302 __mei_me_cl_del(dev, me_cl);
303 up_write(&dev->me_clients_rwsem);
208} 304}
209 305
210
211
212/** 306/**
213 * mei_cl_cmp_id - tells if the clients are the same 307 * mei_cl_cmp_id - tells if the clients are the same
214 * 308 *
@@ -227,7 +321,48 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
227} 321}
228 322
229/** 323/**
230 * mei_io_list_flush - removes cbs belonging to cl. 324 * mei_io_cb_free - free mei_cb_private related memory
325 *
326 * @cb: mei callback struct
327 */
328void mei_io_cb_free(struct mei_cl_cb *cb)
329{
330 if (cb == NULL)
331 return;
332
333 list_del(&cb->list);
334 kfree(cb->buf.data);
335 kfree(cb);
336}
337
338/**
339 * mei_io_cb_init - allocate and initialize io callback
340 *
341 * @cl: mei client
342 * @type: operation type
343 * @fp: pointer to file structure
344 *
345 * Return: mei_cl_cb pointer or NULL;
346 */
347struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
348 struct file *fp)
349{
350 struct mei_cl_cb *cb;
351
352 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
353 if (!cb)
354 return NULL;
355
356 INIT_LIST_HEAD(&cb->list);
357 cb->file_object = fp;
358 cb->cl = cl;
359 cb->buf_idx = 0;
360 cb->fop_type = type;
361 return cb;
362}
363
364/**
365 * __mei_io_list_flush - removes and frees cbs belonging to cl.
231 * 366 *
232 * @list: an instance of our list structure 367 * @list: an instance of our list structure
233 * @cl: host client, can be NULL for flushing the whole list 368 * @cl: host client, can be NULL for flushing the whole list
@@ -236,13 +371,12 @@ static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
236static void __mei_io_list_flush(struct mei_cl_cb *list, 371static void __mei_io_list_flush(struct mei_cl_cb *list,
237 struct mei_cl *cl, bool free) 372 struct mei_cl *cl, bool free)
238{ 373{
239 struct mei_cl_cb *cb; 374 struct mei_cl_cb *cb, *next;
240 struct mei_cl_cb *next;
241 375
242 /* enable removing everything if no cl is specified */ 376 /* enable removing everything if no cl is specified */
243 list_for_each_entry_safe(cb, next, &list->list, list) { 377 list_for_each_entry_safe(cb, next, &list->list, list) {
244 if (!cl || mei_cl_cmp_id(cl, cb->cl)) { 378 if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
245 list_del(&cb->list); 379 list_del_init(&cb->list);
246 if (free) 380 if (free)
247 mei_io_cb_free(cb); 381 mei_io_cb_free(cb);
248 } 382 }
@@ -260,7 +394,6 @@ void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
260 __mei_io_list_flush(list, cl, false); 394 __mei_io_list_flush(list, cl, false);
261} 395}
262 396
263
264/** 397/**
265 * mei_io_list_free - removes cb belonging to cl and free them 398 * mei_io_list_free - removes cb belonging to cl and free them
266 * 399 *
@@ -273,103 +406,107 @@ static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
273} 406}
274 407
275/** 408/**
276 * mei_io_cb_free - free mei_cb_private related memory 409 * mei_io_cb_alloc_buf - allocate callback buffer
277 * 410 *
278 * @cb: mei callback struct 411 * @cb: io callback structure
412 * @length: size of the buffer
413 *
414 * Return: 0 on success
415 * -EINVAL if cb is NULL
416 * -ENOMEM if allocation failed
279 */ 417 */
280void mei_io_cb_free(struct mei_cl_cb *cb) 418int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
281{ 419{
282 if (cb == NULL) 420 if (!cb)
283 return; 421 return -EINVAL;
284 422
285 kfree(cb->request_buffer.data); 423 if (length == 0)
286 kfree(cb->response_buffer.data); 424 return 0;
287 kfree(cb); 425
426 cb->buf.data = kmalloc(length, GFP_KERNEL);
427 if (!cb->buf.data)
428 return -ENOMEM;
429 cb->buf.size = length;
430 return 0;
288} 431}
289 432
290/** 433/**
291 * mei_io_cb_init - allocate and initialize io callback 434 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
292 * 435 *
293 * @cl: mei client 436 * @cl: host client
294 * @fp: pointer to file structure 437 * @length: size of the buffer
438 * @type: operation type
439 * @fp: associated file pointer (might be NULL)
295 * 440 *
296 * Return: mei_cl_cb pointer or NULL; 441 * Return: cb on success and NULL on failure
297 */ 442 */
298struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) 443struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
444 enum mei_cb_file_ops type, struct file *fp)
299{ 445{
300 struct mei_cl_cb *cb; 446 struct mei_cl_cb *cb;
301 447
302 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL); 448 cb = mei_io_cb_init(cl, type, fp);
303 if (!cb) 449 if (!cb)
304 return NULL; 450 return NULL;
305 451
306 mei_io_list_init(cb); 452 if (mei_io_cb_alloc_buf(cb, length)) {
453 mei_io_cb_free(cb);
454 return NULL;
455 }
307 456
308 cb->file_object = fp;
309 cb->cl = cl;
310 cb->buf_idx = 0;
311 return cb; 457 return cb;
312} 458}
313 459
314/** 460/**
315 * mei_io_cb_alloc_req_buf - allocate request buffer 461 * mei_cl_read_cb - find this cl's callback in the read list
462 * for a specific file
316 * 463 *
317 * @cb: io callback structure 464 * @cl: host client
318 * @length: size of the buffer 465 * @fp: file pointer (matching cb file object), may be NULL
319 * 466 *
320 * Return: 0 on success 467 * Return: cb on success, NULL if cb is not found
321 * -EINVAL if cb is NULL
322 * -ENOMEM if allocation failed
323 */ 468 */
324int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) 469struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
325{ 470{
326 if (!cb) 471 struct mei_cl_cb *cb;
327 return -EINVAL;
328 472
329 if (length == 0) 473 list_for_each_entry(cb, &cl->rd_completed, list)
330 return 0; 474 if (!fp || fp == cb->file_object)
475 return cb;
331 476
332 cb->request_buffer.data = kmalloc(length, GFP_KERNEL); 477 return NULL;
333 if (!cb->request_buffer.data)
334 return -ENOMEM;
335 cb->request_buffer.size = length;
336 return 0;
337} 478}
479
338/** 480/**
339 * mei_io_cb_alloc_resp_buf - allocate response buffer 481 * mei_cl_read_cb_flush - free client's read pending and completed cbs
340 * 482 * for a specific file
341 * @cb: io callback structure
342 * @length: size of the buffer
343 * 483 *
344 * Return: 0 on success 484 * @cl: host client
345 * -EINVAL if cb is NULL 485 * @fp: file pointer (matching cb file object), may be NULL
346 * -ENOMEM if allocation failed
347 */ 486 */
348int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length) 487void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
349{ 488{
350 if (!cb) 489 struct mei_cl_cb *cb, *next;
351 return -EINVAL;
352 490
353 if (length == 0) 491 list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
354 return 0; 492 if (!fp || fp == cb->file_object)
355 493 mei_io_cb_free(cb);
356 cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
357 if (!cb->response_buffer.data)
358 return -ENOMEM;
359 cb->response_buffer.size = length;
360 return 0;
361}
362 494
363 495
496 list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
497 if (!fp || fp == cb->file_object)
498 mei_io_cb_free(cb);
499}
364 500
365/** 501/**
366 * mei_cl_flush_queues - flushes queue lists belonging to cl. 502 * mei_cl_flush_queues - flushes queue lists belonging to cl.
367 * 503 *
368 * @cl: host client 504 * @cl: host client
505 * @fp: file pointer (matching cb file object), may be NULL
369 * 506 *
370 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL. 507 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
371 */ 508 */
372int mei_cl_flush_queues(struct mei_cl *cl) 509int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
373{ 510{
374 struct mei_device *dev; 511 struct mei_device *dev;
375 512
@@ -379,13 +516,15 @@ int mei_cl_flush_queues(struct mei_cl *cl)
379 dev = cl->dev; 516 dev = cl->dev;
380 517
381 cl_dbg(dev, cl, "remove list entry belonging to cl\n"); 518 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
382 mei_io_list_flush(&cl->dev->read_list, cl);
383 mei_io_list_free(&cl->dev->write_list, cl); 519 mei_io_list_free(&cl->dev->write_list, cl);
384 mei_io_list_free(&cl->dev->write_waiting_list, cl); 520 mei_io_list_free(&cl->dev->write_waiting_list, cl);
385 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); 521 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
386 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); 522 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
387 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); 523 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
388 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); 524 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
525
526 mei_cl_read_cb_flush(cl, fp);
527
389 return 0; 528 return 0;
390} 529}
391 530
@@ -402,9 +541,10 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
402 init_waitqueue_head(&cl->wait); 541 init_waitqueue_head(&cl->wait);
403 init_waitqueue_head(&cl->rx_wait); 542 init_waitqueue_head(&cl->rx_wait);
404 init_waitqueue_head(&cl->tx_wait); 543 init_waitqueue_head(&cl->tx_wait);
544 INIT_LIST_HEAD(&cl->rd_completed);
545 INIT_LIST_HEAD(&cl->rd_pending);
405 INIT_LIST_HEAD(&cl->link); 546 INIT_LIST_HEAD(&cl->link);
406 INIT_LIST_HEAD(&cl->device_link); 547 INIT_LIST_HEAD(&cl->device_link);
407 cl->reading_state = MEI_IDLE;
408 cl->writing_state = MEI_IDLE; 548 cl->writing_state = MEI_IDLE;
409 cl->dev = dev; 549 cl->dev = dev;
410} 550}
@@ -429,31 +569,14 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
429} 569}
430 570
431/** 571/**
432 * mei_cl_find_read_cb - find this cl's callback in the read list 572 * mei_cl_link - allocate host id in the host map
433 * 573 *
434 * @cl: host client 574 * @cl: host client
435 * 575 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
436 * Return: cb on success, NULL on error
437 */
438struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
439{
440 struct mei_device *dev = cl->dev;
441 struct mei_cl_cb *cb;
442
443 list_for_each_entry(cb, &dev->read_list.list, list)
444 if (mei_cl_cmp_id(cl, cb->cl))
445 return cb;
446 return NULL;
447}
448
449/** mei_cl_link: allocate host id in the host map
450 *
451 * @cl - host client
452 * @id - fixed host id or -1 for generic one
453 * 576 *
454 * Return: 0 on success 577 * Return: 0 on success
455 * -EINVAL on incorrect values 578 * -EINVAL on incorrect values
456 * -ENONET if client not found 579 * -EMFILE if open count exceeded.
457 */ 580 */
458int mei_cl_link(struct mei_cl *cl, int id) 581int mei_cl_link(struct mei_cl *cl, int id)
459{ 582{
@@ -535,28 +658,31 @@ int mei_cl_unlink(struct mei_cl *cl)
535 658
536void mei_host_client_init(struct work_struct *work) 659void mei_host_client_init(struct work_struct *work)
537{ 660{
538 struct mei_device *dev = container_of(work, 661 struct mei_device *dev =
539 struct mei_device, init_work); 662 container_of(work, struct mei_device, init_work);
540 struct mei_me_client *me_cl; 663 struct mei_me_client *me_cl;
541 struct mei_client_properties *props;
542 664
543 mutex_lock(&dev->device_lock); 665 mutex_lock(&dev->device_lock);
544 666
545 list_for_each_entry(me_cl, &dev->me_clients, list) {
546 props = &me_cl->props;
547 667
548 if (!uuid_le_cmp(props->protocol_name, mei_amthif_guid)) 668 me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
549 mei_amthif_host_init(dev); 669 if (me_cl)
550 else if (!uuid_le_cmp(props->protocol_name, mei_wd_guid)) 670 mei_amthif_host_init(dev);
551 mei_wd_host_init(dev); 671 mei_me_cl_put(me_cl);
552 else if (!uuid_le_cmp(props->protocol_name, mei_nfc_guid)) 672
553 mei_nfc_host_init(dev); 673 me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
674 if (me_cl)
675 mei_wd_host_init(dev);
676 mei_me_cl_put(me_cl);
677
678 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
679 if (me_cl)
680 mei_nfc_host_init(dev);
681 mei_me_cl_put(me_cl);
554 682
555 }
556 683
557 dev->dev_state = MEI_DEV_ENABLED; 684 dev->dev_state = MEI_DEV_ENABLED;
558 dev->reset_count = 0; 685 dev->reset_count = 0;
559
560 mutex_unlock(&dev->device_lock); 686 mutex_unlock(&dev->device_lock);
561 687
562 pm_runtime_mark_last_busy(dev->dev); 688 pm_runtime_mark_last_busy(dev->dev);
@@ -620,13 +746,10 @@ int mei_cl_disconnect(struct mei_cl *cl)
620 return rets; 746 return rets;
621 } 747 }
622 748
623 cb = mei_io_cb_init(cl, NULL); 749 cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
624 if (!cb) { 750 rets = cb ? 0 : -ENOMEM;
625 rets = -ENOMEM; 751 if (rets)
626 goto free; 752 goto free;
627 }
628
629 cb->fop_type = MEI_FOP_DISCONNECT;
630 753
631 if (mei_hbuf_acquire(dev)) { 754 if (mei_hbuf_acquire(dev)) {
632 if (mei_hbm_cl_disconnect_req(dev, cl)) { 755 if (mei_hbm_cl_disconnect_req(dev, cl)) {
@@ -727,13 +850,10 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
727 return rets; 850 return rets;
728 } 851 }
729 852
730 cb = mei_io_cb_init(cl, file); 853 cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
731 if (!cb) { 854 rets = cb ? 0 : -ENOMEM;
732 rets = -ENOMEM; 855 if (rets)
733 goto out; 856 goto out;
734 }
735
736 cb->fop_type = MEI_FOP_CONNECT;
737 857
738 /* run hbuf acquire last so we don't have to undo */ 858 /* run hbuf acquire last so we don't have to undo */
739 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { 859 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
@@ -756,7 +876,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
756 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 876 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
757 mutex_lock(&dev->device_lock); 877 mutex_lock(&dev->device_lock);
758 878
759 if (cl->state != MEI_FILE_CONNECTED) { 879 if (!mei_cl_is_connected(cl)) {
760 cl->state = MEI_FILE_DISCONNECTED; 880 cl->state = MEI_FILE_DISCONNECTED;
761 /* something went really wrong */ 881 /* something went really wrong */
762 if (!cl->status) 882 if (!cl->status)
@@ -778,6 +898,37 @@ out:
778} 898}
779 899
780/** 900/**
901 * mei_cl_alloc_linked - allocate and link host client
902 *
903 * @dev: the device structure
904 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
905 *
 906 * Return: cl on success, ERR_PTR on failure
907 */
908struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
909{
910 struct mei_cl *cl;
911 int ret;
912
913 cl = mei_cl_allocate(dev);
914 if (!cl) {
915 ret = -ENOMEM;
916 goto err;
917 }
918
919 ret = mei_cl_link(cl, id);
920 if (ret)
921 goto err;
922
923 return cl;
924err:
925 kfree(cl);
926 return ERR_PTR(ret);
927}
928
929
930
931/**
781 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. 932 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
782 * 933 *
783 * @cl: private data of the file object 934 * @cl: private data of the file object
@@ -866,10 +1017,11 @@ out:
866 * 1017 *
867 * @cl: host client 1018 * @cl: host client
868 * @length: number of bytes to read 1019 * @length: number of bytes to read
1020 * @fp: pointer to file structure
869 * 1021 *
870 * Return: 0 on success, <0 on failure. 1022 * Return: 0 on success, <0 on failure.
871 */ 1023 */
872int mei_cl_read_start(struct mei_cl *cl, size_t length) 1024int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
873{ 1025{
874 struct mei_device *dev; 1026 struct mei_device *dev;
875 struct mei_cl_cb *cb; 1027 struct mei_cl_cb *cb;
@@ -884,10 +1036,10 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
884 if (!mei_cl_is_connected(cl)) 1036 if (!mei_cl_is_connected(cl))
885 return -ENODEV; 1037 return -ENODEV;
886 1038
887 if (cl->read_cb) { 1039 /* HW currently supports only one pending read */
888 cl_dbg(dev, cl, "read is pending.\n"); 1040 if (!list_empty(&cl->rd_pending))
889 return -EBUSY; 1041 return -EBUSY;
890 } 1042
891 me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id); 1043 me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
892 if (!me_cl) { 1044 if (!me_cl) {
893 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); 1045 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
@@ -904,29 +1056,21 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
904 return rets; 1056 return rets;
905 } 1057 }
906 1058
907 cb = mei_io_cb_init(cl, NULL); 1059 cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
908 if (!cb) { 1060 rets = cb ? 0 : -ENOMEM;
909 rets = -ENOMEM;
910 goto out;
911 }
912
913 rets = mei_io_cb_alloc_resp_buf(cb, length);
914 if (rets) 1061 if (rets)
915 goto out; 1062 goto out;
916 1063
917 cb->fop_type = MEI_FOP_READ;
918 if (mei_hbuf_acquire(dev)) { 1064 if (mei_hbuf_acquire(dev)) {
919 rets = mei_hbm_cl_flow_control_req(dev, cl); 1065 rets = mei_hbm_cl_flow_control_req(dev, cl);
920 if (rets < 0) 1066 if (rets < 0)
921 goto out; 1067 goto out;
922 1068
923 list_add_tail(&cb->list, &dev->read_list.list); 1069 list_add_tail(&cb->list, &cl->rd_pending);
924 } else { 1070 } else {
925 list_add_tail(&cb->list, &dev->ctrl_wr_list.list); 1071 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
926 } 1072 }
927 1073
928 cl->read_cb = cb;
929
930out: 1074out:
931 cl_dbg(dev, cl, "rpm: autosuspend\n"); 1075 cl_dbg(dev, cl, "rpm: autosuspend\n");
932 pm_runtime_mark_last_busy(dev->dev); 1076 pm_runtime_mark_last_busy(dev->dev);
@@ -964,7 +1108,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
964 1108
965 dev = cl->dev; 1109 dev = cl->dev;
966 1110
967 buf = &cb->request_buffer; 1111 buf = &cb->buf;
968 1112
969 rets = mei_cl_flow_ctrl_creds(cl); 1113 rets = mei_cl_flow_ctrl_creds(cl);
970 if (rets < 0) 1114 if (rets < 0)
@@ -999,7 +1143,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
999 } 1143 }
1000 1144
1001 cl_dbg(dev, cl, "buf: size = %d idx = %lu\n", 1145 cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
1002 cb->request_buffer.size, cb->buf_idx); 1146 cb->buf.size, cb->buf_idx);
1003 1147
1004 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); 1148 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
1005 if (rets) { 1149 if (rets) {
@@ -1011,6 +1155,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1011 cl->status = 0; 1155 cl->status = 0;
1012 cl->writing_state = MEI_WRITING; 1156 cl->writing_state = MEI_WRITING;
1013 cb->buf_idx += mei_hdr.length; 1157 cb->buf_idx += mei_hdr.length;
1158 cb->completed = mei_hdr.msg_complete == 1;
1014 1159
1015 if (mei_hdr.msg_complete) { 1160 if (mei_hdr.msg_complete) {
1016 if (mei_cl_flow_ctrl_reduce(cl)) 1161 if (mei_cl_flow_ctrl_reduce(cl))
@@ -1048,7 +1193,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
1048 dev = cl->dev; 1193 dev = cl->dev;
1049 1194
1050 1195
1051 buf = &cb->request_buffer; 1196 buf = &cb->buf;
1052 1197
1053 cl_dbg(dev, cl, "size=%d\n", buf->size); 1198 cl_dbg(dev, cl, "size=%d\n", buf->size);
1054 1199
@@ -1059,7 +1204,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
1059 return rets; 1204 return rets;
1060 } 1205 }
1061 1206
1062 cb->fop_type = MEI_FOP_WRITE;
1063 cb->buf_idx = 0; 1207 cb->buf_idx = 0;
1064 cl->writing_state = MEI_IDLE; 1208 cl->writing_state = MEI_IDLE;
1065 1209
@@ -1099,6 +1243,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
1099 1243
1100 cl->writing_state = MEI_WRITING; 1244 cl->writing_state = MEI_WRITING;
1101 cb->buf_idx = mei_hdr.length; 1245 cb->buf_idx = mei_hdr.length;
1246 cb->completed = mei_hdr.msg_complete == 1;
1102 1247
1103out: 1248out:
1104 if (mei_hdr.msg_complete) { 1249 if (mei_hdr.msg_complete) {
@@ -1151,11 +1296,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1151 if (waitqueue_active(&cl->tx_wait)) 1296 if (waitqueue_active(&cl->tx_wait))
1152 wake_up_interruptible(&cl->tx_wait); 1297 wake_up_interruptible(&cl->tx_wait);
1153 1298
1154 } else if (cb->fop_type == MEI_FOP_READ && 1299 } else if (cb->fop_type == MEI_FOP_READ) {
1155 MEI_READING == cl->reading_state) { 1300 list_add_tail(&cb->list, &cl->rd_completed);
1156 cl->reading_state = MEI_READ_COMPLETE;
1157 if (waitqueue_active(&cl->rx_wait)) 1301 if (waitqueue_active(&cl->rx_wait))
1158 wake_up_interruptible(&cl->rx_wait); 1302 wake_up_interruptible_all(&cl->rx_wait);
1159 else 1303 else
1160 mei_cl_bus_rx_event(cl); 1304 mei_cl_bus_rx_event(cl);
1161 1305
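The client.c changes above replace the shared read_list and the request/response buffer pair with a single cb->buf plus per-client rd_pending/rd_completed lists keyed by file pointer. A minimal sketch of how a caller would drive a read through the new helpers; demo_read_once is a hypothetical helper, not part of the patch, and assumes the mei internal headers (mei_dev.h, client.h) and the driver's dev->device_lock conventions:

/*
 * Sketch only, not part of the patch: queue a read and reap a completed
 * one using the per-file read lists introduced above.
 */
static ssize_t demo_read_once(struct mei_cl *cl, struct file *fp, size_t length)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	ssize_t ret;

	mutex_lock(&dev->device_lock);

	/* a completed read for this file sits on cl->rd_completed */
	cb = mei_cl_read_cb(cl, fp);
	if (!cb) {
		/* none yet: queue one; the cb owns a single buf of @length */
		ret = mei_cl_read_start(cl, length, fp);
		goto out;	/* a real caller would now wait on cl->rx_wait */
	}

	ret = (ssize_t)cb->buf_idx;	/* bytes received */
	mei_io_cb_free(cb);		/* list_del() happens inside the free */
out:
	mutex_unlock(&dev->device_lock);
	return ret;
}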
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index cfcde8e97fc4..0a39e5d45171 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -31,7 +31,10 @@ void mei_me_cl_init(struct mei_me_client *me_cl);
31void mei_me_cl_put(struct mei_me_client *me_cl); 31void mei_me_cl_put(struct mei_me_client *me_cl);
32struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl); 32struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl);
33 33
34struct mei_me_client *mei_me_cl_by_uuid(const struct mei_device *dev, 34void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl);
35void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl);
36
37struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
35 const uuid_le *uuid); 38 const uuid_le *uuid);
36struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id); 39struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
37struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, 40struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
@@ -44,10 +47,10 @@ void mei_me_cl_rm_all(struct mei_device *dev);
44/* 47/*
45 * MEI IO Functions 48 * MEI IO Functions
46 */ 49 */
47struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp); 50struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
51 struct file *fp);
48void mei_io_cb_free(struct mei_cl_cb *priv_cb); 52void mei_io_cb_free(struct mei_cl_cb *priv_cb);
49int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length); 53int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length);
50int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
51 54
52 55
53/** 56/**
@@ -72,9 +75,14 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
72int mei_cl_link(struct mei_cl *cl, int id); 75int mei_cl_link(struct mei_cl *cl, int id);
73int mei_cl_unlink(struct mei_cl *cl); 76int mei_cl_unlink(struct mei_cl *cl);
74 77
75int mei_cl_flush_queues(struct mei_cl *cl); 78struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id);
76struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
77 79
80struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
81 const struct file *fp);
82void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
83struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
84 enum mei_cb_file_ops type, struct file *fp);
85int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
78 86
79int mei_cl_flow_ctrl_creds(struct mei_cl *cl); 87int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
80 88
@@ -82,23 +90,25 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
82/* 90/*
83 * MEI input output function prototype 91 * MEI input output function prototype
84 */ 92 */
93
94/**
95 * mei_cl_is_connected - host client is connected
96 *
 97 * @cl: host client
 98 *
 99 * Return: true if the host client is connected
100 */
85static inline bool mei_cl_is_connected(struct mei_cl *cl) 101static inline bool mei_cl_is_connected(struct mei_cl *cl)
86{ 102{
87 return cl->dev && 103 return cl->state == MEI_FILE_CONNECTED;
88 cl->dev->dev_state == MEI_DEV_ENABLED &&
89 cl->state == MEI_FILE_CONNECTED;
90}
91static inline bool mei_cl_is_transitioning(struct mei_cl *cl)
92{
93 return MEI_FILE_INITIALIZING == cl->state ||
94 MEI_FILE_DISCONNECTED == cl->state ||
95 MEI_FILE_DISCONNECTING == cl->state;
96} 104}
97 105
98bool mei_cl_is_other_connecting(struct mei_cl *cl); 106bool mei_cl_is_other_connecting(struct mei_cl *cl);
99int mei_cl_disconnect(struct mei_cl *cl); 107int mei_cl_disconnect(struct mei_cl *cl);
100int mei_cl_connect(struct mei_cl *cl, struct file *file); 108int mei_cl_connect(struct mei_cl *cl, struct file *file);
101int mei_cl_read_start(struct mei_cl *cl, size_t length); 109int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
110int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
111 struct mei_cl_cb *cmpl_list);
102int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); 112int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
103int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 113int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
104 struct mei_cl_cb *cmpl_list); 114 struct mei_cl_cb *cmpl_list);
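The header now exports the linked-allocation helper and makes the flush path file-aware. A short sketch of the intended open/close pairing; demo_open_cl and demo_close_cl are hypothetical helpers, and dev->device_lock is assumed to be held by the caller, as in mei_open()/mei_release():

/* Sketch only: ERR_PTR convention of mei_cl_alloc_linked() and the new
 * file-pointer argument of mei_cl_flush_queues().
 */
static struct mei_cl *demo_open_cl(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
	if (IS_ERR(cl))
		return NULL;	/* PTR_ERR(cl) is -ENOMEM or -EMFILE */
	return cl;
}

static void demo_close_cl(struct mei_cl *cl, const struct file *fp)
{
	mei_cl_flush_queues(cl, fp);	/* drops this file's read cbs too */
	mei_cl_unlink(cl);
	kfree(cl);
}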
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index b125380ee871..d9cd7e6ee484 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -28,7 +28,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
28 size_t cnt, loff_t *ppos) 28 size_t cnt, loff_t *ppos)
29{ 29{
30 struct mei_device *dev = fp->private_data; 30 struct mei_device *dev = fp->private_data;
31 struct mei_me_client *me_cl, *n; 31 struct mei_me_client *me_cl;
32 size_t bufsz = 1; 32 size_t bufsz = 1;
33 char *buf; 33 char *buf;
34 int i = 0; 34 int i = 0;
@@ -38,15 +38,14 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
38#define HDR \ 38#define HDR \
39" |id|fix| UUID |con|msg len|sb|refc|\n" 39" |id|fix| UUID |con|msg len|sb|refc|\n"
40 40
41 mutex_lock(&dev->device_lock); 41 down_read(&dev->me_clients_rwsem);
42
43 list_for_each_entry(me_cl, &dev->me_clients, list) 42 list_for_each_entry(me_cl, &dev->me_clients, list)
44 bufsz++; 43 bufsz++;
45 44
46 bufsz *= sizeof(HDR) + 1; 45 bufsz *= sizeof(HDR) + 1;
47 buf = kzalloc(bufsz, GFP_KERNEL); 46 buf = kzalloc(bufsz, GFP_KERNEL);
48 if (!buf) { 47 if (!buf) {
49 mutex_unlock(&dev->device_lock); 48 up_read(&dev->me_clients_rwsem);
50 return -ENOMEM; 49 return -ENOMEM;
51 } 50 }
52 51
@@ -56,10 +55,9 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
56 if (dev->dev_state != MEI_DEV_ENABLED) 55 if (dev->dev_state != MEI_DEV_ENABLED)
57 goto out; 56 goto out;
58 57
59 list_for_each_entry_safe(me_cl, n, &dev->me_clients, list) { 58 list_for_each_entry(me_cl, &dev->me_clients, list) {
60 59
61 me_cl = mei_me_cl_get(me_cl); 60 if (mei_me_cl_get(me_cl)) {
62 if (me_cl) {
63 pos += scnprintf(buf + pos, bufsz - pos, 61 pos += scnprintf(buf + pos, bufsz - pos,
64 "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n", 62 "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n",
65 i++, me_cl->client_id, 63 i++, me_cl->client_id,
@@ -69,12 +67,13 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
69 me_cl->props.max_msg_length, 67 me_cl->props.max_msg_length,
70 me_cl->props.single_recv_buf, 68 me_cl->props.single_recv_buf,
71 atomic_read(&me_cl->refcnt.refcount)); 69 atomic_read(&me_cl->refcnt.refcount));
72 }
73 70
74 mei_me_cl_put(me_cl); 71 mei_me_cl_put(me_cl);
72 }
75 } 73 }
74
76out: 75out:
77 mutex_unlock(&dev->device_lock); 76 up_read(&dev->me_clients_rwsem);
78 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); 77 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
79 kfree(buf); 78 kfree(buf);
80 return ret; 79 return ret;
@@ -118,7 +117,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
118 pos += scnprintf(buf + pos, bufsz - pos, 117 pos += scnprintf(buf + pos, bufsz - pos,
119 "%2d|%2d|%4d|%5d|%2d|%2d|\n", 118 "%2d|%2d|%4d|%5d|%2d|%2d|\n",
120 i, cl->me_client_id, cl->host_client_id, cl->state, 119 i, cl->me_client_id, cl->host_client_id, cl->state,
121 cl->reading_state, cl->writing_state); 120 !list_empty(&cl->rd_completed), cl->writing_state);
122 i++; 121 i++;
123 } 122 }
124out: 123out:
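The debugfs reader illustrates the new locking rule: the me_clients list is walked under me_clients_rwsem and each entry is pinned with a reference while it is inspected. A sketch of that pattern in isolation; demo_count_fixed is hypothetical and assumes the mei internal headers plus <linux/rwsem.h>:

static int demo_count_fixed(struct mei_device *dev)
{
	struct mei_me_client *me_cl;
	int n = 0;

	down_read(&dev->me_clients_rwsem);	/* readers can run concurrently */
	list_for_each_entry(me_cl, &dev->me_clients, list) {
		if (!mei_me_cl_get(me_cl))	/* skip entries already being freed */
			continue;
		if (me_cl->props.fixed_address)
			n++;
		mei_me_cl_put(me_cl);		/* drop the temporary reference */
	}
	up_read(&dev->me_clients_rwsem);

	return n;
}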
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index c8412d41e4f1..58da92565c5e 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -338,7 +338,8 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
338 me_cl->client_id = res->me_addr; 338 me_cl->client_id = res->me_addr;
339 me_cl->mei_flow_ctrl_creds = 0; 339 me_cl->mei_flow_ctrl_creds = 0;
340 340
341 list_add(&me_cl->list, &dev->me_clients); 341 mei_me_cl_add(dev, me_cl);
342
342 return 0; 343 return 0;
343} 344}
344 345
@@ -638,7 +639,7 @@ static void mei_hbm_cl_res(struct mei_device *dev,
638 continue; 639 continue;
639 640
640 if (mei_hbm_cl_addr_equal(cl, rs)) { 641 if (mei_hbm_cl_addr_equal(cl, rs)) {
641 list_del(&cb->list); 642 list_del_init(&cb->list);
642 break; 643 break;
643 } 644 }
644 } 645 }
@@ -683,10 +684,9 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
683 cl->state = MEI_FILE_DISCONNECTED; 684 cl->state = MEI_FILE_DISCONNECTED;
684 cl->timer_count = 0; 685 cl->timer_count = 0;
685 686
686 cb = mei_io_cb_init(cl, NULL); 687 cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
687 if (!cb) 688 if (!cb)
688 return -ENOMEM; 689 return -ENOMEM;
689 cb->fop_type = MEI_FOP_DISCONNECT_RSP;
690 cl_dbg(dev, cl, "add disconnect response as first\n"); 690 cl_dbg(dev, cl, "add disconnect response as first\n");
691 list_add(&cb->list, &dev->ctrl_wr_list.list); 691 list_add(&cb->list, &dev->ctrl_wr_list.list);
692 } 692 }
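With the fop type now fixed at allocation time, queuing a control response is a two-step pattern rather than init-then-patch. A sketch of the shape the hbm.c hunk converges on; demo_queue_disconnect_rsp is hypothetical and dev->device_lock is assumed held:

static int demo_queue_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
	if (!cb)
		return -ENOMEM;

	/* control messages are queued at the head of the control write list */
	list_add(&cb->list, &dev->ctrl_wr_list.list);
	return 0;
}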
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index f8fd503dfbd6..6fb75e62a764 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -25,6 +25,8 @@
25#include "hw-me.h" 25#include "hw-me.h"
26#include "hw-me-regs.h" 26#include "hw-me-regs.h"
27 27
28#include "mei-trace.h"
29
28/** 30/**
29 * mei_me_reg_read - Reads 32bit data from the mei device 31 * mei_me_reg_read - Reads 32bit data from the mei device
30 * 32 *
@@ -61,45 +63,79 @@ static inline void mei_me_reg_write(const struct mei_me_hw *hw,
61 * 63 *
62 * Return: ME_CB_RW register value (u32) 64 * Return: ME_CB_RW register value (u32)
63 */ 65 */
64static u32 mei_me_mecbrw_read(const struct mei_device *dev) 66static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
65{ 67{
66 return mei_me_reg_read(to_me_hw(dev), ME_CB_RW); 68 return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
67} 69}
70
71/**
72 * mei_me_hcbww_write - write 32bit data to the host circular buffer
73 *
74 * @dev: the device structure
75 * @data: 32bit data to be written to the host circular buffer
76 */
77static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
78{
79 mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
80}
81
68/** 82/**
69 * mei_me_mecsr_read - Reads 32bit data from the ME CSR 83 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
70 * 84 *
71 * @hw: the me hardware structure 85 * @dev: the device structure
72 * 86 *
73 * Return: ME_CSR_HA register value (u32) 87 * Return: ME_CSR_HA register value (u32)
74 */ 88 */
75static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw) 89static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
76{ 90{
77 return mei_me_reg_read(hw, ME_CSR_HA); 91 u32 reg;
92
93 reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
94 trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
95
96 return reg;
78} 97}
79 98
80/** 99/**
81 * mei_hcsr_read - Reads 32bit data from the host CSR 100 * mei_hcsr_read - Reads 32bit data from the host CSR
82 * 101 *
83 * @hw: the me hardware structure 102 * @dev: the device structure
84 * 103 *
85 * Return: H_CSR register value (u32) 104 * Return: H_CSR register value (u32)
86 */ 105 */
87static inline u32 mei_hcsr_read(const struct mei_me_hw *hw) 106static inline u32 mei_hcsr_read(const struct mei_device *dev)
107{
108 u32 reg;
109
110 reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
111 trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
112
113 return reg;
114}
115
116/**
117 * mei_hcsr_write - writes H_CSR register to the mei device
118 *
119 * @dev: the device structure
120 * @reg: new register value
121 */
122static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
88{ 123{
89 return mei_me_reg_read(hw, H_CSR); 124 trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
125 mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
90} 126}
91 127
92/** 128/**
93 * mei_hcsr_set - writes H_CSR register to the mei device, 129 * mei_hcsr_set - writes H_CSR register to the mei device,
94 * and ignores the H_IS bit for it is write-one-to-zero. 130 * and ignores the H_IS bit for it is write-one-to-zero.
95 * 131 *
96 * @hw: the me hardware structure 132 * @dev: the device structure
97 * @hcsr: new register value 133 * @reg: new register value
98 */ 134 */
99static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr) 135static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
100{ 136{
101 hcsr &= ~H_IS; 137 reg &= ~H_IS;
102 mei_me_reg_write(hw, H_CSR, hcsr); 138 mei_hcsr_write(dev, reg);
103} 139}
104 140
105/** 141/**
@@ -141,7 +177,7 @@ static int mei_me_fw_status(struct mei_device *dev,
141static void mei_me_hw_config(struct mei_device *dev) 177static void mei_me_hw_config(struct mei_device *dev)
142{ 178{
143 struct mei_me_hw *hw = to_me_hw(dev); 179 struct mei_me_hw *hw = to_me_hw(dev);
144 u32 hcsr = mei_hcsr_read(to_me_hw(dev)); 180 u32 hcsr = mei_hcsr_read(dev);
145 /* Doesn't change in runtime */ 181 /* Doesn't change in runtime */
146 dev->hbuf_depth = (hcsr & H_CBD) >> 24; 182 dev->hbuf_depth = (hcsr & H_CBD) >> 24;
147 183
@@ -170,11 +206,10 @@ static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
170 */ 206 */
171static void mei_me_intr_clear(struct mei_device *dev) 207static void mei_me_intr_clear(struct mei_device *dev)
172{ 208{
173 struct mei_me_hw *hw = to_me_hw(dev); 209 u32 hcsr = mei_hcsr_read(dev);
174 u32 hcsr = mei_hcsr_read(hw);
175 210
176 if ((hcsr & H_IS) == H_IS) 211 if ((hcsr & H_IS) == H_IS)
177 mei_me_reg_write(hw, H_CSR, hcsr); 212 mei_hcsr_write(dev, hcsr);
178} 213}
179/** 214/**
180 * mei_me_intr_enable - enables mei device interrupts 215 * mei_me_intr_enable - enables mei device interrupts
@@ -183,11 +218,10 @@ static void mei_me_intr_clear(struct mei_device *dev)
183 */ 218 */
184static void mei_me_intr_enable(struct mei_device *dev) 219static void mei_me_intr_enable(struct mei_device *dev)
185{ 220{
186 struct mei_me_hw *hw = to_me_hw(dev); 221 u32 hcsr = mei_hcsr_read(dev);
187 u32 hcsr = mei_hcsr_read(hw);
188 222
189 hcsr |= H_IE; 223 hcsr |= H_IE;
190 mei_hcsr_set(hw, hcsr); 224 mei_hcsr_set(dev, hcsr);
191} 225}
192 226
193/** 227/**
@@ -197,11 +231,10 @@ static void mei_me_intr_enable(struct mei_device *dev)
197 */ 231 */
198static void mei_me_intr_disable(struct mei_device *dev) 232static void mei_me_intr_disable(struct mei_device *dev)
199{ 233{
200 struct mei_me_hw *hw = to_me_hw(dev); 234 u32 hcsr = mei_hcsr_read(dev);
201 u32 hcsr = mei_hcsr_read(hw);
202 235
203 hcsr &= ~H_IE; 236 hcsr &= ~H_IE;
204 mei_hcsr_set(hw, hcsr); 237 mei_hcsr_set(dev, hcsr);
205} 238}
206 239
207/** 240/**
@@ -211,12 +244,11 @@ static void mei_me_intr_disable(struct mei_device *dev)
211 */ 244 */
212static void mei_me_hw_reset_release(struct mei_device *dev) 245static void mei_me_hw_reset_release(struct mei_device *dev)
213{ 246{
214 struct mei_me_hw *hw = to_me_hw(dev); 247 u32 hcsr = mei_hcsr_read(dev);
215 u32 hcsr = mei_hcsr_read(hw);
216 248
217 hcsr |= H_IG; 249 hcsr |= H_IG;
218 hcsr &= ~H_RST; 250 hcsr &= ~H_RST;
219 mei_hcsr_set(hw, hcsr); 251 mei_hcsr_set(dev, hcsr);
220 252
221 /* complete this write before we set host ready on another CPU */ 253 /* complete this write before we set host ready on another CPU */
222 mmiowb(); 254 mmiowb();
@@ -231,8 +263,7 @@ static void mei_me_hw_reset_release(struct mei_device *dev)
231 */ 263 */
232static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) 264static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
233{ 265{
234 struct mei_me_hw *hw = to_me_hw(dev); 266 u32 hcsr = mei_hcsr_read(dev);
235 u32 hcsr = mei_hcsr_read(hw);
236 267
237 /* H_RST may be found lit before reset is started, 268 /* H_RST may be found lit before reset is started,
238 * for example if preceding reset flow hasn't completed. 269 * for example if preceding reset flow hasn't completed.
@@ -242,8 +273,8 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
242 if ((hcsr & H_RST) == H_RST) { 273 if ((hcsr & H_RST) == H_RST) {
243 dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); 274 dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
244 hcsr &= ~H_RST; 275 hcsr &= ~H_RST;
245 mei_hcsr_set(hw, hcsr); 276 mei_hcsr_set(dev, hcsr);
246 hcsr = mei_hcsr_read(hw); 277 hcsr = mei_hcsr_read(dev);
247 } 278 }
248 279
249 hcsr |= H_RST | H_IG | H_IS; 280 hcsr |= H_RST | H_IG | H_IS;
@@ -254,13 +285,13 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
254 hcsr &= ~H_IE; 285 hcsr &= ~H_IE;
255 286
256 dev->recvd_hw_ready = false; 287 dev->recvd_hw_ready = false;
257 mei_me_reg_write(hw, H_CSR, hcsr); 288 mei_hcsr_write(dev, hcsr);
258 289
259 /* 290 /*
260 * Host reads the H_CSR once to ensure that the 291 * Host reads the H_CSR once to ensure that the
261 * posted write to H_CSR completes. 292 * posted write to H_CSR completes.
262 */ 293 */
263 hcsr = mei_hcsr_read(hw); 294 hcsr = mei_hcsr_read(dev);
264 295
265 if ((hcsr & H_RST) == 0) 296 if ((hcsr & H_RST) == 0)
266 dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr); 297 dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
@@ -281,11 +312,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
281 */ 312 */
282static void mei_me_host_set_ready(struct mei_device *dev) 313static void mei_me_host_set_ready(struct mei_device *dev)
283{ 314{
284 struct mei_me_hw *hw = to_me_hw(dev); 315 u32 hcsr = mei_hcsr_read(dev);
285 u32 hcsr = mei_hcsr_read(hw);
286 316
287 hcsr |= H_IE | H_IG | H_RDY; 317 hcsr |= H_IE | H_IG | H_RDY;
288 mei_hcsr_set(hw, hcsr); 318 mei_hcsr_set(dev, hcsr);
289} 319}
290 320
291/** 321/**
@@ -296,8 +326,7 @@ static void mei_me_host_set_ready(struct mei_device *dev)
296 */ 326 */
297static bool mei_me_host_is_ready(struct mei_device *dev) 327static bool mei_me_host_is_ready(struct mei_device *dev)
298{ 328{
299 struct mei_me_hw *hw = to_me_hw(dev); 329 u32 hcsr = mei_hcsr_read(dev);
300 u32 hcsr = mei_hcsr_read(hw);
301 330
302 return (hcsr & H_RDY) == H_RDY; 331 return (hcsr & H_RDY) == H_RDY;
303} 332}
@@ -310,8 +339,7 @@ static bool mei_me_host_is_ready(struct mei_device *dev)
310 */ 339 */
311static bool mei_me_hw_is_ready(struct mei_device *dev) 340static bool mei_me_hw_is_ready(struct mei_device *dev)
312{ 341{
313 struct mei_me_hw *hw = to_me_hw(dev); 342 u32 mecsr = mei_me_mecsr_read(dev);
314 u32 mecsr = mei_me_mecsr_read(hw);
315 343
316 return (mecsr & ME_RDY_HRA) == ME_RDY_HRA; 344 return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
317} 345}
@@ -368,11 +396,10 @@ static int mei_me_hw_start(struct mei_device *dev)
368 */ 396 */
369static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) 397static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
370{ 398{
371 struct mei_me_hw *hw = to_me_hw(dev);
372 u32 hcsr; 399 u32 hcsr;
373 char read_ptr, write_ptr; 400 char read_ptr, write_ptr;
374 401
375 hcsr = mei_hcsr_read(hw); 402 hcsr = mei_hcsr_read(dev);
376 403
377 read_ptr = (char) ((hcsr & H_CBRP) >> 8); 404 read_ptr = (char) ((hcsr & H_CBRP) >> 8);
378 write_ptr = (char) ((hcsr & H_CBWP) >> 16); 405 write_ptr = (char) ((hcsr & H_CBWP) >> 16);
@@ -439,7 +466,6 @@ static int mei_me_write_message(struct mei_device *dev,
439 struct mei_msg_hdr *header, 466 struct mei_msg_hdr *header,
440 unsigned char *buf) 467 unsigned char *buf)
441{ 468{
442 struct mei_me_hw *hw = to_me_hw(dev);
443 unsigned long rem; 469 unsigned long rem;
444 unsigned long length = header->length; 470 unsigned long length = header->length;
445 u32 *reg_buf = (u32 *)buf; 471 u32 *reg_buf = (u32 *)buf;
@@ -457,21 +483,21 @@ static int mei_me_write_message(struct mei_device *dev,
457 if (empty_slots < 0 || dw_cnt > empty_slots) 483 if (empty_slots < 0 || dw_cnt > empty_slots)
458 return -EMSGSIZE; 484 return -EMSGSIZE;
459 485
460 mei_me_reg_write(hw, H_CB_WW, *((u32 *) header)); 486 mei_me_hcbww_write(dev, *((u32 *) header));
461 487
462 for (i = 0; i < length / 4; i++) 488 for (i = 0; i < length / 4; i++)
463 mei_me_reg_write(hw, H_CB_WW, reg_buf[i]); 489 mei_me_hcbww_write(dev, reg_buf[i]);
464 490
465 rem = length & 0x3; 491 rem = length & 0x3;
466 if (rem > 0) { 492 if (rem > 0) {
467 u32 reg = 0; 493 u32 reg = 0;
468 494
469 memcpy(&reg, &buf[length - rem], rem); 495 memcpy(&reg, &buf[length - rem], rem);
470 mei_me_reg_write(hw, H_CB_WW, reg); 496 mei_me_hcbww_write(dev, reg);
471 } 497 }
472 498
473 hcsr = mei_hcsr_read(hw) | H_IG; 499 hcsr = mei_hcsr_read(dev) | H_IG;
474 mei_hcsr_set(hw, hcsr); 500 mei_hcsr_set(dev, hcsr);
475 if (!mei_me_hw_is_ready(dev)) 501 if (!mei_me_hw_is_ready(dev))
476 return -EIO; 502 return -EIO;
477 503
@@ -487,12 +513,11 @@ static int mei_me_write_message(struct mei_device *dev,
487 */ 513 */
488static int mei_me_count_full_read_slots(struct mei_device *dev) 514static int mei_me_count_full_read_slots(struct mei_device *dev)
489{ 515{
490 struct mei_me_hw *hw = to_me_hw(dev);
491 u32 me_csr; 516 u32 me_csr;
492 char read_ptr, write_ptr; 517 char read_ptr, write_ptr;
493 unsigned char buffer_depth, filled_slots; 518 unsigned char buffer_depth, filled_slots;
494 519
495 me_csr = mei_me_mecsr_read(hw); 520 me_csr = mei_me_mecsr_read(dev);
496 buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24); 521 buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
497 read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8); 522 read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
498 write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16); 523 write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
@@ -518,7 +543,6 @@ static int mei_me_count_full_read_slots(struct mei_device *dev)
518static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer, 543static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
519 unsigned long buffer_length) 544 unsigned long buffer_length)
520{ 545{
521 struct mei_me_hw *hw = to_me_hw(dev);
522 u32 *reg_buf = (u32 *)buffer; 546 u32 *reg_buf = (u32 *)buffer;
523 u32 hcsr; 547 u32 hcsr;
524 548
@@ -531,49 +555,59 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
531 memcpy(reg_buf, &reg, buffer_length); 555 memcpy(reg_buf, &reg, buffer_length);
532 } 556 }
533 557
534 hcsr = mei_hcsr_read(hw) | H_IG; 558 hcsr = mei_hcsr_read(dev) | H_IG;
535 mei_hcsr_set(hw, hcsr); 559 mei_hcsr_set(dev, hcsr);
536 return 0; 560 return 0;
537} 561}
538 562
539/** 563/**
540 * mei_me_pg_enter - write pg enter register 564 * mei_me_pg_set - write pg enter register
541 * 565 *
542 * @dev: the device structure 566 * @dev: the device structure
543 */ 567 */
544static void mei_me_pg_enter(struct mei_device *dev) 568static void mei_me_pg_set(struct mei_device *dev)
545{ 569{
546 struct mei_me_hw *hw = to_me_hw(dev); 570 struct mei_me_hw *hw = to_me_hw(dev);
547 u32 reg = mei_me_reg_read(hw, H_HPG_CSR); 571 u32 reg;
572
573 reg = mei_me_reg_read(hw, H_HPG_CSR);
574 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
548 575
549 reg |= H_HPG_CSR_PGI; 576 reg |= H_HPG_CSR_PGI;
577
578 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
550 mei_me_reg_write(hw, H_HPG_CSR, reg); 579 mei_me_reg_write(hw, H_HPG_CSR, reg);
551} 580}
552 581
553/** 582/**
554 * mei_me_pg_exit - write pg exit register 583 * mei_me_pg_unset - write pg exit register
555 * 584 *
556 * @dev: the device structure 585 * @dev: the device structure
557 */ 586 */
558static void mei_me_pg_exit(struct mei_device *dev) 587static void mei_me_pg_unset(struct mei_device *dev)
559{ 588{
560 struct mei_me_hw *hw = to_me_hw(dev); 589 struct mei_me_hw *hw = to_me_hw(dev);
561 u32 reg = mei_me_reg_read(hw, H_HPG_CSR); 590 u32 reg;
591
592 reg = mei_me_reg_read(hw, H_HPG_CSR);
593 trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
562 594
563 WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n"); 595 WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
564 596
565 reg |= H_HPG_CSR_PGIHEXR; 597 reg |= H_HPG_CSR_PGIHEXR;
598
599 trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
566 mei_me_reg_write(hw, H_HPG_CSR, reg); 600 mei_me_reg_write(hw, H_HPG_CSR, reg);
567} 601}
568 602
569/** 603/**
570 * mei_me_pg_set_sync - perform pg entry procedure 604 * mei_me_pg_enter_sync - perform pg entry procedure
571 * 605 *
572 * @dev: the device structure 606 * @dev: the device structure
573 * 607 *
 574 * Return: 0 on success, an error code otherwise 608
575 */ 609 */
576int mei_me_pg_set_sync(struct mei_device *dev) 610int mei_me_pg_enter_sync(struct mei_device *dev)
577{ 611{
578 struct mei_me_hw *hw = to_me_hw(dev); 612 struct mei_me_hw *hw = to_me_hw(dev);
579 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 613 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -591,7 +625,7 @@ int mei_me_pg_set_sync(struct mei_device *dev)
591 mutex_lock(&dev->device_lock); 625 mutex_lock(&dev->device_lock);
592 626
593 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { 627 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
594 mei_me_pg_enter(dev); 628 mei_me_pg_set(dev);
595 ret = 0; 629 ret = 0;
596 } else { 630 } else {
597 ret = -ETIME; 631 ret = -ETIME;
@@ -604,13 +638,13 @@ int mei_me_pg_set_sync(struct mei_device *dev)
604} 638}
605 639
606/** 640/**
607 * mei_me_pg_unset_sync - perform pg exit procedure 641 * mei_me_pg_exit_sync - perform pg exit procedure
608 * 642 *
609 * @dev: the device structure 643 * @dev: the device structure
610 * 644 *
 611 * Return: 0 on success, an error code otherwise 645
612 */ 646 */
613int mei_me_pg_unset_sync(struct mei_device *dev) 647int mei_me_pg_exit_sync(struct mei_device *dev)
614{ 648{
615 struct mei_me_hw *hw = to_me_hw(dev); 649 struct mei_me_hw *hw = to_me_hw(dev);
616 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 650 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -621,7 +655,7 @@ int mei_me_pg_unset_sync(struct mei_device *dev)
621 655
622 dev->pg_event = MEI_PG_EVENT_WAIT; 656 dev->pg_event = MEI_PG_EVENT_WAIT;
623 657
624 mei_me_pg_exit(dev); 658 mei_me_pg_unset(dev);
625 659
626 mutex_unlock(&dev->device_lock); 660 mutex_unlock(&dev->device_lock);
627 wait_event_timeout(dev->wait_pg, 661 wait_event_timeout(dev->wait_pg,
@@ -649,8 +683,7 @@ reply:
649 */ 683 */
650static bool mei_me_pg_is_enabled(struct mei_device *dev) 684static bool mei_me_pg_is_enabled(struct mei_device *dev)
651{ 685{
652 struct mei_me_hw *hw = to_me_hw(dev); 686 u32 reg = mei_me_mecsr_read(dev);
653 u32 reg = mei_me_reg_read(hw, ME_CSR_HA);
654 687
655 if ((reg & ME_PGIC_HRA) == 0) 688 if ((reg & ME_PGIC_HRA) == 0)
656 goto notsupported; 689 goto notsupported;
@@ -683,14 +716,13 @@ notsupported:
683irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) 716irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
684{ 717{
685 struct mei_device *dev = (struct mei_device *) dev_id; 718 struct mei_device *dev = (struct mei_device *) dev_id;
686 struct mei_me_hw *hw = to_me_hw(dev); 719 u32 hcsr = mei_hcsr_read(dev);
687 u32 csr_reg = mei_hcsr_read(hw);
688 720
689 if ((csr_reg & H_IS) != H_IS) 721 if ((hcsr & H_IS) != H_IS)
690 return IRQ_NONE; 722 return IRQ_NONE;
691 723
692 /* clear H_IS bit in H_CSR */ 724 /* clear H_IS bit in H_CSR */
693 mei_me_reg_write(hw, H_CSR, csr_reg); 725 mei_hcsr_write(dev, hcsr);
694 726
695 return IRQ_WAKE_THREAD; 727 return IRQ_WAKE_THREAD;
696} 728}
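The hw-me.c conversion settles on one accessor shape: take the mei_device, resolve the hw struct internally, and trace every register access. A sketch of a helper written in that style; demo_intr_toggle is hypothetical and would have to live in hw-me.c, since mei_hcsr_read()/mei_hcsr_set() are static there:

static void demo_intr_toggle(struct mei_device *dev, bool enable)
{
	u32 hcsr = mei_hcsr_read(dev);	/* traced via trace_mei_reg_read() */

	if (enable)
		hcsr |= H_IE;
	else
		hcsr &= ~H_IE;

	/* mei_hcsr_set() masks H_IS so the write cannot ack an interrupt */
	mei_hcsr_set(dev, hcsr);
}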
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index d6567af44377..6022d52af6f6 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -71,8 +71,8 @@ extern const struct mei_cfg mei_me_pch8_sps_cfg;
71struct mei_device *mei_me_dev_init(struct pci_dev *pdev, 71struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
72 const struct mei_cfg *cfg); 72 const struct mei_cfg *cfg);
73 73
74int mei_me_pg_set_sync(struct mei_device *dev); 74int mei_me_pg_enter_sync(struct mei_device *dev);
75int mei_me_pg_unset_sync(struct mei_device *dev); 75int mei_me_pg_exit_sync(struct mei_device *dev);
76 76
77irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); 77irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
78irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); 78irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 618ea721aca8..7abafe7d120d 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -412,7 +412,7 @@ static void mei_txe_intr_disable(struct mei_device *dev)
412 mei_txe_br_reg_write(hw, HIER_REG, 0); 412 mei_txe_br_reg_write(hw, HIER_REG, 0);
413} 413}
414/** 414/**
415 * mei_txe_intr_disable - enable all interrupts 415 * mei_txe_intr_enable - enable all interrupts
416 * 416 *
417 * @dev: the device structure 417 * @dev: the device structure
418 */ 418 */
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 6ad049a08e4d..97353cf8d9b6 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -389,6 +389,7 @@ void mei_device_init(struct mei_device *dev,
389 INIT_LIST_HEAD(&dev->device_list); 389 INIT_LIST_HEAD(&dev->device_list);
390 INIT_LIST_HEAD(&dev->me_clients); 390 INIT_LIST_HEAD(&dev->me_clients);
391 mutex_init(&dev->device_lock); 391 mutex_init(&dev->device_lock);
392 init_rwsem(&dev->me_clients_rwsem);
392 init_waitqueue_head(&dev->wait_hw_ready); 393 init_waitqueue_head(&dev->wait_hw_ready);
393 init_waitqueue_head(&dev->wait_pg); 394 init_waitqueue_head(&dev->wait_pg);
394 init_waitqueue_head(&dev->wait_hbm_start); 395 init_waitqueue_head(&dev->wait_hbm_start);
@@ -396,7 +397,6 @@ void mei_device_init(struct mei_device *dev,
396 dev->dev_state = MEI_DEV_INITIALIZING; 397 dev->dev_state = MEI_DEV_INITIALIZING;
397 dev->reset_count = 0; 398 dev->reset_count = 0;
398 399
399 mei_io_list_init(&dev->read_list);
400 mei_io_list_init(&dev->write_list); 400 mei_io_list_init(&dev->write_list);
401 mei_io_list_init(&dev->write_waiting_list); 401 mei_io_list_init(&dev->write_waiting_list);
402 mei_io_list_init(&dev->ctrl_wr_list); 402 mei_io_list_init(&dev->ctrl_wr_list);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 711cddfa9c99..3f84d2edcde4 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -43,7 +43,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
43 43
44 list_for_each_entry_safe(cb, next, &compl_list->list, list) { 44 list_for_each_entry_safe(cb, next, &compl_list->list, list) {
45 cl = cb->cl; 45 cl = cb->cl;
46 list_del(&cb->list); 46 list_del_init(&cb->list);
47 47
48 dev_dbg(dev->dev, "completing call back.\n"); 48 dev_dbg(dev->dev, "completing call back.\n");
49 if (cl == &dev->iamthif_cl) 49 if (cl == &dev->iamthif_cl)
@@ -68,91 +68,91 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
68 return cl->host_client_id == mei_hdr->host_addr && 68 return cl->host_client_id == mei_hdr->host_addr &&
69 cl->me_client_id == mei_hdr->me_addr; 69 cl->me_client_id == mei_hdr->me_addr;
70} 70}
71
71/** 72/**
72 * mei_cl_is_reading - checks if the client 73 * mei_irq_discard_msg - discard received message
73 * is the one to read this message
74 *
75 * @cl: mei client
76 * @mei_hdr: header of mei message
77 * 74 *
78 * Return: true on match and false otherwise 75 * @dev: mei device
76 * @hdr: message header
79 */ 77 */
80static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr) 78static inline
79void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
81{ 80{
82 return mei_cl_hbm_equal(cl, mei_hdr) && 81 /*
 83 cl->state == MEI_FILE_CONNECTED && 82 * no need to check for size as it is guaranteed
84 cl->reading_state != MEI_READ_COMPLETE; 83 * that length fits into rd_msg_buf
84 */
85 mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
86 dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
87 MEI_HDR_PRM(hdr));
85} 88}
86 89
87/** 90/**
88 * mei_cl_irq_read_msg - process client message 91 * mei_cl_irq_read_msg - process client message
89 * 92 *
90 * @dev: the device structure 93 * @cl: reading client
91 * @mei_hdr: header of mei client message 94 * @mei_hdr: header of mei client message
92 * @complete_list: An instance of our list structure 95 * @complete_list: completion list
93 * 96 *
94 * Return: 0 on success, <0 on failure. 97 * Return: always 0
95 */ 98 */
96static int mei_cl_irq_read_msg(struct mei_device *dev, 99int mei_cl_irq_read_msg(struct mei_cl *cl,
97 struct mei_msg_hdr *mei_hdr, 100 struct mei_msg_hdr *mei_hdr,
98 struct mei_cl_cb *complete_list) 101 struct mei_cl_cb *complete_list)
99{ 102{
100 struct mei_cl *cl; 103 struct mei_device *dev = cl->dev;
101 struct mei_cl_cb *cb, *next; 104 struct mei_cl_cb *cb;
102 unsigned char *buffer = NULL; 105 unsigned char *buffer = NULL;
103 106
104 list_for_each_entry_safe(cb, next, &dev->read_list.list, list) { 107 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
105 cl = cb->cl; 108 if (!cb) {
106 if (!mei_cl_is_reading(cl, mei_hdr)) 109 cl_err(dev, cl, "pending read cb not found\n");
107 continue; 110 goto out;
108 111 }
109 cl->reading_state = MEI_READING;
110 112
111 if (cb->response_buffer.size == 0 || 113 if (!mei_cl_is_connected(cl)) {
112 cb->response_buffer.data == NULL) { 114 cl_dbg(dev, cl, "not connected\n");
113 cl_err(dev, cl, "response buffer is not allocated.\n"); 115 cb->status = -ENODEV;
114 list_del(&cb->list); 116 goto out;
115 return -ENOMEM; 117 }
116 }
117 118
118 if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) { 119 if (cb->buf.size == 0 || cb->buf.data == NULL) {
119 cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n", 120 cl_err(dev, cl, "response buffer is not allocated.\n");
120 cb->response_buffer.size, 121 list_move_tail(&cb->list, &complete_list->list);
121 mei_hdr->length, cb->buf_idx); 122 cb->status = -ENOMEM;
122 buffer = krealloc(cb->response_buffer.data, 123 goto out;
123 mei_hdr->length + cb->buf_idx, 124 }
124 GFP_KERNEL);
125
126 if (!buffer) {
127 list_del(&cb->list);
128 return -ENOMEM;
129 }
130 cb->response_buffer.data = buffer;
131 cb->response_buffer.size =
132 mei_hdr->length + cb->buf_idx;
133 }
134 125
135 buffer = cb->response_buffer.data + cb->buf_idx; 126 if (cb->buf.size < mei_hdr->length + cb->buf_idx) {
136 mei_read_slots(dev, buffer, mei_hdr->length); 127 cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
128 cb->buf.size, mei_hdr->length, cb->buf_idx);
129 buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx,
130 GFP_KERNEL);
137 131
138 cb->buf_idx += mei_hdr->length; 132 if (!buffer) {
139 if (mei_hdr->msg_complete) { 133 cb->status = -ENOMEM;
140 cl->status = 0; 134 list_move_tail(&cb->list, &complete_list->list);
141 list_del(&cb->list); 135 goto out;
142 cl_dbg(dev, cl, "completed read length = %lu\n",
143 cb->buf_idx);
144 list_add_tail(&cb->list, &complete_list->list);
145 } 136 }
146 break; 137 cb->buf.data = buffer;
138 cb->buf.size = mei_hdr->length + cb->buf_idx;
147 } 139 }
148 140
149 dev_dbg(dev->dev, "message read\n"); 141 buffer = cb->buf.data + cb->buf_idx;
150 if (!buffer) { 142 mei_read_slots(dev, buffer, mei_hdr->length);
151 mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length); 143
152 dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n", 144 cb->buf_idx += mei_hdr->length;
153 MEI_HDR_PRM(mei_hdr)); 145
146 if (mei_hdr->msg_complete) {
147 cb->read_time = jiffies;
148 cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx);
149 list_move_tail(&cb->list, &complete_list->list);
154 } 150 }
155 151
152out:
153 if (!buffer)
154 mei_irq_discard_msg(dev, mei_hdr);
155
156 return 0; 156 return 0;
157} 157}
158 158
@@ -183,7 +183,6 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
183 183
184 cl->state = MEI_FILE_DISCONNECTED; 184 cl->state = MEI_FILE_DISCONNECTED;
185 cl->status = 0; 185 cl->status = 0;
186 list_del(&cb->list);
187 mei_io_cb_free(cb); 186 mei_io_cb_free(cb);
188 187
189 return ret; 188 return ret;
@@ -263,7 +262,7 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
263 return ret; 262 return ret;
264 } 263 }
265 264
266 list_move_tail(&cb->list, &dev->read_list.list); 265 list_move_tail(&cb->list, &cl->rd_pending);
267 266
268 return 0; 267 return 0;
269} 268}
@@ -301,7 +300,7 @@ static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
301 if (ret) { 300 if (ret) {
302 cl->status = ret; 301 cl->status = ret;
303 cb->buf_idx = 0; 302 cb->buf_idx = 0;
304 list_del(&cb->list); 303 list_del_init(&cb->list);
305 return ret; 304 return ret;
306 } 305 }
307 306
@@ -378,25 +377,13 @@ int mei_irq_read_handler(struct mei_device *dev,
378 goto end; 377 goto end;
379 } 378 }
380 379
381 if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && 380 if (cl == &dev->iamthif_cl) {
382 MEI_FILE_CONNECTED == dev->iamthif_cl.state && 381 ret = mei_amthif_irq_read_msg(cl, mei_hdr, cmpl_list);
383 dev->iamthif_state == MEI_IAMTHIF_READING) {
384
385 ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
386 if (ret) {
387 dev_err(dev->dev, "mei_amthif_irq_read_msg failed = %d\n",
388 ret);
389 goto end;
390 }
391 } else { 382 } else {
392 ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); 383 ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
393 if (ret) {
394 dev_err(dev->dev, "mei_cl_irq_read_msg failed = %d\n",
395 ret);
396 goto end;
397 }
398 } 384 }
399 385
386
400reset_slots: 387reset_slots:
401 /* reset the number of slots and header */ 388 /* reset the number of slots and header */
402 *slots = mei_count_full_read_slots(dev); 389 *slots = mei_count_full_read_slots(dev);
@@ -449,21 +436,9 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
449 cl = cb->cl; 436 cl = cb->cl;
450 437
451 cl->status = 0; 438 cl->status = 0;
452 list_del(&cb->list); 439 cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
453 if (cb->fop_type == MEI_FOP_WRITE && 440 cl->writing_state = MEI_WRITE_COMPLETE;
454 cl != &dev->iamthif_cl) { 441 list_move_tail(&cb->list, &cmpl_list->list);
455 cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
456 cl->writing_state = MEI_WRITE_COMPLETE;
457 list_add_tail(&cb->list, &cmpl_list->list);
458 }
459 if (cl == &dev->iamthif_cl) {
460 cl_dbg(dev, cl, "check iamthif flow control.\n");
461 if (dev->iamthif_flow_control_pending) {
462 ret = mei_amthif_irq_read(dev, &slots);
463 if (ret)
464 return ret;
465 }
466 }
467 } 442 }
468 443
469 if (dev->wd_state == MEI_WD_STOPPING) { 444 if (dev->wd_state == MEI_WD_STOPPING) {
@@ -587,10 +562,7 @@ void mei_timer(struct work_struct *work)
587 if (--dev->iamthif_stall_timer == 0) { 562 if (--dev->iamthif_stall_timer == 0) {
588 dev_err(dev->dev, "timer: amthif hanged.\n"); 563 dev_err(dev->dev, "timer: amthif hanged.\n");
589 mei_reset(dev); 564 mei_reset(dev);
590 dev->iamthif_msg_buf_size = 0;
591 dev->iamthif_msg_buf_index = 0;
592 dev->iamthif_canceled = false; 565 dev->iamthif_canceled = false;
593 dev->iamthif_ioctl = true;
594 dev->iamthif_state = MEI_IAMTHIF_IDLE; 566 dev->iamthif_state = MEI_IAMTHIF_IDLE;
595 dev->iamthif_timer = 0; 567 dev->iamthif_timer = 0;
596 568
@@ -636,4 +608,3 @@ out:
636 schedule_delayed_work(&dev->timer_work, 2 * HZ); 608 schedule_delayed_work(&dev->timer_work, 2 * HZ);
637 mutex_unlock(&dev->device_lock); 609 mutex_unlock(&dev->device_lock);
638} 610}
639
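After the interrupt.c cleanup the write-completion walk no longer special-cases amthif. A sketch of what the simplified loop amounts to; demo_complete_writes is hypothetical, and the use of dev->write_waiting_list as the source list is an assumption based on the handler's surrounding context:

static void demo_complete_writes(struct mei_device *dev,
				 struct mei_cl_cb *cmpl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, &dev->write_waiting_list.list, list) {
		cl = cb->cl;
		cl->status = 0;
		cl->writing_state = MEI_WRITE_COMPLETE;
		/* every waiting cb is moved, amthif included */
		list_move_tail(&cb->list, &cmpl_list->list);
	}
}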
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 47680c84801c..3e2968159506 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -58,24 +58,18 @@ static int mei_open(struct inode *inode, struct file *file)
58 58
59 mutex_lock(&dev->device_lock); 59 mutex_lock(&dev->device_lock);
60 60
61 cl = NULL;
62
63 err = -ENODEV;
64 if (dev->dev_state != MEI_DEV_ENABLED) { 61 if (dev->dev_state != MEI_DEV_ENABLED) {
65 dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n", 62 dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
66 mei_dev_state_str(dev->dev_state)); 63 mei_dev_state_str(dev->dev_state));
64 err = -ENODEV;
67 goto err_unlock; 65 goto err_unlock;
68 } 66 }
69 67
70 err = -ENOMEM; 68 cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
71 cl = mei_cl_allocate(dev); 69 if (IS_ERR(cl)) {
72 if (!cl) 70 err = PTR_ERR(cl);
73 goto err_unlock;
74
75 /* open_handle_count check is handled in the mei_cl_link */
76 err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
77 if (err)
78 goto err_unlock; 71 goto err_unlock;
72 }
79 73
80 file->private_data = cl; 74 file->private_data = cl;
81 75
@@ -85,7 +79,6 @@ static int mei_open(struct inode *inode, struct file *file)
85 79
86err_unlock: 80err_unlock:
87 mutex_unlock(&dev->device_lock); 81 mutex_unlock(&dev->device_lock);
88 kfree(cl);
89 return err; 82 return err;
90} 83}
91 84
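mei_open() now relies on mei_cl_alloc_linked(), which combines allocation and linking and reports failure through ERR_PTR(), so the caller checks IS_ERR()/PTR_ERR() instead of juggling kfree() on the error path. As a rough sketch only (one plausible shape for such a helper built from the calls the old code used, not the patch's actual implementation in client.c):

#include <linux/err.h>
#include <linux/slab.h>
/* assumes the MEI driver headers declaring struct mei_cl,
 * mei_cl_allocate() and mei_cl_link() */

static struct mei_cl *example_cl_alloc_linked(struct mei_device *dev, int id)
{
        struct mei_cl *cl;
        int ret;

        cl = mei_cl_allocate(dev);
        if (!cl)
                return ERR_PTR(-ENOMEM);

        ret = mei_cl_link(cl, id);
        if (ret) {
                kfree(cl);
                return ERR_PTR(ret);
        }

        return cl;
}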
@@ -100,7 +93,6 @@ err_unlock:
100static int mei_release(struct inode *inode, struct file *file) 93static int mei_release(struct inode *inode, struct file *file)
101{ 94{
102 struct mei_cl *cl = file->private_data; 95 struct mei_cl *cl = file->private_data;
103 struct mei_cl_cb *cb;
104 struct mei_device *dev; 96 struct mei_device *dev;
105 int rets = 0; 97 int rets = 0;
106 98
@@ -114,33 +106,18 @@ static int mei_release(struct inode *inode, struct file *file)
114 rets = mei_amthif_release(dev, file); 106 rets = mei_amthif_release(dev, file);
115 goto out; 107 goto out;
116 } 108 }
117 if (cl->state == MEI_FILE_CONNECTED) { 109 if (mei_cl_is_connected(cl)) {
118 cl->state = MEI_FILE_DISCONNECTING; 110 cl->state = MEI_FILE_DISCONNECTING;
119 cl_dbg(dev, cl, "disconnecting\n"); 111 cl_dbg(dev, cl, "disconnecting\n");
120 rets = mei_cl_disconnect(cl); 112 rets = mei_cl_disconnect(cl);
121 } 113 }
122 mei_cl_flush_queues(cl); 114 mei_cl_flush_queues(cl, file);
123 cl_dbg(dev, cl, "removing\n"); 115 cl_dbg(dev, cl, "removing\n");
124 116
125 mei_cl_unlink(cl); 117 mei_cl_unlink(cl);
126 118
127
128 /* free read cb */
129 cb = NULL;
130 if (cl->read_cb) {
131 cb = mei_cl_find_read_cb(cl);
132 /* Remove entry from read list */
133 if (cb)
134 list_del(&cb->list);
135
136 cb = cl->read_cb;
137 cl->read_cb = NULL;
138 }
139
140 file->private_data = NULL; 119 file->private_data = NULL;
141 120
142 mei_io_cb_free(cb);
143
144 kfree(cl); 121 kfree(cl);
145out: 122out:
146 mutex_unlock(&dev->device_lock); 123 mutex_unlock(&dev->device_lock);
@@ -162,9 +139,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
162 size_t length, loff_t *offset) 139 size_t length, loff_t *offset)
163{ 140{
164 struct mei_cl *cl = file->private_data; 141 struct mei_cl *cl = file->private_data;
165 struct mei_cl_cb *cb_pos = NULL;
166 struct mei_cl_cb *cb = NULL;
167 struct mei_device *dev; 142 struct mei_device *dev;
143 struct mei_cl_cb *cb = NULL;
168 int rets; 144 int rets;
169 int err; 145 int err;
170 146
@@ -191,8 +167,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
191 goto out; 167 goto out;
192 } 168 }
193 169
194 if (cl->read_cb) { 170 cb = mei_cl_read_cb(cl, file);
195 cb = cl->read_cb; 171 if (cb) {
196 /* read what left */ 172 /* read what left */
197 if (cb->buf_idx > *offset) 173 if (cb->buf_idx > *offset)
198 goto copy_buffer; 174 goto copy_buffer;
@@ -208,7 +184,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
208 *offset = 0; 184 *offset = 0;
209 } 185 }
210 186
211 err = mei_cl_read_start(cl, length); 187 err = mei_cl_read_start(cl, length, file);
212 if (err && err != -EBUSY) { 188 if (err && err != -EBUSY) {
213 dev_dbg(dev->dev, 189 dev_dbg(dev->dev,
214 "mei start read failure with status = %d\n", err); 190 "mei start read failure with status = %d\n", err);
@@ -216,8 +192,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
216 goto out; 192 goto out;
217 } 193 }
218 194
219 if (MEI_READ_COMPLETE != cl->reading_state && 195 if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
220 !waitqueue_active(&cl->rx_wait)) {
221 if (file->f_flags & O_NONBLOCK) { 196 if (file->f_flags & O_NONBLOCK) {
222 rets = -EAGAIN; 197 rets = -EAGAIN;
223 goto out; 198 goto out;
@@ -226,8 +201,8 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
226 mutex_unlock(&dev->device_lock); 201 mutex_unlock(&dev->device_lock);
227 202
228 if (wait_event_interruptible(cl->rx_wait, 203 if (wait_event_interruptible(cl->rx_wait,
229 MEI_READ_COMPLETE == cl->reading_state || 204 (!list_empty(&cl->rd_completed)) ||
230 mei_cl_is_transitioning(cl))) { 205 (!mei_cl_is_connected(cl)))) {
231 206
232 if (signal_pending(current)) 207 if (signal_pending(current))
233 return -EINTR; 208 return -EINTR;
@@ -235,26 +210,28 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
235 } 210 }
236 211
237 mutex_lock(&dev->device_lock); 212 mutex_lock(&dev->device_lock);
238 if (mei_cl_is_transitioning(cl)) { 213 if (!mei_cl_is_connected(cl)) {
239 rets = -EBUSY; 214 rets = -EBUSY;
240 goto out; 215 goto out;
241 } 216 }
242 } 217 }
243 218
244 cb = cl->read_cb; 219 cb = mei_cl_read_cb(cl, file);
245
246 if (!cb) { 220 if (!cb) {
247 rets = -ENODEV;
248 goto out;
249 }
250 if (cl->reading_state != MEI_READ_COMPLETE) {
251 rets = 0; 221 rets = 0;
252 goto out; 222 goto out;
253 } 223 }
254 /* now copy the data to user space */ 224
255copy_buffer: 225copy_buffer:
226 /* now copy the data to user space */
227 if (cb->status) {
228 rets = cb->status;
229 dev_dbg(dev->dev, "read operation failed %d\n", rets);
230 goto free;
231 }
232
256 dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n", 233 dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n",
257 cb->response_buffer.size, cb->buf_idx); 234 cb->buf.size, cb->buf_idx);
258 if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) { 235 if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
259 rets = -EMSGSIZE; 236 rets = -EMSGSIZE;
260 goto free; 237 goto free;
@@ -264,7 +241,7 @@ copy_buffer:
264 * however buf_idx may point beyond that */ 241 * however buf_idx may point beyond that */
265 length = min_t(size_t, length, cb->buf_idx - *offset); 242 length = min_t(size_t, length, cb->buf_idx - *offset);
266 243
267 if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) { 244 if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
268 dev_dbg(dev->dev, "failed to copy data to userland\n"); 245 dev_dbg(dev->dev, "failed to copy data to userland\n");
269 rets = -EFAULT; 246 rets = -EFAULT;
270 goto free; 247 goto free;
@@ -276,13 +253,8 @@ copy_buffer:
276 goto out; 253 goto out;
277 254
278free: 255free:
279 cb_pos = mei_cl_find_read_cb(cl);
280 /* Remove entry from read list */
281 if (cb_pos)
282 list_del(&cb_pos->list);
283 mei_io_cb_free(cb); 256 mei_io_cb_free(cb);
284 cl->reading_state = MEI_IDLE; 257
285 cl->read_cb = NULL;
286out: 258out:
287 dev_dbg(dev->dev, "end mei read rets= %d\n", rets); 259 dev_dbg(dev->dev, "end mei read rets= %d\n", rets);
288 mutex_unlock(&dev->device_lock); 260 mutex_unlock(&dev->device_lock);
@@ -336,9 +308,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
336 goto out; 308 goto out;
337 } 309 }
338 310
339 if (cl->state != MEI_FILE_CONNECTED) { 311 if (!mei_cl_is_connected(cl)) {
340 dev_err(dev->dev, "host client = %d, is not connected to ME client = %d", 312 cl_err(dev, cl, "is not connected");
341 cl->host_client_id, cl->me_client_id);
342 rets = -ENODEV; 313 rets = -ENODEV;
343 goto out; 314 goto out;
344 } 315 }
@@ -349,41 +320,22 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
349 timeout = write_cb->read_time + 320 timeout = write_cb->read_time +
350 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); 321 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
351 322
352 if (time_after(jiffies, timeout) || 323 if (time_after(jiffies, timeout)) {
353 cl->reading_state == MEI_READ_COMPLETE) {
354 *offset = 0; 324 *offset = 0;
355 list_del(&write_cb->list);
356 mei_io_cb_free(write_cb); 325 mei_io_cb_free(write_cb);
357 write_cb = NULL; 326 write_cb = NULL;
358 } 327 }
359 } 328 }
360 } 329 }
361 330
362 /* free entry used in read */ 331 *offset = 0;
363 if (cl->reading_state == MEI_READ_COMPLETE) { 332 write_cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
364 *offset = 0;
365 write_cb = mei_cl_find_read_cb(cl);
366 if (write_cb) {
367 list_del(&write_cb->list);
368 mei_io_cb_free(write_cb);
369 write_cb = NULL;
370 cl->reading_state = MEI_IDLE;
371 cl->read_cb = NULL;
372 }
373 } else if (cl->reading_state == MEI_IDLE)
374 *offset = 0;
375
376
377 write_cb = mei_io_cb_init(cl, file);
378 if (!write_cb) { 333 if (!write_cb) {
379 rets = -ENOMEM; 334 rets = -ENOMEM;
380 goto out; 335 goto out;
381 } 336 }
382 rets = mei_io_cb_alloc_req_buf(write_cb, length);
383 if (rets)
384 goto out;
385 337
386 rets = copy_from_user(write_cb->request_buffer.data, ubuf, length); 338 rets = copy_from_user(write_cb->buf.data, ubuf, length);
387 if (rets) { 339 if (rets) {
388 dev_dbg(dev->dev, "failed to copy data from userland\n"); 340 dev_dbg(dev->dev, "failed to copy data from userland\n");
389 rets = -EFAULT; 341 rets = -EFAULT;
@@ -391,7 +343,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
391 } 343 }
392 344
393 if (cl == &dev->iamthif_cl) { 345 if (cl == &dev->iamthif_cl) {
394 rets = mei_amthif_write(dev, write_cb); 346 rets = mei_amthif_write(cl, write_cb);
395 347
396 if (rets) { 348 if (rets) {
397 dev_err(dev->dev, 349 dev_err(dev->dev,
@@ -464,7 +416,7 @@ static int mei_ioctl_connect_client(struct file *file,
464 */ 416 */
465 if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) { 417 if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
466 dev_dbg(dev->dev, "FW Client is amthi\n"); 418 dev_dbg(dev->dev, "FW Client is amthi\n");
467 if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) { 419 if (!mei_cl_is_connected(&dev->iamthif_cl)) {
468 rets = -ENODEV; 420 rets = -ENODEV;
469 goto end; 421 goto end;
470 } 422 }
@@ -588,6 +540,7 @@ static long mei_compat_ioctl(struct file *file,
588 */ 540 */
589static unsigned int mei_poll(struct file *file, poll_table *wait) 541static unsigned int mei_poll(struct file *file, poll_table *wait)
590{ 542{
543 unsigned long req_events = poll_requested_events(wait);
591 struct mei_cl *cl = file->private_data; 544 struct mei_cl *cl = file->private_data;
592 struct mei_device *dev; 545 struct mei_device *dev;
593 unsigned int mask = 0; 546 unsigned int mask = 0;
@@ -599,27 +552,26 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
599 552
600 mutex_lock(&dev->device_lock); 553 mutex_lock(&dev->device_lock);
601 554
602 if (!mei_cl_is_connected(cl)) { 555
556 if (dev->dev_state != MEI_DEV_ENABLED ||
557 !mei_cl_is_connected(cl)) {
603 mask = POLLERR; 558 mask = POLLERR;
604 goto out; 559 goto out;
605 } 560 }
606 561
607 mutex_unlock(&dev->device_lock); 562 if (cl == &dev->iamthif_cl) {
608 563 mask = mei_amthif_poll(dev, file, wait);
609
610 if (cl == &dev->iamthif_cl)
611 return mei_amthif_poll(dev, file, wait);
612
613 poll_wait(file, &cl->tx_wait, wait);
614
615 mutex_lock(&dev->device_lock);
616
617 if (!mei_cl_is_connected(cl)) {
618 mask = POLLERR;
619 goto out; 564 goto out;
620 } 565 }
621 566
622 mask |= (POLLIN | POLLRDNORM); 567 if (req_events & (POLLIN | POLLRDNORM)) {
568 poll_wait(file, &cl->rx_wait, wait);
569
570 if (!list_empty(&cl->rd_completed))
571 mask |= POLLIN | POLLRDNORM;
572 else
573 mei_cl_read_start(cl, 0, file);
574 }
623 575
624out: 576out:
625 mutex_unlock(&dev->device_lock); 577 mutex_unlock(&dev->device_lock);
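The reworked mei_poll() consults poll_requested_events() so it only arms the read waitqueue (and kicks off a read) when the caller actually asked for POLLIN/POLLRDNORM. A minimal sketch of the same pattern in a hypothetical driver (struct and field names are placeholders):

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct foo_dev {
        wait_queue_head_t rx_wait;
        struct list_head rx_done;
};

static unsigned int foo_poll(struct file *file, poll_table *wait)
{
        struct foo_dev *foo = file->private_data;
        unsigned long req_events = poll_requested_events(wait);
        unsigned int mask = 0;

        if (req_events & (POLLIN | POLLRDNORM)) {
                poll_wait(file, &foo->rx_wait, wait);
                if (!list_empty(&foo->rx_done))
                        mask |= POLLIN | POLLRDNORM;
        }

        return mask;
}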
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
new file mode 100644
index 000000000000..388efb519138
--- /dev/null
+++ b/drivers/misc/mei/mei-trace.c
@@ -0,0 +1,25 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2015, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16#include <linux/module.h>
17
18/* sparse doesn't like tracepoint macros */
19#ifndef __CHECKER__
20#define CREATE_TRACE_POINTS
21#include "mei-trace.h"
22
23EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
24EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
25#endif /* __CHECKER__ */
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
new file mode 100644
index 000000000000..47e1bc6551d4
--- /dev/null
+++ b/drivers/misc/mei/mei-trace.h
@@ -0,0 +1,74 @@
1/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2015, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#if !defined(_MEI_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
18#define _MEI_TRACE_H_
19
20#include <linux/stringify.h>
21#include <linux/types.h>
22#include <linux/tracepoint.h>
23
24#include <linux/device.h>
25
26#undef TRACE_SYSTEM
27#define TRACE_SYSTEM mei
28
29TRACE_EVENT(mei_reg_read,
30 TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
31 TP_ARGS(dev, reg, offs, val),
32 TP_STRUCT__entry(
33 __string(dev, dev_name(dev))
34 __field(const char *, reg)
35 __field(u32, offs)
36 __field(u32, val)
37 ),
38 TP_fast_assign(
39 __assign_str(dev, dev_name(dev))
40 __entry->reg = reg;
41 __entry->offs = offs;
42 __entry->val = val;
43 ),
44 TP_printk("[%s] read %s:[%#x] = %#x",
45 __get_str(dev), __entry->reg, __entry->offs, __entry->val)
46);
47
48TRACE_EVENT(mei_reg_write,
49 TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
50 TP_ARGS(dev, reg, offs, val),
51 TP_STRUCT__entry(
52 __string(dev, dev_name(dev))
53 __field(const char *, reg)
54 __field(u32, offs)
55 __field(u32, val)
56 ),
57 TP_fast_assign(
58 __assign_str(dev, dev_name(dev))
59 __entry->reg = reg;
60 __entry->offs = offs;
61 __entry->val = val;
62 ),
63 TP_printk("[%s] write %s[%#x] = %#x)",
64 __get_str(dev), __entry->reg, __entry->offs, __entry->val)
65);
66
67#endif /* _MEI_TRACE_H_ */
68
69/* This part must be outside protection */
70#undef TRACE_INCLUDE_PATH
71#undef TRACE_INCLUDE_FILE
72#define TRACE_INCLUDE_PATH .
73#define TRACE_INCLUDE_FILE mei-trace
74#include <trace/define_trace.h>
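Since CREATE_TRACE_POINTS is defined in exactly one translation unit (mei-trace.c above), every other file simply includes mei-trace.h and calls the trace_mei_reg_read()/trace_mei_reg_write() helpers generated by TRACE_EVENT(). A hedged call-site sketch; the register name and the accessor below are placeholders, not the driver's real read helper:

#include <linux/device.h>
#include <linux/io.h>
#include "mei-trace.h"

static inline u32 example_mei_reg_read(const struct device *dev,
                                       void __iomem *base, u32 offs)
{
        u32 val = readl(base + offs);

        trace_mei_reg_read(dev, "EXAMPLE_REG", offs, val);
        return val;
}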
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 6c6ce9381535..f066ecd71939 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -194,23 +194,25 @@ struct mei_cl;
194 * @list: link in callback queue 194 * @list: link in callback queue
195 * @cl: file client who is running this operation 195 * @cl: file client who is running this operation
196 * @fop_type: file operation type 196 * @fop_type: file operation type
197 * @request_buffer: buffer to store request data 197 * @buf: buffer for data associated with the callback
198 * @response_buffer: buffer to store response data
199 * @buf_idx: last read index 198 * @buf_idx: last read index
200 * @read_time: last read operation time stamp (iamthif) 199 * @read_time: last read operation time stamp (iamthif)
201 * @file_object: pointer to file structure 200 * @file_object: pointer to file structure
201 * @status: io status of the cb
202 * @internal: communication between driver and FW flag 202 * @internal: communication between driver and FW flag
203 * @completed: the transfer or reception has completed
203 */ 204 */
204struct mei_cl_cb { 205struct mei_cl_cb {
205 struct list_head list; 206 struct list_head list;
206 struct mei_cl *cl; 207 struct mei_cl *cl;
207 enum mei_cb_file_ops fop_type; 208 enum mei_cb_file_ops fop_type;
208 struct mei_msg_data request_buffer; 209 struct mei_msg_data buf;
209 struct mei_msg_data response_buffer;
210 unsigned long buf_idx; 210 unsigned long buf_idx;
211 unsigned long read_time; 211 unsigned long read_time;
212 struct file *file_object; 212 struct file *file_object;
213 int status;
213 u32 internal:1; 214 u32 internal:1;
215 u32 completed:1;
214}; 216};
215 217
216/** 218/**
@@ -229,9 +231,9 @@ struct mei_cl_cb {
229 * @me_client_id: me/fw id 231 * @me_client_id: me/fw id
230 * @mei_flow_ctrl_creds: transmit flow credentials 232 * @mei_flow_ctrl_creds: transmit flow credentials
231 * @timer_count: watchdog timer for operation completion 233 * @timer_count: watchdog timer for operation completion
232 * @reading_state: state of the rx
233 * @writing_state: state of the tx 234 * @writing_state: state of the tx
234 * @read_cb: current pending reading callback 235 * @rd_pending: pending read credits
236 * @rd_completed: completed read
235 * 237 *
236 * @device: device on the mei client bus 238 * @device: device on the mei client bus
237 * @device_link: link to bus clients 239 * @device_link: link to bus clients
@@ -249,9 +251,9 @@ struct mei_cl {
249 u8 me_client_id; 251 u8 me_client_id;
250 u8 mei_flow_ctrl_creds; 252 u8 mei_flow_ctrl_creds;
251 u8 timer_count; 253 u8 timer_count;
252 enum mei_file_transaction_states reading_state;
253 enum mei_file_transaction_states writing_state; 254 enum mei_file_transaction_states writing_state;
254 struct mei_cl_cb *read_cb; 255 struct list_head rd_pending;
256 struct list_head rd_completed;
255 257
256 /* MEI CL bus data */ 258 /* MEI CL bus data */
257 struct mei_cl_device *device; 259 struct mei_cl_device *device;
@@ -423,7 +425,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
423 * @cdev : character device 425 * @cdev : character device
424 * @minor : minor number allocated for device 426 * @minor : minor number allocated for device
425 * 427 *
426 * @read_list : read completion list
427 * @write_list : write pending list 428 * @write_list : write pending list
428 * @write_waiting_list : write completion list 429 * @write_waiting_list : write completion list
429 * @ctrl_wr_list : pending control write list 430 * @ctrl_wr_list : pending control write list
@@ -460,6 +461,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
460 * @version : HBM protocol version in use 461 * @version : HBM protocol version in use
461 * @hbm_f_pg_supported : hbm feature pgi protocol 462 * @hbm_f_pg_supported : hbm feature pgi protocol
462 * 463 *
464 * @me_clients_rwsem: rw lock over me_clients list
463 * @me_clients : list of FW clients 465 * @me_clients : list of FW clients
464 * @me_clients_map : FW clients bit map 466 * @me_clients_map : FW clients bit map
465 * @host_clients_map : host clients id pool 467 * @host_clients_map : host clients id pool
@@ -480,12 +482,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
480 * @iamthif_mtu : amthif client max message length 482 * @iamthif_mtu : amthif client max message length
481 * @iamthif_timer : time stamp of current amthif command completion 483 * @iamthif_timer : time stamp of current amthif command completion
482 * @iamthif_stall_timer : timer to detect amthif hang 484 * @iamthif_stall_timer : timer to detect amthif hang
483 * @iamthif_msg_buf : amthif current message buffer
484 * @iamthif_msg_buf_size : size of current amthif message request buffer
485 * @iamthif_msg_buf_index : current index in amthif message request buffer
486 * @iamthif_state : amthif processor state 485 * @iamthif_state : amthif processor state
487 * @iamthif_flow_control_pending: amthif waits for flow control
488 * @iamthif_ioctl : wait for completion if amthif control message
489 * @iamthif_canceled : current amthif command is canceled 486 * @iamthif_canceled : current amthif command is canceled
490 * 487 *
491 * @init_work : work item for the device init 488 * @init_work : work item for the device init
@@ -503,7 +500,6 @@ struct mei_device {
503 struct cdev cdev; 500 struct cdev cdev;
504 int minor; 501 int minor;
505 502
506 struct mei_cl_cb read_list;
507 struct mei_cl_cb write_list; 503 struct mei_cl_cb write_list;
508 struct mei_cl_cb write_waiting_list; 504 struct mei_cl_cb write_waiting_list;
509 struct mei_cl_cb ctrl_wr_list; 505 struct mei_cl_cb ctrl_wr_list;
@@ -556,6 +552,7 @@ struct mei_device {
556 struct hbm_version version; 552 struct hbm_version version;
557 unsigned int hbm_f_pg_supported:1; 553 unsigned int hbm_f_pg_supported:1;
558 554
555 struct rw_semaphore me_clients_rwsem;
559 struct list_head me_clients; 556 struct list_head me_clients;
560 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); 557 DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
561 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); 558 DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
@@ -579,12 +576,7 @@ struct mei_device {
579 int iamthif_mtu; 576 int iamthif_mtu;
580 unsigned long iamthif_timer; 577 unsigned long iamthif_timer;
581 u32 iamthif_stall_timer; 578 u32 iamthif_stall_timer;
582 unsigned char *iamthif_msg_buf; /* Note: memory has to be allocated */
583 u32 iamthif_msg_buf_size;
584 u32 iamthif_msg_buf_index;
585 enum iamthif_states iamthif_state; 579 enum iamthif_states iamthif_state;
586 bool iamthif_flow_control_pending;
587 bool iamthif_ioctl;
588 bool iamthif_canceled; 580 bool iamthif_canceled;
589 581
590 struct work_struct init_work; 582 struct work_struct init_work;
@@ -662,8 +654,6 @@ void mei_amthif_reset_params(struct mei_device *dev);
662 654
663int mei_amthif_host_init(struct mei_device *dev); 655int mei_amthif_host_init(struct mei_device *dev);
664 656
665int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
666
667int mei_amthif_read(struct mei_device *dev, struct file *file, 657int mei_amthif_read(struct mei_device *dev, struct file *file,
668 char __user *ubuf, size_t length, loff_t *offset); 658 char __user *ubuf, size_t length, loff_t *offset);
669 659
@@ -675,13 +665,13 @@ int mei_amthif_release(struct mei_device *dev, struct file *file);
675struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, 665struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
676 struct file *file); 666 struct file *file);
677 667
678void mei_amthif_run_next_cmd(struct mei_device *dev); 668int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
679 669int mei_amthif_run_next_cmd(struct mei_device *dev);
680int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 670int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
681 struct mei_cl_cb *cmpl_list); 671 struct mei_cl_cb *cmpl_list);
682 672
683void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb); 673void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
684int mei_amthif_irq_read_msg(struct mei_device *dev, 674int mei_amthif_irq_read_msg(struct mei_cl *cl,
685 struct mei_msg_hdr *mei_hdr, 675 struct mei_msg_hdr *mei_hdr,
686 struct mei_cl_cb *complete_list); 676 struct mei_cl_cb *complete_list);
687int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); 677int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
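mei_dev.h now tracks reads as two per-client lists, rd_pending and rd_completed, keyed back to the opening struct file through cb->file_object, instead of a single read_cb pointer. Purely as an illustration (not the driver's actual mei_cl_read_cb() implementation), a per-file completed read could be looked up on such a list like this:

#include <linux/list.h>
/* assumes the MEI driver header declaring struct mei_cl and struct mei_cl_cb */

static struct mei_cl_cb *example_find_completed_cb(struct mei_cl *cl,
                                                   const struct file *fp)
{
        struct mei_cl_cb *cb;

        list_for_each_entry(cb, &cl->rd_completed, list)
                if (cb->file_object == fp)
                        return cb;

        return NULL;
}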
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index bb61a119b8bb..c3bcb63686d7 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -482,8 +482,8 @@ err:
482int mei_nfc_host_init(struct mei_device *dev) 482int mei_nfc_host_init(struct mei_device *dev)
483{ 483{
484 struct mei_nfc_dev *ndev; 484 struct mei_nfc_dev *ndev;
485 struct mei_cl *cl_info, *cl = NULL; 485 struct mei_cl *cl_info, *cl;
486 struct mei_me_client *me_cl; 486 struct mei_me_client *me_cl = NULL;
487 int ret; 487 int ret;
488 488
489 489
@@ -500,17 +500,6 @@ int mei_nfc_host_init(struct mei_device *dev)
500 goto err; 500 goto err;
501 } 501 }
502 502
503 ndev->cl_info = mei_cl_allocate(dev);
504 ndev->cl = mei_cl_allocate(dev);
505
506 cl = ndev->cl;
507 cl_info = ndev->cl_info;
508
509 if (!cl || !cl_info) {
510 ret = -ENOMEM;
511 goto err;
512 }
513
514 /* check for valid client id */ 503 /* check for valid client id */
515 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid); 504 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
516 if (!me_cl) { 505 if (!me_cl) {
@@ -519,17 +508,21 @@ int mei_nfc_host_init(struct mei_device *dev)
519 goto err; 508 goto err;
520 } 509 }
521 510
511 cl_info = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
512 if (IS_ERR(cl_info)) {
513 ret = PTR_ERR(cl_info);
514 goto err;
515 }
516
522 cl_info->me_client_id = me_cl->client_id; 517 cl_info->me_client_id = me_cl->client_id;
523 cl_info->cl_uuid = me_cl->props.protocol_name; 518 cl_info->cl_uuid = me_cl->props.protocol_name;
524 mei_me_cl_put(me_cl); 519 mei_me_cl_put(me_cl);
525 520 me_cl = NULL;
526 ret = mei_cl_link(cl_info, MEI_HOST_CLIENT_ID_ANY);
527 if (ret)
528 goto err;
529
530 521
531 list_add_tail(&cl_info->device_link, &dev->device_list); 522 list_add_tail(&cl_info->device_link, &dev->device_list);
532 523
524 ndev->cl_info = cl_info;
525
533 /* check for valid client id */ 526 /* check for valid client id */
534 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid); 527 me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
535 if (!me_cl) { 528 if (!me_cl) {
@@ -538,16 +531,21 @@ int mei_nfc_host_init(struct mei_device *dev)
538 goto err; 531 goto err;
539 } 532 }
540 533
534 cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
535 if (IS_ERR(cl)) {
536 ret = PTR_ERR(cl);
537 goto err;
538 }
539
541 cl->me_client_id = me_cl->client_id; 540 cl->me_client_id = me_cl->client_id;
542 cl->cl_uuid = me_cl->props.protocol_name; 541 cl->cl_uuid = me_cl->props.protocol_name;
543 mei_me_cl_put(me_cl); 542 mei_me_cl_put(me_cl);
544 543 me_cl = NULL;
545 ret = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
546 if (ret)
547 goto err;
548 544
549 list_add_tail(&cl->device_link, &dev->device_list); 545 list_add_tail(&cl->device_link, &dev->device_list);
550 546
547 ndev->cl = cl;
548
551 ndev->req_id = 1; 549 ndev->req_id = 1;
552 550
553 INIT_WORK(&ndev->init_work, mei_nfc_init); 551 INIT_WORK(&ndev->init_work, mei_nfc_init);
@@ -557,6 +555,7 @@ int mei_nfc_host_init(struct mei_device *dev)
557 return 0; 555 return 0;
558 556
559err: 557err:
558 mei_me_cl_put(me_cl);
560 mei_nfc_free(ndev); 559 mei_nfc_free(ndev);
561 560
562 return ret; 561 return ret;
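Initializing me_cl to NULL and clearing it after each successful put lets the single err: label drop whatever reference is still outstanding; this only works because mei_me_cl_put() is assumed to tolerate a NULL argument, as the error path above implies. A stripped-down sketch of the pattern (the uuid parameter and error code are illustrative):

#include <linux/uuid.h>
/* assumes the MEI driver headers declaring struct mei_me_client,
 * mei_me_cl_by_uuid() and mei_me_cl_put() */

static int example_lookup(struct mei_device *dev, const uuid_le *uuid)
{
        struct mei_me_client *me_cl = NULL;
        int ret;

        me_cl = mei_me_cl_by_uuid(dev, uuid);
        if (!me_cl) {
                ret = -ENODEV;
                goto err;
        }

        /* ... consume me_cl->client_id, me_cl->props ... */

        mei_me_cl_put(me_cl);
        me_cl = NULL;                   /* nothing left to drop below */

        return 0;

err:
        mei_me_cl_put(me_cl);           /* assumed NULL-safe */
        return ret;
}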
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index af44ee26075d..23f71f5ce4fb 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -388,7 +388,7 @@ static int mei_me_pm_runtime_suspend(struct device *device)
388 mutex_lock(&dev->device_lock); 388 mutex_lock(&dev->device_lock);
389 389
390 if (mei_write_is_idle(dev)) 390 if (mei_write_is_idle(dev))
391 ret = mei_me_pg_set_sync(dev); 391 ret = mei_me_pg_enter_sync(dev);
392 else 392 else
393 ret = -EAGAIN; 393 ret = -EAGAIN;
394 394
@@ -413,7 +413,7 @@ static int mei_me_pm_runtime_resume(struct device *device)
413 413
414 mutex_lock(&dev->device_lock); 414 mutex_lock(&dev->device_lock);
415 415
416 ret = mei_me_pg_unset_sync(dev); 416 ret = mei_me_pg_exit_sync(dev);
417 417
418 mutex_unlock(&dev->device_lock); 418 mutex_unlock(&dev->device_lock);
419 419
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index c86e2ddbe30a..dcfcba44b6f7 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -63,7 +63,7 @@ static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
63 } 63 }
64} 64}
65/** 65/**
66 * mei_probe - Device Initialization Routine 66 * mei_txe_probe - Device Initialization Routine
67 * 67 *
68 * @pdev: PCI device structure 68 * @pdev: PCI device structure
69 * @ent: entry in mei_txe_pci_tbl 69 * @ent: entry in mei_txe_pci_tbl
@@ -193,7 +193,7 @@ end:
193} 193}
194 194
195/** 195/**
196 * mei_remove - Device Removal Routine 196 * mei_txe_remove - Device Removal Routine
197 * 197 *
198 * @pdev: PCI device structure 198 * @pdev: PCI device structure
199 * 199 *
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 475f1dea45bf..2725f865c3d6 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -160,9 +160,10 @@ int mei_wd_send(struct mei_device *dev)
160 */ 160 */
161int mei_wd_stop(struct mei_device *dev) 161int mei_wd_stop(struct mei_device *dev)
162{ 162{
163 struct mei_cl *cl = &dev->wd_cl;
163 int ret; 164 int ret;
164 165
165 if (dev->wd_cl.state != MEI_FILE_CONNECTED || 166 if (!mei_cl_is_connected(cl) ||
166 dev->wd_state != MEI_WD_RUNNING) 167 dev->wd_state != MEI_WD_RUNNING)
167 return 0; 168 return 0;
168 169
@@ -170,7 +171,7 @@ int mei_wd_stop(struct mei_device *dev)
170 171
171 dev->wd_state = MEI_WD_STOPPING; 172 dev->wd_state = MEI_WD_STOPPING;
172 173
173 ret = mei_cl_flow_ctrl_creds(&dev->wd_cl); 174 ret = mei_cl_flow_ctrl_creds(cl);
174 if (ret < 0) 175 if (ret < 0)
175 goto err; 176 goto err;
176 177
@@ -202,22 +203,25 @@ err:
202 return ret; 203 return ret;
203} 204}
204 205
205/* 206/**
206 * mei_wd_ops_start - wd start command from the watchdog core. 207 * mei_wd_ops_start - wd start command from the watchdog core.
207 * 208 *
208 * @wd_dev - watchdog device struct 209 * @wd_dev: watchdog device struct
209 * 210 *
210 * Return: 0 if success, negative errno code for failure 211 * Return: 0 if success, negative errno code for failure
211 */ 212 */
212static int mei_wd_ops_start(struct watchdog_device *wd_dev) 213static int mei_wd_ops_start(struct watchdog_device *wd_dev)
213{ 214{
214 int err = -ENODEV;
215 struct mei_device *dev; 215 struct mei_device *dev;
216 struct mei_cl *cl;
217 int err = -ENODEV;
216 218
217 dev = watchdog_get_drvdata(wd_dev); 219 dev = watchdog_get_drvdata(wd_dev);
218 if (!dev) 220 if (!dev)
219 return -ENODEV; 221 return -ENODEV;
220 222
223 cl = &dev->wd_cl;
224
221 mutex_lock(&dev->device_lock); 225 mutex_lock(&dev->device_lock);
222 226
223 if (dev->dev_state != MEI_DEV_ENABLED) { 227 if (dev->dev_state != MEI_DEV_ENABLED) {
@@ -226,8 +230,8 @@ static int mei_wd_ops_start(struct watchdog_device *wd_dev)
226 goto end_unlock; 230 goto end_unlock;
227 } 231 }
228 232
229 if (dev->wd_cl.state != MEI_FILE_CONNECTED) { 233 if (!mei_cl_is_connected(cl)) {
230 dev_dbg(dev->dev, "MEI Driver is not connected to Watchdog Client\n"); 234 cl_dbg(dev, cl, "MEI Driver is not connected to Watchdog Client\n");
231 goto end_unlock; 235 goto end_unlock;
232 } 236 }
233 237
@@ -239,10 +243,10 @@ end_unlock:
239 return err; 243 return err;
240} 244}
241 245
242/* 246/**
243 * mei_wd_ops_stop - wd stop command from the watchdog core. 247 * mei_wd_ops_stop - wd stop command from the watchdog core.
244 * 248 *
245 * @wd_dev - watchdog device struct 249 * @wd_dev: watchdog device struct
246 * 250 *
247 * Return: 0 if success, negative errno code for failure 251 * Return: 0 if success, negative errno code for failure
248 */ 252 */
@@ -261,10 +265,10 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
261 return 0; 265 return 0;
262} 266}
263 267
264/* 268/**
265 * mei_wd_ops_ping - wd ping command from the watchdog core. 269 * mei_wd_ops_ping - wd ping command from the watchdog core.
266 * 270 *
267 * @wd_dev - watchdog device struct 271 * @wd_dev: watchdog device struct
268 * 272 *
269 * Return: 0 if success, negative errno code for failure 273 * Return: 0 if success, negative errno code for failure
270 */ 274 */
@@ -282,8 +286,8 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
282 286
283 mutex_lock(&dev->device_lock); 287 mutex_lock(&dev->device_lock);
284 288
285 if (cl->state != MEI_FILE_CONNECTED) { 289 if (!mei_cl_is_connected(cl)) {
286 dev_err(dev->dev, "wd: not connected.\n"); 290 cl_err(dev, cl, "wd: not connected.\n");
287 ret = -ENODEV; 291 ret = -ENODEV;
288 goto end; 292 goto end;
289 } 293 }
@@ -311,11 +315,11 @@ end:
311 return ret; 315 return ret;
312} 316}
313 317
314/* 318/**
315 * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core. 319 * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core.
316 * 320 *
317 * @wd_dev - watchdog device struct 321 * @wd_dev: watchdog device struct
318 * @timeout - timeout value to set 322 * @timeout: timeout value to set
319 * 323 *
320 * Return: 0 if success, negative errno code for failure 324 * Return: 0 if success, negative errno code for failure
321 */ 325 */
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index ff2b0fb1a6be..d9fa609da061 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -309,7 +309,7 @@ void mic_complete_resume(struct mic_device *mdev)
309 */ 309 */
310void mic_prepare_suspend(struct mic_device *mdev) 310void mic_prepare_suspend(struct mic_device *mdev)
311{ 311{
312 int rc; 312 unsigned long timeout;
313 313
314#define MIC_SUSPEND_TIMEOUT (60 * HZ) 314#define MIC_SUSPEND_TIMEOUT (60 * HZ)
315 315
@@ -331,10 +331,10 @@ void mic_prepare_suspend(struct mic_device *mdev)
331 */ 331 */
332 mic_set_state(mdev, MIC_SUSPENDING); 332 mic_set_state(mdev, MIC_SUSPENDING);
333 mutex_unlock(&mdev->mic_mutex); 333 mutex_unlock(&mdev->mic_mutex);
334 rc = wait_for_completion_timeout(&mdev->reset_wait, 334 timeout = wait_for_completion_timeout(&mdev->reset_wait,
335 MIC_SUSPEND_TIMEOUT); 335 MIC_SUSPEND_TIMEOUT);
336 /* Force reset the card if the shutdown completion timed out */ 336 /* Force reset the card if the shutdown completion timed out */
337 if (!rc) { 337 if (!timeout) {
338 mutex_lock(&mdev->mic_mutex); 338 mutex_lock(&mdev->mic_mutex);
339 mic_set_state(mdev, MIC_SUSPENDED); 339 mic_set_state(mdev, MIC_SUSPENDED);
340 mutex_unlock(&mdev->mic_mutex); 340 mutex_unlock(&mdev->mic_mutex);
@@ -348,10 +348,10 @@ void mic_prepare_suspend(struct mic_device *mdev)
348 */ 348 */
349 mic_set_state(mdev, MIC_SUSPENDED); 349 mic_set_state(mdev, MIC_SUSPENDED);
350 mutex_unlock(&mdev->mic_mutex); 350 mutex_unlock(&mdev->mic_mutex);
351 rc = wait_for_completion_timeout(&mdev->reset_wait, 351 timeout = wait_for_completion_timeout(&mdev->reset_wait,
352 MIC_SUSPEND_TIMEOUT); 352 MIC_SUSPEND_TIMEOUT);
353 /* Force reset the card if the shutdown completion timed out */ 353 /* Force reset the card if the shutdown completion timed out */
354 if (!rc) 354 if (!timeout)
355 mic_stop(mdev, true); 355 mic_stop(mdev, true);
356 break; 356 break;
357 default: 357 default:
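wait_for_completion_timeout() returns an unsigned long: 0 on timeout, otherwise the number of jiffies remaining. Storing the result in an int both truncates it and invites misreading it as an errno, which is why these call sites switch to an unsigned long named timeout. A minimal sketch of the corrected pattern:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait(struct completion *done)
{
        unsigned long left;

        left = wait_for_completion_timeout(done, msecs_to_jiffies(1000));
        if (!left)
                return -ETIMEDOUT;      /* 0 means the wait timed out */

        return 0;                       /* completed with 'left' jiffies to spare */
}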
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index d686f2846ac7..b4ca6c884d19 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -363,8 +363,6 @@ static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
363{ 363{
364 int rc; 364 int rc;
365 365
366 pci_msi_off(pdev);
367
368 /* Enable intx */ 366 /* Enable intx */
369 pci_intx(pdev, 1); 367 pci_intx(pdev, 1);
370 rc = mic_setup_callbacks(mdev); 368 rc = mic_setup_callbacks(mdev);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 21181fa243df..eeaaf5fca105 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -69,12 +69,23 @@ static int sram_probe(struct platform_device *pdev)
69 INIT_LIST_HEAD(&reserve_list); 69 INIT_LIST_HEAD(&reserve_list);
70 70
71 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 71 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
72 virt_base = devm_ioremap_resource(&pdev->dev, res); 72 if (!res) {
73 if (IS_ERR(virt_base)) 73 dev_err(&pdev->dev, "found no memory resource\n");
74 return PTR_ERR(virt_base); 74 return -EINVAL;
75 }
75 76
76 size = resource_size(res); 77 size = resource_size(res);
77 78
79 if (!devm_request_mem_region(&pdev->dev,
80 res->start, size, pdev->name)) {
81 dev_err(&pdev->dev, "could not request region for resource\n");
82 return -EBUSY;
83 }
84
85 virt_base = devm_ioremap_wc(&pdev->dev, res->start, size);
86 if (IS_ERR(virt_base))
87 return PTR_ERR(virt_base);
88
78 sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL); 89 sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
79 if (!sram) 90 if (!sram)
80 return -ENOMEM; 91 return -ENOMEM;
@@ -205,7 +216,7 @@ static int sram_remove(struct platform_device *pdev)
205} 216}
206 217
207#ifdef CONFIG_OF 218#ifdef CONFIG_OF
208static struct of_device_id sram_dt_ids[] = { 219static const struct of_device_id sram_dt_ids[] = {
209 { .compatible = "mmio-sram" }, 220 { .compatible = "mmio-sram" },
210 {} 221 {}
211}; 222};
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index a606c8901e18..a37a42f67088 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -236,6 +236,7 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
236{ 236{
237 struct tifm_adapter *fm = pci_get_drvdata(dev); 237 struct tifm_adapter *fm = pci_get_drvdata(dev);
238 int rc; 238 int rc;
239 unsigned long timeout;
239 unsigned int good_sockets = 0, bad_sockets = 0; 240 unsigned int good_sockets = 0, bad_sockets = 0;
240 unsigned long flags; 241 unsigned long flags;
241 unsigned char new_ids[fm->num_sockets]; 242 unsigned char new_ids[fm->num_sockets];
@@ -272,8 +273,8 @@ static int tifm_7xx1_resume(struct pci_dev *dev)
272 if (good_sockets) { 273 if (good_sockets) {
273 fm->finish_me = &finish_resume; 274 fm->finish_me = &finish_resume;
274 spin_unlock_irqrestore(&fm->lock, flags); 275 spin_unlock_irqrestore(&fm->lock, flags);
275 rc = wait_for_completion_timeout(&finish_resume, HZ); 276 timeout = wait_for_completion_timeout(&finish_resume, HZ);
276 dev_dbg(&dev->dev, "wait returned %d\n", rc); 277 dev_dbg(&dev->dev, "wait returned %lu\n", timeout);
277 writel(TIFM_IRQ_FIFOMASK(good_sockets) 278 writel(TIFM_IRQ_FIFOMASK(good_sockets)
278 | TIFM_IRQ_CARDMASK(good_sockets), 279 | TIFM_IRQ_CARDMASK(good_sockets),
279 fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 280 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 032d35cf93ca..b823f9a6e464 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
113 113
114MODULE_AUTHOR("VMware, Inc."); 114MODULE_AUTHOR("VMware, Inc.");
115MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); 115MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
116MODULE_VERSION("1.1.1.0-k"); 116MODULE_VERSION("1.1.3.0-k");
117MODULE_LICENSE("GPL v2"); 117MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 66fc9921fc85..a721b5d8a9da 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -395,6 +395,12 @@ static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
395 return -EFAULT; 395 return -EFAULT;
396 } 396 }
397 397
398 if (VMCI_DG_SIZE(dg) != send_info.len) {
399 vmci_ioctl_err("datagram size mismatch\n");
400 kfree(dg);
401 return -EINVAL;
402 }
403
398 pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n", 404 pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
399 dg->dst.context, dg->dst.resource, 405 dg->dst.context, dg->dst.resource,
400 dg->src.context, dg->src.resource, 406 dg->src.context, dg->src.resource,
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 35f19a683822..f42d9c4e4561 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -295,12 +295,20 @@ static void *qp_alloc_queue(u64 size, u32 flags)
295{ 295{
296 u64 i; 296 u64 i;
297 struct vmci_queue *queue; 297 struct vmci_queue *queue;
298 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 298 size_t pas_size;
299 const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); 299 size_t vas_size;
300 const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); 300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
301 const size_t queue_size = 301 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
302 sizeof(*queue) + sizeof(*queue->kernel_if) + 302
303 pas_size + vas_size; 303 if (num_pages >
304 (SIZE_MAX - queue_size) /
305 (sizeof(*queue->kernel_if->u.g.pas) +
306 sizeof(*queue->kernel_if->u.g.vas)))
307 return NULL;
308
309 pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
310 vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
311 queue_size += pas_size + vas_size;
304 312
305 queue = vmalloc(queue_size); 313 queue = vmalloc(queue_size);
306 if (!queue) 314 if (!queue)
@@ -615,10 +623,15 @@ static int qp_memcpy_from_queue_iov(void *dest,
615static struct vmci_queue *qp_host_alloc_queue(u64 size) 623static struct vmci_queue *qp_host_alloc_queue(u64 size)
616{ 624{
617 struct vmci_queue *queue; 625 struct vmci_queue *queue;
618 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 626 size_t queue_page_size;
627 const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
619 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); 628 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
620 const size_t queue_page_size = 629
621 num_pages * sizeof(*queue->kernel_if->u.h.page); 630 if (num_pages > (SIZE_MAX - queue_size) /
631 sizeof(*queue->kernel_if->u.h.page))
632 return NULL;
633
634 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
622 635
623 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); 636 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
624 if (queue) { 637 if (queue) {
@@ -737,7 +750,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
737 produce_q->kernel_if->num_pages, 1, 750 produce_q->kernel_if->num_pages, 1,
738 produce_q->kernel_if->u.h.header_page); 751 produce_q->kernel_if->u.h.header_page);
739 if (retval < produce_q->kernel_if->num_pages) { 752 if (retval < produce_q->kernel_if->num_pages) {
740 pr_warn("get_user_pages(produce) failed (retval=%d)", retval); 753 pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
754 retval);
741 qp_release_pages(produce_q->kernel_if->u.h.header_page, 755 qp_release_pages(produce_q->kernel_if->u.h.header_page,
742 retval, false); 756 retval, false);
743 err = VMCI_ERROR_NO_MEM; 757 err = VMCI_ERROR_NO_MEM;
@@ -748,7 +762,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
748 consume_q->kernel_if->num_pages, 1, 762 consume_q->kernel_if->num_pages, 1,
749 consume_q->kernel_if->u.h.header_page); 763 consume_q->kernel_if->u.h.header_page);
750 if (retval < consume_q->kernel_if->num_pages) { 764 if (retval < consume_q->kernel_if->num_pages) {
751 pr_warn("get_user_pages(consume) failed (retval=%d)", retval); 765 pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
766 retval);
752 qp_release_pages(consume_q->kernel_if->u.h.header_page, 767 qp_release_pages(consume_q->kernel_if->u.h.header_page,
753 retval, false); 768 retval, false);
754 qp_release_pages(produce_q->kernel_if->u.h.header_page, 769 qp_release_pages(produce_q->kernel_if->u.h.header_page,
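Both qp_alloc_queue() and qp_host_alloc_queue() now reject page counts that would make the size computation wrap before calling vmalloc()/kzalloc(); the guard is the usual "n > (SIZE_MAX - fixed) / per_item" form. A standalone sketch of the same check with hypothetical names:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Returns NULL instead of letting 'fixed + n * per_item' wrap around. */
static void *alloc_array_with_header(uint64_t n, size_t fixed, size_t per_item)
{
        if (per_item && n > (SIZE_MAX - fixed) / per_item)
                return NULL;

        /* Guard above guarantees this sum fits in size_t. */
        return malloc(fixed + n * per_item);
}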
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 8170102d1e93..4e2f501e5548 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -220,9 +220,7 @@ static int __init omap_cf_probe(struct platform_device *pdev)
220 cf = kzalloc(sizeof *cf, GFP_KERNEL); 220 cf = kzalloc(sizeof *cf, GFP_KERNEL);
221 if (!cf) 221 if (!cf)
222 return -ENOMEM; 222 return -ENOMEM;
223 init_timer(&cf->timer); 223 setup_timer(&cf->timer, omap_cf_timer, (unsigned long)cf);
224 cf->timer.function = omap_cf_timer;
225 cf->timer.data = (unsigned long) cf;
226 224
227 cf->pdev = pdev; 225 cf->pdev = pdev;
228 platform_set_drvdata(pdev, cf); 226 platform_set_drvdata(pdev, cf);
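This and the following pcmcia conversions (pd6729, soc_common, yenta) replace open-coded init_timer() plus ->function/->data assignments with setup_timer(), and setting ->expires plus add_timer() with mod_timer(). A sketch of both forms on a made-up driver, using the timer API of this era where callbacks still take an unsigned long cookie:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo_socket {
        struct timer_list poll_timer;
};

static void foo_poll(unsigned long data)
{
        struct foo_socket *sock = (struct foo_socket *)data;

        /* ... poll the hardware ... */
        mod_timer(&sock->poll_timer, jiffies + HZ);     /* re-arm */
}

static void foo_start_polling(struct foo_socket *sock)
{
        /* Equivalent to init_timer() plus ->function and ->data assignments. */
        setup_timer(&sock->poll_timer, foo_poll, (unsigned long)sock);

        /* Equivalent to setting ->expires and calling add_timer(). */
        mod_timer(&sock->poll_timer, jiffies + HZ);
}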
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 34ace4854dc2..0f70b4d58f9e 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -707,11 +707,9 @@ static int pd6729_pci_probe(struct pci_dev *dev,
707 } 707 }
708 } else { 708 } else {
709 /* poll Card status change */ 709 /* poll Card status change */
710 init_timer(&socket->poll_timer); 710 setup_timer(&socket->poll_timer, pd6729_interrupt_wrapper,
711 socket->poll_timer.function = pd6729_interrupt_wrapper; 711 (unsigned long)socket);
712 socket->poll_timer.data = (unsigned long)socket; 712 mod_timer(&socket->poll_timer, jiffies + HZ);
713 socket->poll_timer.expires = jiffies + HZ;
714 add_timer(&socket->poll_timer);
715 } 713 }
716 714
717 for (i = 0; i < MAX_SOCKETS; i++) { 715 for (i = 0; i < MAX_SOCKETS; i++) {
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 933f4657515b..eed5e9c05353 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -726,9 +726,8 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
726{ 726{
727 int ret; 727 int ret;
728 728
729 init_timer(&skt->poll_timer); 729 setup_timer(&skt->poll_timer, soc_common_pcmcia_poll_event,
730 skt->poll_timer.function = soc_common_pcmcia_poll_event; 730 (unsigned long)skt);
731 skt->poll_timer.data = (unsigned long)skt;
732 skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD; 731 skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;
733 732
734 ret = request_resource(&iomem_resource, &skt->res_skt); 733 ret = request_resource(&iomem_resource, &skt->res_skt);
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 8a23ccb41213..965bd8491233 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1236,11 +1236,9 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id)
1236 if (!socket->cb_irq || request_irq(socket->cb_irq, yenta_interrupt, IRQF_SHARED, "yenta", socket)) { 1236 if (!socket->cb_irq || request_irq(socket->cb_irq, yenta_interrupt, IRQF_SHARED, "yenta", socket)) {
1237 /* No IRQ or request_irq failed. Poll */ 1237 /* No IRQ or request_irq failed. Poll */
1238 socket->cb_irq = 0; /* But zero is a valid IRQ number. */ 1238 socket->cb_irq = 0; /* But zero is a valid IRQ number. */
1239 init_timer(&socket->poll_timer); 1239 setup_timer(&socket->poll_timer, yenta_interrupt_wrapper,
1240 socket->poll_timer.function = yenta_interrupt_wrapper; 1240 (unsigned long)socket);
1241 socket->poll_timer.data = (unsigned long)socket; 1241 mod_timer(&socket->poll_timer, jiffies + HZ);
1242 socket->poll_timer.expires = jiffies + HZ;
1243 add_timer(&socket->poll_timer);
1244 dev_printk(KERN_INFO, &dev->dev, 1242 dev_printk(KERN_INFO, &dev->dev,
1245 "no PCI IRQ, CardBus support disabled for this " 1243 "no PCI IRQ, CardBus support disabled for this "
1246 "socket.\n"); 1244 "socket.\n");
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index bf1295e19f89..c8d99563d245 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -12,7 +12,6 @@ if SPMI
12 12
13config SPMI_MSM_PMIC_ARB 13config SPMI_MSM_PMIC_ARB
14 tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)" 14 tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
15 depends on ARM
16 depends on IRQ_DOMAIN 15 depends on IRQ_DOMAIN
17 depends on ARCH_QCOM || COMPILE_TEST 16 depends on ARCH_QCOM || COMPILE_TEST
18 default ARCH_QCOM 17 default ARCH_QCOM
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 20559ab3466d..d7119db49cfe 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,4 +1,5 @@
1/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 1/*
2 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
2 * 3 *
3 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and 5 * it under the terms of the GNU General Public License version 2 and
@@ -25,22 +26,18 @@
25 26
26/* PMIC Arbiter configuration registers */ 27/* PMIC Arbiter configuration registers */
27#define PMIC_ARB_VERSION 0x0000 28#define PMIC_ARB_VERSION 0x0000
29#define PMIC_ARB_VERSION_V2_MIN 0x20010000
28#define PMIC_ARB_INT_EN 0x0004 30#define PMIC_ARB_INT_EN 0x0004
29 31
30/* PMIC Arbiter channel registers */ 32/* PMIC Arbiter channel registers offsets */
31#define PMIC_ARB_CMD(N) (0x0800 + (0x80 * (N))) 33#define PMIC_ARB_CMD 0x00
32#define PMIC_ARB_CONFIG(N) (0x0804 + (0x80 * (N))) 34#define PMIC_ARB_CONFIG 0x04
33#define PMIC_ARB_STATUS(N) (0x0808 + (0x80 * (N))) 35#define PMIC_ARB_STATUS 0x08
34#define PMIC_ARB_WDATA0(N) (0x0810 + (0x80 * (N))) 36#define PMIC_ARB_WDATA0 0x10
35#define PMIC_ARB_WDATA1(N) (0x0814 + (0x80 * (N))) 37#define PMIC_ARB_WDATA1 0x14
36#define PMIC_ARB_RDATA0(N) (0x0818 + (0x80 * (N))) 38#define PMIC_ARB_RDATA0 0x18
37#define PMIC_ARB_RDATA1(N) (0x081C + (0x80 * (N))) 39#define PMIC_ARB_RDATA1 0x1C
38 40#define PMIC_ARB_REG_CHNL(N) (0x800 + 0x4 * (N))
39/* Interrupt Controller */
40#define SPMI_PIC_OWNER_ACC_STATUS(M, N) (0x0000 + ((32 * (M)) + (4 * (N))))
41#define SPMI_PIC_ACC_ENABLE(N) (0x0200 + (4 * (N)))
42#define SPMI_PIC_IRQ_STATUS(N) (0x0600 + (4 * (N)))
43#define SPMI_PIC_IRQ_CLEAR(N) (0x0A00 + (4 * (N)))
44 41
45/* Mapping Table */ 42/* Mapping Table */
46#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N))) 43#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N)))
@@ -52,6 +49,7 @@
52 49
53#define SPMI_MAPPING_TABLE_LEN 255 50#define SPMI_MAPPING_TABLE_LEN 255
54#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */ 51#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
52#define PPID_TO_CHAN_TABLE_SZ BIT(12) /* PPID is 12bit chan is 1byte*/
55 53
56/* Ownership Table */ 54/* Ownership Table */
57#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N))) 55#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N)))
@@ -88,6 +86,7 @@ enum pmic_arb_cmd_op_code {
88 86
89/* Maximum number of support PMIC peripherals */ 87/* Maximum number of support PMIC peripherals */
90#define PMIC_ARB_MAX_PERIPHS 256 88#define PMIC_ARB_MAX_PERIPHS 256
89#define PMIC_ARB_MAX_CHNL 128
91#define PMIC_ARB_PERIPH_ID_VALID (1 << 15) 90#define PMIC_ARB_PERIPH_ID_VALID (1 << 15)
92#define PMIC_ARB_TIMEOUT_US 100 91#define PMIC_ARB_TIMEOUT_US 100
93#define PMIC_ARB_MAX_TRANS_BYTES (8) 92#define PMIC_ARB_MAX_TRANS_BYTES (8)
@@ -98,14 +97,17 @@ enum pmic_arb_cmd_op_code {
98/* interrupt enable bit */ 97/* interrupt enable bit */
99#define SPMI_PIC_ACC_ENABLE_BIT BIT(0) 98#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
100 99
100struct pmic_arb_ver_ops;
101
101/** 102/**
102 * spmi_pmic_arb_dev - SPMI PMIC Arbiter object 103 * spmi_pmic_arb_dev - SPMI PMIC Arbiter object
103 * 104 *
104 * @base: address of the PMIC Arbiter core registers. 105 * @rd_base: on v1 "core", on v2 "observer" register base off DT.
106 * @wr_base: on v1 "core", on v2 "chnls" register base off DT.
105 * @intr: address of the SPMI interrupt control registers. 107 * @intr: address of the SPMI interrupt control registers.
106 * @cnfg: address of the PMIC Arbiter configuration registers. 108 * @cnfg: address of the PMIC Arbiter configuration registers.
107 * @lock: lock to synchronize accesses. 109 * @lock: lock to synchronize accesses.
108 * @channel: which channel to use for accesses. 110 * @channel: execution environment channel to use for accesses.
109 * @irq: PMIC ARB interrupt. 111 * @irq: PMIC ARB interrupt.
110 * @ee: the current Execution Environment 112 * @ee: the current Execution Environment
111 * @min_apid: minimum APID (used for bounding IRQ search) 113 * @min_apid: minimum APID (used for bounding IRQ search)
@@ -113,10 +115,14 @@ enum pmic_arb_cmd_op_code {
113 * @mapping_table: in-memory copy of PPID -> APID mapping table. 115 * @mapping_table: in-memory copy of PPID -> APID mapping table.
114 * @domain: irq domain object for PMIC IRQ domain 116 * @domain: irq domain object for PMIC IRQ domain
115 * @spmic: SPMI controller object 117 * @spmic: SPMI controller object
116 * @apid_to_ppid: cached mapping from APID to PPID 118 * @apid_to_ppid: in-memory copy of APID -> PPID mapping table.
119 * @ver_ops: version dependent operations.
120 * @ppid_to_chan in-memory copy of PPID -> channel (APID) mapping table.
121 * v2 only.
117 */ 122 */
118struct spmi_pmic_arb_dev { 123struct spmi_pmic_arb_dev {
119 void __iomem *base; 124 void __iomem *rd_base;
125 void __iomem *wr_base;
120 void __iomem *intr; 126 void __iomem *intr;
121 void __iomem *cnfg; 127 void __iomem *cnfg;
122 raw_spinlock_t lock; 128 raw_spinlock_t lock;
@@ -129,17 +135,54 @@ struct spmi_pmic_arb_dev {
129 struct irq_domain *domain; 135 struct irq_domain *domain;
130 struct spmi_controller *spmic; 136 struct spmi_controller *spmic;
131 u16 apid_to_ppid[256]; 137 u16 apid_to_ppid[256];
138 const struct pmic_arb_ver_ops *ver_ops;
139 u8 *ppid_to_chan;
140};
141
142/**
143 * pmic_arb_ver: version dependent functionality.
144 *
145 * @non_data_cmd: on v1 issues an spmi non-data command.
146 * on v2 no HW support, returns -EOPNOTSUPP.
147 * @offset: on v1 offset of per-ee channel.
148 * on v2 offset of per-ee and per-ppid channel.
149 * @fmt_cmd: formats a GENI/SPMI command.
150 * @owner_acc_status: on v1 offset of PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn
151 * on v2 offset of SPMI_PIC_OWNERm_ACC_STATUSn.
152 * @acc_enable: on v1 offset of PMIC_ARB_SPMI_PIC_ACC_ENABLEn
153 * on v2 offset of SPMI_PIC_ACC_ENABLEn.
154 * @irq_status: on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_STATUSn
155 * on v2 offset of SPMI_PIC_IRQ_STATUSn.
156 * @irq_clear: on v1 offset of PMIC_ARB_SPMI_PIC_IRQ_CLEARn
157 * on v2 offset of SPMI_PIC_IRQ_CLEARn.
158 */
159struct pmic_arb_ver_ops {
160 /* spmi commands (read_cmd, write_cmd, cmd) functionality */
161 u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr);
162 u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
163 int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
164 /* Interrupts controller functionality (offset of PIC registers) */
165 u32 (*owner_acc_status)(u8 m, u8 n);
166 u32 (*acc_enable)(u8 n);
167 u32 (*irq_status)(u8 n);
168 u32 (*irq_clear)(u8 n);
132}; 169};
133 170
134static inline u32 pmic_arb_base_read(struct spmi_pmic_arb_dev *dev, u32 offset) 171static inline u32 pmic_arb_base_read(struct spmi_pmic_arb_dev *dev, u32 offset)
135{ 172{
136 return readl_relaxed(dev->base + offset); 173 return readl_relaxed(dev->rd_base + offset);
137} 174}
138 175
139static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev, 176static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
140 u32 offset, u32 val) 177 u32 offset, u32 val)
141{ 178{
142 writel_relaxed(val, dev->base + offset); 179 writel_relaxed(val, dev->wr_base + offset);
180}
181
182static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
183 u32 offset, u32 val)
184{
185 writel_relaxed(val, dev->rd_base + offset);
143} 186}
144 187
145/** 188/**
@@ -168,15 +211,16 @@ pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
168 pmic_arb_base_write(dev, reg, data); 211 pmic_arb_base_write(dev, reg, data);
169} 212}
170 213
171static int pmic_arb_wait_for_done(struct spmi_controller *ctrl) 214static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
215 void __iomem *base, u8 sid, u16 addr)
172{ 216{
173 struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl); 217 struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
174 u32 status = 0; 218 u32 status = 0;
175 u32 timeout = PMIC_ARB_TIMEOUT_US; 219 u32 timeout = PMIC_ARB_TIMEOUT_US;
176 u32 offset = PMIC_ARB_STATUS(dev->channel); 220 u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS;
177 221
178 while (timeout--) { 222 while (timeout--) {
179 status = pmic_arb_base_read(dev, offset); 223 status = readl_relaxed(base + offset);
180 224
181 if (status & PMIC_ARB_STATUS_DONE) { 225 if (status & PMIC_ARB_STATUS_DONE) {
182 if (status & PMIC_ARB_STATUS_DENIED) { 226 if (status & PMIC_ARB_STATUS_DENIED) {
@@ -211,28 +255,45 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl)
211 return -ETIMEDOUT; 255 return -ETIMEDOUT;
212} 256}
213 257
214/* Non-data command */ 258static int
215static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid) 259pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
216{ 260{
217 struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl); 261 struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
218 unsigned long flags; 262 unsigned long flags;
219 u32 cmd; 263 u32 cmd;
220 int rc; 264 int rc;
221 265 u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0);
222 /* Check for valid non-data command */
223 if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
224 return -EINVAL;
225 266
226 cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20); 267 cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
227 268
228 raw_spin_lock_irqsave(&pmic_arb->lock, flags); 269 raw_spin_lock_irqsave(&pmic_arb->lock, flags);
229 pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd); 270 pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
230 rc = pmic_arb_wait_for_done(ctrl); 271 rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0);
231 raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); 272 raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
232 273
233 return rc; 274 return rc;
234} 275}
235 276
277static int
278pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid)
279{
280 return -EOPNOTSUPP;
281}
282
283/* Non-data command */
284static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
285{
286 struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
287
288 dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
289
290 /* Check for valid non-data command */
291 if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
292 return -EINVAL;
293
294 return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
295}
296
236static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, 297static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
237 u16 addr, u8 *buf, size_t len) 298 u16 addr, u8 *buf, size_t len)
238{ 299{
@@ -241,10 +302,11 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
241 u8 bc = len - 1; 302 u8 bc = len - 1;
242 u32 cmd; 303 u32 cmd;
243 int rc; 304 int rc;
305 u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
244 306
245 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { 307 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
246 dev_err(&ctrl->dev, 308 dev_err(&ctrl->dev,
247 "pmic-arb supports 1..%d bytes per trans, but %d requested", 309 "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
248 PMIC_ARB_MAX_TRANS_BYTES, len); 310 PMIC_ARB_MAX_TRANS_BYTES, len);
249 return -EINVAL; 311 return -EINVAL;
250 } 312 }
@@ -259,20 +321,20 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
259 else 321 else
260 return -EINVAL; 322 return -EINVAL;
261 323
262 cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7); 324 cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
263 325
264 raw_spin_lock_irqsave(&pmic_arb->lock, flags); 326 raw_spin_lock_irqsave(&pmic_arb->lock, flags);
265 pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd); 327 pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
266 rc = pmic_arb_wait_for_done(ctrl); 328 rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr);
267 if (rc) 329 if (rc)
268 goto done; 330 goto done;
269 331
270 pa_read_data(pmic_arb, buf, PMIC_ARB_RDATA0(pmic_arb->channel), 332 pa_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
271 min_t(u8, bc, 3)); 333 min_t(u8, bc, 3));
272 334
273 if (bc > 3) 335 if (bc > 3)
274 pa_read_data(pmic_arb, buf + 4, 336 pa_read_data(pmic_arb, buf + 4,
275 PMIC_ARB_RDATA1(pmic_arb->channel), bc - 4); 337 offset + PMIC_ARB_RDATA1, bc - 4);
276 338
277done: 339done:
278 raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); 340 raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
@@ -287,10 +349,11 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
287 u8 bc = len - 1; 349 u8 bc = len - 1;
288 u32 cmd; 350 u32 cmd;
289 int rc; 351 int rc;
352 u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
290 353
291 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { 354 if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
292 dev_err(&ctrl->dev, 355 dev_err(&ctrl->dev,
293 "pmic-arb supports 1..%d bytes per trans, but:%d requested", 356 "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
294 PMIC_ARB_MAX_TRANS_BYTES, len); 357 PMIC_ARB_MAX_TRANS_BYTES, len);
295 return -EINVAL; 358 return -EINVAL;
296 } 359 }
@@ -307,19 +370,19 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
307 else 370 else
308 return -EINVAL; 371 return -EINVAL;
309 372
310 cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7); 373 cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
311 374
312 /* Write data to FIFOs */ 375 /* Write data to FIFOs */
313 raw_spin_lock_irqsave(&pmic_arb->lock, flags); 376 raw_spin_lock_irqsave(&pmic_arb->lock, flags);
314 pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel) 377 pa_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
315 , min_t(u8, bc, 3)); 378 min_t(u8, bc, 3));
316 if (bc > 3) 379 if (bc > 3)
317 pa_write_data(pmic_arb, buf + 4, 380 pa_write_data(pmic_arb, buf + 4,
318 PMIC_ARB_WDATA1(pmic_arb->channel), bc - 4); 381 offset + PMIC_ARB_WDATA1, bc - 4);
319 382
320 /* Start the transaction */ 383 /* Start the transaction */
321 pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd); 384 pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
322 rc = pmic_arb_wait_for_done(ctrl); 385 rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr);
323 raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); 386 raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
324 387
325 return rc; 388 return rc;
@@ -376,7 +439,7 @@ static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
376 u32 status; 439 u32 status;
377 int id; 440 int id;
378 441
379 status = readl_relaxed(pa->intr + SPMI_PIC_IRQ_STATUS(apid)); 442 status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
380 while (status) { 443 while (status) {
381 id = ffs(status) - 1; 444 id = ffs(status) - 1;
382 status &= ~(1 << id); 445 status &= ~(1 << id);
@@ -402,7 +465,7 @@ static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
402 465
403 for (i = first; i <= last; ++i) { 466 for (i = first; i <= last; ++i) {
404 status = readl_relaxed(intr + 467 status = readl_relaxed(intr +
405 SPMI_PIC_OWNER_ACC_STATUS(pa->ee, i)); 468 pa->ver_ops->owner_acc_status(pa->ee, i));
406 while (status) { 469 while (status) {
407 id = ffs(status) - 1; 470 id = ffs(status) - 1;
408 status &= ~(1 << id); 471 status &= ~(1 << id);
@@ -422,7 +485,7 @@ static void qpnpint_irq_ack(struct irq_data *d)
422 u8 data; 485 u8 data;
423 486
424 raw_spin_lock_irqsave(&pa->lock, flags); 487 raw_spin_lock_irqsave(&pa->lock, flags);
425 writel_relaxed(1 << irq, pa->intr + SPMI_PIC_IRQ_CLEAR(apid)); 488 writel_relaxed(1 << irq, pa->intr + pa->ver_ops->irq_clear(apid));
426 raw_spin_unlock_irqrestore(&pa->lock, flags); 489 raw_spin_unlock_irqrestore(&pa->lock, flags);
427 490
428 data = 1 << irq; 491 data = 1 << irq;
@@ -439,10 +502,11 @@ static void qpnpint_irq_mask(struct irq_data *d)
439 u8 data; 502 u8 data;
440 503
441 raw_spin_lock_irqsave(&pa->lock, flags); 504 raw_spin_lock_irqsave(&pa->lock, flags);
442 status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid)); 505 status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
443 if (status & SPMI_PIC_ACC_ENABLE_BIT) { 506 if (status & SPMI_PIC_ACC_ENABLE_BIT) {
444 status = status & ~SPMI_PIC_ACC_ENABLE_BIT; 507 status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
445 writel_relaxed(status, pa->intr + SPMI_PIC_ACC_ENABLE(apid)); 508 writel_relaxed(status, pa->intr +
509 pa->ver_ops->acc_enable(apid));
446 } 510 }
447 raw_spin_unlock_irqrestore(&pa->lock, flags); 511 raw_spin_unlock_irqrestore(&pa->lock, flags);
448 512
@@ -460,10 +524,10 @@ static void qpnpint_irq_unmask(struct irq_data *d)
460 u8 data; 524 u8 data;
461 525
462 raw_spin_lock_irqsave(&pa->lock, flags); 526 raw_spin_lock_irqsave(&pa->lock, flags);
463 status = readl_relaxed(pa->intr + SPMI_PIC_ACC_ENABLE(apid)); 527 status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
464 if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) { 528 if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
465 writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT, 529 writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
466 pa->intr + SPMI_PIC_ACC_ENABLE(apid)); 530 pa->intr + pa->ver_ops->acc_enable(apid));
467 } 531 }
468 raw_spin_unlock_irqrestore(&pa->lock, flags); 532 raw_spin_unlock_irqrestore(&pa->lock, flags);
469 533
@@ -624,6 +688,91 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
624 return 0; 688 return 0;
625} 689}
626 690
691/* v1 offset per ee */
692static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
693{
694 return 0x800 + 0x80 * pa->channel;
695}
696
697/* v2 offset per ppid (chan) and per ee */
698static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
699{
700 u16 ppid = (sid << 8) | (addr >> 8);
701 u8 chan = pa->ppid_to_chan[ppid];
702
703 return 0x1000 * pa->ee + 0x8000 * chan;
704}
705
706static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
707{
708 return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
709}
710
711static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
712{
713 return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
714}
715
716static u32 pmic_arb_owner_acc_status_v1(u8 m, u8 n)
717{
718 return 0x20 * m + 0x4 * n;
719}
720
721static u32 pmic_arb_owner_acc_status_v2(u8 m, u8 n)
722{
723 return 0x100000 + 0x1000 * m + 0x4 * n;
724}
725
726static u32 pmic_arb_acc_enable_v1(u8 n)
727{
728 return 0x200 + 0x4 * n;
729}
730
731static u32 pmic_arb_acc_enable_v2(u8 n)
732{
733 return 0x1000 * n;
734}
735
736static u32 pmic_arb_irq_status_v1(u8 n)
737{
738 return 0x600 + 0x4 * n;
739}
740
741static u32 pmic_arb_irq_status_v2(u8 n)
742{
743 return 0x4 + 0x1000 * n;
744}
745
746static u32 pmic_arb_irq_clear_v1(u8 n)
747{
748 return 0xA00 + 0x4 * n;
749}
750
751static u32 pmic_arb_irq_clear_v2(u8 n)
752{
753 return 0x8 + 0x1000 * n;
754}
755
756static const struct pmic_arb_ver_ops pmic_arb_v1 = {
757 .non_data_cmd = pmic_arb_non_data_cmd_v1,
758 .offset = pmic_arb_offset_v1,
759 .fmt_cmd = pmic_arb_fmt_cmd_v1,
760 .owner_acc_status = pmic_arb_owner_acc_status_v1,
761 .acc_enable = pmic_arb_acc_enable_v1,
762 .irq_status = pmic_arb_irq_status_v1,
763 .irq_clear = pmic_arb_irq_clear_v1,
764};
765
766static const struct pmic_arb_ver_ops pmic_arb_v2 = {
767 .non_data_cmd = pmic_arb_non_data_cmd_v2,
768 .offset = pmic_arb_offset_v2,
769 .fmt_cmd = pmic_arb_fmt_cmd_v2,
770 .owner_acc_status = pmic_arb_owner_acc_status_v2,
771 .acc_enable = pmic_arb_acc_enable_v2,
772 .irq_status = pmic_arb_irq_status_v2,
773 .irq_clear = pmic_arb_irq_clear_v2,
774};
775
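To make the two register maps above concrete, the same arithmetic can be exercised outside the kernel. The snippet below is a hypothetical user-space sketch, not part of the patch: it re-implements the offset and ACC_ENABLE calculations of the v1/v2 helpers just added so the resulting addresses can be compared for a sample channel.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical user-space harness: mirrors the math of the pmic_arb_*_v1/v2
 * helpers above so v1 and v2 addresses can be compared side by side. */
static uint32_t offset_v1(uint8_t channel)
{
	return 0x800 + 0x80 * channel;		/* per-EE channel (v1) */
}

static uint32_t offset_v2(uint8_t ee, uint8_t chan)
{
	return 0x1000 * ee + 0x8000 * chan;	/* per-EE and per-PPID channel (v2) */
}

static uint32_t acc_enable_v1(uint8_t n)
{
	return 0x200 + 0x4 * n;			/* PMIC_ARB_SPMI_PIC_ACC_ENABLEn */
}

static uint32_t acc_enable_v2(uint8_t n)
{
	return 0x1000 * n;			/* SPMI_PIC_ACC_ENABLEn */
}

int main(void)
{
	/* Example: channel/apid 3, executing on EE 0. */
	printf("v1 cmd block: 0x%" PRIx32 ", v2 cmd block: 0x%" PRIx32 "\n",
	       offset_v1(3), offset_v2(0, 3));		/* 0x980 vs 0x18000 */
	printf("v1 acc_enable: 0x%" PRIx32 ", v2 acc_enable: 0x%" PRIx32 "\n",
	       acc_enable_v1(3), acc_enable_v2(3));	/* 0x20c vs 0x3000 */
	return 0;
}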
627static const struct irq_domain_ops pmic_arb_irq_domain_ops = { 776static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
628 .map = qpnpint_irq_domain_map, 777 .map = qpnpint_irq_domain_map,
629 .xlate = qpnpint_irq_domain_dt_translate, 778 .xlate = qpnpint_irq_domain_dt_translate,
@@ -634,8 +783,10 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
634 struct spmi_pmic_arb_dev *pa; 783 struct spmi_pmic_arb_dev *pa;
635 struct spmi_controller *ctrl; 784 struct spmi_controller *ctrl;
636 struct resource *res; 785 struct resource *res;
637 u32 channel, ee; 786 void __iomem *core;
787 u32 channel, ee, hw_ver;
638 int err, i; 788 int err, i;
789 bool is_v1;
639 790
640 ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa)); 791 ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
641 if (!ctrl) 792 if (!ctrl)
@@ -645,12 +796,65 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
645 pa->spmic = ctrl; 796 pa->spmic = ctrl;
646 797
647 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); 798 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
648 pa->base = devm_ioremap_resource(&ctrl->dev, res); 799 core = devm_ioremap_resource(&ctrl->dev, res);
649 if (IS_ERR(pa->base)) { 800 if (IS_ERR(core)) {
650 err = PTR_ERR(pa->base); 801 err = PTR_ERR(core);
651 goto err_put_ctrl; 802 goto err_put_ctrl;
652 } 803 }
653 804
805 hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
806 is_v1 = (hw_ver < PMIC_ARB_VERSION_V2_MIN);
807
808 dev_info(&ctrl->dev, "PMIC Arb Version-%d (0x%x)\n", (is_v1 ? 1 : 2),
809 hw_ver);
810
811 if (is_v1) {
812 pa->ver_ops = &pmic_arb_v1;
813 pa->wr_base = core;
814 pa->rd_base = core;
815 } else {
816 u8 chan;
817 u16 ppid;
818 u32 regval;
819
820 pa->ver_ops = &pmic_arb_v2;
821
822 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
823 "obsrvr");
824 pa->rd_base = devm_ioremap_resource(&ctrl->dev, res);
825 if (IS_ERR(pa->rd_base)) {
826 err = PTR_ERR(pa->rd_base);
827 goto err_put_ctrl;
828 }
829
830 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
831 "chnls");
832 pa->wr_base = devm_ioremap_resource(&ctrl->dev, res);
833 if (IS_ERR(pa->wr_base)) {
834 err = PTR_ERR(pa->wr_base);
835 goto err_put_ctrl;
836 }
837
838 pa->ppid_to_chan = devm_kzalloc(&ctrl->dev,
839 PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL);
840 if (!pa->ppid_to_chan) {
841 err = -ENOMEM;
842 goto err_put_ctrl;
843 }
844 /*
845 * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
846 * ppid_to_chan is an in-memory invert of that table.
847 */
848 for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) {
849 regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan));
850 if (!regval)
851 continue;
852
853 ppid = (regval >> 8) & 0xFFF;
854 pa->ppid_to_chan[ppid] = chan;
855 }
856 }
857
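The PPID bookkeeping above is small enough to illustrate standalone. The following hypothetical sketch (all names and register values invented for illustration) shows both directions of the mapping: probe learns the channel assignment from the 12-bit PPID in bits 8..19 of each PMIC_ARB_REG_CHNL entry, and pmic_arb_offset_v2() later rebuilds the same PPID from the slave id and the upper byte of the register address.

#include <assert.h>
#include <stdint.h>

#define NUM_PPID	(1 << 12)	/* 4-bit SID + 8-bit peripheral id */

static uint8_t ppid_to_chan[NUM_PPID];

/* Probe-time direction: invert the HW table (chan -> regval) in memory. */
static void learn_channel(uint8_t chan, uint32_t regval)
{
	uint16_t ppid = (regval >> 8) & 0xFFF;

	ppid_to_chan[ppid] = chan;
}

/* Transfer-time direction: SID plus register address back to a channel,
 * as done by pmic_arb_offset_v2(). */
static uint8_t chan_for(uint8_t sid, uint16_t addr)
{
	uint16_t ppid = (sid << 8) | (addr >> 8);

	return ppid_to_chan[ppid];
}

int main(void)
{
	/* Invented example: channel 5 serves SID 0xA, peripheral base 0x0100. */
	learn_channel(5, 0x000A0100);
	assert(chan_for(0xA, 0x0142) == 5);	/* any register in 0x0100..0x01FF */
	return 0;
}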
654 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr"); 858 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
655 pa->intr = devm_ioremap_resource(&ctrl->dev, res); 859 pa->intr = devm_ioremap_resource(&ctrl->dev, res);
656 if (IS_ERR(pa->intr)) { 860 if (IS_ERR(pa->intr)) {
@@ -731,9 +935,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
731 if (err) 935 if (err)
732 goto err_domain_remove; 936 goto err_domain_remove;
733 937
734 dev_dbg(&ctrl->dev, "PMIC Arb Version 0x%x\n",
735 pmic_arb_base_read(pa, PMIC_ARB_VERSION));
736
737 return 0; 938 return 0;
738 939
739err_domain_remove: 940err_domain_remove:
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 1d92f5103ebf..94938436aef9 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -1,4 +1,5 @@
1/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 1/*
2 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
2 * 3 *
3 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and 5 * it under the terms of the GNU General Public License version 2 and
@@ -316,11 +317,6 @@ static int spmi_drv_probe(struct device *dev)
316 struct spmi_device *sdev = to_spmi_device(dev); 317 struct spmi_device *sdev = to_spmi_device(dev);
317 int err; 318 int err;
318 319
319 /* Ensure the slave is in ACTIVE state */
320 err = spmi_command_wakeup(sdev);
321 if (err)
322 goto fail_wakeup;
323
324 pm_runtime_get_noresume(dev); 320 pm_runtime_get_noresume(dev);
325 pm_runtime_set_active(dev); 321 pm_runtime_set_active(dev);
326 pm_runtime_enable(dev); 322 pm_runtime_enable(dev);
@@ -335,7 +331,6 @@ fail_probe:
335 pm_runtime_disable(dev); 331 pm_runtime_disable(dev);
336 pm_runtime_set_suspended(dev); 332 pm_runtime_set_suspended(dev);
337 pm_runtime_put_noidle(dev); 333 pm_runtime_put_noidle(dev);
338fail_wakeup:
339 return err; 334 return err;
340} 335}
341 336
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 6276f13e9e12..65bf0676d54a 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -835,7 +835,15 @@ int __uio_register_device(struct module *owner,
835 info->uio_dev = idev; 835 info->uio_dev = idev;
836 836
837 if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { 837 if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
838 ret = devm_request_irq(idev->dev, info->irq, uio_interrupt, 838 /*
839 * Note that we deliberately don't use devm_request_irq
840 * here. The parent module can unregister the UIO device
841 * and call pci_disable_msi, which requires that this
842 * irq has been freed. However, the device may have open
843 * FDs at the time of unregister and therefore may not be
844 * freed until they are released.
845 */
846 ret = request_irq(info->irq, uio_interrupt,
839 info->irq_flags, info->name, idev); 847 info->irq_flags, info->name, idev);
840 if (ret) 848 if (ret)
841 goto err_request_irq; 849 goto err_request_irq;
@@ -871,6 +879,8 @@ void uio_unregister_device(struct uio_info *info)
871 879
872 uio_dev_del_attributes(idev); 880 uio_dev_del_attributes(idev);
873 881
882 free_irq(idev->info->irq, idev);
883
874 device_destroy(&uio_class, MKDEV(uio_major, idev->minor)); 884 device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
875 885
876 return; 886 return;
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 53bf2c860ad3..a4621757a47f 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -166,7 +166,7 @@ static int mxc_w1_remove(struct platform_device *pdev)
166 return 0; 166 return 0;
167} 167}
168 168
169static struct of_device_id mxc_w1_dt_ids[] = { 169static const struct of_device_id mxc_w1_dt_ids[] = {
170 { .compatible = "fsl,imx21-owire" }, 170 { .compatible = "fsl,imx21-owire" },
171 { /* sentinel */ } 171 { /* sentinel */ }
172}; 172};
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 03321d6a2684..e7d448963a24 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -72,7 +72,7 @@ struct hdq_data {
72static int omap_hdq_probe(struct platform_device *pdev); 72static int omap_hdq_probe(struct platform_device *pdev);
73static int omap_hdq_remove(struct platform_device *pdev); 73static int omap_hdq_remove(struct platform_device *pdev);
74 74
75static struct of_device_id omap_hdq_dt_ids[] = { 75static const struct of_device_id omap_hdq_dt_ids[] = {
76 { .compatible = "ti,omap3-1w" }, 76 { .compatible = "ti,omap3-1w" },
77 {} 77 {}
78}; 78};
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index b99a932ad901..8f7848c62811 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -68,7 +68,7 @@ static u8 w1_gpio_read_bit(void *data)
68} 68}
69 69
70#if defined(CONFIG_OF) 70#if defined(CONFIG_OF)
71static struct of_device_id w1_gpio_dt_ids[] = { 71static const struct of_device_id w1_gpio_dt_ids[] = {
72 { .compatible = "w1-gpio" }, 72 { .compatible = "w1-gpio" },
73 {} 73 {}
74}; 74};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5a2ba674795e..902c37aef67e 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -646,12 +646,13 @@ struct hv_input_signal_event_buffer {
646}; 646};
647 647
648struct vmbus_channel { 648struct vmbus_channel {
649 /* Unique channel id */
650 int id;
651
649 struct list_head listentry; 652 struct list_head listentry;
650 653
651 struct hv_device *device_obj; 654 struct hv_device *device_obj;
652 655
653 struct work_struct work;
654
655 enum vmbus_channel_state state; 656 enum vmbus_channel_state state;
656 657
657 struct vmbus_channel_offer_channel offermsg; 658 struct vmbus_channel_offer_channel offermsg;
@@ -672,7 +673,6 @@ struct vmbus_channel {
672 struct hv_ring_buffer_info outbound; /* send to parent */ 673 struct hv_ring_buffer_info outbound; /* send to parent */
673 struct hv_ring_buffer_info inbound; /* receive from parent */ 674 struct hv_ring_buffer_info inbound; /* receive from parent */
674 spinlock_t inbound_lock; 675 spinlock_t inbound_lock;
675 struct workqueue_struct *controlwq;
676 676
677 struct vmbus_close_msg close_msg; 677 struct vmbus_close_msg close_msg;
678 678
@@ -758,6 +758,9 @@ struct vmbus_channel {
758 * link up channels based on their CPU affinity. 758 * link up channels based on their CPU affinity.
759 */ 759 */
760 struct list_head percpu_list; 760 struct list_head percpu_list;
761
762 int num_sc;
763 int next_oc;
761}; 764};
762 765
763static inline void set_channel_read_state(struct vmbus_channel *c, bool state) 766static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
@@ -861,6 +864,14 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
861 enum vmbus_packet_type type, 864 enum vmbus_packet_type type,
862 u32 flags); 865 u32 flags);
863 866
867extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
868 void *buffer,
869 u32 bufferLen,
870 u64 requestid,
871 enum vmbus_packet_type type,
872 u32 flags,
873 bool kick_q);
874
864extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, 875extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
865 struct hv_page_buffer pagebuffers[], 876 struct hv_page_buffer pagebuffers[],
866 u32 pagecount, 877 u32 pagecount,
@@ -868,6 +879,15 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
868 u32 bufferlen, 879 u32 bufferlen,
869 u64 requestid); 880 u64 requestid);
870 881
882extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
883 struct hv_page_buffer pagebuffers[],
884 u32 pagecount,
885 void *buffer,
886 u32 bufferlen,
887 u64 requestid,
888 u32 flags,
889 bool kick_q);
890
871extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, 891extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
872 struct hv_multipage_buffer *mpb, 892 struct hv_multipage_buffer *mpb,
873 void *buffer, 893 void *buffer,
@@ -1107,6 +1127,16 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
1107 } 1127 }
1108 1128
1109/* 1129/*
1130 * NetworkDirect. This is the guest RDMA service.
1131 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
1132 */
1133#define HV_ND_GUID \
1134 .guid = { \
1135 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \
1136 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \
1137 }
1138
1139/*
1110 * Common header for Hyper-V ICs 1140 * Common header for Hyper-V ICs
1111 */ 1141 */
1112 1142
@@ -1213,6 +1243,7 @@ void hv_kvp_onchannelcallback(void *);
1213int hv_vss_init(struct hv_util_service *); 1243int hv_vss_init(struct hv_util_service *);
1214void hv_vss_deinit(void); 1244void hv_vss_deinit(void);
1215void hv_vss_onchannelcallback(void *); 1245void hv_vss_onchannelcallback(void *);
1246void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1216 1247
1217extern struct resource hyperv_mmio; 1248extern struct resource hyperv_mmio;
1218 1249
diff --git a/include/linux/io.h b/include/linux/io.h
index 4cc299c598e0..986f2bffea1e 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -72,6 +72,8 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
72 resource_size_t size); 72 resource_size_t size);
73void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, 73void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
74 resource_size_t size); 74 resource_size_t size);
75void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
76 resource_size_t size);
75void devm_iounmap(struct device *dev, void __iomem *addr); 77void devm_iounmap(struct device *dev, void __iomem *addr);
76int check_signature(const volatile void __iomem *io_addr, 78int check_signature(const volatile void __iomem *io_addr,
77 const unsigned char *signature, int length); 79 const unsigned char *signature, int length);
diff --git a/include/linux/jz4780-nemc.h b/include/linux/jz4780-nemc.h
new file mode 100644
index 000000000000..e7f1cc7a2284
--- /dev/null
+++ b/include/linux/jz4780-nemc.h
@@ -0,0 +1,43 @@
1/*
2 * JZ4780 NAND/external memory controller (NEMC)
3 *
4 * Copyright (c) 2015 Imagination Technologies
5 * Author: Alex Smith <alex@alex-smith.me.uk>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#ifndef __LINUX_JZ4780_NEMC_H__
14#define __LINUX_JZ4780_NEMC_H__
15
16#include <linux/types.h>
17
18struct device;
19
20/*
21 * Number of NEMC banks. Note that there are actually 6, but they are numbered
22 * from 1.
23 */
24#define JZ4780_NEMC_NUM_BANKS 7
25
26/**
27 * enum jz4780_nemc_bank_type - device types which can be connected to a bank
28 * @JZ4780_NEMC_BANK_SRAM: SRAM
29 * @JZ4780_NEMC_BANK_NAND: NAND
30 */
31enum jz4780_nemc_bank_type {
32 JZ4780_NEMC_BANK_SRAM,
33 JZ4780_NEMC_BANK_NAND,
34};
35
36extern unsigned int jz4780_nemc_num_banks(struct device *dev);
37
38extern void jz4780_nemc_set_type(struct device *dev, unsigned int bank,
39 enum jz4780_nemc_bank_type type);
40extern void jz4780_nemc_assert(struct device *dev, unsigned int bank,
41 bool assert);
42
43#endif /* __LINUX_JZ4780_NEMC_H__ */
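The header only declares the three helpers; how a bank consumer is expected to call them is easiest to see from a short sketch. The snippet below is hypothetical (function name and error handling invented) and is not part of this series:

/* Hypothetical NEMC consumer: configure a bank for NAND and toggle its
 * chip-select around an access. 'dev' is assumed to be the bank's consumer
 * device, as expected by the NEMC driver. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/jz4780-nemc.h>

static int example_nand_bank_setup(struct device *dev, unsigned int bank)
{
	if (bank == 0 || bank >= JZ4780_NEMC_NUM_BANKS)
		return -EINVAL;		/* banks are numbered 1..6, see above */

	jz4780_nemc_set_type(dev, bank, JZ4780_NEMC_BANK_NAND);

	jz4780_nemc_assert(dev, bank, true);	/* assert the bank's chip-select */
	/* ... access the NAND device ... */
	jz4780_nemc_assert(dev, bank, false);	/* release it again */

	return 0;
}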
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index f97010576f56..16a498f48169 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -127,7 +127,7 @@ struct arizona {
127 struct regmap_irq_chip_data *aod_irq_chip; 127 struct regmap_irq_chip_data *aod_irq_chip;
128 struct regmap_irq_chip_data *irq_chip; 128 struct regmap_irq_chip_data *irq_chip;
129 129
130 bool hpdet_magic; 130 bool hpdet_clamp;
131 unsigned int hp_ena; 131 unsigned int hp_ena;
132 132
133 struct mutex clk_lock; 133 struct mutex clk_lock;
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index ee80dd7d9f60..819077c32690 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -52,6 +52,7 @@
52#define MISC_DYNAMIC_MINOR 255 52#define MISC_DYNAMIC_MINOR 255
53 53
54struct device; 54struct device;
55struct attribute_group;
55 56
56struct miscdevice { 57struct miscdevice {
57 int minor; 58 int minor;
@@ -60,6 +61,7 @@ struct miscdevice {
60 struct list_head list; 61 struct list_head list;
61 struct device *parent; 62 struct device *parent;
62 struct device *this_device; 63 struct device *this_device;
64 const struct attribute_group **groups;
63 const char *nodename; 65 const char *nodename;
64 umode_t mode; 66 umode_t mode;
65}; 67};
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 25d942d1da27..11dc22a6983b 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -440,7 +440,7 @@ int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
440 mutex_unlock(&clockevents_mutex); 440 mutex_unlock(&clockevents_mutex);
441 return ret; 441 return ret;
442} 442}
443EXPORT_SYMBOL_GPL(clockevents_unbind); 443EXPORT_SYMBOL_GPL(clockevents_unbind_device);
444 444
445/* Sanity check of state transition callbacks */ 445/* Sanity check of state transition callbacks */
446static int clockevents_sanity_check(struct clock_event_device *dev) 446static int clockevents_sanity_check(struct clock_event_device *dev)
diff --git a/lib/devres.c b/lib/devres.c
index 0f1dd2e9d2c1..fbe2aac522e6 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -72,6 +72,34 @@ void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
72EXPORT_SYMBOL(devm_ioremap_nocache); 72EXPORT_SYMBOL(devm_ioremap_nocache);
73 73
74/** 74/**
75 * devm_ioremap_wc - Managed ioremap_wc()
76 * @dev: Generic device to remap IO address for
77 * @offset: BUS offset to map
78 * @size: Size of map
79 *
80 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
81 */
82void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
83 resource_size_t size)
84{
85 void __iomem **ptr, *addr;
86
87 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
88 if (!ptr)
89 return NULL;
90
91 addr = ioremap_wc(offset, size);
92 if (addr) {
93 *ptr = addr;
94 devres_add(dev, ptr);
95 } else
96 devres_free(ptr);
97
98 return addr;
99}
100EXPORT_SYMBOL(devm_ioremap_wc);
101
102/**
75 * devm_iounmap - Managed iounmap() 103 * devm_iounmap - Managed iounmap()
76 * @dev: Generic device to unmap for 104 * @dev: Generic device to unmap for
77 * @addr: Address to unmap 105 * @addr: Address to unmap
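Callers use the new helper exactly like devm_ioremap()/devm_ioremap_nocache(), only the mapping is write-combined. A hypothetical probe (driver and resource layout invented) might look like this:

/* Hypothetical consumer of devm_ioremap_wc(): map a framebuffer-style region
 * write-combined; the mapping is released automatically on driver detach. */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *vram;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	vram = devm_ioremap_wc(&pdev->dev, res->start, resource_size(res));
	if (!vram)
		return -ENOMEM;		/* devm_ioremap_wc() returns NULL on failure */

	/* ... use 'vram'; no explicit iounmap() is needed in remove() ... */
	return 0;
}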
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index e9cc689033fe..74086a583d8d 100644..100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -1,8 +1,8 @@
1#!/usr/bin/env python 1#!/usr/bin/env python
2 2
3"""Find Kconfig identifiers that are referenced but not defined.""" 3"""Find Kconfig symbols that are referenced but not defined."""
4 4
5# (c) 2014 Valentin Rothberg <valentinrothberg@gmail.com> 5# (c) 2014-2015 Valentin Rothberg <Valentin.Rothberg@lip6.fr>
6# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de> 6# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
7# 7#
8# Licensed under the terms of the GNU GPL License version 2 8# Licensed under the terms of the GNU GPL License version 2
@@ -10,7 +10,9 @@
10 10
11import os 11import os
12import re 12import re
13import sys
13from subprocess import Popen, PIPE, STDOUT 14from subprocess import Popen, PIPE, STDOUT
15from optparse import OptionParser
14 16
15 17
16# regex expressions 18# regex expressions
@@ -32,22 +34,149 @@ REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
32REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$") 34REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
33 35
34 36
37def parse_options():
38 """The user interface of this module."""
39 usage = "%prog [options]\n\n" \
40 "Run this tool to detect Kconfig symbols that are referenced but " \
41 "not defined in\nKconfig. The output of this tool has the " \
42 "format \'Undefined symbol\\tFile list\'\n\n" \
43 "If no option is specified, %prog will default to check your\n" \
44 "current tree. Please note that specifying commits will " \
45 "\'git reset --hard\'\nyour current tree! You may save " \
46 "uncommitted changes to avoid losing data."
47
48 parser = OptionParser(usage=usage)
49
50 parser.add_option('-c', '--commit', dest='commit', action='store',
51 default="",
52 help="Check if the specified commit (hash) introduces "
53 "undefined Kconfig symbols.")
54
55 parser.add_option('-d', '--diff', dest='diff', action='store',
56 default="",
57 help="Diff undefined symbols between two commits. The "
58 "input format bases on Git log's "
59 "\'commmit1..commit2\'.")
60
61 parser.add_option('', '--force', dest='force', action='store_true',
62 default=False,
63 help="Reset current Git tree even when it's dirty.")
64
65 (opts, _) = parser.parse_args()
66
67 if opts.commit and opts.diff:
 68 sys.exit("Please specify only one option at a time.")
69
70 if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff):
71 sys.exit("Please specify valid input in the following format: "
72 "\'commmit1..commit2\'")
73
74 if opts.commit or opts.diff:
75 if not opts.force and tree_is_dirty():
76 sys.exit("The current Git tree is dirty (see 'git status'). "
77 "Running this script may\ndelete important data since it "
78 "calls 'git reset --hard' for some performance\nreasons. "
79 " Please run this script in a clean Git tree or pass "
80 "'--force' if you\nwant to ignore this warning and "
81 "continue.")
82
83 return opts
84
85
35def main(): 86def main():
36 """Main function of this module.""" 87 """Main function of this module."""
88 opts = parse_options()
89
90 if opts.commit or opts.diff:
91 head = get_head()
92
93 # get commit range
94 commit_a = None
95 commit_b = None
96 if opts.commit:
97 commit_a = opts.commit + "~"
98 commit_b = opts.commit
99 elif opts.diff:
100 split = opts.diff.split("..")
101 commit_a = split[0]
102 commit_b = split[1]
103 undefined_a = {}
104 undefined_b = {}
105
106 # get undefined items before the commit
107 execute("git reset --hard %s" % commit_a)
108 undefined_a = check_symbols()
109
110 # get undefined items for the commit
111 execute("git reset --hard %s" % commit_b)
112 undefined_b = check_symbols()
113
114 # report cases that are present for the commit but not before
115 for feature in sorted(undefined_b):
116 # feature has not been undefined before
117 if not feature in undefined_a:
118 files = sorted(undefined_b.get(feature))
119 print "%s\t%s" % (feature, ", ".join(files))
120 # check if there are new files that reference the undefined feature
121 else:
122 files = sorted(undefined_b.get(feature) -
123 undefined_a.get(feature))
124 if files:
125 print "%s\t%s" % (feature, ", ".join(files))
126
127 # reset to head
128 execute("git reset --hard %s" % head)
129
130 # default to check the entire tree
131 else:
132 undefined = check_symbols()
133 for feature in sorted(undefined):
134 files = sorted(undefined.get(feature))
135 print "%s\t%s" % (feature, ", ".join(files))
136
137
138def execute(cmd):
139 """Execute %cmd and return stdout. Exit in case of error."""
140 pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
141 (stdout, _) = pop.communicate() # wait until finished
142 if pop.returncode != 0:
143 sys.exit(stdout)
144 return stdout
145
146
147def tree_is_dirty():
148 """Return true if the current working tree is dirty (i.e., if any file has
149 been added, deleted, modified, renamed or copied but not committed)."""
150 stdout = execute("git status --porcelain")
151 for line in stdout:
152 if re.findall(r"[URMADC]{1}", line[:2]):
153 return True
154 return False
155
156
157def get_head():
158 """Return commit hash of current HEAD."""
159 stdout = execute("git rev-parse HEAD")
160 return stdout.strip('\n')
161
162
163def check_symbols():
164 """Find undefined Kconfig symbols and return a dict with the symbol as key
165 and a list of referencing files as value."""
37 source_files = [] 166 source_files = []
38 kconfig_files = [] 167 kconfig_files = []
39 defined_features = set() 168 defined_features = set()
40 referenced_features = dict() # {feature: [files]} 169 referenced_features = dict() # {feature: [files]}
41 170
42 # use 'git ls-files' to get the worklist 171 # use 'git ls-files' to get the worklist
43 pop = Popen("git ls-files", stdout=PIPE, stderr=STDOUT, shell=True) 172 stdout = execute("git ls-files")
44 (stdout, _) = pop.communicate() # wait until finished
45 if len(stdout) > 0 and stdout[-1] == "\n": 173 if len(stdout) > 0 and stdout[-1] == "\n":
46 stdout = stdout[:-1] 174 stdout = stdout[:-1]
47 175
48 for gitfile in stdout.rsplit("\n"): 176 for gitfile in stdout.rsplit("\n"):
49 if ".git" in gitfile or "ChangeLog" in gitfile or \ 177 if ".git" in gitfile or "ChangeLog" in gitfile or \
50 ".log" in gitfile or os.path.isdir(gitfile): 178 ".log" in gitfile or os.path.isdir(gitfile) or \
179 gitfile.startswith("tools/"):
51 continue 180 continue
52 if REGEX_FILE_KCONFIG.match(gitfile): 181 if REGEX_FILE_KCONFIG.match(gitfile):
53 kconfig_files.append(gitfile) 182 kconfig_files.append(gitfile)
@@ -61,7 +190,7 @@ def main():
61 for kfile in kconfig_files: 190 for kfile in kconfig_files:
62 parse_kconfig_file(kfile, defined_features, referenced_features) 191 parse_kconfig_file(kfile, defined_features, referenced_features)
63 192
64 print "Undefined symbol used\tFile list" 193 undefined = {} # {feature: [files]}
65 for feature in sorted(referenced_features): 194 for feature in sorted(referenced_features):
66 # filter some false positives 195 # filter some false positives
67 if feature == "FOO" or feature == "BAR" or \ 196 if feature == "FOO" or feature == "BAR" or \
@@ -72,8 +201,8 @@ def main():
72 # avoid false positives for kernel modules 201 # avoid false positives for kernel modules
73 if feature[:-len("_MODULE")] in defined_features: 202 if feature[:-len("_MODULE")] in defined_features:
74 continue 203 continue
75 files = referenced_features.get(feature) 204 undefined[feature] = referenced_features.get(feature)
76 print "%s\t%s" % (feature, ", ".join(files)) 205 return undefined
77 206
78 207
79def parse_source_file(sfile, referenced_features): 208def parse_source_file(sfile, referenced_features):
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 57da0ceda03f..eff4b4d512b7 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -840,8 +840,8 @@ int arizona_hp_ev(struct snd_soc_dapm_widget *w,
840 priv->arizona->hp_ena &= ~mask; 840 priv->arizona->hp_ena &= ~mask;
841 priv->arizona->hp_ena |= val; 841 priv->arizona->hp_ena |= val;
842 842
843 /* Force off if HPDET magic is active */ 843 /* Force off if HPDET clamp is active */
844 if (priv->arizona->hpdet_magic) 844 if (priv->arizona->hpdet_clamp)
845 val = 0; 845 val = 0;
846 846
847 regmap_update_bits_async(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, 847 regmap_update_bits_async(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1,
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 99ffe61051a7..a8ab79556926 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -3,7 +3,7 @@
3CC = $(CROSS_COMPILE)gcc 3CC = $(CROSS_COMPILE)gcc
4PTHREAD_LIBS = -lpthread 4PTHREAD_LIBS = -lpthread
5WARNINGS = -Wall -Wextra 5WARNINGS = -Wall -Wextra
6CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) 6CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
7 7
8all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon 8all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
9%: %.c 9%: %.c
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 5e63f70bd956..506dd0148828 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -81,6 +81,7 @@ static int vss_operate(int operation)
81 char match[] = "/dev/"; 81 char match[] = "/dev/";
82 FILE *mounts; 82 FILE *mounts;
83 struct mntent *ent; 83 struct mntent *ent;
84 char errdir[1024] = {0};
84 unsigned int cmd; 85 unsigned int cmd;
85 int error = 0, root_seen = 0, save_errno = 0; 86 int error = 0, root_seen = 0, save_errno = 0;
86 87
@@ -115,6 +116,8 @@ static int vss_operate(int operation)
115 goto err; 116 goto err;
116 } 117 }
117 118
119 endmntent(mounts);
120
118 if (root_seen) { 121 if (root_seen) {
119 error |= vss_do_freeze("/", cmd); 122 error |= vss_do_freeze("/", cmd);
120 if (error && operation == VSS_OP_FREEZE) 123 if (error && operation == VSS_OP_FREEZE)
@@ -124,16 +127,19 @@ static int vss_operate(int operation)
124 goto out; 127 goto out;
125err: 128err:
126 save_errno = errno; 129 save_errno = errno;
130 if (ent) {
131 strncpy(errdir, ent->mnt_dir, sizeof(errdir)-1);
132 endmntent(mounts);
133 }
127 vss_operate(VSS_OP_THAW); 134 vss_operate(VSS_OP_THAW);
128 /* Call syslog after we thaw all filesystems */ 135 /* Call syslog after we thaw all filesystems */
129 if (ent) 136 if (ent)
130 syslog(LOG_ERR, "FREEZE of %s failed; error:%d %s", 137 syslog(LOG_ERR, "FREEZE of %s failed; error:%d %s",
131 ent->mnt_dir, save_errno, strerror(save_errno)); 138 errdir, save_errno, strerror(save_errno));
132 else 139 else
133 syslog(LOG_ERR, "FREEZE of / failed; error:%d %s", save_errno, 140 syslog(LOG_ERR, "FREEZE of / failed; error:%d %s", save_errno,
134 strerror(save_errno)); 141 strerror(save_errno));
135out: 142out:
136 endmntent(mounts);
137 return error; 143 return error;
138} 144}
139 145