author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 20:32:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 20:32:24 -0400
commit     2521129a6d2fd8a81f99cf95055eddea3df914ff (patch)
tree       f8b7879979f656669ce31cbc247b97ae702291fb
parent     98a96f202203fecad65b44449077c695686ad4db (diff)
parent     16eb2bfc65ef86d3ac6420d50ddc2c48f0023cee (diff)
Merge tag 'char-misc-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char / misc driver patches from Greg KH:
 "Here's the big driver misc / char pull request for 3.17-rc1.

  Lots of things in here, the thunderbolt support for Apple laptops,
  some other new drivers, testing fixes, and other good things. All
  have been in linux-next for a long time"

* tag 'char-misc-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (119 commits)
  misc: bh1780: Introduce the use of devm_kzalloc
  Lattice ECP3 FPGA: Correct endianness
  drivers/misc/ti-st: Load firmware from ti-connectivity directory.
  dt-bindings: extcon: Add support for SM5502 MUIC device
  extcon: sm5502: Change internal hardware switch according to cable type
  extcon: sm5502: Detect cable state after completing platform booting
  extcon: sm5502: Add support new SM5502 extcon device driver
  extcon: arizona: Get MICVDD against extcon device
  extcon: Remove unnecessary OOM messages
  misc: vexpress: Fix sparse non static symbol warnings
  mei: drop unused hw dependent fw status functions
  misc: bh1770glc: Use managed functions
  pcmcia: remove DEFINE_PCI_DEVICE_TABLE usage
  misc: remove DEFINE_PCI_DEVICE_TABLE usage
  ipack: Replace DEFINE_PCI_DEVICE_TABLE macro use
  drivers/char/dsp56k.c: drop check for negativity of unsigned parameter
  mei: fix return value on disconnect timeout
  mei: don't schedule suspend in pm idle
  mei: start disconnect request timer consistently
  mei: reset client connection state on timeout
  ...
-rw-r--r--  Documentation/ABI/testing/sysfs-class-mei | 16
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-genwqe | 9
-rw-r--r--  Documentation/devicetree/bindings/extcon/extcon-sm5502.txt | 23
-rw-r--r--  Documentation/mic/mic_overview.txt | 67
-rwxr-xr-x  Documentation/mic/mpssd/mpss | 14
-rw-r--r--  Documentation/w1/slaves/w1_ds2406 | 25
-rw-r--r--  MAINTAINERS | 5
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/char/bsr.c | 2
-rw-r--r--  drivers/char/dsp56k.c | 2
-rw-r--r--  drivers/char/i8k.c | 123
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 1
-rw-r--r--  drivers/dma/Kconfig | 18
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/mic_x100_dma.c | 774
-rw-r--r--  drivers/dma/mic_x100_dma.h | 286
-rw-r--r--  drivers/extcon/Kconfig | 38
-rw-r--r--  drivers/extcon/Makefile | 7
-rw-r--r--  drivers/extcon/extcon-adc-jack.c | 1
-rw-r--r--  drivers/extcon/extcon-arizona.c | 74
-rw-r--r--  drivers/extcon/extcon-class.c | 2
-rw-r--r--  drivers/extcon/extcon-gpio.c | 1
-rw-r--r--  drivers/extcon/extcon-max14577.c | 5
-rw-r--r--  drivers/extcon/extcon-max77693.c | 41
-rw-r--r--  drivers/extcon/extcon-max8997.c | 5
-rw-r--r--  drivers/extcon/extcon-palmas.c | 3
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 724
-rw-r--r--  drivers/hv/channel.c | 6
-rw-r--r--  drivers/ipack/carriers/tpci200.c | 2
-rw-r--r--  drivers/ipack/devices/ipoctal.c | 20
-rw-r--r--  drivers/mfd/Kconfig | 1
-rw-r--r--  drivers/mfd/Makefile | 2
-rw-r--r--  drivers/mfd/max77693-irq.c | 336
-rw-r--r--  drivers/mfd/max77693.c | 210
-rw-r--r--  drivers/misc/bh1770glc.c | 35
-rw-r--r--  drivers/misc/bh1780gli.c | 33
-rw-r--r--  drivers/misc/carma/carma-fpga.c | 5
-rw-r--r--  drivers/misc/dummy-irq.c | 1
-rw-r--r--  drivers/misc/genwqe/Kconfig | 6
-rw-r--r--  drivers/misc/genwqe/card_base.c | 217
-rw-r--r--  drivers/misc/genwqe/card_base.h | 2
-rw-r--r--  drivers/misc/genwqe/card_ddcb.c | 28
-rw-r--r--  drivers/misc/genwqe/card_debugfs.c | 7
-rw-r--r--  drivers/misc/genwqe/card_dev.c | 5
-rw-r--r--  drivers/misc/genwqe/card_sysfs.c | 25
-rw-r--r--  drivers/misc/genwqe/card_utils.c | 20
-rw-r--r--  drivers/misc/genwqe/genwqe_driver.h | 2
-rw-r--r--  drivers/misc/lattice-ecp3-config.c | 16
-rw-r--r--  drivers/misc/lkdtm.c | 1
-rw-r--r--  drivers/misc/mei/client.c | 17
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 1
-rw-r--r--  drivers/misc/mei/hw-me.c | 54
-rw-r--r--  drivers/misc/mei/hw-txe.c | 32
-rw-r--r--  drivers/misc/mei/main.c | 148
-rw-r--r--  drivers/misc/mei/mei_dev.h | 12
-rw-r--r--  drivers/misc/mei/pci-me.c | 6
-rw-r--r--  drivers/misc/mei/pci-txe.c | 4
-rw-r--r--  drivers/misc/mic/Kconfig | 21
-rw-r--r--  drivers/misc/mic/Makefile | 1
-rw-r--r--  drivers/misc/mic/bus/Makefile | 5
-rw-r--r--  drivers/misc/mic/bus/mic_bus.c | 218
-rw-r--r--  drivers/misc/mic/card/mic_device.c | 23
-rw-r--r--  drivers/misc/mic/card/mic_device.h | 9
-rw-r--r--  drivers/misc/mic/card/mic_virtio.c | 7
-rw-r--r--  drivers/misc/mic/card/mic_x100.c | 62
-rw-r--r--  drivers/misc/mic/host/mic_boot.c | 83
-rw-r--r--  drivers/misc/mic/host/mic_device.h | 24
-rw-r--r--  drivers/misc/mic/host/mic_intr.c | 121
-rw-r--r--  drivers/misc/mic/host/mic_intr.h | 27
-rw-r--r--  drivers/misc/mic/host/mic_main.c | 7
-rw-r--r--  drivers/misc/mic/host/mic_virtio.c | 187
-rw-r--r--  drivers/misc/mic/host/mic_virtio.h | 21
-rw-r--r--  drivers/misc/mic/host/mic_x100.c | 8
-rw-r--r--  drivers/misc/ti-st/st_kim.c | 8
-rw-r--r--  drivers/misc/vexpress-syscfg.c | 4
-rw-r--r--  drivers/misc/vmw_vmci/vmci_guest.c | 2
-rw-r--r--  drivers/pci/pci-driver.c | 18
-rw-r--r--  drivers/pci/quirks.c | 104
-rw-r--r--  drivers/pcmcia/Kconfig | 2
-rw-r--r--  drivers/pcmcia/Makefile | 1
-rw-r--r--  drivers/pcmcia/bcm63xx_pcmcia.c | 2
-rw-r--r--  drivers/pcmcia/i82092.c | 2
-rw-r--r--  drivers/pcmcia/pd6729.c | 2
-rw-r--r--  drivers/pcmcia/sa1111_jornada720.c | 10
-rw-r--r--  drivers/pcmcia/vrc4173_cardu.c | 2
-rw-r--r--  drivers/pcmcia/yenta_socket.c | 2
-rw-r--r--  drivers/regulator/max77693.c | 12
-rw-r--r--  drivers/spmi/spmi.c | 1
-rw-r--r--  drivers/thunderbolt/Kconfig | 13
-rw-r--r--  drivers/thunderbolt/Makefile | 3
-rw-r--r--  drivers/thunderbolt/cap.c | 116
-rw-r--r--  drivers/thunderbolt/ctl.c | 731
-rw-r--r--  drivers/thunderbolt/ctl.h | 75
-rw-r--r--  drivers/thunderbolt/eeprom.c | 449
-rw-r--r--  drivers/thunderbolt/nhi.c | 675
-rw-r--r--  drivers/thunderbolt/nhi.h | 114
-rw-r--r--  drivers/thunderbolt/nhi_regs.h | 101
-rw-r--r--  drivers/thunderbolt/path.c | 215
-rw-r--r--  drivers/thunderbolt/switch.c | 507
-rw-r--r--  drivers/thunderbolt/tb.c | 436
-rw-r--r--  drivers/thunderbolt/tb.h | 271
-rw-r--r--  drivers/thunderbolt/tb_regs.h | 213
-rw-r--r--  drivers/thunderbolt/tunnel_pci.c | 232
-rw-r--r--  drivers/thunderbolt/tunnel_pci.h | 30
-rw-r--r--  drivers/uio/uio_pruss.c | 37
-rw-r--r--  drivers/vme/bridges/vme_ca91cx42.h | 1
-rw-r--r--  drivers/w1/masters/ds1wm.c | 2
-rw-r--r--  drivers/w1/masters/ds2482.c | 2
-rw-r--r--  drivers/w1/masters/ds2490.c | 50
-rw-r--r--  drivers/w1/masters/mxc_w1.c | 64
-rw-r--r--  drivers/w1/slaves/Kconfig | 8
-rw-r--r--  drivers/w1/slaves/Makefile | 1
-rw-r--r--  drivers/w1/slaves/w1_ds2406.c | 168
-rw-r--r--  drivers/w1/slaves/w1_ds2760.c | 3
-rw-r--r--  drivers/w1/w1.c | 10
-rw-r--r--  drivers/w1/w1_family.c | 2
-rw-r--r--  drivers/w1/w1_family.h | 1
-rw-r--r--  drivers/w1/w1_int.c | 19
-rw-r--r--  drivers/w1/w1_log.h | 4
-rw-r--r--  drivers/w1/w1_netlink.c | 3
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 3
-rw-r--r--  include/linux/extcon/sm5502.h | 287
-rw-r--r--  include/linux/mfd/arizona/pdata.h | 3
-rw-r--r--  include/linux/mfd/max77693-private.h | 54
-rw-r--r--  include/linux/mic_bus.h | 110
-rw-r--r--  include/linux/pci.h | 12
-rw-r--r--  include/uapi/linux/genwqe/genwqe_card.h | 1
-rw-r--r--  include/uapi/linux/i8k.h | 3
-rw-r--r--  tools/hv/hv_fcopy_daemon.c | 3
-rw-r--r--  tools/testing/selftests/Makefile | 18
-rw-r--r--  tools/testing/selftests/README.txt | 27
-rw-r--r--  tools/testing/selftests/cpu-hotplug/Makefile | 3
-rw-r--r--  tools/testing/selftests/cpu-hotplug/on-off-test.sh | 52
-rw-r--r--  tools/testing/selftests/kcmp/kcmp_test.c | 2
-rw-r--r--  tools/testing/selftests/memory-hotplug/Makefile | 3
-rw-r--r--  tools/testing/selftests/memory-hotplug/on-off-test.sh | 8
-rw-r--r--  tools/testing/selftests/mqueue/Makefile | 4
-rw-r--r--  tools/testing/selftests/mqueue/mq_open_tests.c | 20
-rw-r--r--  tools/testing/selftests/mqueue/mq_perf_tests.c | 40
140 files changed, 8646 insertions, 1083 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
new file mode 100644
index 000000000000..0ec8b8178c41
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -0,0 +1,16 @@
+What:		/sys/class/mei/
+Date:		May 2014
+KernelVersion:	3.17
+Contact:	Tomas Winkler <tomas.winkler@intel.com>
+Description:
+		The mei/ class sub-directory belongs to mei device class
+
+
+What:		/sys/class/mei/meiN/
+Date:		May 2014
+KernelVersion:	3.17
+Contact:	Tomas Winkler <tomas.winkler@intel.com>
+Description:
+		The /sys/class/mei/meiN directory is created for
+		each probed mei device
+
diff --git a/Documentation/ABI/testing/sysfs-driver-genwqe b/Documentation/ABI/testing/sysfs-driver-genwqe
index 1870737a1f5e..64ac6d567c4b 100644
--- a/Documentation/ABI/testing/sysfs-driver-genwqe
+++ b/Documentation/ABI/testing/sysfs-driver-genwqe
@@ -25,6 +25,15 @@ Date: Oct 2013
 Contact:	haver@linux.vnet.ibm.com
 Description:	Interface to set the next bitstream to be used.
 
+What:		/sys/class/genwqe/genwqe<n>_card/reload_bitstream
+Date:		May 2014
+Contact:	klebers@linux.vnet.ibm.com
+Description:	Interface to trigger a PCIe card reset to reload the bitstream.
+		  sudo sh -c 'echo 1 > \
+		    /sys/class/genwqe/genwqe0_card/reload_bitstream'
+		If successfully, the card will come back with the bitstream set
+		on 'next_bitstream'.
+
 What:		/sys/class/genwqe/genwqe<n>_card/tempsens
 Date:		Oct 2013
 Contact:	haver@linux.vnet.ibm.com
diff --git a/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt b/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
new file mode 100644
index 000000000000..4ecda224955f
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-sm5502.txt
@@ -0,0 +1,23 @@
+
+* SM5502 MUIC (Micro-USB Interface Controller) device
+
+The Silicon Mitus SM5502 is a MUIC (Micro-USB Interface Controller) device
+which can detect the state of external accessory when external accessory is
+attached or detached and button is pressed or released. It is interfaced to
+the host controller using an I2C interface.
+
+Required properties:
+- compatible: Should be "siliconmitus,sm5502-muic"
+- reg: Specifies the I2C slave address of the MUIC block. It should be 0x25
+- interrupt-parent: Specifies the phandle of the interrupt controller to which
+  the interrupts from sm5502 are delivered to.
+- interrupts: Interrupt specifiers for detection interrupt sources.
+
+Example:
+
+	sm5502@25 {
+		compatible = "siliconmitus,sm5502-muic";
+		interrupt-parent = <&gpx1>;
+		interrupts = <5 0>;
+		reg = <0x25>;
+	};
diff --git a/Documentation/mic/mic_overview.txt b/Documentation/mic/mic_overview.txt
index b41929224804..77c541802ad9 100644
--- a/Documentation/mic/mic_overview.txt
+++ b/Documentation/mic/mic_overview.txt
@@ -17,35 +17,50 @@ for applications. A key benefit of our solution is that it leverages
17the standard virtio framework for network, disk and console devices, 17the standard virtio framework for network, disk and console devices,
18though in our case the virtio framework is used across a PCIe bus. 18though in our case the virtio framework is used across a PCIe bus.
19 19
20MIC PCIe card has a dma controller with 8 channels. These channels are
21shared between the host s/w and the card s/w. 0 to 3 are used by host
22and 4 to 7 by card. As the dma device doesn't show up as PCIe device,
23a virtual bus called mic bus is created and virtual dma devices are
24created on it by the host/card drivers. On host the channels are private
25and used only by the host driver to transfer data for the virtio devices.
26
20Here is a block diagram of the various components described above. The 27Here is a block diagram of the various components described above. The
21virtio backends are situated on the host rather than the card given better 28virtio backends are situated on the host rather than the card given better
22single threaded performance for the host compared to MIC, the ability of 29single threaded performance for the host compared to MIC, the ability of
23the host to initiate DMA's to/from the card using the MIC DMA engine and 30the host to initiate DMA's to/from the card using the MIC DMA engine and
24the fact that the virtio block storage backend can only be on the host. 31the fact that the virtio block storage backend can only be on the host.
25 32
26 | 33 |
27 +----------+ | +----------+ 34 +----------+ | +----------+
28 | Card OS | | | Host OS | 35 | Card OS | | | Host OS |
29 +----------+ | +----------+ 36 +----------+ | +----------+
30 | 37 |
31+-------+ +--------+ +------+ | +---------+ +--------+ +--------+ 38 +-------+ +--------+ +------+ | +---------+ +--------+ +--------+
32| Virtio| |Virtio | |Virtio| | |Virtio | |Virtio | |Virtio | 39 | Virtio| |Virtio | |Virtio| | |Virtio | |Virtio | |Virtio |
33| Net | |Console | |Block | | |Net | |Console | |Block | 40 | Net | |Console | |Block | | |Net | |Console | |Block |
34| Driver| |Driver | |Driver| | |backend | |backend | |backend | 41 | Driver| |Driver | |Driver| | |backend | |backend | |backend |
35+-------+ +--------+ +------+ | +---------+ +--------+ +--------+ 42 +-------+ +--------+ +------+ | +---------+ +--------+ +--------+
36 | | | | | | | 43 | | | | | | |
37 | | | |User | | | 44 | | | |User | | |
38 | | | |------|------------|---------|------- 45 | | | |------|------------|---------|-------
39 +-------------------+ |Kernel +--------------------------+ 46 +-------------------+ |Kernel +--------------------------+
40 | | | Virtio over PCIe IOCTLs | 47 | | | Virtio over PCIe IOCTLs |
41 | | +--------------------------+ 48 | | +--------------------------+
42 +--------------+ | | 49+-----------+ | | | +-----------+
43 |Intel MIC | | +---------------+ 50| MIC DMA | | | | | MIC DMA |
44 |Card Driver | | |Intel MIC | 51| Driver | | | | | Driver |
45 +--------------+ | |Host Driver | 52+-----------+ | | | +-----------+
46 | | +---------------+ 53 | | | | |
47 | | | 54+---------------+ | | | +----------------+
48 +-------------------------------------------------------------+ 55|MIC virtual Bus| | | | |MIC virtual Bus |
49 | | 56+---------------+ | | | +----------------+
50 | PCIe Bus | 57 | | | | |
51 +-------------------------------------------------------------+ 58 | +--------------+ | +---------------+ |
59 | |Intel MIC | | |Intel MIC | |
60 +---|Card Driver | | |Host Driver | |
61 +--------------+ | +---------------+-----+
62 | | |
63 +-------------------------------------------------------------+
64 | |
65 | PCIe Bus |
66 +-------------------------------------------------------------+
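
The paragraph added above explains that the X100 DMA controller's 8 channels are statically split between host software (channels 0-3) and card software (channels 4-7), with virtual DMA devices registered on a new "mic bus" because the engine is not itself a PCIe function. The following sketch of that ownership split is an editor's illustration with invented names; the real selection is the start_ch assignment in drivers/dma/mic_x100_dma.c further down in this diff.

#include <stdio.h>

enum dma_owner { OWNER_HOST, OWNER_CARD };

struct chan_range {
        int first;
        int count;
};

/* Host software owns channels 0-3, card software owns channels 4-7. */
static struct chan_range pick_channels(enum dma_owner owner)
{
        struct chan_range r = { .count = 4 };

        r.first = (owner == OWNER_HOST) ? 0 : 4;
        return r;
}

int main(void)
{
        struct chan_range host = pick_channels(OWNER_HOST);
        struct chan_range card = pick_channels(OWNER_CARD);

        printf("host: %d-%d, card: %d-%d\n",
               host.first, host.first + host.count - 1,
               card.first, card.first + card.count - 1);
        return 0;
}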
diff --git a/Documentation/mic/mpssd/mpss b/Documentation/mic/mpssd/mpss
index 3136c68dad0b..cacbdb0aefb9 100755
--- a/Documentation/mic/mpssd/mpss
+++ b/Documentation/mic/mpssd/mpss
@@ -48,18 +48,18 @@ start()
 	fi
 
 	echo -e $"Starting MPSS Stack"
-	echo -e $"Loading MIC_HOST Module"
+	echo -e $"Loading MIC_X100_DMA & MIC_HOST Modules"
 
-	# Ensure the driver is loaded
-	if [ ! -d "$sysfs" ]; then
-		modprobe mic_host
+	for f in "mic_host" "mic_x100_dma"
+	do
+		modprobe $f
 		RETVAL=$?
 		if [ $RETVAL -ne 0 ]; then
 			failure
 			echo
 			return $RETVAL
 		fi
-	fi
+	done
 
 	# Start the daemon
 	echo -n $"Starting MPSSD "
@@ -170,8 +170,8 @@ unload()
 	stop
 
 	sleep 5
-	echo -n $"Removing MIC_HOST Module: "
-	modprobe -r mic_host
+	echo -n $"Removing MIC_HOST & MIC_X100_DMA Modules: "
+	modprobe -r mic_host mic_x100_dma
 	RETVAL=$?
 	[ $RETVAL -ne 0 ] && failure || success
 	echo
diff --git a/Documentation/w1/slaves/w1_ds2406 b/Documentation/w1/slaves/w1_ds2406
new file mode 100644
index 000000000000..8137fe6f6c3d
--- /dev/null
+++ b/Documentation/w1/slaves/w1_ds2406
@@ -0,0 +1,25 @@
+w1_ds2406 kernel driver
+=======================
+
+Supported chips:
+  * Maxim DS2406 (and other family 0x12) addressable switches
+
+Author: Scott Alfter <scott@alfter.us>
+
+Description
+-----------
+
+The w1_ds2406 driver allows connected devices to be switched on and off.
+These chips also provide 128 bytes of OTP EPROM, but reading/writing it is
+not supported. In TSOC-6 form, the DS2406 provides two switch outputs and
+can be provided with power on a dedicated input. In TO-92 form, it provides
+one output and uses parasitic power only.
+
+The driver provides two sysfs files. state is readable; it gives the
+current state of each switch, with PIO A in bit 0 and PIO B in bit 1. The
+driver ORs this state with 0x30, so shell scripts get an ASCII 0/1/2/3 to
+work with. output is writable; bits 0 and 1 control PIO A and B,
+respectively. Bits 2-7 are ignored, so it's safe to write ASCII data.
+
+CRCs are checked on read and write. Failed checks cause an I/O error to be
+returned. On a failed write, the switch status is not changed.
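
The description above fully specifies the interface: a read of "state" returns the PIO bits ORed with 0x30, so userspace sees ASCII '0'-'3', and bits 0/1 written to "output" drive PIO A/B. The following userspace sketch is an editor's illustration only (not part of the patch); the device directory name is a placeholder, since the real one depends on the slave's unique ID under /sys/bus/w1/devices/.

#include <stdio.h>

int main(void)
{
        /* Placeholder path: family 0x12 slaves appear as 12-<unique id>. */
        const char *dir = "/sys/bus/w1/devices/12-000000000000";
        char path[128];
        FILE *f;
        int c;

        snprintf(path, sizeof(path), "%s/state", dir);
        f = fopen(path, "r");
        if (!f)
                return 1;
        c = fgetc(f);   /* ASCII '0'..'3' because the driver ORs the bits with 0x30 */
        fclose(f);
        printf("PIO A bit=%d, PIO B bit=%d\n", c & 1, (c >> 1) & 1);

        snprintf(path, sizeof(path), "%s/output", dir);
        f = fopen(path, "w");
        if (!f)
                return 1;
        fputc('1', f);  /* bit 0 -> PIO A, bit 1 -> PIO B; bits 2-7 are ignored */
        fclose(f);
        return 0;
}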
diff --git a/MAINTAINERS b/MAINTAINERS
index 1acc624ecfd7..211389b6182f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7844,6 +7844,11 @@ S: Maintained
 F:	include/linux/mmc/dw_mmc.h
 F:	drivers/mmc/host/dw_mmc*
 
+THUNDERBOLT DRIVER
+M:	Andreas Noever <andreas.noever@gmail.com>
+S:	Maintained
+F:	drivers/thunderbolt/
+
 TIMEKEEPING, CLOCKSOURCE CORE, NTP
 M:	John Stultz <john.stultz@linaro.org>
 M:	Thomas Gleixner <tglx@linutronix.de>
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4e6e66c3c8d6..622fa266b29e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -178,4 +178,6 @@ source "drivers/mcb/Kconfig"
 
 source "drivers/ras/Kconfig"
 
+source "drivers/thunderbolt/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 65c32b1cea3d..54bfae1f09a4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -159,3 +159,4 @@ obj-$(CONFIG_FMC) += fmc/
 obj-$(CONFIG_POWERCAP)	+= powercap/
 obj-$(CONFIG_MCB)	+= mcb/
 obj-$(CONFIG_RAS)	+= ras/
+obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 8fedbc250414..a6cef548e01e 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -259,7 +259,7 @@ static int bsr_add_node(struct device_node *bn)
 	}
 
 	cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
-				        cur, cur->bsr_name);
+				        cur, "%s", cur->bsr_name);
 	if (IS_ERR(cur->bsr_device)) {
 		printk(KERN_ERR "device_create failed for %s\n",
 		       cur->bsr_name);
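
The bsr.c change passes cur->bsr_name as data behind an explicit "%s" instead of letting device_create() interpret it as a format string. A minimal userspace illustration of the class of bug this avoids (plain printf stands in for the kernel's printf-style helpers; this is an editor's sketch, not driver code):

#include <stdio.h>

static void show(const char *name)
{
        printf(name);           /* BAD: any '%' in name is parsed as a conversion */
        printf("\n");
        printf("%s\n", name);   /* GOOD: name is plain data, as in the fixed call */
}

int main(void)
{
        show("50%-duty-cycle"); /* "%-d" in the first call would consume a missing argument */
        return 0;
}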
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 01a5ca7425d7..8bf70e8c3f79 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -383,7 +383,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
 			return put_user(status, &hf->status);
 		}
 	case DSP56K_HOST_CMD:
-		if (arg > 31 || arg < 0)
+		if (arg > 31)
 			return -EINVAL;
 		mutex_lock(&dsp56k_mutex);
 		dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) |
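
The dsp56k.c hunk drops the "arg < 0" test because arg is an unsigned ioctl parameter, so the comparison can never be true; "arg > 31" already rejects wrapped negative values. A two-line demonstration in ordinary userspace C (illustration only):

#include <stdio.h>

int main(void)
{
        unsigned long arg = (unsigned long)-1;  /* caller passed -1 */

        printf("arg < 0  -> %d\n", arg < 0);    /* always 0: unsigned can't be negative */
        printf("arg > 31 -> %d\n", arg > 31);   /* 1: the remaining check still rejects it */
        return 0;
}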
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 93dcad0c1cbe..65525c7e903c 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -65,6 +65,8 @@ static char bios_version[4];
65static struct device *i8k_hwmon_dev; 65static struct device *i8k_hwmon_dev;
66static u32 i8k_hwmon_flags; 66static u32 i8k_hwmon_flags;
67static int i8k_fan_mult; 67static int i8k_fan_mult;
68static int i8k_pwm_mult;
69static int i8k_fan_max = I8K_FAN_HIGH;
68 70
69#define I8K_HWMON_HAVE_TEMP1 (1 << 0) 71#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
70#define I8K_HWMON_HAVE_TEMP2 (1 << 1) 72#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -97,6 +99,10 @@ static int fan_mult = I8K_FAN_MULT;
97module_param(fan_mult, int, 0); 99module_param(fan_mult, int, 0);
98MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with"); 100MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
99 101
102static int fan_max = I8K_FAN_HIGH;
103module_param(fan_max, int, 0);
104MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed");
105
100static int i8k_open_fs(struct inode *inode, struct file *file); 106static int i8k_open_fs(struct inode *inode, struct file *file);
101static long i8k_ioctl(struct file *, unsigned int, unsigned long); 107static long i8k_ioctl(struct file *, unsigned int, unsigned long);
102 108
@@ -276,7 +282,7 @@ static int i8k_set_fan(int fan, int speed)
276{ 282{
277 struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, }; 283 struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
278 284
279 speed = (speed < 0) ? 0 : ((speed > I8K_FAN_MAX) ? I8K_FAN_MAX : speed); 285 speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed);
280 regs.ebx = (fan & 0xff) | (speed << 8); 286 regs.ebx = (fan & 0xff) | (speed << 8);
281 287
282 return i8k_smm(&regs) ? : i8k_get_fan_status(fan); 288 return i8k_smm(&regs) ? : i8k_get_fan_status(fan);
@@ -521,7 +527,7 @@ static ssize_t i8k_hwmon_show_pwm(struct device *dev,
521 status = i8k_get_fan_status(index); 527 status = i8k_get_fan_status(index);
522 if (status < 0) 528 if (status < 0)
523 return -EIO; 529 return -EIO;
524 return sprintf(buf, "%d\n", clamp_val(status * 128, 0, 255)); 530 return sprintf(buf, "%d\n", clamp_val(status * i8k_pwm_mult, 0, 255));
525} 531}
526 532
527static ssize_t i8k_hwmon_set_pwm(struct device *dev, 533static ssize_t i8k_hwmon_set_pwm(struct device *dev,
@@ -535,7 +541,7 @@ static ssize_t i8k_hwmon_set_pwm(struct device *dev,
535 err = kstrtoul(buf, 10, &val); 541 err = kstrtoul(buf, 10, &val);
536 if (err) 542 if (err)
537 return err; 543 return err;
538 val = clamp_val(DIV_ROUND_CLOSEST(val, 128), 0, 2); 544 val = clamp_val(DIV_ROUND_CLOSEST(val, i8k_pwm_mult), 0, i8k_fan_max);
539 545
540 mutex_lock(&i8k_mutex); 546 mutex_lock(&i8k_mutex);
541 err = i8k_set_fan(index, val); 547 err = i8k_set_fan(index, val);
@@ -544,20 +550,6 @@ static ssize_t i8k_hwmon_set_pwm(struct device *dev,
544 return err < 0 ? -EIO : count; 550 return err < 0 ? -EIO : count;
545} 551}
546 552
547static ssize_t i8k_hwmon_show_label(struct device *dev,
548 struct device_attribute *devattr,
549 char *buf)
550{
551 static const char *labels[3] = {
552 "CPU",
553 "Left Fan",
554 "Right Fan",
555 };
556 int index = to_sensor_dev_attr(devattr)->index;
557
558 return sprintf(buf, "%s\n", labels[index]);
559}
560
561static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 0); 553static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 0);
562static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 1); 554static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 1);
563static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 2); 555static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 2);
@@ -570,41 +562,34 @@ static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
570 I8K_FAN_RIGHT); 562 I8K_FAN_RIGHT);
571static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm, 563static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
572 i8k_hwmon_set_pwm, I8K_FAN_RIGHT); 564 i8k_hwmon_set_pwm, I8K_FAN_RIGHT);
573static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
574static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
575static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
576 565
577static struct attribute *i8k_attrs[] = { 566static struct attribute *i8k_attrs[] = {
578 &sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */ 567 &sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */
579 &sensor_dev_attr_temp1_label.dev_attr.attr, /* 1 */ 568 &sensor_dev_attr_temp2_input.dev_attr.attr, /* 1 */
580 &sensor_dev_attr_temp2_input.dev_attr.attr, /* 2 */ 569 &sensor_dev_attr_temp3_input.dev_attr.attr, /* 2 */
581 &sensor_dev_attr_temp3_input.dev_attr.attr, /* 3 */ 570 &sensor_dev_attr_temp4_input.dev_attr.attr, /* 3 */
582 &sensor_dev_attr_temp4_input.dev_attr.attr, /* 4 */ 571 &sensor_dev_attr_fan1_input.dev_attr.attr, /* 4 */
583 &sensor_dev_attr_fan1_input.dev_attr.attr, /* 5 */ 572 &sensor_dev_attr_pwm1.dev_attr.attr, /* 5 */
584 &sensor_dev_attr_pwm1.dev_attr.attr, /* 6 */ 573 &sensor_dev_attr_fan2_input.dev_attr.attr, /* 6 */
585 &sensor_dev_attr_fan1_label.dev_attr.attr, /* 7 */ 574 &sensor_dev_attr_pwm2.dev_attr.attr, /* 7 */
586 &sensor_dev_attr_fan2_input.dev_attr.attr, /* 8 */
587 &sensor_dev_attr_pwm2.dev_attr.attr, /* 9 */
588 &sensor_dev_attr_fan2_label.dev_attr.attr, /* 10 */
589 NULL 575 NULL
590}; 576};
591 577
592static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, 578static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
593 int index) 579 int index)
594{ 580{
595 if ((index == 0 || index == 1) && 581 if (index == 0 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
596 !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
597 return 0; 582 return 0;
598 if (index == 2 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2)) 583 if (index == 1 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2))
599 return 0; 584 return 0;
600 if (index == 3 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3)) 585 if (index == 2 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3))
601 return 0; 586 return 0;
602 if (index == 4 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4)) 587 if (index == 3 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4))
603 return 0; 588 return 0;
604 if (index >= 5 && index <= 7 && 589 if (index >= 4 && index <= 5 &&
605 !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1)) 590 !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
606 return 0; 591 return 0;
607 if (index >= 8 && index <= 10 && 592 if (index >= 6 && index <= 7 &&
608 !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2)) 593 !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
609 return 0; 594 return 0;
610 595
@@ -659,6 +644,37 @@ static int __init i8k_init_hwmon(void)
659 return 0; 644 return 0;
660} 645}
661 646
647struct i8k_config_data {
648 int fan_mult;
649 int fan_max;
650};
651
652enum i8k_configs {
653 DELL_LATITUDE_D520,
654 DELL_PRECISION_490,
655 DELL_STUDIO,
656 DELL_XPS_M140,
657};
658
659static const struct i8k_config_data i8k_config_data[] = {
660 [DELL_LATITUDE_D520] = {
661 .fan_mult = 1,
662 .fan_max = I8K_FAN_TURBO,
663 },
664 [DELL_PRECISION_490] = {
665 .fan_mult = 1,
666 .fan_max = I8K_FAN_TURBO,
667 },
668 [DELL_STUDIO] = {
669 .fan_mult = 1,
670 .fan_max = I8K_FAN_HIGH,
671 },
672 [DELL_XPS_M140] = {
673 .fan_mult = 1,
674 .fan_max = I8K_FAN_HIGH,
675 },
676};
677
662static struct dmi_system_id i8k_dmi_table[] __initdata = { 678static struct dmi_system_id i8k_dmi_table[] __initdata = {
663 { 679 {
664 .ident = "Dell Inspiron", 680 .ident = "Dell Inspiron",
@@ -682,6 +698,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
682 }, 698 },
683 }, 699 },
684 { 700 {
701 .ident = "Dell Latitude D520",
702 .matches = {
703 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
704 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D520"),
705 },
706 .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
707 },
708 {
685 .ident = "Dell Latitude 2", 709 .ident = "Dell Latitude 2",
686 .matches = { 710 .matches = {
687 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 711 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -703,6 +727,15 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
703 }, 727 },
704 }, 728 },
705 { 729 {
730 .ident = "Dell Precision 490",
731 .matches = {
732 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
733 DMI_MATCH(DMI_PRODUCT_NAME,
734 "Precision WorkStation 490"),
735 },
736 .driver_data = (void *)&i8k_config_data[DELL_PRECISION_490],
737 },
738 {
706 .ident = "Dell Precision", 739 .ident = "Dell Precision",
707 .matches = { 740 .matches = {
708 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 741 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -729,7 +762,7 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
729 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 762 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
730 DMI_MATCH(DMI_PRODUCT_NAME, "Studio"), 763 DMI_MATCH(DMI_PRODUCT_NAME, "Studio"),
731 }, 764 },
732 .driver_data = (void *)1, /* fan multiplier override */ 765 .driver_data = (void *)&i8k_config_data[DELL_STUDIO],
733 }, 766 },
734 { 767 {
735 .ident = "Dell XPS M140", 768 .ident = "Dell XPS M140",
@@ -737,7 +770,7 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
737 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 770 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
738 DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"), 771 DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"),
739 }, 772 },
740 .driver_data = (void *)1, /* fan multiplier override */ 773 .driver_data = (void *)&i8k_config_data[DELL_XPS_M140],
741 }, 774 },
742 { } 775 { }
743}; 776};
@@ -777,9 +810,17 @@ static int __init i8k_probe(void)
777 } 810 }
778 811
779 i8k_fan_mult = fan_mult; 812 i8k_fan_mult = fan_mult;
813 i8k_fan_max = fan_max ? : I8K_FAN_HIGH; /* Must not be 0 */
780 id = dmi_first_match(i8k_dmi_table); 814 id = dmi_first_match(i8k_dmi_table);
781 if (id && fan_mult == I8K_FAN_MULT && id->driver_data) 815 if (id && id->driver_data) {
782 i8k_fan_mult = (unsigned long)id->driver_data; 816 const struct i8k_config_data *conf = id->driver_data;
817
818 if (fan_mult == I8K_FAN_MULT && conf->fan_mult)
819 i8k_fan_mult = conf->fan_mult;
820 if (fan_max == I8K_FAN_HIGH && conf->fan_max)
821 i8k_fan_max = conf->fan_max;
822 }
823 i8k_pwm_mult = DIV_ROUND_UP(255, i8k_fan_max);
783 824
784 return 0; 825 return 0;
785} 826}
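
The i8k.c changes above replace the single fan-multiplier override with a per-model i8k_config_data table carrying both fan_mult and fan_max, and derive the pwm scale as i8k_pwm_mult = DIV_ROUND_UP(255, i8k_fan_max) instead of the hard-coded 128, which silently assumed a maximum fan state of 2 and made the turbo state unreachable through pwm writes. Below is an editor's worked example of the resulting mapping, assuming the usual I8K_FAN_OFF/LOW/HIGH/TURBO = 0/1/2/3 encoding from include/uapi/linux/i8k.h; it is a standalone userspace sketch, not driver code.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static int clamp(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        int fan_max, state;

        for (fan_max = 2; fan_max <= 3; fan_max++) {
                int pwm_mult = DIV_ROUND_UP(255, fan_max);      /* 128 or 85 */

                for (state = 0; state <= fan_max; state++)
                        printf("fan_max=%d state=%d -> pwm=%d\n",
                               fan_max, state,
                               clamp(state * pwm_mult, 0, 255));
        }
        return 0;
}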
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index f6345f932e46..9b1a5ac4881d 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -661,6 +661,7 @@ static int hwicap_setup(struct device *dev, int id,
 	drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
 	if (!drvdata->base_address) {
 		dev_err(dev, "ioremap() failed\n");
+		retval = -ENOMEM;
 		goto failed2;
 	}
 
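
The xilinx_hwicap fix sets retval before the goto, so the ioremap() failure path no longer jumps to the cleanup labels carrying a stale (possibly success) return value. A generic userspace sketch of the same goto-unwind idiom, with invented names (illustration only):

#include <stdlib.h>
#include <errno.h>

struct ctx { void *a, *b; };

static int setup(struct ctx *c)
{
        int retval;

        c->a = malloc(64);
        if (!c->a) {
                retval = -ENOMEM;
                goto err;
        }
        c->b = malloc(64);
        if (!c->b) {
                retval = -ENOMEM;       /* analogous to the assignment the fix adds */
                goto err_free_a;
        }
        return 0;                       /* success: caller owns c->a and c->b */

err_free_a:
        free(c->a);
err:
        return retval;
}

int main(void)
{
        struct ctx c;
        int ret = setup(&c);

        if (!ret) {
                free(c.b);
                free(c.a);
        }
        return ret ? 1 : 0;
}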
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1eca7b9760e6..8f6afbf9ba54 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,6 +33,24 @@ if DMADEVICES
 
 comment "DMA Devices"
 
+config INTEL_MIC_X100_DMA
+	tristate "Intel MIC X100 DMA Driver"
+	depends on 64BIT && X86 && INTEL_MIC_BUS
+	select DMA_ENGINE
+	help
+	  This enables DMA support for the Intel Many Integrated Core
+	  (MIC) family of PCIe form factor coprocessor X100 devices that
+	  run a 64 bit Linux OS. This driver will be used by both MIC
+	  host and card drivers.
+
+	  If you are building host kernel with a MIC device or a card
+	  kernel for a MIC device, then say M (recommended) or Y, else
+	  say N. If unsure say N.
+
+	  More information about the Intel MIC family as well as the Linux
+	  OS and tools for MIC to use with this driver are available from
+	  <http://software.intel.com/en-us/mic-developer>.
+
 config INTEL_MID_DMAC
 	tristate "Intel MID DMA support for Peripheral DMA controllers"
 	depends on PCI && X86
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c779e1eb2db2..bd9e7fa928bd 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
 obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
 obj-y += xilinx/
+obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
new file mode 100644
index 000000000000..6de2e677be04
--- /dev/null
+++ b/drivers/dma/mic_x100_dma.c
@@ -0,0 +1,774 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC X100 DMA Driver.
19 *
20 * Adapted from IOAT dma driver.
21 */
22#include <linux/module.h>
23#include <linux/io.h>
24#include <linux/seq_file.h>
25
26#include "mic_x100_dma.h"
27
28#define MIC_DMA_MAX_XFER_SIZE_CARD (1 * 1024 * 1024 -\
29 MIC_DMA_ALIGN_BYTES)
30#define MIC_DMA_MAX_XFER_SIZE_HOST (1 * 1024 * 1024 >> 1)
31#define MIC_DMA_DESC_TYPE_SHIFT 60
32#define MIC_DMA_MEMCPY_LEN_SHIFT 46
33#define MIC_DMA_STAT_INTR_SHIFT 59
34
35/* high-water mark for pushing dma descriptors */
36static int mic_dma_pending_level = 4;
37
38/* Status descriptor is used to write a 64 bit value to a memory location */
39enum mic_dma_desc_format_type {
40 MIC_DMA_MEMCPY = 1,
41 MIC_DMA_STATUS,
42};
43
44static inline u32 mic_dma_hw_ring_inc(u32 val)
45{
46 return (val + 1) % MIC_DMA_DESC_RX_SIZE;
47}
48
49static inline u32 mic_dma_hw_ring_dec(u32 val)
50{
51 return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
52}
53
54static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
55{
56 ch->head = mic_dma_hw_ring_inc(ch->head);
57}
58
59/* Prepare a memcpy desc */
60static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
61 dma_addr_t src_phys, dma_addr_t dst_phys, u64 size)
62{
63 u64 qw0, qw1;
64
65 qw0 = src_phys;
66 qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
67 qw1 = MIC_DMA_MEMCPY;
68 qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
69 qw1 |= dst_phys;
70 desc->qw0 = qw0;
71 desc->qw1 = qw1;
72}
73
74/* Prepare a status desc. with @data to be written at @dst_phys */
75static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
76 dma_addr_t dst_phys, bool generate_intr)
77{
78 u64 qw0, qw1;
79
80 qw0 = data;
81 qw1 = (u64) MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
82 if (generate_intr)
83 qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
84 desc->qw0 = qw0;
85 desc->qw1 = qw1;
86}
87
88static void mic_dma_cleanup(struct mic_dma_chan *ch)
89{
90 struct dma_async_tx_descriptor *tx;
91 u32 tail;
92 u32 last_tail;
93
94 spin_lock(&ch->cleanup_lock);
95 tail = mic_dma_read_cmp_cnt(ch);
96 /*
97 * This is the barrier pair for smp_wmb() in fn.
98 * mic_dma_tx_submit_unlock. It's required so that we read the
99 * updated cookie value from tx->cookie.
100 */
101 smp_rmb();
102 for (last_tail = ch->last_tail; tail != last_tail;) {
103 tx = &ch->tx_array[last_tail];
104 if (tx->cookie) {
105 dma_cookie_complete(tx);
106 if (tx->callback) {
107 tx->callback(tx->callback_param);
108 tx->callback = NULL;
109 }
110 }
111 last_tail = mic_dma_hw_ring_inc(last_tail);
112 }
113 /* finish all completion callbacks before incrementing tail */
114 smp_mb();
115 ch->last_tail = last_tail;
116 spin_unlock(&ch->cleanup_lock);
117}
118
119static u32 mic_dma_ring_count(u32 head, u32 tail)
120{
121 u32 count;
122
123 if (head >= tail)
124 count = (tail - 0) + (MIC_DMA_DESC_RX_SIZE - head);
125 else
126 count = tail - head;
127 return count - 1;
128}
129
130/* Returns the num. of free descriptors on success, -ENOMEM on failure */
131static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
132{
133 struct device *dev = mic_dma_ch_to_device(ch);
134 u32 count;
135
136 count = mic_dma_ring_count(ch->head, ch->last_tail);
137 if (count < required) {
138 mic_dma_cleanup(ch);
139 count = mic_dma_ring_count(ch->head, ch->last_tail);
140 }
141
142 if (count < required) {
143 dev_dbg(dev, "Not enough desc space");
144 dev_dbg(dev, "%s %d required=%u, avail=%u\n",
145 __func__, __LINE__, required, count);
146 return -ENOMEM;
147 } else {
148 return count;
149 }
150}
151
152/* Program memcpy descriptors into the descriptor ring and update s/w head ptr*/
153static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
154 dma_addr_t dst, size_t len)
155{
156 size_t current_transfer_len;
157 size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
158 /* 3 is added to make sure we have enough space for status desc */
159 int num_desc = len / max_xfer_size + 3;
160 int ret;
161
162 if (len % max_xfer_size)
163 num_desc++;
164
165 ret = mic_dma_avail_desc_ring_space(ch, num_desc);
166 if (ret < 0)
167 return ret;
168 do {
169 current_transfer_len = min(len, max_xfer_size);
170 mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
171 src, dst, current_transfer_len);
172 mic_dma_hw_ring_inc_head(ch);
173 len -= current_transfer_len;
174 dst = dst + current_transfer_len;
175 src = src + current_transfer_len;
176 } while (len > 0);
177 return 0;
178}
179
180/* It's a h/w quirk and h/w needs 2 status descriptors for every status desc */
181static void mic_dma_prog_intr(struct mic_dma_chan *ch)
182{
183 mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
184 ch->status_dest_micpa, false);
185 mic_dma_hw_ring_inc_head(ch);
186 mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
187 ch->status_dest_micpa, true);
188 mic_dma_hw_ring_inc_head(ch);
189}
190
191/* Wrapper function to program memcpy descriptors/status descriptors */
192static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
193 dma_addr_t dst, size_t len)
194{
195 if (-ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len))
196 return -ENOMEM;
197 /* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
198 if (flags & DMA_PREP_FENCE) {
199 mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
200 ch->status_dest_micpa, false);
201 mic_dma_hw_ring_inc_head(ch);
202 }
203
204 if (flags & DMA_PREP_INTERRUPT)
205 mic_dma_prog_intr(ch);
206
207 return 0;
208}
209
210static inline void mic_dma_issue_pending(struct dma_chan *ch)
211{
212 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
213
214 spin_lock(&mic_ch->issue_lock);
215 /*
216 * Write to head triggers h/w to act on the descriptors.
217 * On MIC, writing the same head value twice causes
218 * a h/w error. On second write, h/w assumes we filled
219 * the entire ring & overwrote some of the descriptors.
220 */
221 if (mic_ch->issued == mic_ch->submitted)
222 goto out;
223 mic_ch->issued = mic_ch->submitted;
224 /*
225 * make descriptor updates visible before advancing head,
226 * this is purposefully not smp_wmb() since we are also
227 * publishing the descriptor updates to a dma device
228 */
229 wmb();
230 mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
231out:
232 spin_unlock(&mic_ch->issue_lock);
233}
234
235static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
236{
237 if (mic_dma_ring_count(ch->issued, ch->submitted)
238 > mic_dma_pending_level)
239 mic_dma_issue_pending(&ch->api_ch);
240}
241
242static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
243{
244 struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
245 dma_cookie_t cookie;
246
247 dma_cookie_assign(tx);
248 cookie = tx->cookie;
249 /*
250 * We need an smp write barrier here because another CPU might see
251 * an update to submitted and update h/w head even before we
252 * assigned a cookie to this tx.
253 */
254 smp_wmb();
255 mic_ch->submitted = mic_ch->head;
256 spin_unlock(&mic_ch->prep_lock);
257 mic_dma_update_pending(mic_ch);
258 return cookie;
259}
260
261static inline struct dma_async_tx_descriptor *
262allocate_tx(struct mic_dma_chan *ch)
263{
264 u32 idx = mic_dma_hw_ring_dec(ch->head);
265 struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];
266
267 dma_async_tx_descriptor_init(tx, &ch->api_ch);
268 tx->tx_submit = mic_dma_tx_submit_unlock;
269 return tx;
270}
271
272/*
273 * Prepare a memcpy descriptor to be added to the ring.
274 * Note that the temporary descriptor adds an extra overhead of copying the
275 * descriptor to ring. So, we copy directly to the descriptor ring
276 */
277static struct dma_async_tx_descriptor *
278mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
279 dma_addr_t dma_src, size_t len, unsigned long flags)
280{
281 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
282 struct device *dev = mic_dma_ch_to_device(mic_ch);
283 int result;
284
285 if (!len && !flags)
286 return NULL;
287
288 spin_lock(&mic_ch->prep_lock);
289 result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
290 if (result >= 0)
291 return allocate_tx(mic_ch);
292 dev_err(dev, "Error enqueueing dma, error=%d\n", result);
293 spin_unlock(&mic_ch->prep_lock);
294 return NULL;
295}
296
297static struct dma_async_tx_descriptor *
298mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
299{
300 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
301 int ret;
302
303 spin_lock(&mic_ch->prep_lock);
304 ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
305 if (!ret)
306 return allocate_tx(mic_ch);
307 spin_unlock(&mic_ch->prep_lock);
308 return NULL;
309}
310
311/* Return the status of the transaction */
312static enum dma_status
313mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie,
314 struct dma_tx_state *txstate)
315{
316 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
317
318 if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate))
319 mic_dma_cleanup(mic_ch);
320
321 return dma_cookie_status(ch, cookie, txstate);
322}
323
324static irqreturn_t mic_dma_thread_fn(int irq, void *data)
325{
326 mic_dma_cleanup((struct mic_dma_chan *)data);
327 return IRQ_HANDLED;
328}
329
330static irqreturn_t mic_dma_intr_handler(int irq, void *data)
331{
332 struct mic_dma_chan *ch = ((struct mic_dma_chan *)data);
333
334 mic_dma_ack_interrupt(ch);
335 return IRQ_WAKE_THREAD;
336}
337
338static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch)
339{
340 u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
341 struct device *dev = &to_mbus_device(ch)->dev;
342
343 desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
344 ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL);
345
346 if (!ch->desc_ring)
347 return -ENOMEM;
348
349 ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring,
350 desc_ring_size, DMA_BIDIRECTIONAL);
351 if (dma_mapping_error(dev, ch->desc_ring_micpa))
352 goto map_error;
353
354 ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array));
355 if (!ch->tx_array)
356 goto tx_error;
357 return 0;
358tx_error:
359 dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size,
360 DMA_BIDIRECTIONAL);
361map_error:
362 kfree(ch->desc_ring);
363 return -ENOMEM;
364}
365
366static void mic_dma_free_desc_ring(struct mic_dma_chan *ch)
367{
368 u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
369
370 vfree(ch->tx_array);
371 desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
372 dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa,
373 desc_ring_size, DMA_BIDIRECTIONAL);
374 kfree(ch->desc_ring);
375 ch->desc_ring = NULL;
376}
377
378static void mic_dma_free_status_dest(struct mic_dma_chan *ch)
379{
380 dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa,
381 L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
382 kfree(ch->status_dest);
383}
384
385static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch)
386{
387 struct device *dev = &to_mbus_device(ch)->dev;
388
389 ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL);
390 if (!ch->status_dest)
391 return -ENOMEM;
392 ch->status_dest_micpa = dma_map_single(dev, ch->status_dest,
393 L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
394 if (dma_mapping_error(dev, ch->status_dest_micpa)) {
395 kfree(ch->status_dest);
396 ch->status_dest = NULL;
397 return -ENOMEM;
398 }
399 return 0;
400}
401
402static int mic_dma_check_chan(struct mic_dma_chan *ch)
403{
404 if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) ||
405 mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) {
406 mic_dma_disable_chan(ch);
407 mic_dma_chan_mask_intr(ch);
408 dev_err(mic_dma_ch_to_device(ch),
409 "%s %d error setting up mic dma chan %d\n",
410 __func__, __LINE__, ch->ch_num);
411 return -EBUSY;
412 }
413 return 0;
414}
415
416static int mic_dma_chan_setup(struct mic_dma_chan *ch)
417{
418 if (MIC_DMA_CHAN_MIC == ch->owner)
419 mic_dma_chan_set_owner(ch);
420 mic_dma_disable_chan(ch);
421 mic_dma_chan_mask_intr(ch);
422 mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0);
423 mic_dma_chan_set_desc_ring(ch);
424 ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR);
425 ch->head = ch->last_tail;
426 ch->issued = 0;
427 mic_dma_chan_unmask_intr(ch);
428 mic_dma_enable_chan(ch);
429 return mic_dma_check_chan(ch);
430}
431
432static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
433{
434 mic_dma_disable_chan(ch);
435 mic_dma_chan_mask_intr(ch);
436}
437
438static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
439{
440 dma_async_device_unregister(&mic_dma_dev->dma_dev);
441}
442
443static int mic_dma_setup_irq(struct mic_dma_chan *ch)
444{
445 ch->cookie =
446 to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
447 mic_dma_intr_handler, mic_dma_thread_fn,
448 "mic dma_channel", ch, ch->ch_num);
449 if (IS_ERR(ch->cookie))
450 return IS_ERR(ch->cookie);
451 return 0;
452}
453
454static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
455{
456 to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch);
457}
458
459static int mic_dma_chan_init(struct mic_dma_chan *ch)
460{
461 int ret = mic_dma_alloc_desc_ring(ch);
462
463 if (ret)
464 goto ring_error;
465 ret = mic_dma_alloc_status_dest(ch);
466 if (ret)
467 goto status_error;
468 ret = mic_dma_chan_setup(ch);
469 if (ret)
470 goto chan_error;
471 return ret;
472chan_error:
473 mic_dma_free_status_dest(ch);
474status_error:
475 mic_dma_free_desc_ring(ch);
476ring_error:
477 return ret;
478}
479
480static int mic_dma_drain_chan(struct mic_dma_chan *ch)
481{
482 struct dma_async_tx_descriptor *tx;
483 int err = 0;
484 dma_cookie_t cookie;
485
486 tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE);
487 if (!tx) {
488 err = -ENOMEM;
489 goto error;
490 }
491
492 cookie = tx->tx_submit(tx);
493 if (dma_submit_error(cookie))
494 err = -ENOMEM;
495 else
496 err = dma_sync_wait(&ch->api_ch, cookie);
497 if (err) {
498 dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n",
499 __func__, __LINE__, ch->ch_num);
500 err = -EIO;
501 }
502error:
503 mic_dma_cleanup(ch);
504 return err;
505}
506
507static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
508{
509 mic_dma_chan_destroy(ch);
510 mic_dma_cleanup(ch);
511 mic_dma_free_status_dest(ch);
512 mic_dma_free_desc_ring(ch);
513}
514
515static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
516 enum mic_dma_chan_owner owner)
517{
518 int i, first_chan = mic_dma_dev->start_ch;
519 struct mic_dma_chan *ch;
520 int ret;
521
522 for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
523 unsigned long data;
524 ch = &mic_dma_dev->mic_ch[i];
525 data = (unsigned long)ch;
526 ch->ch_num = i;
527 ch->owner = owner;
528 spin_lock_init(&ch->cleanup_lock);
529 spin_lock_init(&ch->prep_lock);
530 spin_lock_init(&ch->issue_lock);
531 ret = mic_dma_setup_irq(ch);
532 if (ret)
533 goto error;
534 }
535 return 0;
536error:
537 for (i = i - 1; i >= first_chan; i--)
538 mic_dma_free_irq(ch);
539 return ret;
540}
541
542static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev)
543{
544 int i, first_chan = mic_dma_dev->start_ch;
545 struct mic_dma_chan *ch;
546
547 for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
548 ch = &mic_dma_dev->mic_ch[i];
549 mic_dma_free_irq(ch);
550 }
551}
552
553static int mic_dma_alloc_chan_resources(struct dma_chan *ch)
554{
555 int ret = mic_dma_chan_init(to_mic_dma_chan(ch));
556 if (ret)
557 return ret;
558 return MIC_DMA_DESC_RX_SIZE;
559}
560
561static void mic_dma_free_chan_resources(struct dma_chan *ch)
562{
563 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
564 mic_dma_drain_chan(mic_ch);
565 mic_dma_chan_uninit(mic_ch);
566}
567
568/* Set the fn. handlers and register the dma device with dma api */
569static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
570 enum mic_dma_chan_owner owner)
571{
572 int i, first_chan = mic_dma_dev->start_ch;
573
574 dma_cap_zero(mic_dma_dev->dma_dev.cap_mask);
575 /*
576 * This dma engine is not capable of host memory to host memory
577 * transfers
578 */
579 dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask);
580
581 if (MIC_DMA_CHAN_HOST == owner)
582 dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask);
583 mic_dma_dev->dma_dev.device_alloc_chan_resources =
584 mic_dma_alloc_chan_resources;
585 mic_dma_dev->dma_dev.device_free_chan_resources =
586 mic_dma_free_chan_resources;
587 mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
588 mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
589 mic_dma_dev->dma_dev.device_prep_dma_interrupt =
590 mic_dma_prep_interrupt_lock;
591 mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
592 mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT;
593 INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels);
594 for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
595 mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev;
596 dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch);
597 list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
598 &mic_dma_dev->dma_dev.channels);
599 }
600 return dma_async_device_register(&mic_dma_dev->dma_dev);
601}
602
603/*
604 * Initializes dma channels and registers the dma device with the
605 * dma engine api.
606 */
607static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
608 enum mic_dma_chan_owner owner)
609{
610 struct mic_dma_device *mic_dma_dev;
611 int ret;
612 struct device *dev = &mbdev->dev;
613
614 mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
615 if (!mic_dma_dev) {
616 ret = -ENOMEM;
617 goto alloc_error;
618 }
619 mic_dma_dev->mbdev = mbdev;
620 mic_dma_dev->dma_dev.dev = dev;
621 mic_dma_dev->mmio = mbdev->mmio_va;
622 if (MIC_DMA_CHAN_HOST == owner) {
623 mic_dma_dev->start_ch = 0;
624 mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST;
625 } else {
626 mic_dma_dev->start_ch = 4;
627 mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD;
628 }
629 ret = mic_dma_init(mic_dma_dev, owner);
630 if (ret)
631 goto init_error;
632 ret = mic_dma_register_dma_device(mic_dma_dev, owner);
633 if (ret)
634 goto reg_error;
635 return mic_dma_dev;
636reg_error:
637 mic_dma_uninit(mic_dma_dev);
638init_error:
639 kfree(mic_dma_dev);
640 mic_dma_dev = NULL;
641alloc_error:
642 dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
643 return mic_dma_dev;
644}
645
646static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
647{
648 mic_dma_unregister_dma_device(mic_dma_dev);
649 mic_dma_uninit(mic_dma_dev);
650 kfree(mic_dma_dev);
651}
652
653/* DEBUGFS CODE */
654static int mic_dma_reg_seq_show(struct seq_file *s, void *pos)
655{
656 struct mic_dma_device *mic_dma_dev = s->private;
657 int i, chan_num, first_chan = mic_dma_dev->start_ch;
658 struct mic_dma_chan *ch;
659
660 seq_printf(s, "SBOX_DCR: %#x\n",
661 mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan],
662 MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR));
663 seq_puts(s, "DMA Channel Registers\n");
664 seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s",
665 "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
666 seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
667 for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
668 ch = &mic_dma_dev->mic_ch[i];
669 chan_num = ch->ch_num;
670 seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x",
671 chan_num,
672 mic_dma_read_reg(ch, MIC_DMA_REG_DCAR),
673 mic_dma_read_reg(ch, MIC_DMA_REG_DTPR),
674 mic_dma_read_reg(ch, MIC_DMA_REG_DHPR),
675 mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI));
676 seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n",
677 mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO),
678 mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR),
679 mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK),
680 mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT));
681 }
682 return 0;
683}
684
685static int mic_dma_reg_debug_open(struct inode *inode, struct file *file)
686{
687 return single_open(file, mic_dma_reg_seq_show, inode->i_private);
688}
689
690static int mic_dma_reg_debug_release(struct inode *inode, struct file *file)
691{
692 return single_release(inode, file);
693}
694
695static const struct file_operations mic_dma_reg_ops = {
696 .owner = THIS_MODULE,
697 .open = mic_dma_reg_debug_open,
698 .read = seq_read,
699 .llseek = seq_lseek,
700 .release = mic_dma_reg_debug_release
701};
702
703/* Debugfs parent dir */
704static struct dentry *mic_dma_dbg;
705
706static int mic_dma_driver_probe(struct mbus_device *mbdev)
707{
708 struct mic_dma_device *mic_dma_dev;
709 enum mic_dma_chan_owner owner;
710
711 if (MBUS_DEV_DMA_MIC == mbdev->id.device)
712 owner = MIC_DMA_CHAN_MIC;
713 else
714 owner = MIC_DMA_CHAN_HOST;
715
716 mic_dma_dev = mic_dma_dev_reg(mbdev, owner);
717 dev_set_drvdata(&mbdev->dev, mic_dma_dev);
718
719 if (mic_dma_dbg) {
720 mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
721 mic_dma_dbg);
722 if (mic_dma_dev->dbg_dir)
723 debugfs_create_file("mic_dma_reg", 0444,
724 mic_dma_dev->dbg_dir, mic_dma_dev,
725 &mic_dma_reg_ops);
726 }
727 return 0;
728}
729
730static void mic_dma_driver_remove(struct mbus_device *mbdev)
731{
732 struct mic_dma_device *mic_dma_dev;
733
734 mic_dma_dev = dev_get_drvdata(&mbdev->dev);
735 debugfs_remove_recursive(mic_dma_dev->dbg_dir);
736 mic_dma_dev_unreg(mic_dma_dev);
737}
738
739static struct mbus_device_id id_table[] = {
740 {MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID},
741 {MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID},
742 {0},
743};
744
745static struct mbus_driver mic_dma_driver = {
746 .driver.name = KBUILD_MODNAME,
747 .driver.owner = THIS_MODULE,
748 .id_table = id_table,
749 .probe = mic_dma_driver_probe,
750 .remove = mic_dma_driver_remove,
751};
752
753static int __init mic_x100_dma_init(void)
754{
755 int rc = mbus_register_driver(&mic_dma_driver);
756 if (rc)
757 return rc;
758 mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
759 return 0;
760}
761
762static void __exit mic_x100_dma_exit(void)
763{
764 debugfs_remove_recursive(mic_dma_dbg);
765 mbus_unregister_driver(&mic_dma_driver);
766}
767
768module_init(mic_x100_dma_init);
769module_exit(mic_x100_dma_exit);
770
771MODULE_DEVICE_TABLE(mbus, id_table);
772MODULE_AUTHOR("Intel Corporation");
773MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
774MODULE_LICENSE("GPL v2");
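
Two pieces of arithmetic in the new driver are easy to misread: mic_dma_ring_count() returns the usable space between head and tail minus one permanently reserved slot, so head == tail always means an empty ring, and mic_dma_prog_memcpy_desc() requests len / max_xfer_size + 3 descriptors to leave headroom for the fence/interrupt status writes. Below is an editor's standalone check of the ring-count math, using a toy ring of 8 entries in place of MIC_DMA_DESC_RX_SIZE.

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8u

static uint32_t ring_count(uint32_t head, uint32_t tail)
{
        uint32_t count;

        if (head >= tail)
                count = tail + (RING_SIZE - head);
        else
                count = tail - head;
        return count - 1;               /* one slot is always left unused */
}

int main(void)
{
        printf("%u\n", ring_count(0, 0));       /* empty ring: 7 usable slots */
        printf("%u\n", ring_count(5, 2));       /* head wrapped ahead of tail: 4 */
        printf("%u\n", ring_count(3, 4));       /* almost full: 0 -> must clean up first */
        return 0;
}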
diff --git a/drivers/dma/mic_x100_dma.h b/drivers/dma/mic_x100_dma.h
new file mode 100644
index 000000000000..f663b0bdd11d
--- /dev/null
+++ b/drivers/dma/mic_x100_dma.h
@@ -0,0 +1,286 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC X100 DMA Driver.
19 *
20 * Adapted from IOAT dma driver.
21 */
22#ifndef _MIC_X100_DMA_H_
23#define _MIC_X100_DMA_H_
24
25#include <linux/kernel.h>
26#include <linux/delay.h>
27#include <linux/sched.h>
28#include <linux/debugfs.h>
29#include <linux/slab.h>
30#include <linux/interrupt.h>
31#include <linux/mic_bus.h>
32
33#include "dmaengine.h"
34
35/*
36 * MIC has a total of 8 dma channels.
37 * Four channels are assigned for host SW use & the remaining for MIC SW.
38 * MIC DMA transfer size & addresses need to be 64 byte aligned.
39 */
40#define MIC_DMA_MAX_NUM_CHAN 8
41#define MIC_DMA_NUM_CHAN 4
42#define MIC_DMA_ALIGN_SHIFT 6
43#define MIC_DMA_ALIGN_BYTES (1 << MIC_DMA_ALIGN_SHIFT)
44#define MIC_DMA_DESC_RX_SIZE (128 * 1024 - 4)
45
46/*
47 * Register descriptions
48 * All the registers are 32 bit registers.
49 * DCR is a global register and all others are per-channel.
50 * DCR - bits 0, 2, 4, 6, 8, 10, 12, 14 - enable bits for channels 0 to 7
51 * bits 1, 3, 5, 7, 9, 11, 13, 15 - owner bits for channels 0 to 7
52 * DCAR - bit 24 & 25 interrupt masks for mic owned & host owned channels
53 * DHPR - head of the descriptor ring updated by s/w
54 * DTPR - tail of the descriptor ring updated by h/w
55 * DRAR_LO - lower 32 bits of descriptor ring's mic address
56 * DRAR_HI - 3:0 - remaining 4 bits of descriptor ring's mic address
57 * 20:4 descriptor ring size
58 * 25:21 mic smpt entry number
59 * DSTAT - 16:0 h/w completion count; 31:28 dma engine status
60 * DCHERR - this register is non-zero on error
61 * DCHERRMSK - interrupt mask register
62 */
63#define MIC_DMA_HW_CMP_CNT_MASK 0x1ffff
64#define MIC_DMA_CHAN_QUIESCE 0x20000000
65#define MIC_DMA_SBOX_BASE 0x00010000
66#define MIC_DMA_SBOX_DCR 0x0000A280
67#define MIC_DMA_SBOX_CH_BASE 0x0001A000
68#define MIC_DMA_SBOX_CHAN_OFF 0x40
69#define MIC_DMA_SBOX_DCAR_IM0 (0x1 << 24)
70#define MIC_DMA_SBOX_DCAR_IM1 (0x1 << 25)
71#define MIC_DMA_SBOX_DRARHI_SYS_MASK (0x1 << 26)
72#define MIC_DMA_REG_DCAR 0
73#define MIC_DMA_REG_DHPR 4
74#define MIC_DMA_REG_DTPR 8
75#define MIC_DMA_REG_DRAR_LO 20
76#define MIC_DMA_REG_DRAR_HI 24
77#define MIC_DMA_REG_DSTAT 32
78#define MIC_DMA_REG_DCHERR 44
79#define MIC_DMA_REG_DCHERRMSK 48
80
81/* HW dma desc */
82struct mic_dma_desc {
83 u64 qw0;
84 u64 qw1;
85};
86
87enum mic_dma_chan_owner {
88 MIC_DMA_CHAN_MIC = 0,
89 MIC_DMA_CHAN_HOST
90};
91
92/*
93 * mic_dma_chan - channel specific information
94 * @ch_num: channel number
95 * @owner: owner of this channel
96 * @last_tail: cached value of descriptor ring tail
97 * @head: index of next descriptor in desc_ring
98 * @issued: hardware notification point
99 * @submitted: index that will be used to submit descriptors to h/w
100 * @api_ch: dma engine api channel
101 * @desc_ring: dma descriptor ring
102 * @desc_ring_micpa: mic physical address of desc_ring
103 * @status_dest: destination for status (fence) descriptor
104 * @status_dest_micpa: mic address for status_dest,
105 * DMA controller uses this address
106 * @tx_array: array of async_tx
107 * @cleanup_lock: lock held when processing completed tx
108 * @prep_lock: lock held in prep_memcpy & released in tx_submit
109 * @issue_lock: lock used to synchronize writes to head
110 * @cookie: mic_irq cookie used with mic irq request
111 */
112struct mic_dma_chan {
113 int ch_num;
114 enum mic_dma_chan_owner owner;
115 u32 last_tail;
116 u32 head;
117 u32 issued;
118 u32 submitted;
119 struct dma_chan api_ch;
120 struct mic_dma_desc *desc_ring;
121 dma_addr_t desc_ring_micpa;
122 u64 *status_dest;
123 dma_addr_t status_dest_micpa;
124 struct dma_async_tx_descriptor *tx_array;
125 spinlock_t cleanup_lock;
126 spinlock_t prep_lock;
127 spinlock_t issue_lock;
128 struct mic_irq *cookie;
129};
130
131/*
132 * struct mic_dma_device - per mic device
133 * @mic_ch: dma channels
134 * @dma_dev: underlying dma device
135 * @mbdev: mic bus dma device
136 * @mmio: virtual address of the mmio space
137 * @dbg_dir: debugfs directory
138 * @start_ch: first channel number that can be used
139 * @max_xfer_size: maximum transfer size per dma descriptor
140 */
141struct mic_dma_device {
142 struct mic_dma_chan mic_ch[MIC_DMA_MAX_NUM_CHAN];
143 struct dma_device dma_dev;
144 struct mbus_device *mbdev;
145 void __iomem *mmio;
146 struct dentry *dbg_dir;
147 int start_ch;
148 size_t max_xfer_size;
149};
150
151static inline struct mic_dma_chan *to_mic_dma_chan(struct dma_chan *ch)
152{
153 return container_of(ch, struct mic_dma_chan, api_ch);
154}
155
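/*
 * A channel is element ch->ch_num of the mic_ch[] array embedded in
 * struct mic_dma_device, so (ch - ch->ch_num) points at mic_ch[0] and
 * container_of() recovers the parent device structure.
 */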
156static inline struct mic_dma_device *to_mic_dma_dev(struct mic_dma_chan *ch)
157{
158 return
159 container_of((const typeof(((struct mic_dma_device *)0)->mic_ch)*)
160 (ch - ch->ch_num), struct mic_dma_device, mic_ch);
161}
162
163static inline struct mbus_device *to_mbus_device(struct mic_dma_chan *ch)
164{
165 return to_mic_dma_dev(ch)->mbdev;
166}
167
168static inline struct mbus_hw_ops *to_mbus_hw_ops(struct mic_dma_chan *ch)
169{
170 return to_mbus_device(ch)->hw_ops;
171}
172
173static inline struct device *mic_dma_ch_to_device(struct mic_dma_chan *ch)
174{
175 return to_mic_dma_dev(ch)->dma_dev.dev;
176}
177
178static inline void __iomem *mic_dma_chan_to_mmio(struct mic_dma_chan *ch)
179{
180 return to_mic_dma_dev(ch)->mmio;
181}
182
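/*
 * Per-channel registers live at a fixed stride from the SBOX channel
 * base: MIC_DMA_SBOX_CH_BASE + ch_num * MIC_DMA_SBOX_CHAN_OFF + reg.
 */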
183static inline u32 mic_dma_read_reg(struct mic_dma_chan *ch, u32 reg)
184{
185 return ioread32(mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE +
186 ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg);
187}
188
189static inline void mic_dma_write_reg(struct mic_dma_chan *ch, u32 reg, u32 val)
190{
191 iowrite32(val, mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE +
192 ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg);
193}
194
195static inline u32 mic_dma_mmio_read(struct mic_dma_chan *ch, u32 offset)
196{
197 return ioread32(mic_dma_chan_to_mmio(ch) + offset);
198}
199
200static inline void mic_dma_mmio_write(struct mic_dma_chan *ch, u32 val,
201 u32 offset)
202{
203 iowrite32(val, mic_dma_chan_to_mmio(ch) + offset);
204}
205
206static inline u32 mic_dma_read_cmp_cnt(struct mic_dma_chan *ch)
207{
208 return mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) &
209 MIC_DMA_HW_CMP_CNT_MASK;
210}
211
212static inline void mic_dma_chan_set_owner(struct mic_dma_chan *ch)
213{
214 u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
215 u32 chan_num = ch->ch_num;
216
217 dcr = (dcr & ~(0x1 << (chan_num * 2))) | (ch->owner << (chan_num * 2));
218 mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
219}
220
221static inline void mic_dma_enable_chan(struct mic_dma_chan *ch)
222{
223 u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
224
225 dcr |= 2 << (ch->ch_num << 1);
226 mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
227}
228
229static inline void mic_dma_disable_chan(struct mic_dma_chan *ch)
230{
231 u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
232
233 dcr &= ~(2 << (ch->ch_num << 1));
234 mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
235}
236
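/*
 * Program DRAR_LO/DRAR_HI with the descriptor ring address and size.
 * As described in the register comments above, DRAR_HI carries the
 * upper address bits in 3:0, the ring size in 20:4 and, for host-owned
 * rings, the SMPT entry in 25:21 plus the system-memory flag in bit 26.
 */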
237static void mic_dma_chan_set_desc_ring(struct mic_dma_chan *ch)
238{
239 u32 drar_hi;
240 dma_addr_t desc_ring_micpa = ch->desc_ring_micpa;
241
242 drar_hi = (MIC_DMA_DESC_RX_SIZE & 0x1ffff) << 4;
243 if (MIC_DMA_CHAN_MIC == ch->owner) {
244 drar_hi |= (desc_ring_micpa >> 32) & 0xf;
245 } else {
246 drar_hi |= MIC_DMA_SBOX_DRARHI_SYS_MASK;
247 drar_hi |= ((desc_ring_micpa >> 34)
248 & 0x1f) << 21;
249 drar_hi |= (desc_ring_micpa >> 32) & 0x3;
250 }
251 mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_LO, (u32) desc_ring_micpa);
252 mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_HI, drar_hi);
253}
254
255static inline void mic_dma_chan_mask_intr(struct mic_dma_chan *ch)
256{
257 u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR);
258
259 if (MIC_DMA_CHAN_MIC == ch->owner)
260 dcar |= MIC_DMA_SBOX_DCAR_IM0;
261 else
262 dcar |= MIC_DMA_SBOX_DCAR_IM1;
263 mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar);
264}
265
266static inline void mic_dma_chan_unmask_intr(struct mic_dma_chan *ch)
267{
268 u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR);
269
270 if (MIC_DMA_CHAN_MIC == ch->owner)
271 dcar &= ~MIC_DMA_SBOX_DCAR_IM0;
272 else
273 dcar &= ~MIC_DMA_SBOX_DCAR_IM1;
274 mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar);
275}
276
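/*
 * Acknowledge a channel interrupt.  For MIC-owned channels the DCAR
 * interrupt mask is toggled first to work around the hardware erratum
 * noted in the body below, then the mbus ack_interrupt hook completes
 * the acknowledgement.
 */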
277static void mic_dma_ack_interrupt(struct mic_dma_chan *ch)
278{
279 if (MIC_DMA_CHAN_MIC == ch->owner) {
280 /* HW errata */
281 mic_dma_chan_mask_intr(ch);
282 mic_dma_chan_unmask_intr(ch);
283 }
284 to_mbus_hw_ops(ch)->ack_interrupt(to_mbus_device(ch), ch->ch_num);
285}
286#endif
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index aebde489c291..6f2f4727de2c 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -14,6 +14,20 @@ if EXTCON
14 14
15comment "Extcon Device Drivers" 15comment "Extcon Device Drivers"
16 16
17config EXTCON_ADC_JACK
18 tristate "ADC Jack extcon support"
19 depends on IIO
20 help
21 Say Y here to enable extcon device driver based on ADC values.
22
23config EXTCON_ARIZONA
24 tristate "Wolfson Arizona EXTCON support"
25 depends on MFD_ARIZONA && INPUT && SND_SOC
26 help
27 Say Y here to enable support for external accessory detection
28 with Wolfson Arizona devices. These are audio CODECs with
29 advanced audio accessory detection support.
30
17config EXTCON_GPIO 31config EXTCON_GPIO
18 tristate "GPIO extcon support" 32 tristate "GPIO extcon support"
19 depends on GPIOLIB 33 depends on GPIOLIB
@@ -21,12 +35,6 @@ config EXTCON_GPIO
21 Say Y here to enable GPIO based extcon support. Note that GPIO 35 Say Y here to enable GPIO based extcon support. Note that GPIO
22 extcon supports single state per extcon instance. 36 extcon supports single state per extcon instance.
23 37
24config EXTCON_ADC_JACK
25 tristate "ADC Jack extcon support"
26 depends on IIO
27 help
28 Say Y here to enable extcon device driver based on ADC values.
29
30config EXTCON_MAX14577 38config EXTCON_MAX14577
31 tristate "MAX14577/77836 EXTCON Support" 39 tristate "MAX14577/77836 EXTCON Support"
32 depends on MFD_MAX14577 40 depends on MFD_MAX14577
@@ -55,14 +63,6 @@ config EXTCON_MAX8997
55 Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory 63 Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
56 detector and switch. 64 detector and switch.
57 65
58config EXTCON_ARIZONA
59 tristate "Wolfson Arizona EXTCON support"
60 depends on MFD_ARIZONA && INPUT && SND_SOC
61 help
62 Say Y here to enable support for external accessory detection
63 with Wolfson Arizona devices. These are audio CODECs with
64 advanced audio accessory detection support.
65
66config EXTCON_PALMAS 66config EXTCON_PALMAS
67 tristate "Palmas USB EXTCON support" 67 tristate "Palmas USB EXTCON support"
68 depends on MFD_PALMAS 68 depends on MFD_PALMAS
@@ -70,4 +70,14 @@ config EXTCON_PALMAS
70 Say Y here to enable support for USB peripheral and USB host 70 Say Y here to enable support for USB peripheral and USB host
71 detection by palmas usb. 71 detection by palmas usb.
72 72
73config EXTCON_SM5502
74 tristate "SM5502 EXTCON support"
75 select IRQ_DOMAIN
76 select REGMAP_I2C
77 select REGMAP_IRQ
78 help
79 If you say yes here you get support for the MUIC device of
80 Silicon Mitus SM5502. The SM5502 is a USB port accessory
81 detector and switch.
82
73endif # MULTISTATE_SWITCH 83endif # MULTISTATE_SWITCH
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index bf7861ec0906..b38546eb522a 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -1,12 +1,13 @@
1# 1
2# Makefile for external connector class (extcon) devices 2# Makefile for external connector class (extcon) devices
3# 3#
4 4
5obj-$(CONFIG_EXTCON) += extcon-class.o 5obj-$(CONFIG_EXTCON) += extcon-class.o
6obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
7obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o 6obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
7obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
8obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
8obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o 9obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
9obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o 10obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
10obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o 11obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
11obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
12obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o 12obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
13obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index e18f95be3733..d860229e4de1 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -112,7 +112,6 @@ static int adc_jack_probe(struct platform_device *pdev)
112 dev_err(&pdev->dev, "failed to allocate extcon device\n"); 112 dev_err(&pdev->dev, "failed to allocate extcon device\n");
113 return -ENOMEM; 113 return -ENOMEM;
114 } 114 }
115 data->edev->dev.parent = &pdev->dev;
116 data->edev->name = pdata->name; 115 data->edev->name = pdata->name;
117 116
118 /* Check the length of array and set num_cables */ 117 /* Check the length of array and set num_cables */
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 6c84e3d12043..ba51588cc000 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -39,6 +39,11 @@
39#define ARIZONA_ACCDET_MODE_HPL 1 39#define ARIZONA_ACCDET_MODE_HPL 1
40#define ARIZONA_ACCDET_MODE_HPR 2 40#define ARIZONA_ACCDET_MODE_HPR 2
41 41
42#define ARIZONA_MICD_CLAMP_MODE_JDL 0x4
43#define ARIZONA_MICD_CLAMP_MODE_JDH 0x5
44#define ARIZONA_MICD_CLAMP_MODE_JDL_GP5H 0x9
45#define ARIZONA_MICD_CLAMP_MODE_JDH_GP5H 0xb
46
42#define ARIZONA_HPDET_MAX 10000 47#define ARIZONA_HPDET_MAX 10000
43 48
44#define HPDET_DEBOUNCE 500 49#define HPDET_DEBOUNCE 500
@@ -324,14 +329,17 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
324} 329}
325 330
326static struct { 331static struct {
332 unsigned int threshold;
327 unsigned int factor_a; 333 unsigned int factor_a;
328 unsigned int factor_b; 334 unsigned int factor_b;
329} arizona_hpdet_b_ranges[] = { 335} arizona_hpdet_b_ranges[] = {
330 { 5528, 362464 }, 336 { 100, 5528, 362464 },
331 { 11084, 6186851 }, 337 { 169, 11084, 6186851 },
332 { 11065, 65460395 }, 338 { 169, 11065, 65460395 },
333}; 339};
334 340
341#define ARIZONA_HPDET_B_RANGE_MAX 0x3fb
342
335static struct { 343static struct {
336 int min; 344 int min;
337 int max; 345 int max;
@@ -386,7 +394,8 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
386 >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT; 394 >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
387 395
388 if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 && 396 if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 &&
389 (val < 100 || val >= 0x3fb)) { 397 (val < arizona_hpdet_b_ranges[range].threshold ||
398 val >= ARIZONA_HPDET_B_RANGE_MAX)) {
390 range++; 399 range++;
391 dev_dbg(arizona->dev, "Moving to HPDET range %d\n", 400 dev_dbg(arizona->dev, "Moving to HPDET range %d\n",
392 range); 401 range);
@@ -399,7 +408,8 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
399 } 408 }
400 409
401 /* If we go out of range report top of range */ 410 /* If we go out of range report top of range */
402 if (val < 100 || val >= 0x3fb) { 411 if (val < arizona_hpdet_b_ranges[range].threshold ||
412 val >= ARIZONA_HPDET_B_RANGE_MAX) {
403 dev_dbg(arizona->dev, "Measurement out of range\n"); 413 dev_dbg(arizona->dev, "Measurement out of range\n");
404 return ARIZONA_HPDET_MAX; 414 return ARIZONA_HPDET_MAX;
405 } 415 }
@@ -664,9 +674,8 @@ err:
664 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); 674 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
665 675
666 /* Just report headphone */ 676 /* Just report headphone */
667 ret = extcon_update_state(info->edev, 677 ret = extcon_set_cable_state_(info->edev,
668 1 << ARIZONA_CABLE_HEADPHONE, 678 ARIZONA_CABLE_HEADPHONE, true);
669 1 << ARIZONA_CABLE_HEADPHONE);
670 if (ret != 0) 679 if (ret != 0)
671 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret); 680 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
672 681
@@ -723,9 +732,8 @@ err:
723 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); 732 ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
724 733
725 /* Just report headphone */ 734 /* Just report headphone */
726 ret = extcon_update_state(info->edev, 735 ret = extcon_set_cable_state_(info->edev,
727 1 << ARIZONA_CABLE_HEADPHONE, 736 ARIZONA_CABLE_HEADPHONE, true);
728 1 << ARIZONA_CABLE_HEADPHONE);
729 if (ret != 0) 737 if (ret != 0)
730 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret); 738 dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
731 739
@@ -812,16 +820,15 @@ static void arizona_micd_detect(struct work_struct *work)
812 if (info->detecting && (val & ARIZONA_MICD_LVL_8)) { 820 if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
813 arizona_identify_headphone(info); 821 arizona_identify_headphone(info);
814 822
815 ret = extcon_update_state(info->edev, 823 ret = extcon_set_cable_state_(info->edev,
816 1 << ARIZONA_CABLE_MICROPHONE, 824 ARIZONA_CABLE_MICROPHONE, true);
817 1 << ARIZONA_CABLE_MICROPHONE);
818 825
819 if (ret != 0) 826 if (ret != 0)
820 dev_err(arizona->dev, "Headset report failed: %d\n", 827 dev_err(arizona->dev, "Headset report failed: %d\n",
821 ret); 828 ret);
822 829
823 /* Don't need to regulate for button detection */ 830 /* Don't need to regulate for button detection */
824 ret = regulator_allow_bypass(info->micvdd, false); 831 ret = regulator_allow_bypass(info->micvdd, true);
825 if (ret != 0) { 832 if (ret != 0) {
826 dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n", 833 dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
827 ret); 834 ret);
@@ -962,10 +969,16 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
962 969
963 if (arizona->pdata.jd_gpio5) { 970 if (arizona->pdata.jd_gpio5) {
964 mask = ARIZONA_MICD_CLAMP_STS; 971 mask = ARIZONA_MICD_CLAMP_STS;
965 present = 0; 972 if (arizona->pdata.jd_invert)
973 present = ARIZONA_MICD_CLAMP_STS;
974 else
975 present = 0;
966 } else { 976 } else {
967 mask = ARIZONA_JD1_STS; 977 mask = ARIZONA_JD1_STS;
968 present = ARIZONA_JD1_STS; 978 if (arizona->pdata.jd_invert)
979 present = 0;
980 else
981 present = ARIZONA_JD1_STS;
969 } 982 }
970 983
971 ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val); 984 ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val);
@@ -1096,6 +1109,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1096 struct arizona_pdata *pdata = &arizona->pdata; 1109 struct arizona_pdata *pdata = &arizona->pdata;
1097 struct arizona_extcon_info *info; 1110 struct arizona_extcon_info *info;
1098 unsigned int val; 1111 unsigned int val;
1112 unsigned int clamp_mode;
1099 int jack_irq_fall, jack_irq_rise; 1113 int jack_irq_fall, jack_irq_rise;
1100 int ret, mode, i, j; 1114 int ret, mode, i, j;
1101 1115
@@ -1103,12 +1117,10 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1103 return -EPROBE_DEFER; 1117 return -EPROBE_DEFER;
1104 1118
1105 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 1119 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
1106 if (!info) { 1120 if (!info)
1107 dev_err(&pdev->dev, "Failed to allocate memory\n");
1108 return -ENOMEM; 1121 return -ENOMEM;
1109 }
1110 1122
1111 info->micvdd = devm_regulator_get(arizona->dev, "MICVDD"); 1123 info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD");
1112 if (IS_ERR(info->micvdd)) { 1124 if (IS_ERR(info->micvdd)) {
1113 ret = PTR_ERR(info->micvdd); 1125 ret = PTR_ERR(info->micvdd);
1114 dev_err(arizona->dev, "Failed to get MICVDD: %d\n", ret); 1126 dev_err(arizona->dev, "Failed to get MICVDD: %d\n", ret);
@@ -1156,7 +1168,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1156 return -ENOMEM; 1168 return -ENOMEM;
1157 } 1169 }
1158 info->edev->name = "Headset Jack"; 1170 info->edev->name = "Headset Jack";
1159 info->edev->dev.parent = arizona->dev;
1160 1171
1161 ret = devm_extcon_dev_register(&pdev->dev, info->edev); 1172 ret = devm_extcon_dev_register(&pdev->dev, info->edev);
1162 if (ret < 0) { 1173 if (ret < 0) {
@@ -1174,7 +1185,6 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1174 1185
1175 info->input->name = "Headset"; 1186 info->input->name = "Headset";
1176 info->input->phys = "arizona/extcon"; 1187 info->input->phys = "arizona/extcon";
1177 info->input->dev.parent = &pdev->dev;
1178 1188
1179 if (pdata->num_micd_configs) { 1189 if (pdata->num_micd_configs) {
1180 info->micd_modes = pdata->micd_configs; 1190 info->micd_modes = pdata->micd_configs;
@@ -1305,16 +1315,22 @@ static int arizona_extcon_probe(struct platform_device *pdev)
1305 regmap_write(arizona->regmap, ARIZONA_GPIO5_CTRL, 1315 regmap_write(arizona->regmap, ARIZONA_GPIO5_CTRL,
1306 val); 1316 val);
1307 1317
1308 regmap_update_bits(arizona->regmap, 1318 if (arizona->pdata.jd_invert)
1309 ARIZONA_MICD_CLAMP_CONTROL, 1319 clamp_mode = ARIZONA_MICD_CLAMP_MODE_JDH_GP5H;
1310 ARIZONA_MICD_CLAMP_MODE_MASK, 0x9); 1320 else
1321 clamp_mode = ARIZONA_MICD_CLAMP_MODE_JDL_GP5H;
1311 } else { 1322 } else {
1312 regmap_update_bits(arizona->regmap, 1323 if (arizona->pdata.jd_invert)
1313 ARIZONA_MICD_CLAMP_CONTROL, 1324 clamp_mode = ARIZONA_MICD_CLAMP_MODE_JDH;
1314 ARIZONA_MICD_CLAMP_MODE_MASK, 0x4); 1325 else
1326 clamp_mode = ARIZONA_MICD_CLAMP_MODE_JDL;
1315 } 1327 }
1316 1328
1317 regmap_update_bits(arizona->regmap, 1329 regmap_update_bits(arizona->regmap,
1330 ARIZONA_MICD_CLAMP_CONTROL,
1331 ARIZONA_MICD_CLAMP_MODE_MASK, clamp_mode);
1332
1333 regmap_update_bits(arizona->regmap,
1318 ARIZONA_JACK_DETECT_DEBOUNCE, 1334 ARIZONA_JACK_DETECT_DEBOUNCE,
1319 ARIZONA_MICD_CLAMP_DB, 1335 ARIZONA_MICD_CLAMP_DB,
1320 ARIZONA_MICD_CLAMP_DB); 1336 ARIZONA_MICD_CLAMP_DB);
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 18d42c0e4581..4c2f2c543bb7 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -645,6 +645,8 @@ struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
645 return edev; 645 return edev;
646 } 646 }
647 647
648 edev->dev.parent = dev;
649
648 *ptr = edev; 650 *ptr = edev;
649 devres_add(dev, ptr); 651 devres_add(dev, ptr);
650 652
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 645b28356819..5b7ec274cb63 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -105,7 +105,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
105 return -ENOMEM; 105 return -ENOMEM;
106 } 106 }
107 extcon_data->edev->name = pdata->name; 107 extcon_data->edev->name = pdata->name;
108 extcon_data->edev->dev.parent = &pdev->dev;
109 108
110 extcon_data->gpio = pdata->gpio; 109 extcon_data->gpio = pdata->gpio;
111 extcon_data->gpio_active_low = pdata->gpio_active_low; 110 extcon_data->gpio_active_low = pdata->gpio_active_low;
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index d49e891b5675..7309743d0da1 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -692,10 +692,9 @@ static int max14577_muic_probe(struct platform_device *pdev)
692 u8 id; 692 u8 id;
693 693
694 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 694 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
695 if (!info) { 695 if (!info)
696 dev_err(&pdev->dev, "failed to allocate memory\n");
697 return -ENOMEM; 696 return -ENOMEM;
698 } 697
699 info->dev = &pdev->dev; 698 info->dev = &pdev->dev;
700 info->max14577 = max14577; 699 info->max14577 = max14577;
701 700
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 2c7c3e191591..77460f2c1ca1 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -255,10 +255,10 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
255 case ADC_DEBOUNCE_TIME_10MS: 255 case ADC_DEBOUNCE_TIME_10MS:
256 case ADC_DEBOUNCE_TIME_25MS: 256 case ADC_DEBOUNCE_TIME_25MS:
257 case ADC_DEBOUNCE_TIME_38_62MS: 257 case ADC_DEBOUNCE_TIME_38_62MS:
258 ret = max77693_update_reg(info->max77693->regmap_muic, 258 ret = regmap_update_bits(info->max77693->regmap_muic,
259 MAX77693_MUIC_REG_CTRL3, 259 MAX77693_MUIC_REG_CTRL3,
260 time << CONTROL3_ADCDBSET_SHIFT, 260 CONTROL3_ADCDBSET_MASK,
261 CONTROL3_ADCDBSET_MASK); 261 time << CONTROL3_ADCDBSET_SHIFT);
262 if (ret) { 262 if (ret) {
263 dev_err(info->dev, "failed to set ADC debounce time\n"); 263 dev_err(info->dev, "failed to set ADC debounce time\n");
264 return ret; 264 return ret;
@@ -286,15 +286,15 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
286 u8 val, bool attached) 286 u8 val, bool attached)
287{ 287{
288 int ret = 0; 288 int ret = 0;
289 u8 ctrl1, ctrl2 = 0; 289 unsigned int ctrl1, ctrl2 = 0;
290 290
291 if (attached) 291 if (attached)
292 ctrl1 = val; 292 ctrl1 = val;
293 else 293 else
294 ctrl1 = CONTROL1_SW_OPEN; 294 ctrl1 = CONTROL1_SW_OPEN;
295 295
296 ret = max77693_update_reg(info->max77693->regmap_muic, 296 ret = regmap_update_bits(info->max77693->regmap_muic,
297 MAX77693_MUIC_REG_CTRL1, ctrl1, COMP_SW_MASK); 297 MAX77693_MUIC_REG_CTRL1, COMP_SW_MASK, ctrl1);
298 if (ret < 0) { 298 if (ret < 0) {
299 dev_err(info->dev, "failed to update MUIC register\n"); 299 dev_err(info->dev, "failed to update MUIC register\n");
300 return ret; 300 return ret;
@@ -305,9 +305,9 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
305 else 305 else
306 ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */ 306 ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
307 307
308 ret = max77693_update_reg(info->max77693->regmap_muic, 308 ret = regmap_update_bits(info->max77693->regmap_muic,
309 MAX77693_MUIC_REG_CTRL2, ctrl2, 309 MAX77693_MUIC_REG_CTRL2,
310 CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK); 310 CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK, ctrl2);
311 if (ret < 0) { 311 if (ret < 0) {
312 dev_err(info->dev, "failed to update MUIC register\n"); 312 dev_err(info->dev, "failed to update MUIC register\n");
313 return ret; 313 return ret;
@@ -969,8 +969,8 @@ static void max77693_muic_irq_work(struct work_struct *work)
969 if (info->irq == muic_irqs[i].virq) 969 if (info->irq == muic_irqs[i].virq)
970 irq_type = muic_irqs[i].irq; 970 irq_type = muic_irqs[i].irq;
971 971
972 ret = max77693_bulk_read(info->max77693->regmap_muic, 972 ret = regmap_bulk_read(info->max77693->regmap_muic,
973 MAX77693_MUIC_REG_STATUS1, 2, info->status); 973 MAX77693_MUIC_REG_STATUS1, info->status, 2);
974 if (ret) { 974 if (ret) {
975 dev_err(info->dev, "failed to read MUIC register\n"); 975 dev_err(info->dev, "failed to read MUIC register\n");
976 mutex_unlock(&info->mutex); 976 mutex_unlock(&info->mutex);
@@ -1042,8 +1042,8 @@ static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
1042 mutex_lock(&info->mutex); 1042 mutex_lock(&info->mutex);
1043 1043
1044 /* Read STATUSx register to detect accessory */ 1044 /* Read STATUSx register to detect accessory */
1045 ret = max77693_bulk_read(info->max77693->regmap_muic, 1045 ret = regmap_bulk_read(info->max77693->regmap_muic,
1046 MAX77693_MUIC_REG_STATUS1, 2, info->status); 1046 MAX77693_MUIC_REG_STATUS1, info->status, 2);
1047 if (ret) { 1047 if (ret) {
1048 dev_err(info->dev, "failed to read MUIC register\n"); 1048 dev_err(info->dev, "failed to read MUIC register\n");
1049 mutex_unlock(&info->mutex); 1049 mutex_unlock(&info->mutex);
@@ -1095,14 +1095,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
1095 int delay_jiffies; 1095 int delay_jiffies;
1096 int ret; 1096 int ret;
1097 int i; 1097 int i;
1098 u8 id; 1098 unsigned int id;
1099 1099
1100 info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info), 1100 info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
1101 GFP_KERNEL); 1101 GFP_KERNEL);
1102 if (!info) { 1102 if (!info)
1103 dev_err(&pdev->dev, "failed to allocate memory\n");
1104 return -ENOMEM; 1103 return -ENOMEM;
1105 } 1104
1106 info->dev = &pdev->dev; 1105 info->dev = &pdev->dev;
1107 info->max77693 = max77693; 1106 info->max77693 = max77693;
1108 if (info->max77693->regmap_muic) { 1107 if (info->max77693->regmap_muic) {
@@ -1154,7 +1153,8 @@ static int max77693_muic_probe(struct platform_device *pdev)
1154 struct max77693_muic_irq *muic_irq = &muic_irqs[i]; 1153 struct max77693_muic_irq *muic_irq = &muic_irqs[i];
1155 unsigned int virq = 0; 1154 unsigned int virq = 0;
1156 1155
1157 virq = irq_create_mapping(max77693->irq_domain, muic_irq->irq); 1156 virq = regmap_irq_get_virq(max77693->irq_data_muic,
1157 muic_irq->irq);
1158 if (!virq) { 1158 if (!virq) {
1159 ret = -EINVAL; 1159 ret = -EINVAL;
1160 goto err_irq; 1160 goto err_irq;
@@ -1183,7 +1183,6 @@ static int max77693_muic_probe(struct platform_device *pdev)
1183 goto err_irq; 1183 goto err_irq;
1184 } 1184 }
1185 info->edev->name = DEV_NAME; 1185 info->edev->name = DEV_NAME;
1186 info->edev->dev.parent = &pdev->dev;
1187 1186
1188 ret = devm_extcon_dev_register(&pdev->dev, info->edev); 1187 ret = devm_extcon_dev_register(&pdev->dev, info->edev);
1189 if (ret) { 1188 if (ret) {
@@ -1204,7 +1203,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
1204 enum max77693_irq_source irq_src 1203 enum max77693_irq_source irq_src
1205 = MAX77693_IRQ_GROUP_NR; 1204 = MAX77693_IRQ_GROUP_NR;
1206 1205
1207 max77693_write_reg(info->max77693->regmap_muic, 1206 regmap_write(info->max77693->regmap_muic,
1208 init_data[i].addr, 1207 init_data[i].addr,
1209 init_data[i].data); 1208 init_data[i].data);
1210 1209
@@ -1262,7 +1261,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
1262 max77693_muic_set_path(info, info->path_uart, true); 1261 max77693_muic_set_path(info, info->path_uart, true);
1263 1262
1264 /* Check revision number of MUIC device*/ 1263 /* Check revision number of MUIC device*/
1265 ret = max77693_read_reg(info->max77693->regmap_muic, 1264 ret = regmap_read(info->max77693->regmap_muic,
1266 MAX77693_MUIC_REG_ID, &id); 1265 MAX77693_MUIC_REG_ID, &id);
1267 if (ret < 0) { 1266 if (ret < 0) {
1268 dev_err(&pdev->dev, "failed to read revision number\n"); 1267 dev_err(&pdev->dev, "failed to read revision number\n");
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index d9f7f1baaa03..75e501c98005 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -661,10 +661,8 @@ static int max8997_muic_probe(struct platform_device *pdev)
661 661
662 info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info), 662 info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
663 GFP_KERNEL); 663 GFP_KERNEL);
664 if (!info) { 664 if (!info)
665 dev_err(&pdev->dev, "failed to allocate memory\n");
666 return -ENOMEM; 665 return -ENOMEM;
667 }
668 666
669 info->dev = &pdev->dev; 667 info->dev = &pdev->dev;
670 info->muic = max8997->muic; 668 info->muic = max8997->muic;
@@ -706,7 +704,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
706 goto err_irq; 704 goto err_irq;
707 } 705 }
708 info->edev->name = DEV_NAME; 706 info->edev->name = DEV_NAME;
709 info->edev->dev.parent = &pdev->dev;
710 707
711 ret = devm_extcon_dev_register(&pdev->dev, info->edev); 708 ret = devm_extcon_dev_register(&pdev->dev, info->edev);
712 if (ret) { 709 if (ret) {
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 7417ce84eb2d..230e1220ce48 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -194,7 +194,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
194 return -ENOMEM; 194 return -ENOMEM;
195 } 195 }
196 palmas_usb->edev->name = kstrdup(node->name, GFP_KERNEL); 196 palmas_usb->edev->name = kstrdup(node->name, GFP_KERNEL);
197 palmas_usb->edev->dev.parent = palmas_usb->dev;
198 palmas_usb->edev->mutually_exclusive = mutually_exclusive; 197 palmas_usb->edev->mutually_exclusive = mutually_exclusive;
199 198
200 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); 199 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
@@ -278,7 +277,7 @@ static int palmas_usb_resume(struct device *dev)
278 277
279static SIMPLE_DEV_PM_OPS(palmas_pm_ops, palmas_usb_suspend, palmas_usb_resume); 278static SIMPLE_DEV_PM_OPS(palmas_pm_ops, palmas_usb_suspend, palmas_usb_resume);
280 279
281static struct of_device_id of_palmas_match_tbl[] = { 280static const struct of_device_id of_palmas_match_tbl[] = {
282 { .compatible = "ti,palmas-usb", }, 281 { .compatible = "ti,palmas-usb", },
283 { .compatible = "ti,palmas-usb-vid", }, 282 { .compatible = "ti,palmas-usb-vid", },
284 { .compatible = "ti,twl6035-usb", }, 283 { .compatible = "ti,twl6035-usb", },
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
new file mode 100644
index 000000000000..560d7dccec7b
--- /dev/null
+++ b/drivers/extcon/extcon-sm5502.c
@@ -0,0 +1,724 @@
1/*
 2 * extcon-sm5502.c - Silicon Mitus SM5502 extcon driver to support USB switches
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 * Author: Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/err.h>
19#include <linux/i2c.h>
20#include <linux/input.h>
21#include <linux/interrupt.h>
22#include <linux/irqdomain.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/regmap.h>
27#include <linux/slab.h>
28#include <linux/extcon.h>
29#include <linux/extcon/sm5502.h>
30
31#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
32
33struct muic_irq {
34 unsigned int irq;
35 const char *name;
36 unsigned int virq;
37};
38
39struct reg_data {
40 u8 reg;
41 unsigned int val;
42 bool invert;
43};
44
45struct sm5502_muic_info {
46 struct device *dev;
47 struct extcon_dev *edev;
48
49 struct i2c_client *i2c;
50 struct regmap *regmap;
51
52 struct regmap_irq_chip_data *irq_data;
53 struct muic_irq *muic_irqs;
54 unsigned int num_muic_irqs;
55 int irq;
56 bool irq_attach;
57 bool irq_detach;
58 struct work_struct irq_work;
59
60 struct reg_data *reg_data;
61 unsigned int num_reg_data;
62
63 struct mutex mutex;
64
65 /*
 66	 * Use a delayed workqueue to detect the cable state and then
 67	 * notify it to the notifiee/platform through a uevent.  Only after
 68	 * the platform has finished booting should the extcon provider
 69	 * driver report the cable state to the upper layer.
70 */
71 struct delayed_work wq_detcable;
72};
73
74/* Default value of SM5502 register to bring up MUIC device. */
75static struct reg_data sm5502_reg_data[] = {
76 {
77 .reg = SM5502_REG_CONTROL,
78 .val = SM5502_REG_CONTROL_MASK_INT_MASK,
79 .invert = false,
80 }, {
81 .reg = SM5502_REG_INTMASK1,
82 .val = SM5502_REG_INTM1_KP_MASK
83 | SM5502_REG_INTM1_LKP_MASK
84 | SM5502_REG_INTM1_LKR_MASK,
85 .invert = true,
86 }, {
87 .reg = SM5502_REG_INTMASK2,
88 .val = SM5502_REG_INTM2_VBUS_DET_MASK
89 | SM5502_REG_INTM2_REV_ACCE_MASK
90 | SM5502_REG_INTM2_ADC_CHG_MASK
91 | SM5502_REG_INTM2_STUCK_KEY_MASK
92 | SM5502_REG_INTM2_STUCK_KEY_RCV_MASK
93 | SM5502_REG_INTM2_MHL_MASK,
94 .invert = true,
95 },
96 { }
97};
98
99/* List of detectable cables */
100enum {
101 EXTCON_CABLE_USB = 0,
102 EXTCON_CABLE_USB_HOST,
103 EXTCON_CABLE_TA,
104
105 EXTCON_CABLE_END,
106};
107
108static const char *sm5502_extcon_cable[] = {
109 [EXTCON_CABLE_USB] = "USB",
110 [EXTCON_CABLE_USB_HOST] = "USB-Host",
111 [EXTCON_CABLE_TA] = "TA",
112 NULL,
113};
114
115/* Define supported accessory type */
116enum sm5502_muic_acc_type {
117 SM5502_MUIC_ADC_GROUND = 0x0,
118 SM5502_MUIC_ADC_SEND_END_BUTTON,
119 SM5502_MUIC_ADC_REMOTE_S1_BUTTON,
120 SM5502_MUIC_ADC_REMOTE_S2_BUTTON,
121 SM5502_MUIC_ADC_REMOTE_S3_BUTTON,
122 SM5502_MUIC_ADC_REMOTE_S4_BUTTON,
123 SM5502_MUIC_ADC_REMOTE_S5_BUTTON,
124 SM5502_MUIC_ADC_REMOTE_S6_BUTTON,
125 SM5502_MUIC_ADC_REMOTE_S7_BUTTON,
126 SM5502_MUIC_ADC_REMOTE_S8_BUTTON,
127 SM5502_MUIC_ADC_REMOTE_S9_BUTTON,
128 SM5502_MUIC_ADC_REMOTE_S10_BUTTON,
129 SM5502_MUIC_ADC_REMOTE_S11_BUTTON,
130 SM5502_MUIC_ADC_REMOTE_S12_BUTTON,
131 SM5502_MUIC_ADC_RESERVED_ACC_1,
132 SM5502_MUIC_ADC_RESERVED_ACC_2,
133 SM5502_MUIC_ADC_RESERVED_ACC_3,
134 SM5502_MUIC_ADC_RESERVED_ACC_4,
135 SM5502_MUIC_ADC_RESERVED_ACC_5,
136 SM5502_MUIC_ADC_AUDIO_TYPE2,
137 SM5502_MUIC_ADC_PHONE_POWERED_DEV,
138 SM5502_MUIC_ADC_TTY_CONVERTER,
139 SM5502_MUIC_ADC_UART_CABLE,
140 SM5502_MUIC_ADC_TYPE1_CHARGER,
141 SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB,
142 SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB,
143 SM5502_MUIC_ADC_AUDIO_VIDEO_CABLE,
144 SM5502_MUIC_ADC_TYPE2_CHARGER,
145 SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART,
146 SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART,
147 SM5502_MUIC_ADC_AUDIO_TYPE1,
148 SM5502_MUIC_ADC_OPEN = 0x1f,
149
 150	/* The accessories below share the same ADC value (0x1f or 0x1e),
 151	   so the Device Type1 register is used to distinguish them. */
152 /* |---------|--ADC| */
153 /* | [7:5]|[4:0]| */
154 SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e, /* | 001|11110| */
155 SM5502_MUIC_ADC_AUDIO_TYPE1_SEND_END = 0x5e, /* | 010|11110| */
156 /* |Dev Type1|--ADC| */
157 SM5502_MUIC_ADC_OPEN_USB = 0x5f, /* | 010|11111| */
158 SM5502_MUIC_ADC_OPEN_TA = 0xdf, /* | 110|11111| */
159 SM5502_MUIC_ADC_OPEN_USB_OTG = 0xff, /* | 111|11111| */
160};
161
162/* List of supported interrupt for SM5502 */
163static struct muic_irq sm5502_muic_irqs[] = {
164 { SM5502_IRQ_INT1_ATTACH, "muic-attach" },
165 { SM5502_IRQ_INT1_DETACH, "muic-detach" },
166 { SM5502_IRQ_INT1_KP, "muic-kp" },
167 { SM5502_IRQ_INT1_LKP, "muic-lkp" },
168 { SM5502_IRQ_INT1_LKR, "muic-lkr" },
169 { SM5502_IRQ_INT1_OVP_EVENT, "muic-ovp-event" },
170 { SM5502_IRQ_INT1_OCP_EVENT, "muic-ocp-event" },
171 { SM5502_IRQ_INT1_OVP_OCP_DIS, "muic-ovp-ocp-dis" },
172 { SM5502_IRQ_INT2_VBUS_DET, "muic-vbus-det" },
173 { SM5502_IRQ_INT2_REV_ACCE, "muic-rev-acce" },
174 { SM5502_IRQ_INT2_ADC_CHG, "muic-adc-chg" },
175 { SM5502_IRQ_INT2_STUCK_KEY, "muic-stuck-key" },
176 { SM5502_IRQ_INT2_STUCK_KEY_RCV, "muic-stuck-key-rcv" },
177 { SM5502_IRQ_INT2_MHL, "muic-mhl" },
178};
179
180/* Define interrupt list of SM5502 to register regmap_irq */
181static const struct regmap_irq sm5502_irqs[] = {
182 /* INT1 interrupts */
183 { .reg_offset = 0, .mask = SM5502_IRQ_INT1_ATTACH_MASK, },
184 { .reg_offset = 0, .mask = SM5502_IRQ_INT1_DETACH_MASK, },
185 { .reg_offset = 0, .mask = SM5502_IRQ_INT1_KP_MASK, },
186 { .reg_offset = 0, .mask = SM5502_IRQ_INT1_LKP_MASK, },
187 { .reg_offset = 0, .mask = SM5502_IRQ_INT1_LKR_MASK, },
188 { .reg_offset = 0, .mask = SM5502_IRQ_INT1_OVP_EVENT_MASK, },
189 { .reg_offset = 0, .mask = SM5502_IRQ_INT1_OCP_EVENT_MASK, },
190 { .reg_offset = 0, .mask = SM5502_IRQ_INT1_OVP_OCP_DIS_MASK, },
191
192 /* INT2 interrupts */
193 { .reg_offset = 1, .mask = SM5502_IRQ_INT2_VBUS_DET_MASK,},
194 { .reg_offset = 1, .mask = SM5502_IRQ_INT2_REV_ACCE_MASK, },
195 { .reg_offset = 1, .mask = SM5502_IRQ_INT2_ADC_CHG_MASK, },
196 { .reg_offset = 1, .mask = SM5502_IRQ_INT2_STUCK_KEY_MASK, },
197 { .reg_offset = 1, .mask = SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK, },
198 { .reg_offset = 1, .mask = SM5502_IRQ_INT2_MHL_MASK, },
199};
200
201static const struct regmap_irq_chip sm5502_muic_irq_chip = {
202 .name = "sm5502",
203 .status_base = SM5502_REG_INT1,
204 .mask_base = SM5502_REG_INTMASK1,
205 .mask_invert = false,
206 .num_regs = 2,
207 .irqs = sm5502_irqs,
208 .num_irqs = ARRAY_SIZE(sm5502_irqs),
209};
210
211/* Define regmap configuration of SM5502 for I2C communication */
212static bool sm5502_muic_volatile_reg(struct device *dev, unsigned int reg)
213{
214 switch (reg) {
215 case SM5502_REG_INTMASK1:
216 case SM5502_REG_INTMASK2:
217 return true;
218 default:
219 break;
220 }
221 return false;
222}
223
224static const struct regmap_config sm5502_muic_regmap_config = {
225 .reg_bits = 8,
226 .val_bits = 8,
227 .volatile_reg = sm5502_muic_volatile_reg,
228 .max_register = SM5502_REG_END,
229};
230
231/* Change DM_CON/DP_CON/VBUSIN switch according to cable type */
232static int sm5502_muic_set_path(struct sm5502_muic_info *info,
233 unsigned int con_sw, unsigned int vbus_sw,
234 bool attached)
235{
236 int ret;
237
238 if (!attached) {
239 con_sw = DM_DP_SWITCH_OPEN;
240 vbus_sw = VBUSIN_SWITCH_OPEN;
241 }
242
243 switch (con_sw) {
244 case DM_DP_SWITCH_OPEN:
245 case DM_DP_SWITCH_USB:
246 case DM_DP_SWITCH_AUDIO:
247 case DM_DP_SWITCH_UART:
248 ret = regmap_update_bits(info->regmap, SM5502_REG_MANUAL_SW1,
249 SM5502_REG_MANUAL_SW1_DP_MASK |
250 SM5502_REG_MANUAL_SW1_DM_MASK,
251 con_sw);
252 if (ret < 0) {
253 dev_err(info->dev,
254 "cannot update DM_CON/DP_CON switch\n");
255 return ret;
256 }
257 break;
258 default:
259 dev_err(info->dev, "Unknown DM_CON/DP_CON switch type (%d)\n",
260 con_sw);
261 return -EINVAL;
262 };
263
264 switch (vbus_sw) {
265 case VBUSIN_SWITCH_OPEN:
266 case VBUSIN_SWITCH_VBUSOUT:
267 case VBUSIN_SWITCH_MIC:
268 case VBUSIN_SWITCH_VBUSOUT_WITH_USB:
269 ret = regmap_update_bits(info->regmap, SM5502_REG_MANUAL_SW1,
270 SM5502_REG_MANUAL_SW1_VBUSIN_MASK,
271 vbus_sw);
272 if (ret < 0) {
273 dev_err(info->dev,
274 "cannot update VBUSIN switch\n");
275 return ret;
276 }
277 break;
278 default:
279 dev_err(info->dev, "Unknown VBUS switch type (%d)\n", vbus_sw);
280 return -EINVAL;
281 };
282
283 return 0;
284}
285
286/* Return cable type of attached or detached accessories */
287static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
288{
289 unsigned int cable_type = -1, adc, dev_type1;
290 int ret;
291
292 /* Read ADC value according to external cable or button */
293 ret = regmap_read(info->regmap, SM5502_REG_ADC, &adc);
294 if (ret) {
295 dev_err(info->dev, "failed to read ADC register\n");
296 return ret;
297 }
298
299 /*
 300	 * If ADC is SM5502_MUIC_ADC_GROUND(0x0), no external cable is
 301	 * connected to the MUIC device.
302 */
 303	cable_type = adc & SM5502_REG_ADC_MASK;
304 if (cable_type == SM5502_MUIC_ADC_GROUND)
305 return SM5502_MUIC_ADC_GROUND;
306
307 switch (cable_type) {
308 case SM5502_MUIC_ADC_GROUND:
309 case SM5502_MUIC_ADC_SEND_END_BUTTON:
310 case SM5502_MUIC_ADC_REMOTE_S1_BUTTON:
311 case SM5502_MUIC_ADC_REMOTE_S2_BUTTON:
312 case SM5502_MUIC_ADC_REMOTE_S3_BUTTON:
313 case SM5502_MUIC_ADC_REMOTE_S4_BUTTON:
314 case SM5502_MUIC_ADC_REMOTE_S5_BUTTON:
315 case SM5502_MUIC_ADC_REMOTE_S6_BUTTON:
316 case SM5502_MUIC_ADC_REMOTE_S7_BUTTON:
317 case SM5502_MUIC_ADC_REMOTE_S8_BUTTON:
318 case SM5502_MUIC_ADC_REMOTE_S9_BUTTON:
319 case SM5502_MUIC_ADC_REMOTE_S10_BUTTON:
320 case SM5502_MUIC_ADC_REMOTE_S11_BUTTON:
321 case SM5502_MUIC_ADC_REMOTE_S12_BUTTON:
322 case SM5502_MUIC_ADC_RESERVED_ACC_1:
323 case SM5502_MUIC_ADC_RESERVED_ACC_2:
324 case SM5502_MUIC_ADC_RESERVED_ACC_3:
325 case SM5502_MUIC_ADC_RESERVED_ACC_4:
326 case SM5502_MUIC_ADC_RESERVED_ACC_5:
327 case SM5502_MUIC_ADC_AUDIO_TYPE2:
328 case SM5502_MUIC_ADC_PHONE_POWERED_DEV:
329 case SM5502_MUIC_ADC_TTY_CONVERTER:
330 case SM5502_MUIC_ADC_UART_CABLE:
331 case SM5502_MUIC_ADC_TYPE1_CHARGER:
332 case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_USB:
333 case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_USB:
334 case SM5502_MUIC_ADC_AUDIO_VIDEO_CABLE:
335 case SM5502_MUIC_ADC_TYPE2_CHARGER:
336 case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_OFF_UART:
337 case SM5502_MUIC_ADC_FACTORY_MODE_BOOT_ON_UART:
338 break;
339 case SM5502_MUIC_ADC_AUDIO_TYPE1:
340 /*
341 * Check whether cable type is
342 * SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE
343 * or SM5502_MUIC_ADC_AUDIO_TYPE1_SEND_END
344 * by using Button event.
345 */
346 break;
347 case SM5502_MUIC_ADC_OPEN:
348 ret = regmap_read(info->regmap, SM5502_REG_DEV_TYPE1,
349 &dev_type1);
350 if (ret) {
351 dev_err(info->dev, "failed to read DEV_TYPE1 reg\n");
352 return ret;
353 }
354
355 switch (dev_type1) {
356 case SM5502_REG_DEV_TYPE1_USB_SDP_MASK:
357 cable_type = SM5502_MUIC_ADC_OPEN_USB;
358 break;
359 case SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK:
360 cable_type = SM5502_MUIC_ADC_OPEN_TA;
361 break;
362 case SM5502_REG_DEV_TYPE1_USB_OTG_MASK:
363 cable_type = SM5502_MUIC_ADC_OPEN_USB_OTG;
364 break;
365 default:
366 dev_dbg(info->dev,
367 "cannot identify the cable type: adc(0x%x) "
368 "dev_type1(0x%x)\n", adc, dev_type1);
369 return -EINVAL;
370 };
371 break;
372 default:
373 dev_err(info->dev,
374 "failed to identify the cable type: adc(0x%x)\n", adc);
375 return -EINVAL;
376 };
377
378 return cable_type;
379}
380
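/*
 * Map the detected cable type to an extcon cable index plus the DM/DP
 * and VBUS switch settings, update the hardware path and then report
 * the new state.  On detach the previously reported cable type is
 * reused so that the matching cable can be cleared.
 */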
381static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
382 bool attached)
383{
384 static unsigned int prev_cable_type = SM5502_MUIC_ADC_GROUND;
385 const char **cable_names = info->edev->supported_cable;
386 unsigned int cable_type = SM5502_MUIC_ADC_GROUND;
387 unsigned int con_sw = DM_DP_SWITCH_OPEN;
388 unsigned int vbus_sw = VBUSIN_SWITCH_OPEN;
389 unsigned int idx = 0;
390 int ret;
391
392 if (!cable_names)
393 return 0;
394
395 /* Get the type of attached or detached cable */
396 if (attached)
397 cable_type = sm5502_muic_get_cable_type(info);
398 else if (!attached)
399 cable_type = prev_cable_type;
400 prev_cable_type = cable_type;
401
402 switch (cable_type) {
403 case SM5502_MUIC_ADC_OPEN_USB:
404 idx = EXTCON_CABLE_USB;
405 con_sw = DM_DP_SWITCH_USB;
406 vbus_sw = VBUSIN_SWITCH_VBUSOUT_WITH_USB;
407 break;
408 case SM5502_MUIC_ADC_OPEN_TA:
409 idx = EXTCON_CABLE_TA;
410 con_sw = DM_DP_SWITCH_OPEN;
411 vbus_sw = VBUSIN_SWITCH_VBUSOUT;
412 break;
413 case SM5502_MUIC_ADC_OPEN_USB_OTG:
414 idx = EXTCON_CABLE_USB_HOST;
415 con_sw = DM_DP_SWITCH_USB;
416 vbus_sw = VBUSIN_SWITCH_OPEN;
417 break;
418 default:
419 dev_dbg(info->dev,
420 "cannot handle this cable_type (0x%x)\n", cable_type);
421 return 0;
422 };
423
424 /* Change internal hardware path(DM_CON/DP_CON, VBUSIN) */
425 ret = sm5502_muic_set_path(info, con_sw, vbus_sw, attached);
426 if (ret < 0)
427 return ret;
428
429 /* Change the state of external accessory */
430 extcon_set_cable_state(info->edev, cable_names[idx], attached);
431
432 return 0;
433}
434
435static void sm5502_muic_irq_work(struct work_struct *work)
436{
437 struct sm5502_muic_info *info = container_of(work,
438 struct sm5502_muic_info, irq_work);
439 int ret = 0;
440
441 if (!info->edev)
442 return;
443
444 mutex_lock(&info->mutex);
445
446 /* Detect attached or detached cables */
447 if (info->irq_attach) {
448 ret = sm5502_muic_cable_handler(info, true);
449 info->irq_attach = false;
450 }
451 if (info->irq_detach) {
452 ret = sm5502_muic_cable_handler(info, false);
453 info->irq_detach = false;
454 }
455
456 if (ret < 0)
457 dev_err(info->dev, "failed to handle MUIC interrupt\n");
458
459 mutex_unlock(&info->mutex);
460
461 return;
462}
463
464/*
 465 * Set irq_attach or irq_detach in sm5502_muic_info for attach/detach
 466 * interrupts; all other interrupt types are ignored.  Returns 0.
467 */
468static int sm5502_parse_irq(struct sm5502_muic_info *info, int irq_type)
469{
470 switch (irq_type) {
471 case SM5502_IRQ_INT1_ATTACH:
472 info->irq_attach = true;
473 break;
474 case SM5502_IRQ_INT1_DETACH:
475 info->irq_detach = true;
476 break;
477 case SM5502_IRQ_INT1_KP:
478 case SM5502_IRQ_INT1_LKP:
479 case SM5502_IRQ_INT1_LKR:
480 case SM5502_IRQ_INT1_OVP_EVENT:
481 case SM5502_IRQ_INT1_OCP_EVENT:
482 case SM5502_IRQ_INT1_OVP_OCP_DIS:
483 case SM5502_IRQ_INT2_VBUS_DET:
484 case SM5502_IRQ_INT2_REV_ACCE:
485 case SM5502_IRQ_INT2_ADC_CHG:
486 case SM5502_IRQ_INT2_STUCK_KEY:
487 case SM5502_IRQ_INT2_STUCK_KEY_RCV:
488 case SM5502_IRQ_INT2_MHL:
489 default:
490 break;
491 }
492
493 return 0;
494}
495
496static irqreturn_t sm5502_muic_irq_handler(int irq, void *data)
497{
498 struct sm5502_muic_info *info = data;
499 int i, irq_type = -1, ret;
500
501 for (i = 0; i < info->num_muic_irqs; i++)
502 if (irq == info->muic_irqs[i].virq)
503 irq_type = info->muic_irqs[i].irq;
504
505 ret = sm5502_parse_irq(info, irq_type);
506 if (ret < 0) {
 507		dev_warn(info->dev, "cannot handle this interrupt:%d\n",
508 irq_type);
509 return IRQ_HANDLED;
510 }
511 schedule_work(&info->irq_work);
512
513 return IRQ_HANDLED;
514}
515
516static void sm5502_muic_detect_cable_wq(struct work_struct *work)
517{
518 struct sm5502_muic_info *info = container_of(to_delayed_work(work),
519 struct sm5502_muic_info, wq_detcable);
520 int ret;
521
522 /* Notify the state of connector cable or not */
523 ret = sm5502_muic_cable_handler(info, true);
524 if (ret < 0)
525 dev_warn(info->dev, "failed to detect cable state\n");
526}
527
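/*
 * Verify the I2C link by reading the DEVICE_ID register, then program
 * the power-up defaults from sm5502_reg_data.  Entries with the invert
 * flag set are written as-is; the others are complemented first.
 */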
528static void sm5502_init_dev_type(struct sm5502_muic_info *info)
529{
530 unsigned int reg_data, vendor_id, version_id;
531 int i, ret;
532
 533	/* To test the I2C link, print the version_id and vendor_id of the SM5502 */
534 ret = regmap_read(info->regmap, SM5502_REG_DEVICE_ID, &reg_data);
535 if (ret) {
536 dev_err(info->dev,
537 "failed to read DEVICE_ID register: %d\n", ret);
538 return;
539 }
540
541 vendor_id = ((reg_data & SM5502_REG_DEVICE_ID_VENDOR_MASK) >>
542 SM5502_REG_DEVICE_ID_VENDOR_SHIFT);
543 version_id = ((reg_data & SM5502_REG_DEVICE_ID_VERSION_MASK) >>
544 SM5502_REG_DEVICE_ID_VERSION_SHIFT);
545
546 dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
547 version_id, vendor_id);
548
 549	/* Initialize the SM5502 registers to bring up the device */
550 for (i = 0; i < info->num_reg_data; i++) {
551 unsigned int val = 0;
552
553 if (!info->reg_data[i].invert)
554 val |= ~info->reg_data[i].val;
555 else
556 val = info->reg_data[i].val;
557 regmap_write(info->regmap, info->reg_data[i].reg, val);
558 }
559}
560
561static int sm5022_muic_i2c_probe(struct i2c_client *i2c,
562 const struct i2c_device_id *id)
563{
564 struct device_node *np = i2c->dev.of_node;
565 struct sm5502_muic_info *info;
566 int i, ret, irq_flags;
567
568 if (!np)
569 return -EINVAL;
570
571 info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL);
572 if (!info)
573 return -ENOMEM;
574 i2c_set_clientdata(i2c, info);
575
576 info->dev = &i2c->dev;
577 info->i2c = i2c;
578 info->irq = i2c->irq;
579 info->muic_irqs = sm5502_muic_irqs;
580 info->num_muic_irqs = ARRAY_SIZE(sm5502_muic_irqs);
581 info->reg_data = sm5502_reg_data;
582 info->num_reg_data = ARRAY_SIZE(sm5502_reg_data);
583
584 mutex_init(&info->mutex);
585
586 INIT_WORK(&info->irq_work, sm5502_muic_irq_work);
587
588 info->regmap = devm_regmap_init_i2c(i2c, &sm5502_muic_regmap_config);
589 if (IS_ERR(info->regmap)) {
590 ret = PTR_ERR(info->regmap);
591 dev_err(info->dev, "failed to allocate register map: %d\n",
592 ret);
593 return ret;
594 }
595
596 /* Support irq domain for SM5502 MUIC device */
597 irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT | IRQF_SHARED;
598 ret = regmap_add_irq_chip(info->regmap, info->irq, irq_flags, 0,
599 &sm5502_muic_irq_chip, &info->irq_data);
600 if (ret != 0) {
601 dev_err(info->dev, "failed to request IRQ %d: %d\n",
602 info->irq, ret);
603 return ret;
604 }
605
606 for (i = 0; i < info->num_muic_irqs; i++) {
607 struct muic_irq *muic_irq = &info->muic_irqs[i];
608 unsigned int virq = 0;
609
610 virq = regmap_irq_get_virq(info->irq_data, muic_irq->irq);
611 if (virq <= 0)
612 return -EINVAL;
613 muic_irq->virq = virq;
614
615 ret = devm_request_threaded_irq(info->dev, virq, NULL,
616 sm5502_muic_irq_handler,
617 IRQF_NO_SUSPEND,
618 muic_irq->name, info);
619 if (ret) {
620 dev_err(info->dev, "failed: irq request (IRQ: %d,"
621 " error :%d)\n", muic_irq->irq, ret);
622 return ret;
623 }
624 }
625
626 /* Allocate extcon device */
627 info->edev = devm_extcon_dev_allocate(info->dev, sm5502_extcon_cable);
628 if (IS_ERR(info->edev)) {
629 dev_err(info->dev, "failed to allocate memory for extcon\n");
630 return -ENOMEM;
631 }
632 info->edev->name = np->name;
633
634 /* Register extcon device */
635 ret = devm_extcon_dev_register(info->dev, info->edev);
636 if (ret) {
637 dev_err(info->dev, "failed to register extcon device\n");
638 return ret;
639 }
640
641 /*
 642	 * Detect the accessory after platform initialization has completed.
 643	 *
 644	 * - Use a delayed workqueue to detect the cable state and then
 645	 *   notify it to the notifiee/platform through a uevent.  Only after
 646	 *   the platform has finished booting should the extcon provider
 647	 *   driver report the cable state to the upper layer.
648 */
649 INIT_DELAYED_WORK(&info->wq_detcable, sm5502_muic_detect_cable_wq);
650 queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
651 msecs_to_jiffies(DELAY_MS_DEFAULT));
652
653 /* Initialize SM5502 device and print vendor id and version id */
654 sm5502_init_dev_type(info);
655
656 return 0;
657}
658
659static int sm5502_muic_i2c_remove(struct i2c_client *i2c)
660{
661 struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
662
663 regmap_del_irq_chip(info->irq, info->irq_data);
664
665 return 0;
666}
667
668static struct of_device_id sm5502_dt_match[] = {
669 { .compatible = "siliconmitus,sm5502-muic" },
670 { },
671};
672
673#ifdef CONFIG_PM_SLEEP
674static int sm5502_muic_suspend(struct device *dev)
675{
676 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
677 struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
678
679 enable_irq_wake(info->irq);
680
681 return 0;
682}
683
684static int sm5502_muic_resume(struct device *dev)
685{
686 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
687 struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
688
689 disable_irq_wake(info->irq);
690
691 return 0;
692}
693#endif
694
695static SIMPLE_DEV_PM_OPS(sm5502_muic_pm_ops,
696 sm5502_muic_suspend, sm5502_muic_resume);
697
698static const struct i2c_device_id sm5502_i2c_id[] = {
699 { "sm5502", TYPE_SM5502 },
700 { }
701};
702MODULE_DEVICE_TABLE(i2c, sm5502_i2c_id);
703
704static struct i2c_driver sm5502_muic_i2c_driver = {
705 .driver = {
706 .name = "sm5502",
707 .owner = THIS_MODULE,
708 .pm = &sm5502_muic_pm_ops,
709 .of_match_table = sm5502_dt_match,
710 },
711 .probe = sm5022_muic_i2c_probe,
712 .remove = sm5502_muic_i2c_remove,
713 .id_table = sm5502_i2c_id,
714};
715
716static int __init sm5502_muic_i2c_init(void)
717{
718 return i2c_add_driver(&sm5502_muic_i2c_driver);
719}
720subsys_initcall(sm5502_muic_i2c_init);
721
722MODULE_DESCRIPTION("Silicon Mitus SM5502 Extcon driver");
723MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
724MODULE_LICENSE("GPL");
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 284cf66489f4..531a593912ec 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -808,12 +808,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
808 808
809 *buffer_actual_len = packetlen; 809 *buffer_actual_len = packetlen;
810 810
811 if (packetlen > bufferlen) { 811 if (packetlen > bufferlen)
812 pr_err("Buffer too small - needed %d bytes but "
813 "got space for only %d bytes\n",
814 packetlen, bufferlen);
815 return -ENOBUFS; 812 return -ENOBUFS;
816 }
817 813
818 *requestid = desc.trans_id; 814 *requestid = desc.trans_id;
819 815
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index c276fde318e5..de5e32151a1e 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -618,7 +618,7 @@ static void tpci200_pci_remove(struct pci_dev *dev)
618 __tpci200_pci_remove(tpci200); 618 __tpci200_pci_remove(tpci200);
619} 619}
620 620
621static DEFINE_PCI_DEVICE_TABLE(tpci200_idtable) = { 621static const struct pci_device_id tpci200_idtable[] = {
622 { TPCI200_VENDOR_ID, TPCI200_DEVICE_ID, TPCI200_SUBVENDOR_ID, 622 { TPCI200_VENDOR_ID, TPCI200_DEVICE_ID, TPCI200_SUBVENDOR_ID,
623 TPCI200_SUBDEVICE_ID }, 623 TPCI200_SUBDEVICE_ID },
624 { 0, }, 624 { 0, },
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 141094e7c06e..e41bef048c23 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -177,19 +177,20 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
177 if (channel->nb_bytes == 0) 177 if (channel->nb_bytes == 0)
178 return; 178 return;
179 179
180 spin_lock(&channel->lock);
180 value = channel->tty_port.xmit_buf[*pointer_write]; 181 value = channel->tty_port.xmit_buf[*pointer_write];
181 iowrite8(value, &channel->regs->w.thr); 182 iowrite8(value, &channel->regs->w.thr);
182 channel->stats.tx++; 183 channel->stats.tx++;
183 (*pointer_write)++; 184 (*pointer_write)++;
184 *pointer_write = *pointer_write % PAGE_SIZE; 185 *pointer_write = *pointer_write % PAGE_SIZE;
185 channel->nb_bytes--; 186 channel->nb_bytes--;
187 spin_unlock(&channel->lock);
186} 188}
187 189
188static void ipoctal_irq_channel(struct ipoctal_channel *channel) 190static void ipoctal_irq_channel(struct ipoctal_channel *channel)
189{ 191{
190 u8 isr, sr; 192 u8 isr, sr;
191 193
192 spin_lock(&channel->lock);
193 /* The HW is organized in pair of channels. See which register we need 194 /* The HW is organized in pair of channels. See which register we need
194 * to read from */ 195 * to read from */
195 isr = ioread8(&channel->block_regs->r.isr); 196 isr = ioread8(&channel->block_regs->r.isr);
@@ -213,8 +214,6 @@ static void ipoctal_irq_channel(struct ipoctal_channel *channel)
213 /* TX of each character */ 214 /* TX of each character */
214 if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY)) 215 if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY))
215 ipoctal_irq_tx(channel); 216 ipoctal_irq_tx(channel);
216
217 spin_unlock(&channel->lock);
218} 217}
219 218
220static irqreturn_t ipoctal_irq_handler(void *arg) 219static irqreturn_t ipoctal_irq_handler(void *arg)
@@ -324,13 +323,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
324 &block_regs[i].w.imr); 323 &block_regs[i].w.imr);
325 } 324 }
326 325
327 /*
328 * IP-OCTAL has different addresses to copy its IRQ vector.
329 * Depending of the carrier these addresses are accesible or not.
330 * More info in the datasheet.
331 */
332 ipoctal->dev->bus->ops->request_irq(ipoctal->dev,
333 ipoctal_irq_handler, ipoctal);
334 /* Dummy write */ 326 /* Dummy write */
335 iowrite8(1, ipoctal->mem8_space + 1); 327 iowrite8(1, ipoctal->mem8_space + 1);
336 328
@@ -391,6 +383,14 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
391 dev_set_drvdata(tty_dev, channel); 383 dev_set_drvdata(tty_dev, channel);
392 } 384 }
393 385
386 /*
387 * IP-OCTAL has different addresses to copy its IRQ vector.
 388 * Depending on the carrier, these addresses are accessible or not.

389 * More info in the datasheet.
390 */
391 ipoctal->dev->bus->ops->request_irq(ipoctal->dev,
392 ipoctal_irq_handler, ipoctal);
393
394 return 0; 394 return 0;
395} 395}
396 396
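
The ipoctal change does two things: it narrows the channel spinlock to the transmit-buffer bookkeeping instead of the whole interrupt path, and it requests the IRQ only after everything the handler touches has been initialised. A rough sketch of the narrowed lock, with invented foo_* names and layout:

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

struct foo_channel {
	spinlock_t lock;
	u8 __iomem *thr;		/* transmit holding register */
	unsigned char *xmit_buf;
	unsigned int tail;
	unsigned int nb_bytes;
};

static void foo_irq_tx(struct foo_channel *ch)
{
	u8 value;

	if (ch->nb_bytes == 0)
		return;

	/* Lock only the shared ring state; we are already in IRQ context */
	spin_lock(&ch->lock);
	value = ch->xmit_buf[ch->tail];
	iowrite8(value, ch->thr);
	ch->tail = (ch->tail + 1) % PAGE_SIZE;
	ch->nb_bytes--;
	spin_unlock(&ch->lock);
}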
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 6cc4b6acc22a..fb824f501197 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -384,6 +384,7 @@ config MFD_MAX77693
384 depends on I2C=y 384 depends on I2C=y
385 select MFD_CORE 385 select MFD_CORE
386 select REGMAP_I2C 386 select REGMAP_I2C
387 select REGMAP_IRQ
387 help 388 help
388 Say yes here to add support for Maxim Semiconductor MAX77693. 389 Say yes here to add support for Maxim Semiconductor MAX77693.
389 This is a companion Power Management IC with Flash, Haptic, Charger, 390 This is a companion Power Management IC with Flash, Haptic, Charger,
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 8afedba535c7..8c6e7bba4660 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -116,7 +116,7 @@ obj-$(CONFIG_MFD_DA9063) += da9063.o
116 116
117obj-$(CONFIG_MFD_MAX14577) += max14577.o 117obj-$(CONFIG_MFD_MAX14577) += max14577.o
118obj-$(CONFIG_MFD_MAX77686) += max77686.o max77686-irq.o 118obj-$(CONFIG_MFD_MAX77686) += max77686.o max77686-irq.o
119obj-$(CONFIG_MFD_MAX77693) += max77693.o max77693-irq.o 119obj-$(CONFIG_MFD_MAX77693) += max77693.o
120obj-$(CONFIG_MFD_MAX8907) += max8907.o 120obj-$(CONFIG_MFD_MAX8907) += max8907.o
121max8925-objs := max8925-core.o max8925-i2c.o 121max8925-objs := max8925-core.o max8925-i2c.o
122obj-$(CONFIG_MFD_MAX8925) += max8925.o 122obj-$(CONFIG_MFD_MAX8925) += max8925.o
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
deleted file mode 100644
index 66b58fe77094..000000000000
--- a/drivers/mfd/max77693-irq.c
+++ /dev/null
@@ -1,336 +0,0 @@
1/*
2 * max77693-irq.c - Interrupt controller support for MAX77693
3 *
4 * Copyright (C) 2012 Samsung Electronics Co.Ltd
5 * SangYoung Son <hello.son@samsung.com>
6 *
7 * This program is not provided / owned by Maxim Integrated Products.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * This driver is based on max8997-irq.c
24 */
25
26#include <linux/err.h>
27#include <linux/irq.h>
28#include <linux/interrupt.h>
29#include <linux/module.h>
30#include <linux/irqdomain.h>
31#include <linux/mfd/max77693.h>
32#include <linux/mfd/max77693-private.h>
33
34static const u8 max77693_mask_reg[] = {
35 [LED_INT] = MAX77693_LED_REG_FLASH_INT_MASK,
36 [TOPSYS_INT] = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
37 [CHG_INT] = MAX77693_CHG_REG_CHG_INT_MASK,
38 [MUIC_INT1] = MAX77693_MUIC_REG_INTMASK1,
39 [MUIC_INT2] = MAX77693_MUIC_REG_INTMASK2,
40 [MUIC_INT3] = MAX77693_MUIC_REG_INTMASK3,
41};
42
43static struct regmap *max77693_get_regmap(struct max77693_dev *max77693,
44 enum max77693_irq_source src)
45{
46 switch (src) {
47 case LED_INT ... CHG_INT:
48 return max77693->regmap;
49 case MUIC_INT1 ... MUIC_INT3:
50 return max77693->regmap_muic;
51 default:
52 return ERR_PTR(-EINVAL);
53 }
54}
55
56struct max77693_irq_data {
57 int mask;
58 enum max77693_irq_source group;
59};
60
61#define DECLARE_IRQ(idx, _group, _mask) \
62 [(idx)] = { .group = (_group), .mask = (_mask) }
63static const struct max77693_irq_data max77693_irqs[] = {
64 DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_OPEN, LED_INT, 1 << 0),
65 DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_SHORT, LED_INT, 1 << 1),
66 DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_OPEN, LED_INT, 1 << 2),
67 DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_SHORT, LED_INT, 1 << 3),
68 DECLARE_IRQ(MAX77693_LED_IRQ_MAX_FLASH, LED_INT, 1 << 4),
69
70 DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T120C_INT, TOPSYS_INT, 1 << 0),
71 DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T140C_INT, TOPSYS_INT, 1 << 1),
72 DECLARE_IRQ(MAX77693_TOPSYS_IRQ_LOWSYS_INT, TOPSYS_INT, 1 << 3),
73
74 DECLARE_IRQ(MAX77693_CHG_IRQ_BYP_I, CHG_INT, 1 << 0),
75 DECLARE_IRQ(MAX77693_CHG_IRQ_THM_I, CHG_INT, 1 << 2),
76 DECLARE_IRQ(MAX77693_CHG_IRQ_BAT_I, CHG_INT, 1 << 3),
77 DECLARE_IRQ(MAX77693_CHG_IRQ_CHG_I, CHG_INT, 1 << 4),
78 DECLARE_IRQ(MAX77693_CHG_IRQ_CHGIN_I, CHG_INT, 1 << 6),
79
80 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC, MUIC_INT1, 1 << 0),
81 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_LOW, MUIC_INT1, 1 << 1),
82 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_ERR, MUIC_INT1, 1 << 2),
83 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC1K, MUIC_INT1, 1 << 3),
84
85 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGTYP, MUIC_INT2, 1 << 0),
86 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGDETREUN, MUIC_INT2, 1 << 1),
87 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DCDTMR, MUIC_INT2, 1 << 2),
88 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DXOVP, MUIC_INT2, 1 << 3),
89 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VBVOLT, MUIC_INT2, 1 << 4),
90 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VIDRM, MUIC_INT2, 1 << 5),
91
92 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_EOC, MUIC_INT3, 1 << 0),
93 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CGMBC, MUIC_INT3, 1 << 1),
94 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_OVP, MUIC_INT3, 1 << 2),
95 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR, MUIC_INT3, 1 << 3),
96 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, MUIC_INT3, 1 << 4),
97 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_BAT_DET, MUIC_INT3, 1 << 5),
98};
99
100static void max77693_irq_lock(struct irq_data *data)
101{
102 struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
103
104 mutex_lock(&max77693->irqlock);
105}
106
107static void max77693_irq_sync_unlock(struct irq_data *data)
108{
109 struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
110 int i;
111
112 for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
113 u8 mask_reg = max77693_mask_reg[i];
114 struct regmap *map = max77693_get_regmap(max77693, i);
115
116 if (mask_reg == MAX77693_REG_INVALID ||
117 IS_ERR_OR_NULL(map))
118 continue;
119 max77693->irq_masks_cache[i] = max77693->irq_masks_cur[i];
120
121 max77693_write_reg(map, max77693_mask_reg[i],
122 max77693->irq_masks_cur[i]);
123 }
124
125 mutex_unlock(&max77693->irqlock);
126}
127
128static const inline struct max77693_irq_data *
129irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
130{
131 struct irq_data *data = irq_get_irq_data(irq);
132 return &max77693_irqs[data->hwirq];
133}
134
135static void max77693_irq_mask(struct irq_data *data)
136{
137 struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
138 const struct max77693_irq_data *irq_data =
139 irq_to_max77693_irq(max77693, data->irq);
140
141 if (irq_data->group >= MAX77693_IRQ_GROUP_NR)
142 return;
143
144 if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
145 max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
146 else
147 max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
148}
149
150static void max77693_irq_unmask(struct irq_data *data)
151{
152 struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
153 const struct max77693_irq_data *irq_data =
154 irq_to_max77693_irq(max77693, data->irq);
155
156 if (irq_data->group >= MAX77693_IRQ_GROUP_NR)
157 return;
158
159 if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
160 max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
161 else
162 max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
163}
164
165static struct irq_chip max77693_irq_chip = {
166 .name = "max77693",
167 .irq_bus_lock = max77693_irq_lock,
168 .irq_bus_sync_unlock = max77693_irq_sync_unlock,
169 .irq_mask = max77693_irq_mask,
170 .irq_unmask = max77693_irq_unmask,
171};
172
173#define MAX77693_IRQSRC_CHG (1 << 0)
174#define MAX77693_IRQSRC_TOP (1 << 1)
175#define MAX77693_IRQSRC_FLASH (1 << 2)
176#define MAX77693_IRQSRC_MUIC (1 << 3)
177static irqreturn_t max77693_irq_thread(int irq, void *data)
178{
179 struct max77693_dev *max77693 = data;
180 u8 irq_reg[MAX77693_IRQ_GROUP_NR] = {};
181 u8 irq_src;
182 int ret;
183 int i, cur_irq;
184
185 ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_INTSRC,
186 &irq_src);
187 if (ret < 0) {
188 dev_err(max77693->dev, "Failed to read interrupt source: %d\n",
189 ret);
190 return IRQ_NONE;
191 }
192
193 if (irq_src & MAX77693_IRQSRC_CHG)
194 /* CHG_INT */
195 ret = max77693_read_reg(max77693->regmap, MAX77693_CHG_REG_CHG_INT,
196 &irq_reg[CHG_INT]);
197
198 if (irq_src & MAX77693_IRQSRC_TOP)
199 /* TOPSYS_INT */
200 ret = max77693_read_reg(max77693->regmap,
201 MAX77693_PMIC_REG_TOPSYS_INT, &irq_reg[TOPSYS_INT]);
202
203 if (irq_src & MAX77693_IRQSRC_FLASH)
204 /* LED_INT */
205 ret = max77693_read_reg(max77693->regmap,
206 MAX77693_LED_REG_FLASH_INT, &irq_reg[LED_INT]);
207
208 if (irq_src & MAX77693_IRQSRC_MUIC)
209 /* MUIC INT1 ~ INT3 */
210 max77693_bulk_read(max77693->regmap_muic, MAX77693_MUIC_REG_INT1,
211 MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]);
212
213 /* Apply masking */
214 for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
215 if (i >= MUIC_INT1 && i <= MUIC_INT3)
216 irq_reg[i] &= max77693->irq_masks_cur[i];
217 else
218 irq_reg[i] &= ~max77693->irq_masks_cur[i];
219 }
220
221 /* Report */
222 for (i = 0; i < MAX77693_IRQ_NR; i++) {
223 if (irq_reg[max77693_irqs[i].group] & max77693_irqs[i].mask) {
224 cur_irq = irq_find_mapping(max77693->irq_domain, i);
225 if (cur_irq)
226 handle_nested_irq(cur_irq);
227 }
228 }
229
230 return IRQ_HANDLED;
231}
232
233int max77693_irq_resume(struct max77693_dev *max77693)
234{
235 if (max77693->irq)
236 max77693_irq_thread(0, max77693);
237
238 return 0;
239}
240
241static int max77693_irq_domain_map(struct irq_domain *d, unsigned int irq,
242 irq_hw_number_t hw)
243{
244 struct max77693_dev *max77693 = d->host_data;
245
246 irq_set_chip_data(irq, max77693);
247 irq_set_chip_and_handler(irq, &max77693_irq_chip, handle_edge_irq);
248 irq_set_nested_thread(irq, 1);
249#ifdef CONFIG_ARM
250 set_irq_flags(irq, IRQF_VALID);
251#else
252 irq_set_noprobe(irq);
253#endif
254 return 0;
255}
256
257static struct irq_domain_ops max77693_irq_domain_ops = {
258 .map = max77693_irq_domain_map,
259};
260
261int max77693_irq_init(struct max77693_dev *max77693)
262{
263 struct irq_domain *domain;
264 int i;
265 int ret = 0;
266 u8 intsrc_mask;
267
268 mutex_init(&max77693->irqlock);
269
270 /* Mask individual interrupt sources */
271 for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
272 struct regmap *map;
273 /* MUIC IRQ 0:MASK 1:NOT MASK */
274 /* Other IRQ 1:MASK 0:NOT MASK */
275 if (i >= MUIC_INT1 && i <= MUIC_INT3) {
276 max77693->irq_masks_cur[i] = 0x00;
277 max77693->irq_masks_cache[i] = 0x00;
278 } else {
279 max77693->irq_masks_cur[i] = 0xff;
280 max77693->irq_masks_cache[i] = 0xff;
281 }
282 map = max77693_get_regmap(max77693, i);
283
284 if (IS_ERR_OR_NULL(map))
285 continue;
286 if (max77693_mask_reg[i] == MAX77693_REG_INVALID)
287 continue;
288 if (i >= MUIC_INT1 && i <= MUIC_INT3)
289 max77693_write_reg(map, max77693_mask_reg[i], 0x00);
290 else
291 max77693_write_reg(map, max77693_mask_reg[i], 0xff);
292 }
293
294 domain = irq_domain_add_linear(NULL, MAX77693_IRQ_NR,
295 &max77693_irq_domain_ops, max77693);
296 if (!domain) {
297 dev_err(max77693->dev, "could not create irq domain\n");
298 ret = -ENODEV;
299 goto err_irq;
300 }
301 max77693->irq_domain = domain;
302
303 /* Unmask max77693 interrupt */
304 ret = max77693_read_reg(max77693->regmap,
305 MAX77693_PMIC_REG_INTSRC_MASK, &intsrc_mask);
306 if (ret < 0) {
307 dev_err(max77693->dev, "fail to read PMIC register\n");
308 goto err_irq;
309 }
310
311 intsrc_mask &= ~(MAX77693_IRQSRC_CHG);
312 intsrc_mask &= ~(MAX77693_IRQSRC_FLASH);
313 intsrc_mask &= ~(MAX77693_IRQSRC_MUIC);
314 ret = max77693_write_reg(max77693->regmap,
315 MAX77693_PMIC_REG_INTSRC_MASK, intsrc_mask);
316 if (ret < 0) {
317 dev_err(max77693->dev, "fail to write PMIC register\n");
318 goto err_irq;
319 }
320
321 ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
322 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
323 "max77693-irq", max77693);
324 if (ret)
325 dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
326 max77693->irq, ret);
327
328err_irq:
329 return ret;
330}
331
332void max77693_irq_exit(struct max77693_dev *max77693)
333{
334 if (max77693->irq)
335 free_irq(max77693->irq, max77693);
336}
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 7e05428c756d..249c139ef04a 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -49,62 +49,62 @@ static const struct mfd_cell max77693_devs[] = {
49 { .name = "max77693-haptic", }, 49 { .name = "max77693-haptic", },
50}; 50};
51 51
52int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest) 52static const struct regmap_config max77693_regmap_config = {
53{ 53 .reg_bits = 8,
54 unsigned int val; 54 .val_bits = 8,
55 int ret; 55 .max_register = MAX77693_PMIC_REG_END,
56 56};
57 ret = regmap_read(map, reg, &val);
58 *dest = val;
59
60 return ret;
61}
62EXPORT_SYMBOL_GPL(max77693_read_reg);
63
64int max77693_bulk_read(struct regmap *map, u8 reg, int count, u8 *buf)
65{
66 int ret;
67
68 ret = regmap_bulk_read(map, reg, buf, count);
69
70 return ret;
71}
72EXPORT_SYMBOL_GPL(max77693_bulk_read);
73
74int max77693_write_reg(struct regmap *map, u8 reg, u8 value)
75{
76 int ret;
77
78 ret = regmap_write(map, reg, value);
79
80 return ret;
81}
82EXPORT_SYMBOL_GPL(max77693_write_reg);
83
84int max77693_bulk_write(struct regmap *map, u8 reg, int count, u8 *buf)
85{
86 int ret;
87 57
88 ret = regmap_bulk_write(map, reg, buf, count); 58static const struct regmap_irq max77693_led_irqs[] = {
59 { .mask = LED_IRQ_FLED2_OPEN, },
60 { .mask = LED_IRQ_FLED2_SHORT, },
61 { .mask = LED_IRQ_FLED1_OPEN, },
62 { .mask = LED_IRQ_FLED1_SHORT, },
63 { .mask = LED_IRQ_MAX_FLASH, },
64};
89 65
90 return ret; 66static const struct regmap_irq_chip max77693_led_irq_chip = {
91} 67 .name = "max77693-led",
92EXPORT_SYMBOL_GPL(max77693_bulk_write); 68 .status_base = MAX77693_LED_REG_FLASH_INT,
69 .mask_base = MAX77693_LED_REG_FLASH_INT_MASK,
70 .mask_invert = false,
71 .num_regs = 1,
72 .irqs = max77693_led_irqs,
73 .num_irqs = ARRAY_SIZE(max77693_led_irqs),
74};
93 75
94int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask) 76static const struct regmap_irq max77693_topsys_irqs[] = {
95{ 77 { .mask = TOPSYS_IRQ_T120C_INT, },
96 int ret; 78 { .mask = TOPSYS_IRQ_T140C_INT, },
79 { .mask = TOPSYS_IRQ_LOWSYS_INT, },
80};
97 81
98 ret = regmap_update_bits(map, reg, mask, val); 82static const struct regmap_irq_chip max77693_topsys_irq_chip = {
83 .name = "max77693-topsys",
84 .status_base = MAX77693_PMIC_REG_TOPSYS_INT,
85 .mask_base = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
86 .mask_invert = false,
87 .num_regs = 1,
88 .irqs = max77693_topsys_irqs,
89 .num_irqs = ARRAY_SIZE(max77693_topsys_irqs),
90};
99 91
100 return ret; 92static const struct regmap_irq max77693_charger_irqs[] = {
101} 93 { .mask = CHG_IRQ_BYP_I, },
102EXPORT_SYMBOL_GPL(max77693_update_reg); 94 { .mask = CHG_IRQ_THM_I, },
95 { .mask = CHG_IRQ_BAT_I, },
96 { .mask = CHG_IRQ_CHG_I, },
97 { .mask = CHG_IRQ_CHGIN_I, },
98};
103 99
104static const struct regmap_config max77693_regmap_config = { 100static const struct regmap_irq_chip max77693_charger_irq_chip = {
105 .reg_bits = 8, 101 .name = "max77693-charger",
106 .val_bits = 8, 102 .status_base = MAX77693_CHG_REG_CHG_INT,
107 .max_register = MAX77693_PMIC_REG_END, 103 .mask_base = MAX77693_CHG_REG_CHG_INT_MASK,
104 .mask_invert = false,
105 .num_regs = 1,
106 .irqs = max77693_charger_irqs,
107 .num_irqs = ARRAY_SIZE(max77693_charger_irqs),
108}; 108};
109 109
110static const struct regmap_config max77693_regmap_muic_config = { 110static const struct regmap_config max77693_regmap_muic_config = {
@@ -113,11 +113,42 @@ static const struct regmap_config max77693_regmap_muic_config = {
113 .max_register = MAX77693_MUIC_REG_END, 113 .max_register = MAX77693_MUIC_REG_END,
114}; 114};
115 115
116static const struct regmap_irq max77693_muic_irqs[] = {
117 { .reg_offset = 0, .mask = MUIC_IRQ_INT1_ADC, },
118 { .reg_offset = 0, .mask = MUIC_IRQ_INT1_ADC_LOW, },
119 { .reg_offset = 0, .mask = MUIC_IRQ_INT1_ADC_ERR, },
120 { .reg_offset = 0, .mask = MUIC_IRQ_INT1_ADC1K, },
121
122 { .reg_offset = 1, .mask = MUIC_IRQ_INT2_CHGTYP, },
123 { .reg_offset = 1, .mask = MUIC_IRQ_INT2_CHGDETREUN, },
124 { .reg_offset = 1, .mask = MUIC_IRQ_INT2_DCDTMR, },
125 { .reg_offset = 1, .mask = MUIC_IRQ_INT2_DXOVP, },
126 { .reg_offset = 1, .mask = MUIC_IRQ_INT2_VBVOLT, },
127 { .reg_offset = 1, .mask = MUIC_IRQ_INT2_VIDRM, },
128
129 { .reg_offset = 2, .mask = MUIC_IRQ_INT3_EOC, },
130 { .reg_offset = 2, .mask = MUIC_IRQ_INT3_CGMBC, },
131 { .reg_offset = 2, .mask = MUIC_IRQ_INT3_OVP, },
132 { .reg_offset = 2, .mask = MUIC_IRQ_INT3_MBCCHG_ERR, },
133 { .reg_offset = 2, .mask = MUIC_IRQ_INT3_CHG_ENABLED, },
134 { .reg_offset = 2, .mask = MUIC_IRQ_INT3_BAT_DET, },
135};
136
137static const struct regmap_irq_chip max77693_muic_irq_chip = {
138 .name = "max77693-muic",
139 .status_base = MAX77693_MUIC_REG_INT1,
140 .mask_base = MAX77693_MUIC_REG_INTMASK1,
141 .mask_invert = true,
142 .num_regs = 3,
143 .irqs = max77693_muic_irqs,
144 .num_irqs = ARRAY_SIZE(max77693_muic_irqs),
145};
146
116static int max77693_i2c_probe(struct i2c_client *i2c, 147static int max77693_i2c_probe(struct i2c_client *i2c,
117 const struct i2c_device_id *id) 148 const struct i2c_device_id *id)
118{ 149{
119 struct max77693_dev *max77693; 150 struct max77693_dev *max77693;
120 u8 reg_data; 151 unsigned int reg_data;
121 int ret = 0; 152 int ret = 0;
122 153
123 max77693 = devm_kzalloc(&i2c->dev, 154 max77693 = devm_kzalloc(&i2c->dev,
@@ -139,7 +170,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
139 return ret; 170 return ret;
140 } 171 }
141 172
142 ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2, 173 ret = regmap_read(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
143 &reg_data); 174 &reg_data);
144 if (ret < 0) { 175 if (ret < 0) {
145 dev_err(max77693->dev, "device not found on this channel\n"); 176 dev_err(max77693->dev, "device not found on this channel\n");
@@ -176,9 +207,45 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
176 goto err_regmap_muic; 207 goto err_regmap_muic;
177 } 208 }
178 209
179 ret = max77693_irq_init(max77693); 210 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
180 if (ret < 0) 211 IRQF_ONESHOT | IRQF_SHARED |
181 goto err_irq; 212 IRQF_TRIGGER_FALLING, 0,
213 &max77693_led_irq_chip,
214 &max77693->irq_data_led);
215 if (ret) {
216 dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
217 goto err_regmap_muic;
218 }
219
220 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
221 IRQF_ONESHOT | IRQF_SHARED |
222 IRQF_TRIGGER_FALLING, 0,
223 &max77693_topsys_irq_chip,
224 &max77693->irq_data_topsys);
225 if (ret) {
226 dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
227 goto err_irq_topsys;
228 }
229
230 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
231 IRQF_ONESHOT | IRQF_SHARED |
232 IRQF_TRIGGER_FALLING, 0,
233 &max77693_charger_irq_chip,
234 &max77693->irq_data_charger);
235 if (ret) {
236 dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
237 goto err_irq_charger;
238 }
239
240 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
241 IRQF_ONESHOT | IRQF_SHARED |
242 IRQF_TRIGGER_FALLING, 0,
243 &max77693_muic_irq_chip,
244 &max77693->irq_data_muic);
245 if (ret) {
246 dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
247 goto err_irq_muic;
248 }
182 249
183 pm_runtime_set_active(max77693->dev); 250 pm_runtime_set_active(max77693->dev);
184 251
@@ -190,8 +257,14 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
190 return ret; 257 return ret;
191 258
192err_mfd: 259err_mfd:
193 max77693_irq_exit(max77693); 260 mfd_remove_devices(max77693->dev);
194err_irq: 261 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
262err_irq_muic:
263 regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
264err_irq_charger:
265 regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
266err_irq_topsys:
267 regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
195err_regmap_muic: 268err_regmap_muic:
196 i2c_unregister_device(max77693->haptic); 269 i2c_unregister_device(max77693->haptic);
197err_i2c_haptic: 270err_i2c_haptic:
@@ -204,7 +277,12 @@ static int max77693_i2c_remove(struct i2c_client *i2c)
204 struct max77693_dev *max77693 = i2c_get_clientdata(i2c); 277 struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
205 278
206 mfd_remove_devices(max77693->dev); 279 mfd_remove_devices(max77693->dev);
207 max77693_irq_exit(max77693); 280
281 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
282 regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
283 regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
284 regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
285
208 i2c_unregister_device(max77693->muic); 286 i2c_unregister_device(max77693->muic);
209 i2c_unregister_device(max77693->haptic); 287 i2c_unregister_device(max77693->haptic);
210 288
@@ -222,8 +300,11 @@ static int max77693_suspend(struct device *dev)
222 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); 300 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
223 struct max77693_dev *max77693 = i2c_get_clientdata(i2c); 301 struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
224 302
225 if (device_may_wakeup(dev)) 303 if (device_may_wakeup(dev)) {
226 irq_set_irq_wake(max77693->irq, 1); 304 enable_irq_wake(max77693->irq);
305 disable_irq(max77693->irq);
306 }
307
227 return 0; 308 return 0;
228} 309}
229 310
@@ -232,9 +313,12 @@ static int max77693_resume(struct device *dev)
232 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); 313 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
233 struct max77693_dev *max77693 = i2c_get_clientdata(i2c); 314 struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
234 315
235 if (device_may_wakeup(dev)) 316 if (device_may_wakeup(dev)) {
236 irq_set_irq_wake(max77693->irq, 0); 317 disable_irq_wake(max77693->irq);
237 return max77693_irq_resume(max77693); 318 enable_irq(max77693->irq);
319 }
320
321 return 0;
238} 322}
239 323
240static const struct dev_pm_ops max77693_pm = { 324static const struct dev_pm_ops max77693_pm = {
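
The max77693 conversion replaces the hand-rolled irq_chip above with the generic regmap-irq helper: each block declares a table of regmap_irq entries plus a regmap_irq_chip, and probe hands them to regmap_add_irq_chip(), which then owns masking, status reads and the nested handlers. A condensed sketch with hypothetical register and bit names; the driver's real tables are in the hunks above.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

#define FOO_REG_INT		0x10	/* hypothetical status register */
#define FOO_REG_INT_MASK	0x11	/* hypothetical mask register */
#define FOO_IRQ_ALERT		BIT(0)

static const struct regmap_irq foo_irqs[] = {
	{ .reg_offset = 0, .mask = FOO_IRQ_ALERT, },
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name		= "foo",
	.status_base	= FOO_REG_INT,
	.mask_base	= FOO_REG_INT_MASK,
	.num_regs	= 1,
	.irqs		= foo_irqs,
	.num_irqs	= ARRAY_SIZE(foo_irqs),
};

/* In probe: let regmap-irq own masking, ack and the nested handlers */
static int foo_setup_irq(struct regmap *map, int irq,
			 struct regmap_irq_chip_data **data)
{
	int ret = regmap_add_irq_chip(map, irq,
				      IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
				      0, &foo_irq_chip, data);
	if (ret)
		return ret;

	/* Sub-drivers translate a local index into a Linux virq */
	return regmap_irq_get_virq(*data, 0);
}

Consumer cells (charger, LED, MUIC) would then call regmap_irq_get_virq() on the returned chip data instead of going through the removed irq domain.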
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 99a04686e45f..7b55f8a152d4 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1185,7 +1185,7 @@ static int bh1770_probe(struct i2c_client *client,
1185 struct bh1770_chip *chip; 1185 struct bh1770_chip *chip;
1186 int err; 1186 int err;
1187 1187
1188 chip = kzalloc(sizeof *chip, GFP_KERNEL); 1188 chip = devm_kzalloc(&client->dev, sizeof *chip, GFP_KERNEL);
1189 if (!chip) 1189 if (!chip)
1190 return -ENOMEM; 1190 return -ENOMEM;
1191 1191
@@ -1198,8 +1198,7 @@ static int bh1770_probe(struct i2c_client *client,
1198 1198
1199 if (client->dev.platform_data == NULL) { 1199 if (client->dev.platform_data == NULL) {
1200 dev_err(&client->dev, "platform data is mandatory\n"); 1200 dev_err(&client->dev, "platform data is mandatory\n");
1201 err = -EINVAL; 1201 return -EINVAL;
1202 goto fail1;
1203 } 1202 }
1204 1203
1205 chip->pdata = client->dev.platform_data; 1204 chip->pdata = client->dev.platform_data;
@@ -1224,24 +1223,24 @@ static int bh1770_probe(struct i2c_client *client,
1224 chip->regs[0].supply = reg_vcc; 1223 chip->regs[0].supply = reg_vcc;
1225 chip->regs[1].supply = reg_vleds; 1224 chip->regs[1].supply = reg_vleds;
1226 1225
1227 err = regulator_bulk_get(&client->dev, 1226 err = devm_regulator_bulk_get(&client->dev,
1228 ARRAY_SIZE(chip->regs), chip->regs); 1227 ARRAY_SIZE(chip->regs), chip->regs);
1229 if (err < 0) { 1228 if (err < 0) {
1230 dev_err(&client->dev, "Cannot get regulators\n"); 1229 dev_err(&client->dev, "Cannot get regulators\n");
1231 goto fail1; 1230 return err;
1232 } 1231 }
1233 1232
1234 err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), 1233 err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
1235 chip->regs); 1234 chip->regs);
1236 if (err < 0) { 1235 if (err < 0) {
1237 dev_err(&client->dev, "Cannot enable regulators\n"); 1236 dev_err(&client->dev, "Cannot enable regulators\n");
1238 goto fail2; 1237 return err;
1239 } 1238 }
1240 1239
1241 usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2); 1240 usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
1242 err = bh1770_detect(chip); 1241 err = bh1770_detect(chip);
1243 if (err < 0) 1242 if (err < 0)
1244 goto fail3; 1243 goto fail0;
1245 1244
1246 /* Start chip */ 1245 /* Start chip */
1247 bh1770_chip_on(chip); 1246 bh1770_chip_on(chip);
@@ -1252,14 +1251,14 @@ static int bh1770_probe(struct i2c_client *client,
1252 if (chip->lux_corr == 0) { 1251 if (chip->lux_corr == 0) {
1253 dev_err(&client->dev, "Improper correction values\n"); 1252 dev_err(&client->dev, "Improper correction values\n");
1254 err = -EINVAL; 1253 err = -EINVAL;
1255 goto fail3; 1254 goto fail0;
1256 } 1255 }
1257 1256
1258 if (chip->pdata->setup_resources) { 1257 if (chip->pdata->setup_resources) {
1259 err = chip->pdata->setup_resources(); 1258 err = chip->pdata->setup_resources();
1260 if (err) { 1259 if (err) {
1261 err = -EINVAL; 1260 err = -EINVAL;
1262 goto fail3; 1261 goto fail0;
1263 } 1262 }
1264 } 1263 }
1265 1264
@@ -1267,7 +1266,7 @@ static int bh1770_probe(struct i2c_client *client,
1267 &bh1770_attribute_group); 1266 &bh1770_attribute_group);
1268 if (err < 0) { 1267 if (err < 0) {
1269 dev_err(&chip->client->dev, "Sysfs registration failed\n"); 1268 dev_err(&chip->client->dev, "Sysfs registration failed\n");
1270 goto fail4; 1269 goto fail1;
1271 } 1270 }
1272 1271
1273 /* 1272 /*
@@ -1283,22 +1282,18 @@ static int bh1770_probe(struct i2c_client *client,
1283 if (err) { 1282 if (err) {
1284 dev_err(&client->dev, "could not get IRQ %d\n", 1283 dev_err(&client->dev, "could not get IRQ %d\n",
1285 client->irq); 1284 client->irq);
1286 goto fail5; 1285 goto fail2;
1287 } 1286 }
1288 regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); 1287 regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
1289 return err; 1288 return err;
1290fail5: 1289fail2:
1291 sysfs_remove_group(&chip->client->dev.kobj, 1290 sysfs_remove_group(&chip->client->dev.kobj,
1292 &bh1770_attribute_group); 1291 &bh1770_attribute_group);
1293fail4: 1292fail1:
1294 if (chip->pdata->release_resources) 1293 if (chip->pdata->release_resources)
1295 chip->pdata->release_resources(); 1294 chip->pdata->release_resources();
1296fail3: 1295fail0:
1297 regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs); 1296 regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
1298fail2:
1299 regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
1300fail1:
1301 kfree(chip);
1302 return err; 1297 return err;
1303} 1298}
1304 1299
@@ -1322,8 +1317,6 @@ static int bh1770_remove(struct i2c_client *client)
1322 pm_runtime_disable(&client->dev); 1317 pm_runtime_disable(&client->dev);
1323 pm_runtime_set_suspended(&client->dev); 1318 pm_runtime_set_suspended(&client->dev);
1324 1319
1325 regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
1326 kfree(chip);
1327 return 0; 1320 return 0;
1328} 1321}
1329 1322
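
The bh1770 probe above moves to managed resources: devm_kzalloc() and devm_regulator_bulk_get() tie the allocation and the regulator handles to the device, so several goto labels and the kfree()/regulator_bulk_free() calls in remove() disappear. A minimal sketch of the shape, with illustrative names and supplies:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

struct foo_chip {
	struct regulator_bulk_data regs[2];
};

static int foo_probe_resources(struct i2c_client *client)
{
	struct foo_chip *chip;
	int err;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->regs[0].supply = "vcc";
	chip->regs[1].supply = "vleds";

	err = devm_regulator_bulk_get(&client->dev, ARRAY_SIZE(chip->regs),
				      chip->regs);
	if (err < 0)
		return err;	/* nothing to unwind: devm frees it all */

	i2c_set_clientdata(client, chip);
	return 0;
}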
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 48ea33d15a79..4c4a59b25537 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -149,50 +149,35 @@ static int bh1780_probe(struct i2c_client *client,
149 const struct i2c_device_id *id) 149 const struct i2c_device_id *id)
150{ 150{
151 int ret; 151 int ret;
152 struct bh1780_data *ddata = NULL; 152 struct bh1780_data *ddata;
153 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 153 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
154 154
155 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) { 155 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
156 ret = -EIO; 156 return -EIO;
157 goto err_op_failed;
158 }
159 157
160 ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL); 158 ddata = devm_kzalloc(&client->dev, sizeof(struct bh1780_data),
161 if (ddata == NULL) { 159 GFP_KERNEL);
162 ret = -ENOMEM; 160 if (ddata == NULL)
163 goto err_op_failed; 161 return -ENOMEM;
164 }
165 162
166 ddata->client = client; 163 ddata->client = client;
167 i2c_set_clientdata(client, ddata); 164 i2c_set_clientdata(client, ddata);
168 165
169 ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID"); 166 ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
170 if (ret < 0) 167 if (ret < 0)
171 goto err_op_failed; 168 return ret;
172 169
173 dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n", 170 dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
174 (ret & BH1780_REVMASK)); 171 (ret & BH1780_REVMASK));
175 172
176 mutex_init(&ddata->lock); 173 mutex_init(&ddata->lock);
177 174
178 ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group); 175 return sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
179 if (ret)
180 goto err_op_failed;
181
182 return 0;
183
184err_op_failed:
185 kfree(ddata);
186 return ret;
187} 176}
188 177
189static int bh1780_remove(struct i2c_client *client) 178static int bh1780_remove(struct i2c_client *client)
190{ 179{
191 struct bh1780_data *ddata;
192
193 ddata = i2c_get_clientdata(client);
194 sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); 180 sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
195 kfree(ddata);
196 181
197 return 0; 182 return 0;
198} 183}
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 14d90eae605b..55e913b7eb11 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -954,10 +954,7 @@ static int data_debugfs_init(struct fpga_device *priv)
954{ 954{
955 priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv, 955 priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
956 &data_debug_fops); 956 &data_debug_fops);
957 if (IS_ERR(priv->dbg_entry)) 957 return PTR_ERR_OR_ZERO(priv->dbg_entry);
958 return PTR_ERR(priv->dbg_entry);
959
960 return 0;
961} 958}
962 959
963static void data_debugfs_exit(struct fpga_device *priv) 960static void data_debugfs_exit(struct fpga_device *priv)
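
The carma-fpga hunk is a straight conversion to PTR_ERR_OR_ZERO(), which folds the usual "IS_ERR() ? PTR_ERR() : 0" tail into one expression. A tiny sketch; the debugfs node name is made up.

#include <linux/debugfs.h>
#include <linux/err.h>

static int foo_debugfs_init(struct dentry **out)
{
	*out = debugfs_create_dir("foo", NULL);	/* hypothetical node */
	return PTR_ERR_OR_ZERO(*out);
}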
diff --git a/drivers/misc/dummy-irq.c b/drivers/misc/dummy-irq.c
index 4d0db15df115..acbbe0390be4 100644
--- a/drivers/misc/dummy-irq.c
+++ b/drivers/misc/dummy-irq.c
@@ -61,3 +61,4 @@ MODULE_LICENSE("GPL");
61MODULE_AUTHOR("Jiri Kosina"); 61MODULE_AUTHOR("Jiri Kosina");
62module_param(irq, uint, 0444); 62module_param(irq, uint, 0444);
63MODULE_PARM_DESC(irq, "The IRQ to register for"); 63MODULE_PARM_DESC(irq, "The IRQ to register for");
64MODULE_DESCRIPTION("Dummy IRQ handler driver");
diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig
index 6069d8cd79d7..4c0a033cbfdb 100644
--- a/drivers/misc/genwqe/Kconfig
+++ b/drivers/misc/genwqe/Kconfig
@@ -11,3 +11,9 @@ menuconfig GENWQE
11 Enables PCIe card driver for IBM GenWQE accelerators. 11 Enables PCIe card driver for IBM GenWQE accelerators.
12 The user-space interface is described in 12 The user-space interface is described in
13 include/linux/genwqe/genwqe_card.h. 13 include/linux/genwqe/genwqe_card.h.
14
15config GENWQE_PLATFORM_ERROR_RECOVERY
16 int "Use platform recovery procedures (0=off, 1=on)"
17 depends on GENWQE
18 default 1 if PPC64
19 default 0
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 74d51c9bb858..43bbabc96b6c 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -38,7 +38,6 @@
38#include <linux/notifier.h> 38#include <linux/notifier.h>
39#include <linux/device.h> 39#include <linux/device.h>
40#include <linux/log2.h> 40#include <linux/log2.h>
41#include <linux/genwqe/genwqe_card.h>
42 41
43#include "card_base.h" 42#include "card_base.h"
44#include "card_ddcb.h" 43#include "card_ddcb.h"
@@ -58,7 +57,7 @@ static struct dentry *debugfs_genwqe;
58static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX]; 57static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX];
59 58
60/* PCI structure for identifying device by PCI vendor and device ID */ 59/* PCI structure for identifying device by PCI vendor and device ID */
61static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = { 60static const struct pci_device_id genwqe_device_table[] = {
62 { .vendor = PCI_VENDOR_ID_IBM, 61 { .vendor = PCI_VENDOR_ID_IBM,
63 .device = PCI_DEVICE_GENWQE, 62 .device = PCI_DEVICE_GENWQE,
64 .subvendor = PCI_SUBVENDOR_ID_IBM, 63 .subvendor = PCI_SUBVENDOR_ID_IBM,
@@ -140,6 +139,12 @@ static struct genwqe_dev *genwqe_dev_alloc(void)
140 cd->class_genwqe = class_genwqe; 139 cd->class_genwqe = class_genwqe;
141 cd->debugfs_genwqe = debugfs_genwqe; 140 cd->debugfs_genwqe = debugfs_genwqe;
142 141
142 /*
 143	 * This comes from the kernel config option and can be overwritten via
144 * debugfs.
145 */
146 cd->use_platform_recovery = CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY;
147
143 init_waitqueue_head(&cd->queue_waitq); 148 init_waitqueue_head(&cd->queue_waitq);
144 149
145 spin_lock_init(&cd->file_lock); 150 spin_lock_init(&cd->file_lock);
@@ -761,6 +766,124 @@ static u64 genwqe_fir_checking(struct genwqe_dev *cd)
761} 766}
762 767
763/** 768/**
769 * genwqe_pci_fundamental_reset() - trigger a PCIe fundamental reset on the slot
770 *
771 * Note: pci_set_pcie_reset_state() is not implemented on all archs, so this
772 * reset method will not work in all cases.
773 *
774 * Return: 0 on success or error code from pci_set_pcie_reset_state()
775 */
776static int genwqe_pci_fundamental_reset(struct pci_dev *pci_dev)
777{
778 int rc;
779
780 /*
781 * lock pci config space access from userspace,
782 * save state and issue PCIe fundamental reset
783 */
784 pci_cfg_access_lock(pci_dev);
785 pci_save_state(pci_dev);
786 rc = pci_set_pcie_reset_state(pci_dev, pcie_warm_reset);
787 if (!rc) {
788 /* keep PCIe reset asserted for 250ms */
789 msleep(250);
790 pci_set_pcie_reset_state(pci_dev, pcie_deassert_reset);
791 /* Wait for 2s to reload flash and train the link */
792 msleep(2000);
793 }
794 pci_restore_state(pci_dev);
795 pci_cfg_access_unlock(pci_dev);
796 return rc;
797}
798
799
800static int genwqe_platform_recovery(struct genwqe_dev *cd)
801{
802 struct pci_dev *pci_dev = cd->pci_dev;
803 int rc;
804
805 dev_info(&pci_dev->dev,
806 "[%s] resetting card for error recovery\n", __func__);
807
808 /* Clear out error injection flags */
809 cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
810 GENWQE_INJECT_GFIR_FATAL |
811 GENWQE_INJECT_GFIR_INFO);
812
813 genwqe_stop(cd);
814
 815	/* Try recovering the card with a fundamental reset */
816 rc = genwqe_pci_fundamental_reset(pci_dev);
817 if (!rc) {
818 rc = genwqe_start(cd);
819 if (!rc)
820 dev_info(&pci_dev->dev,
821 "[%s] card recovered\n", __func__);
822 else
823 dev_err(&pci_dev->dev,
824 "[%s] err: cannot start card services! (err=%d)\n",
825 __func__, rc);
826 } else {
827 dev_err(&pci_dev->dev,
828 "[%s] card reset failed\n", __func__);
829 }
830
831 return rc;
832}
833
834/*
835 * genwqe_reload_bistream() - reload card bitstream
836 *
 837 * Set the appropriate register and call a fundamental reset to reload the card
838 * bitstream.
839 *
840 * Return: 0 on success, error code otherwise
841 */
842static int genwqe_reload_bistream(struct genwqe_dev *cd)
843{
844 struct pci_dev *pci_dev = cd->pci_dev;
845 int rc;
846
847 dev_info(&pci_dev->dev,
848 "[%s] resetting card for bitstream reload\n",
849 __func__);
850
851 genwqe_stop(cd);
852
853 /*
854 * Cause a CPLD reprogram with the 'next_bitstream'
855 * partition on PCIe hot or fundamental reset
856 */
857 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
858 (cd->softreset & 0xcull) | 0x70ull);
859
860 rc = genwqe_pci_fundamental_reset(pci_dev);
861 if (rc) {
862 /*
863 * A fundamental reset failure can be caused
864 * by lack of support on the arch, so we just
865 * log the error and try to start the card
866 * again.
867 */
868 dev_err(&pci_dev->dev,
869 "[%s] err: failed to reset card for bitstream reload\n",
870 __func__);
871 }
872
873 rc = genwqe_start(cd);
874 if (rc) {
875 dev_err(&pci_dev->dev,
876 "[%s] err: cannot start card services! (err=%d)\n",
877 __func__, rc);
878 return rc;
879 }
880 dev_info(&pci_dev->dev,
881 "[%s] card reloaded\n", __func__);
882 return 0;
883}
884
885
886/**
764 * genwqe_health_thread() - Health checking thread 887 * genwqe_health_thread() - Health checking thread
765 * 888 *
766 * This thread is only started for the PF of the card. 889 * This thread is only started for the PF of the card.
@@ -786,6 +909,7 @@ static int genwqe_health_thread(void *data)
786 struct pci_dev *pci_dev = cd->pci_dev; 909 struct pci_dev *pci_dev = cd->pci_dev;
787 u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg; 910 u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg;
788 911
912 health_thread_begin:
789 while (!kthread_should_stop()) { 913 while (!kthread_should_stop()) {
790 rc = wait_event_interruptible_timeout(cd->health_waitq, 914 rc = wait_event_interruptible_timeout(cd->health_waitq,
791 (genwqe_health_check_cond(cd, &gfir) || 915 (genwqe_health_check_cond(cd, &gfir) ||
@@ -846,6 +970,13 @@ static int genwqe_health_thread(void *data)
846 } 970 }
847 } 971 }
848 972
973 if (cd->card_state == GENWQE_CARD_RELOAD_BITSTREAM) {
974 /* Userspace requested card bitstream reload */
975 rc = genwqe_reload_bistream(cd);
976 if (rc)
977 goto fatal_error;
978 }
979
849 cd->last_gfir = gfir; 980 cd->last_gfir = gfir;
850 cond_resched(); 981 cond_resched();
851 } 982 }
@@ -853,6 +984,28 @@ static int genwqe_health_thread(void *data)
853 return 0; 984 return 0;
854 985
855 fatal_error: 986 fatal_error:
987 if (cd->use_platform_recovery) {
988 /*
989 * Since we use raw accessors, EEH errors won't be detected
990 * by the platform until we do a non-raw MMIO or config space
991 * read
992 */
993 readq(cd->mmio + IO_SLC_CFGREG_GFIR);
994
995 /* We do nothing if the card is going over PCI recovery */
996 if (pci_channel_offline(pci_dev))
997 return -EIO;
998
999 /*
1000 * If it's supported by the platform, we try a fundamental reset
1001 * to recover from a fatal error. Otherwise, we continue to wait
1002 * for an external recovery procedure to take care of it.
1003 */
1004 rc = genwqe_platform_recovery(cd);
1005 if (!rc)
1006 goto health_thread_begin;
1007 }
1008
856 dev_err(&pci_dev->dev, 1009 dev_err(&pci_dev->dev,
857 "[%s] card unusable. Please trigger unbind!\n", __func__); 1010 "[%s] card unusable. Please trigger unbind!\n", __func__);
858 1011
@@ -958,6 +1111,9 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
958 pci_set_master(pci_dev); 1111 pci_set_master(pci_dev);
959 pci_enable_pcie_error_reporting(pci_dev); 1112 pci_enable_pcie_error_reporting(pci_dev);
960 1113
1114 /* EEH recovery requires PCIe fundamental reset */
1115 pci_dev->needs_freset = 1;
1116
961 /* request complete BAR-0 space (length = 0) */ 1117 /* request complete BAR-0 space (length = 0) */
962 cd->mmio_len = pci_resource_len(pci_dev, 0); 1118 cd->mmio_len = pci_resource_len(pci_dev, 0);
963 cd->mmio = pci_iomap(pci_dev, 0, 0); 1119 cd->mmio = pci_iomap(pci_dev, 0, 0);
@@ -1096,23 +1252,40 @@ static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev,
1096 1252
1097 dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state); 1253 dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state);
1098 1254
1099 if (pci_dev == NULL)
1100 return PCI_ERS_RESULT_NEED_RESET;
1101
1102 cd = dev_get_drvdata(&pci_dev->dev); 1255 cd = dev_get_drvdata(&pci_dev->dev);
1103 if (cd == NULL) 1256 if (cd == NULL)
1104 return PCI_ERS_RESULT_NEED_RESET; 1257 return PCI_ERS_RESULT_DISCONNECT;
1105 1258
1106 switch (state) { 1259 /* Stop the card */
1107 case pci_channel_io_normal: 1260 genwqe_health_check_stop(cd);
1108 return PCI_ERS_RESULT_CAN_RECOVER; 1261 genwqe_stop(cd);
1109 case pci_channel_io_frozen: 1262
1110 return PCI_ERS_RESULT_NEED_RESET; 1263 /*
1111 case pci_channel_io_perm_failure: 1264 * On permanent failure, the PCI code will call device remove
1265 * after the return of this function.
1266 * genwqe_stop() can be called twice.
1267 */
1268 if (state == pci_channel_io_perm_failure) {
1112 return PCI_ERS_RESULT_DISCONNECT; 1269 return PCI_ERS_RESULT_DISCONNECT;
1270 } else {
1271 genwqe_pci_remove(cd);
1272 return PCI_ERS_RESULT_NEED_RESET;
1113 } 1273 }
1274}
1275
1276static pci_ers_result_t genwqe_err_slot_reset(struct pci_dev *pci_dev)
1277{
1278 int rc;
1279 struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
1114 1280
1115 return PCI_ERS_RESULT_NEED_RESET; 1281 rc = genwqe_pci_setup(cd);
1282 if (!rc) {
1283 return PCI_ERS_RESULT_RECOVERED;
1284 } else {
1285 dev_err(&pci_dev->dev,
1286 "err: problems with PCI setup (err=%d)\n", rc);
1287 return PCI_ERS_RESULT_DISCONNECT;
1288 }
1116} 1289}
1117 1290
1118static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev) 1291static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
@@ -1120,8 +1293,22 @@ static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
1120 return PCI_ERS_RESULT_NONE; 1293 return PCI_ERS_RESULT_NONE;
1121} 1294}
1122 1295
1123static void genwqe_err_resume(struct pci_dev *dev) 1296static void genwqe_err_resume(struct pci_dev *pci_dev)
1124{ 1297{
1298 int rc;
1299 struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
1300
1301 rc = genwqe_start(cd);
1302 if (!rc) {
1303 rc = genwqe_health_check_start(cd);
1304 if (rc)
1305 dev_err(&pci_dev->dev,
1306 "err: cannot start health checking! (err=%d)\n",
1307 rc);
1308 } else {
1309 dev_err(&pci_dev->dev,
1310 "err: cannot start card services! (err=%d)\n", rc);
1311 }
1125} 1312}
1126 1313
1127static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs) 1314static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
@@ -1144,7 +1331,7 @@ static struct pci_error_handlers genwqe_err_handler = {
1144 .error_detected = genwqe_err_error_detected, 1331 .error_detected = genwqe_err_error_detected,
1145 .mmio_enabled = genwqe_err_result_none, 1332 .mmio_enabled = genwqe_err_result_none,
1146 .link_reset = genwqe_err_result_none, 1333 .link_reset = genwqe_err_result_none,
1147 .slot_reset = genwqe_err_result_none, 1334 .slot_reset = genwqe_err_slot_reset,
1148 .resume = genwqe_err_resume, 1335 .resume = genwqe_err_resume,
1149}; 1336};
1150 1337
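
Taken together, the new genwqe handlers map the standard AER/EEH callbacks onto the driver's own stop/setup/start helpers: error_detected() quiesces the card, slot_reset() redoes the PCI setup, and resume() restarts card services and health checking. A stripped-down sketch of that wiring; the foo_* helpers are stand-ins, not the genwqe functions.

#include <linux/pci.h>

struct foo_dev;					/* driver-private state */
static void foo_stop(struct foo_dev *cd) { }
static int foo_pci_setup(struct foo_dev *cd) { return 0; }
static int foo_start(struct foo_dev *cd) { return 0; }

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   enum pci_channel_state state)
{
	struct foo_dev *cd = pci_get_drvdata(pdev);

	if (!cd)
		return PCI_ERS_RESULT_DISCONNECT;

	foo_stop(cd);				/* quiesce; safe to repeat */

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	return foo_pci_setup(pci_get_drvdata(pdev)) ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	foo_start(pci_get_drvdata(pdev));	/* bring services back up */
}

static const struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_resume,
};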
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 0e608a288603..67abd8cb2247 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -291,6 +291,8 @@ struct genwqe_dev {
291 struct task_struct *health_thread; 291 struct task_struct *health_thread;
292 wait_queue_head_t health_waitq; 292 wait_queue_head_t health_waitq;
293 293
294 int use_platform_recovery; /* use platform recovery mechanisms */
295
294 /* char device */ 296 /* char device */
295 dev_t devnum_genwqe; /* major/minor num card */ 297 dev_t devnum_genwqe; /* major/minor num card */
296 struct class *class_genwqe; /* reference to class object */ 298 struct class *class_genwqe; /* reference to class object */
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index c8046db2d5a2..dc9851a5540e 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -1118,7 +1118,21 @@ static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
1118 * safer, but slower for the good-case ... See above. 1118 * safer, but slower for the good-case ... See above.
1119 */ 1119 */
1120 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); 1120 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
1121 if ((gfir & GFIR_ERR_TRIGGER) != 0x0) { 1121 if (((gfir & GFIR_ERR_TRIGGER) != 0x0) &&
1122 !pci_channel_offline(pci_dev)) {
1123
1124 if (cd->use_platform_recovery) {
1125 /*
1126 * Since we use raw accessors, EEH errors won't be
1127 * detected by the platform until we do a non-raw
1128 * MMIO or config space read
1129 */
1130 readq(cd->mmio + IO_SLC_CFGREG_GFIR);
1131
1132 /* Don't do anything if the PCI channel is frozen */
1133 if (pci_channel_offline(pci_dev))
1134 goto exit;
1135 }
1122 1136
1123 wake_up_interruptible(&cd->health_waitq); 1137 wake_up_interruptible(&cd->health_waitq);
1124 1138
@@ -1126,12 +1140,12 @@ static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
1126 * By default GFIRs causes recovery actions. This 1140 * By default GFIRs causes recovery actions. This
1127 * count is just for debug when recovery is masked. 1141 * count is just for debug when recovery is masked.
1128 */ 1142 */
1129 printk_ratelimited(KERN_ERR 1143 dev_err_ratelimited(&pci_dev->dev,
1130 "%s %s: [%s] GFIR=%016llx\n", 1144 "[%s] GFIR=%016llx\n",
1131 GENWQE_DEVNAME, dev_name(&pci_dev->dev), 1145 __func__, gfir);
1132 __func__, gfir);
1133 } 1146 }
1134 1147
1148 exit:
1135 return IRQ_HANDLED; 1149 return IRQ_HANDLED;
1136} 1150}
1137 1151
@@ -1237,9 +1251,7 @@ int genwqe_setup_service_layer(struct genwqe_dev *cd)
1237 } 1251 }
1238 1252
1239 rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS); 1253 rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
1240 if (rc > 0) 1254 if (rc) {
1241 rc = genwqe_set_interrupt_capability(cd, rc);
1242 if (rc != 0) {
1243 rc = -ENODEV; 1255 rc = -ENODEV;
1244 goto stop_kthread; 1256 goto stop_kthread;
1245 } 1257 }
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 0a33ade64109..c9b4d6d0eb99 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -485,6 +485,13 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
485 goto err1; 485 goto err1;
486 } 486 }
487 487
488 file = debugfs_create_u32("use_platform_recovery", 0666, root,
489 &cd->use_platform_recovery);
490 if (!file) {
491 ret = -ENOMEM;
492 goto err1;
493 }
494
488 cd->debugfs_root = root; 495 cd->debugfs_root = root;
489 return 0; 496 return 0;
490err1: 497err1:
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 1d2f163a1906..aae42555e2ca 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -1048,10 +1048,15 @@ static long genwqe_ioctl(struct file *filp, unsigned int cmd,
1048 int rc = 0; 1048 int rc = 0;
1049 struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data; 1049 struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
1050 struct genwqe_dev *cd = cfile->cd; 1050 struct genwqe_dev *cd = cfile->cd;
1051 struct pci_dev *pci_dev = cd->pci_dev;
1051 struct genwqe_reg_io __user *io; 1052 struct genwqe_reg_io __user *io;
1052 u64 val; 1053 u64 val;
1053 u32 reg_offs; 1054 u32 reg_offs;
1054 1055
1056 /* Return -EIO if card hit EEH */
1057 if (pci_channel_offline(pci_dev))
1058 return -EIO;
1059
1055 if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE) 1060 if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
1056 return -EINVAL; 1061 return -EINVAL;
1057 1062
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
index a72a99266c3c..7232e40a3ad9 100644
--- a/drivers/misc/genwqe/card_sysfs.c
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -223,6 +223,30 @@ static ssize_t next_bitstream_store(struct device *dev,
223} 223}
224static DEVICE_ATTR_RW(next_bitstream); 224static DEVICE_ATTR_RW(next_bitstream);
225 225
226static ssize_t reload_bitstream_store(struct device *dev,
227 struct device_attribute *attr,
228 const char *buf, size_t count)
229{
230 int reload;
231 struct genwqe_dev *cd = dev_get_drvdata(dev);
232
233 if (kstrtoint(buf, 0, &reload) < 0)
234 return -EINVAL;
235
236 if (reload == 0x1) {
237 if (cd->card_state == GENWQE_CARD_UNUSED ||
238 cd->card_state == GENWQE_CARD_USED)
239 cd->card_state = GENWQE_CARD_RELOAD_BITSTREAM;
240 else
241 return -EIO;
242 } else {
243 return -EINVAL;
244 }
245
246 return count;
247}
248static DEVICE_ATTR_WO(reload_bitstream);
249
226/* 250/*
227 * Create device_attribute structures / params: name, mode, show, store 251 * Create device_attribute structures / params: name, mode, show, store
228 * additional flag if valid in VF 252 * additional flag if valid in VF
@@ -239,6 +263,7 @@ static struct attribute *genwqe_attributes[] = {
239 &dev_attr_status.attr, 263 &dev_attr_status.attr,
240 &dev_attr_freerunning_timer.attr, 264 &dev_attr_freerunning_timer.attr,
241 &dev_attr_queue_working_time.attr, 265 &dev_attr_queue_working_time.attr,
266 &dev_attr_reload_bitstream.attr,
242 NULL, 267 NULL,
243}; 268};
244 269
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 62cc6bb3f62e..a6400f09229c 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -53,12 +53,17 @@
53 */ 53 */
54int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val) 54int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
55{ 55{
56 struct pci_dev *pci_dev = cd->pci_dev;
57
56 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) 58 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
57 return -EIO; 59 return -EIO;
58 60
59 if (cd->mmio == NULL) 61 if (cd->mmio == NULL)
60 return -EIO; 62 return -EIO;
61 63
64 if (pci_channel_offline(pci_dev))
65 return -EIO;
66
62 __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs); 67 __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
63 return 0; 68 return 0;
64} 69}
@@ -99,12 +104,17 @@ u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
99 */ 104 */
100int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val) 105int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
101{ 106{
107 struct pci_dev *pci_dev = cd->pci_dev;
108
102 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE) 109 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
103 return -EIO; 110 return -EIO;
104 111
105 if (cd->mmio == NULL) 112 if (cd->mmio == NULL)
106 return -EIO; 113 return -EIO;
107 114
115 if (pci_channel_offline(pci_dev))
116 return -EIO;
117
108 __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs); 118 __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
109 return 0; 119 return 0;
110} 120}
@@ -718,10 +728,12 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
718 int rc; 728 int rc;
719 struct pci_dev *pci_dev = cd->pci_dev; 729 struct pci_dev *pci_dev = cd->pci_dev;
720 730
721 rc = pci_enable_msi_exact(pci_dev, count); 731 rc = pci_enable_msi_range(pci_dev, 1, count);
722 if (rc == 0) 732 if (rc < 0)
723 cd->flags |= GENWQE_FLAG_MSI_ENABLED; 733 return rc;
724 return rc; 734
735 cd->flags |= GENWQE_FLAG_MSI_ENABLED;
736 return 0;
725} 737}
726 738
727/** 739/**
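
The MSI setup now uses pci_enable_msi_range(pdev, minvec, maxvec), which returns the number of vectors actually allocated (at least minvec) or a negative errno, so the caller in card_ddcb.c only needs a single call and a sign check. A short sketch of the calling convention:

#include <linux/pci.h>

static int foo_setup_msi(struct pci_dev *pdev, int wanted)
{
	/* Ask for up to 'wanted' vectors but accept as few as one */
	int nvec = pci_enable_msi_range(pdev, 1, wanted);

	if (nvec < 0)
		return nvec;		/* no MSI at all: propagate errno */

	return nvec;			/* may be fewer than 'wanted' */
}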
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h
index cd5263163a6e..a506e9aa2d57 100644
--- a/drivers/misc/genwqe/genwqe_driver.h
+++ b/drivers/misc/genwqe/genwqe_driver.h
@@ -36,7 +36,7 @@
36#include <asm/byteorder.h> 36#include <asm/byteorder.h>
37#include <linux/genwqe/genwqe_card.h> 37#include <linux/genwqe/genwqe_card.h>
38 38
39#define DRV_VERS_STRING "2.0.15" 39#define DRV_VERS_STRING "2.0.21"
40 40
41/* 41/*
42 * Static minor number assignement, until we decide/implement 42 * Static minor number assignement, until we decide/implement
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 0a1565e63c71..7ffdb589841e 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -15,6 +15,7 @@
15#include <linux/spi/spi.h> 15#include <linux/spi/spi.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <asm/unaligned.h>
18 19
19#define FIRMWARE_NAME "lattice-ecp3.bit" 20#define FIRMWARE_NAME "lattice-ecp3.bit"
20 21
@@ -91,8 +92,8 @@ static void firmware_load(const struct firmware *fw, void *context)
91 /* Trying to speak with the FPGA via SPI... */ 92 /* Trying to speak with the FPGA via SPI... */
92 txbuf[0] = FPGA_CMD_READ_ID; 93 txbuf[0] = FPGA_CMD_READ_ID;
93 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); 94 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
94 dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]); 95 jedec_id = get_unaligned_be32(&rxbuf[4]);
95 jedec_id = *(u32 *)&rxbuf[4]; 96 dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", jedec_id);
96 97
97 for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) { 98 for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
98 if (jedec_id == ecp3_dev[i].jedec_id) 99 if (jedec_id == ecp3_dev[i].jedec_id)
@@ -109,7 +110,8 @@ static void firmware_load(const struct firmware *fw, void *context)
109 110
110 txbuf[0] = FPGA_CMD_READ_STATUS; 111 txbuf[0] = FPGA_CMD_READ_STATUS;
111 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); 112 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
112 dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]); 113 status = get_unaligned_be32(&rxbuf[4]);
114 dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
113 115
114 buffer = kzalloc(fw->size + 8, GFP_KERNEL); 116 buffer = kzalloc(fw->size + 8, GFP_KERNEL);
115 if (!buffer) { 117 if (!buffer) {
@@ -141,7 +143,7 @@ static void firmware_load(const struct firmware *fw, void *context)
141 for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) { 143 for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
142 txbuf[0] = FPGA_CMD_READ_STATUS; 144 txbuf[0] = FPGA_CMD_READ_STATUS;
143 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); 145 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
144 status = *(u32 *)&rxbuf[4]; 146 status = get_unaligned_be32(&rxbuf[4]);
145 if (status == FPGA_STATUS_CLEARED) 147 if (status == FPGA_STATUS_CLEARED)
146 break; 148 break;
147 149
@@ -164,8 +166,8 @@ static void firmware_load(const struct firmware *fw, void *context)
164 166
165 txbuf[0] = FPGA_CMD_READ_STATUS; 167 txbuf[0] = FPGA_CMD_READ_STATUS;
166 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); 168 ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
167 dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]); 169 status = get_unaligned_be32(&rxbuf[4]);
168 status = *(u32 *)&rxbuf[4]; 170 dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
169 171
170 /* Check result */ 172 /* Check result */
171 if (status & FPGA_STATUS_DONE) 173 if (status & FPGA_STATUS_DONE)
@@ -196,7 +198,7 @@ static int lattice_ecp3_probe(struct spi_device *spi)
196 spi_set_drvdata(spi, data); 198 spi_set_drvdata(spi, data);
197 199
198 init_completion(&data->fw_loaded); 200 init_completion(&data->fw_loaded);
199 err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, 201 err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
200 FIRMWARE_NAME, &spi->dev, 202 FIRMWARE_NAME, &spi->dev,
201 GFP_KERNEL, spi, firmware_load); 203 GFP_KERNEL, spi, firmware_load);
202 if (err) { 204 if (err) {
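
The hunk above replaces raw pointer casts with get_unaligned_be32(). A minimal sketch of the pattern (buffer layout and function name are illustrative only, not the driver's code):

#include <linux/types.h>
#include <asm/unaligned.h>

/*
 * rxbuf[4..7] holds a 32-bit big-endian value at an unaligned offset; a
 * plain *(u32 *) cast may fault on strict-alignment CPUs and returns the
 * wrong byte order on little-endian ones. The helper handles both cases.
 */
static u32 read_fpga_word(const u8 *rxbuf)
{
	return get_unaligned_be32(&rxbuf[4]);
}
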
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index d66a2f24f6b3..b5abe34120b8 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -870,3 +870,4 @@ module_init(lkdtm_module_init);
870module_exit(lkdtm_module_exit); 870module_exit(lkdtm_module_exit);
871 871
872MODULE_LICENSE("GPL"); 872MODULE_LICENSE("GPL");
873MODULE_DESCRIPTION("Kprobe module for testing crash dumps");
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 59d20c599b16..324e1de93687 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -459,7 +459,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
459{ 459{
460 struct mei_device *dev; 460 struct mei_device *dev;
461 struct mei_cl_cb *cb; 461 struct mei_cl_cb *cb;
462 int rets, err; 462 int rets;
463 463
464 if (WARN_ON(!cl || !cl->dev)) 464 if (WARN_ON(!cl || !cl->dev))
465 return -ENODEV; 465 return -ENODEV;
@@ -491,6 +491,7 @@ int mei_cl_disconnect(struct mei_cl *cl)
491 cl_err(dev, cl, "failed to disconnect.\n"); 491 cl_err(dev, cl, "failed to disconnect.\n");
492 goto free; 492 goto free;
493 } 493 }
494 cl->timer_count = MEI_CONNECT_TIMEOUT;
494 mdelay(10); /* Wait for hardware disconnection ready */ 495 mdelay(10); /* Wait for hardware disconnection ready */
495 list_add_tail(&cb->list, &dev->ctrl_rd_list.list); 496 list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
496 } else { 497 } else {
@@ -500,23 +501,18 @@ int mei_cl_disconnect(struct mei_cl *cl)
500 } 501 }
501 mutex_unlock(&dev->device_lock); 502 mutex_unlock(&dev->device_lock);
502 503
503 err = wait_event_timeout(dev->wait_recvd_msg, 504 wait_event_timeout(dev->wait_recvd_msg,
504 MEI_FILE_DISCONNECTED == cl->state, 505 MEI_FILE_DISCONNECTED == cl->state,
505 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 506 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
506 507
507 mutex_lock(&dev->device_lock); 508 mutex_lock(&dev->device_lock);
509
508 if (MEI_FILE_DISCONNECTED == cl->state) { 510 if (MEI_FILE_DISCONNECTED == cl->state) {
509 rets = 0; 511 rets = 0;
510 cl_dbg(dev, cl, "successfully disconnected from FW client.\n"); 512 cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
511 } else { 513 } else {
512 rets = -ENODEV; 514 cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
513 if (MEI_FILE_DISCONNECTED != cl->state) 515 rets = -ETIME;
514 cl_err(dev, cl, "wrong status client disconnect.\n");
515
516 if (err)
517 cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err);
518
519 cl_err(dev, cl, "failed to disconnect from FW client.\n");
520 } 516 }
521 517
522 mei_io_list_flush(&dev->ctrl_rd_list, cl); 518 mei_io_list_flush(&dev->ctrl_rd_list, cl);
@@ -616,6 +612,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file)
616 mutex_lock(&dev->device_lock); 612 mutex_lock(&dev->device_lock);
617 613
618 if (cl->state != MEI_FILE_CONNECTED) { 614 if (cl->state != MEI_FILE_CONNECTED) {
615 cl->state = MEI_FILE_DISCONNECTED;
619 /* something went really wrong */ 616 /* something went really wrong */
620 if (!cl->status) 617 if (!cl->status)
621 cl->status = -EFAULT; 618 cl->status = -EFAULT;
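
For reference, a minimal sketch of the timeout handling adopted above: wait_event_timeout() returns 0 when the condition is still false after the timeout elapses, which the disconnect path now maps to -ETIME instead of -ENODEV (the names below are placeholders, not mei symbols):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_for_disconnect(wait_queue_head_t *wq, bool *disconnected,
			       unsigned long secs)
{
	long left = wait_event_timeout(*wq, *disconnected, secs * HZ);

	return left ? 0 : -ETIME;	/* 0 means the wait timed out */
}
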
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index a7856c0ac576..c5feafdd58a8 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -115,6 +115,7 @@
115#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */ 115#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
116 116
117#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ 117#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
118#define MEI_DEV_ID_WPT_LP_2 0x9CBB /* Wildcat Point LP 2 */
118 119
119/* Host Firmware Status Registers in PCI Config Space */ 120/* Host Firmware Status Registers in PCI Config Space */
120#define PCI_CFG_HFS_1 0x40 121#define PCI_CFG_HFS_1 0x40
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 6a2d272cea43..a9a0d08f758e 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -710,64 +710,10 @@ end:
710 return IRQ_HANDLED; 710 return IRQ_HANDLED;
711} 711}
712 712
713/**
714 * mei_me_fw_status - retrieve fw status from the pci config space
715 *
716 * @dev: the device structure
717 * @fw_status: fw status registers storage
718 *
719 * returns 0 on success an error code otherwise
720 */
721static int mei_me_fw_status(struct mei_device *dev,
722 struct mei_fw_status *fw_status)
723{
724 const u32 pci_cfg_reg[] = {PCI_CFG_HFS_1, PCI_CFG_HFS_2};
725 int i;
726
727 if (!fw_status)
728 return -EINVAL;
729
730 switch (dev->pdev->device) {
731 case MEI_DEV_ID_IBXPK_1:
732 case MEI_DEV_ID_IBXPK_2:
733 case MEI_DEV_ID_CPT_1:
734 case MEI_DEV_ID_PBG_1:
735 case MEI_DEV_ID_PPT_1:
736 case MEI_DEV_ID_PPT_2:
737 case MEI_DEV_ID_PPT_3:
738 case MEI_DEV_ID_LPT_H:
739 case MEI_DEV_ID_LPT_W:
740 case MEI_DEV_ID_LPT_LP:
741 case MEI_DEV_ID_LPT_HR:
742 case MEI_DEV_ID_WPT_LP:
743 fw_status->count = 2;
744 break;
745 case MEI_DEV_ID_ICH10_1:
746 case MEI_DEV_ID_ICH10_2:
747 case MEI_DEV_ID_ICH10_3:
748 case MEI_DEV_ID_ICH10_4:
749 fw_status->count = 1;
750 break;
751 default:
752 fw_status->count = 0;
753 break;
754 }
755
756 for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
757 int ret;
758 ret = pci_read_config_dword(dev->pdev,
759 pci_cfg_reg[i], &fw_status->status[i]);
760 if (ret)
761 return ret;
762 }
763 return 0;
764}
765
766static const struct mei_hw_ops mei_me_hw_ops = { 713static const struct mei_hw_ops mei_me_hw_ops = {
767 714
768 .pg_state = mei_me_pg_state, 715 .pg_state = mei_me_pg_state,
769 716
770 .fw_status = mei_me_fw_status,
771 .host_is_ready = mei_me_host_is_ready, 717 .host_is_ready = mei_me_host_is_ready,
772 718
773 .hw_is_ready = mei_me_hw_is_ready, 719 .hw_is_ready = mei_me_hw_is_ready,
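
The per-device switch statement removed above gives way to generic handling of the firmware status registers. The sketch below only illustrates the table-driven pattern for reading fw status words from PCI config space; the structure and function names are hypothetical, not the driver's real code:

#include <linux/pci.h>

#define FW_STATUS_MAX 2

struct fw_status_cfg {
	int count;			/* number of valid registers */
	u32 reg[FW_STATUS_MAX];		/* PCI config space offsets */
};

static int read_fw_status(struct pci_dev *pdev,
			  const struct fw_status_cfg *cfg, u32 *status)
{
	int i, ret;

	for (i = 0; i < cfg->count; i++) {
		ret = pci_read_config_dword(pdev, cfg->reg[i], &status[i]);
		if (ret)
			return ret;
	}
	return 0;
}
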
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 93273783dec5..f1cd166094f2 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1042,40 +1042,8 @@ end:
1042 return IRQ_HANDLED; 1042 return IRQ_HANDLED;
1043} 1043}
1044 1044
1045
1046/**
1047 * mei_txe_fw_status - retrieve fw status from the pci config space
1048 *
1049 * @dev: the device structure
1050 * @fw_status: fw status registers storage
1051 *
1052 * returns: 0 on success an error code otherwise
1053 */
1054static int mei_txe_fw_status(struct mei_device *dev,
1055 struct mei_fw_status *fw_status)
1056{
1057 const u32 pci_cfg_reg[] = {PCI_CFG_TXE_FW_STS0, PCI_CFG_TXE_FW_STS1};
1058 int i;
1059
1060 if (!fw_status)
1061 return -EINVAL;
1062
1063 fw_status->count = 2;
1064
1065 for (i = 0; i < fw_status->count && i < MEI_FW_STATUS_MAX; i++) {
1066 int ret;
1067 ret = pci_read_config_dword(dev->pdev,
1068 pci_cfg_reg[i], &fw_status->status[i]);
1069 if (ret)
1070 return ret;
1071 }
1072
1073 return 0;
1074}
1075
1076static const struct mei_hw_ops mei_txe_hw_ops = { 1045static const struct mei_hw_ops mei_txe_hw_ops = {
1077 1046
1078 .fw_status = mei_txe_fw_status,
1079 .host_is_ready = mei_txe_host_is_ready, 1047 .host_is_ready = mei_txe_host_is_ready,
1080 1048
1081 .pg_state = mei_txe_pg_state, 1049 .pg_state = mei_txe_pg_state,
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 66f0a1a06451..401a3d526cd0 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -32,7 +32,6 @@
32#include <linux/compat.h> 32#include <linux/compat.h>
33#include <linux/jiffies.h> 33#include <linux/jiffies.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/miscdevice.h>
36 35
37#include <linux/mei.h> 36#include <linux/mei.h>
38 37
@@ -49,19 +48,12 @@
49 */ 48 */
50static int mei_open(struct inode *inode, struct file *file) 49static int mei_open(struct inode *inode, struct file *file)
51{ 50{
52 struct miscdevice *misc = file->private_data;
53 struct pci_dev *pdev;
54 struct mei_cl *cl;
55 struct mei_device *dev; 51 struct mei_device *dev;
52 struct mei_cl *cl;
56 53
57 int err; 54 int err;
58 55
59 if (!misc->parent) 56 dev = container_of(inode->i_cdev, struct mei_device, cdev);
60 return -ENODEV;
61
62 pdev = container_of(misc->parent, struct pci_dev, dev);
63
64 dev = pci_get_drvdata(pdev);
65 if (!dev) 57 if (!dev)
66 return -ENODEV; 58 return -ENODEV;
67 59
@@ -667,46 +659,148 @@ static const struct file_operations mei_fops = {
667 .llseek = no_llseek 659 .llseek = no_llseek
668}; 660};
669 661
670/* 662static struct class *mei_class;
671 * Misc Device Struct 663static dev_t mei_devt;
664#define MEI_MAX_DEVS MINORMASK
665static DEFINE_MUTEX(mei_minor_lock);
666static DEFINE_IDR(mei_idr);
667
668/**
669 * mei_minor_get - obtain next free device minor number
670 *
671 * @dev: device pointer
672 *
673 * returns allocated minor, or -ENOSPC if no free minor left
672 */ 674 */
673static struct miscdevice mei_misc_device = { 675static int mei_minor_get(struct mei_device *dev)
674 .name = "mei", 676{
675 .fops = &mei_fops, 677 int ret;
676 .minor = MISC_DYNAMIC_MINOR, 678
677}; 679 mutex_lock(&mei_minor_lock);
680 ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
681 if (ret >= 0)
682 dev->minor = ret;
683 else if (ret == -ENOSPC)
684 dev_err(&dev->pdev->dev, "too many mei devices\n");
678 685
686 mutex_unlock(&mei_minor_lock);
687 return ret;
688}
679 689
680int mei_register(struct mei_device *dev) 690/**
691 * mei_minor_free - mark device minor number as free
692 *
693 * @dev: device pointer
694 */
695static void mei_minor_free(struct mei_device *dev)
681{ 696{
682 int ret; 697 mutex_lock(&mei_minor_lock);
683 mei_misc_device.parent = &dev->pdev->dev; 698 idr_remove(&mei_idr, dev->minor);
684 ret = misc_register(&mei_misc_device); 699 mutex_unlock(&mei_minor_lock);
685 if (ret) 700}
701
702int mei_register(struct mei_device *dev, struct device *parent)
703{
704 struct device *clsdev; /* class device */
705 int ret, devno;
706
707 ret = mei_minor_get(dev);
708 if (ret < 0)
686 return ret; 709 return ret;
687 710
688 if (mei_dbgfs_register(dev, mei_misc_device.name)) 711 /* Fill in the data structures */
689 dev_err(&dev->pdev->dev, "cannot register debugfs\n"); 712 devno = MKDEV(MAJOR(mei_devt), dev->minor);
713 cdev_init(&dev->cdev, &mei_fops);
714 dev->cdev.owner = mei_fops.owner;
715
716 /* Add the device */
717 ret = cdev_add(&dev->cdev, devno, 1);
718 if (ret) {
719 dev_err(parent, "unable to add device %d:%d\n",
720 MAJOR(mei_devt), dev->minor);
721 goto err_dev_add;
722 }
723
724 clsdev = device_create(mei_class, parent, devno,
725 NULL, "mei%d", dev->minor);
726
727 if (IS_ERR(clsdev)) {
728 dev_err(parent, "unable to create device %d:%d\n",
729 MAJOR(mei_devt), dev->minor);
730 ret = PTR_ERR(clsdev);
731 goto err_dev_create;
732 }
733
734 ret = mei_dbgfs_register(dev, dev_name(clsdev));
735 if (ret) {
736 dev_err(clsdev, "cannot register debugfs ret = %d\n", ret);
737 goto err_dev_dbgfs;
738 }
690 739
691 return 0; 740 return 0;
741
742err_dev_dbgfs:
743 device_destroy(mei_class, devno);
744err_dev_create:
745 cdev_del(&dev->cdev);
746err_dev_add:
747 mei_minor_free(dev);
748 return ret;
692} 749}
693EXPORT_SYMBOL_GPL(mei_register); 750EXPORT_SYMBOL_GPL(mei_register);
694 751
695void mei_deregister(struct mei_device *dev) 752void mei_deregister(struct mei_device *dev)
696{ 753{
754 int devno;
755
756 devno = dev->cdev.dev;
757 cdev_del(&dev->cdev);
758
697 mei_dbgfs_deregister(dev); 759 mei_dbgfs_deregister(dev);
698 misc_deregister(&mei_misc_device); 760
699 mei_misc_device.parent = NULL; 761 device_destroy(mei_class, devno);
762
763 mei_minor_free(dev);
700} 764}
701EXPORT_SYMBOL_GPL(mei_deregister); 765EXPORT_SYMBOL_GPL(mei_deregister);
702 766
703static int __init mei_init(void) 767static int __init mei_init(void)
704{ 768{
705 return mei_cl_bus_init(); 769 int ret;
770
771 mei_class = class_create(THIS_MODULE, "mei");
772 if (IS_ERR(mei_class)) {
773 pr_err("couldn't create class\n");
774 ret = PTR_ERR(mei_class);
775 goto err;
776 }
777
778 ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
779 if (ret < 0) {
780 pr_err("unable to allocate char dev region\n");
781 goto err_class;
782 }
783
784 ret = mei_cl_bus_init();
785 if (ret < 0) {
786 pr_err("unable to initialize bus\n");
787 goto err_chrdev;
788 }
789
790 return 0;
791
792err_chrdev:
793 unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
794err_class:
795 class_destroy(mei_class);
796err:
797 return ret;
706} 798}
707 799
708static void __exit mei_exit(void) 800static void __exit mei_exit(void)
709{ 801{
802 unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
803 class_destroy(mei_class);
710 mei_cl_bus_exit(); 804 mei_cl_bus_exit();
711} 805}
712 806
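
The miscdevice-to-cdev conversion above follows the standard character device registration pattern: allocate a dev_t region, add a cdev, and create a class device so udev populates /dev. A self-contained sketch with placeholder "foo" names (not mei symbols):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/err.h>

static struct class *foo_class;
static dev_t foo_devt;
static struct cdev foo_cdev;
static const struct file_operations foo_fops = { .owner = THIS_MODULE };

static int __init foo_init(void)
{
	struct device *d;
	int ret;

	foo_class = class_create(THIS_MODULE, "foo");
	if (IS_ERR(foo_class))
		return PTR_ERR(foo_class);

	ret = alloc_chrdev_region(&foo_devt, 0, 1, "foo");
	if (ret)
		goto err_class;

	cdev_init(&foo_cdev, &foo_fops);
	ret = cdev_add(&foo_cdev, foo_devt, 1);
	if (ret)
		goto err_region;

	/* creates /dev/foo0 via udev */
	d = device_create(foo_class, NULL, foo_devt, NULL, "foo0");
	if (IS_ERR(d)) {
		ret = PTR_ERR(d);
		goto err_cdev;
	}
	return 0;

err_cdev:
	cdev_del(&foo_cdev);
err_region:
	unregister_chrdev_region(foo_devt, 1);
err_class:
	class_destroy(foo_class);
	return ret;
}

static void __exit foo_exit(void)
{
	device_destroy(foo_class, foo_devt);
	cdev_del(&foo_cdev);
	unregister_chrdev_region(foo_devt, 1);
	class_destroy(foo_class);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
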
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 5c7e990e2f22..0b0d6135543b 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -227,7 +227,6 @@ struct mei_cl {
227 227
228/** struct mei_hw_ops 228/** struct mei_hw_ops
229 * 229 *
230 * @fw_status - read FW status from PCI config space
231 * @host_is_ready - query for host readiness 230 * @host_is_ready - query for host readiness
232 231
233 * @hw_is_ready - query if hw is ready 232 * @hw_is_ready - query if hw is ready
@@ -255,8 +254,6 @@ struct mei_cl {
255 */ 254 */
256struct mei_hw_ops { 255struct mei_hw_ops {
257 256
258 int (*fw_status)(struct mei_device *dev,
259 struct mei_fw_status *fw_status);
260 bool (*host_is_ready)(struct mei_device *dev); 257 bool (*host_is_ready)(struct mei_device *dev);
261 258
262 bool (*hw_is_ready)(struct mei_device *dev); 259 bool (*hw_is_ready)(struct mei_device *dev);
@@ -400,6 +397,10 @@ struct mei_cfg {
400/** 397/**
401 * struct mei_device - MEI private device struct 398 * struct mei_device - MEI private device struct
402 399
400 * @pdev - pointer to pci device struct
401 * @cdev - character device
402 * @minor - minor number allocated for device
403 *
403 * @reset_count - limits the number of consecutive resets 404 * @reset_count - limits the number of consecutive resets
404 * @hbm_state - state of host bus message protocol 405 * @hbm_state - state of host bus message protocol
405 * @pg_event - power gating event 406 * @pg_event - power gating event
@@ -412,6 +413,9 @@ struct mei_cfg {
412 */ 413 */
413struct mei_device { 414struct mei_device {
414 struct pci_dev *pdev; /* pointer to pci device struct */ 415 struct pci_dev *pdev; /* pointer to pci device struct */
416 struct cdev cdev;
417 int minor;
418
415 /* 419 /*
416 * lists of queues 420 * lists of queues
417 */ 421 */
@@ -741,7 +745,7 @@ static inline int mei_dbgfs_register(struct mei_device *dev, const char *name)
741static inline void mei_dbgfs_deregister(struct mei_device *dev) {} 745static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
742#endif /* CONFIG_DEBUG_FS */ 746#endif /* CONFIG_DEBUG_FS */
743 747
744int mei_register(struct mei_device *dev); 748int mei_register(struct mei_device *dev, struct device *parent);
745void mei_deregister(struct mei_device *dev); 749void mei_deregister(struct mei_device *dev);
746 750
747#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d" 751#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d"
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 1b46c64a649f..a0e9422b55a2 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -31,7 +31,6 @@
31#include <linux/compat.h> 31#include <linux/compat.h>
32#include <linux/jiffies.h> 32#include <linux/jiffies.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/miscdevice.h>
35 34
36#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
37 36
@@ -82,6 +81,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
82 {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)}, 81 {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)},
83 {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)}, 82 {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)},
84 {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)}, 83 {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},
84 {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch_cfg)},
85 85
86 /* required last entry */ 86 /* required last entry */
87 {0, } 87 {0, }
@@ -207,7 +207,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
207 pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT); 207 pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
208 pm_runtime_use_autosuspend(&pdev->dev); 208 pm_runtime_use_autosuspend(&pdev->dev);
209 209
210 err = mei_register(dev); 210 err = mei_register(dev, &pdev->dev);
211 if (err) 211 if (err)
212 goto release_irq; 212 goto release_irq;
213 213
@@ -369,7 +369,7 @@ static int mei_me_pm_runtime_idle(struct device *device)
369 if (!dev) 369 if (!dev)
370 return -ENODEV; 370 return -ENODEV;
371 if (mei_write_is_idle(dev)) 371 if (mei_write_is_idle(dev))
372 pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2); 372 pm_runtime_autosuspend(device);
373 373
374 return -EBUSY; 374 return -EBUSY;
375} 375}
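
The runtime-idle callback now calls pm_runtime_autosuspend() instead of scheduling a suspend with a hard-coded delay, so the autosuspend delay configured on the device is honoured (the TXE driver below gets the same change). A hedged sketch of the callback shape, with a placeholder idleness helper:

#include <linux/pm_runtime.h>

static bool foo_is_idle(struct device *dev)
{
	return true;	/* placeholder for a real idleness check */
}

static int foo_runtime_idle(struct device *dev)
{
	if (foo_is_idle(dev))
		pm_runtime_autosuspend(dev);	/* respects autosuspend delay */

	return -EBUSY;	/* tell the PM core not to suspend directly */
}
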
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 2343c6236df9..19de57368b7a 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -149,7 +149,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
149 pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); 149 pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
150 pm_runtime_use_autosuspend(&pdev->dev); 150 pm_runtime_use_autosuspend(&pdev->dev);
151 151
152 err = mei_register(dev); 152 err = mei_register(dev, &pdev->dev);
153 if (err) 153 if (err)
154 goto release_irq; 154 goto release_irq;
155 155
@@ -306,7 +306,7 @@ static int mei_txe_pm_runtime_idle(struct device *device)
306 if (!dev) 306 if (!dev)
307 return -ENODEV; 307 return -ENODEV;
308 if (mei_write_is_idle(dev)) 308 if (mei_write_is_idle(dev))
309 pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2); 309 pm_runtime_autosuspend(device);
310 310
311 return -EBUSY; 311 return -EBUSY;
312} 312}
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 462a5b1d8651..cc4eef040c14 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -1,8 +1,25 @@
1comment "Intel MIC Bus Driver"
2
3config INTEL_MIC_BUS
4 tristate "Intel MIC Bus Driver"
5 depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
6 help
7 This option is selected by any driver which registers a
8 device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST,
9 CONFIG_INTEL_MIC_CARD, CONFIG_INTEL_MIC_X100_DMA etc.
10
11 If you are building a host/card kernel with an Intel MIC device
12 then say M (recommended) or Y, else say N. If unsure say N.
13
14 More information about the Intel MIC family as well as the Linux
15 OS and tools for MIC to use with this driver are available from
16 <http://software.intel.com/en-us/mic-developer>.
17
1comment "Intel MIC Host Driver" 18comment "Intel MIC Host Driver"
2 19
3config INTEL_MIC_HOST 20config INTEL_MIC_HOST
4 tristate "Intel MIC Host Driver" 21 tristate "Intel MIC Host Driver"
5 depends on 64BIT && PCI && X86 22 depends on 64BIT && PCI && X86 && INTEL_MIC_BUS
6 select VHOST_RING 23 select VHOST_RING
7 help 24 help
8 This enables Host Driver support for the Intel Many Integrated 25 This enables Host Driver support for the Intel Many Integrated
@@ -22,7 +39,7 @@ comment "Intel MIC Card Driver"
22 39
23config INTEL_MIC_CARD 40config INTEL_MIC_CARD
24 tristate "Intel MIC Card Driver" 41 tristate "Intel MIC Card Driver"
25 depends on 64BIT && X86 42 depends on 64BIT && X86 && INTEL_MIC_BUS
26 select VIRTIO 43 select VIRTIO
27 help 44 help
28 This enables card driver support for the Intel Many Integrated 45 This enables card driver support for the Intel Many Integrated
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
index 05b34d683a58..e9bf148755e2 100644
--- a/drivers/misc/mic/Makefile
+++ b/drivers/misc/mic/Makefile
@@ -4,3 +4,4 @@
4# 4#
5obj-$(CONFIG_INTEL_MIC_HOST) += host/ 5obj-$(CONFIG_INTEL_MIC_HOST) += host/
6obj-$(CONFIG_INTEL_MIC_CARD) += card/ 6obj-$(CONFIG_INTEL_MIC_CARD) += card/
7obj-$(CONFIG_INTEL_MIC_BUS) += bus/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
new file mode 100644
index 000000000000..d85c7f2a0af4
--- /dev/null
+++ b/drivers/misc/mic/bus/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile - Intel MIC Linux driver.
3# Copyright(c) 2014, Intel Corporation.
4#
5obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
diff --git a/drivers/misc/mic/bus/mic_bus.c b/drivers/misc/mic/bus/mic_bus.c
new file mode 100644
index 000000000000..961ae90aae47
--- /dev/null
+++ b/drivers/misc/mic/bus/mic_bus.c
@@ -0,0 +1,218 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC Bus driver.
19 *
 20 * This implementation is very similar to the virtio bus driver
21 * implementation @ drivers/virtio/virtio.c
22 */
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/idr.h>
26#include <linux/mic_bus.h>
27
28/* Unique numbering for mbus devices. */
29static DEFINE_IDA(mbus_index_ida);
30
31static ssize_t device_show(struct device *d,
32 struct device_attribute *attr, char *buf)
33{
34 struct mbus_device *dev = dev_to_mbus(d);
35 return sprintf(buf, "0x%04x\n", dev->id.device);
36}
37static DEVICE_ATTR_RO(device);
38
39static ssize_t vendor_show(struct device *d,
40 struct device_attribute *attr, char *buf)
41{
42 struct mbus_device *dev = dev_to_mbus(d);
43 return sprintf(buf, "0x%04x\n", dev->id.vendor);
44}
45static DEVICE_ATTR_RO(vendor);
46
47static ssize_t modalias_show(struct device *d,
48 struct device_attribute *attr, char *buf)
49{
50 struct mbus_device *dev = dev_to_mbus(d);
51 return sprintf(buf, "mbus:d%08Xv%08X\n",
52 dev->id.device, dev->id.vendor);
53}
54static DEVICE_ATTR_RO(modalias);
55
56static struct attribute *mbus_dev_attrs[] = {
57 &dev_attr_device.attr,
58 &dev_attr_vendor.attr,
59 &dev_attr_modalias.attr,
60 NULL,
61};
62ATTRIBUTE_GROUPS(mbus_dev);
63
64static inline int mbus_id_match(const struct mbus_device *dev,
65 const struct mbus_device_id *id)
66{
67 if (id->device != dev->id.device && id->device != MBUS_DEV_ANY_ID)
68 return 0;
69
70 return id->vendor == MBUS_DEV_ANY_ID || id->vendor == dev->id.vendor;
71}
72
73/*
74 * This looks through all the IDs a driver claims to support. If any of them
75 * match, we return 1 and the kernel will call mbus_dev_probe().
76 */
77static int mbus_dev_match(struct device *dv, struct device_driver *dr)
78{
79 unsigned int i;
80 struct mbus_device *dev = dev_to_mbus(dv);
81 const struct mbus_device_id *ids;
82
83 ids = drv_to_mbus(dr)->id_table;
84 for (i = 0; ids[i].device; i++)
85 if (mbus_id_match(dev, &ids[i]))
86 return 1;
87 return 0;
88}
89
90static int mbus_uevent(struct device *dv, struct kobj_uevent_env *env)
91{
92 struct mbus_device *dev = dev_to_mbus(dv);
93
94 return add_uevent_var(env, "MODALIAS=mbus:d%08Xv%08X",
95 dev->id.device, dev->id.vendor);
96}
97
98static int mbus_dev_probe(struct device *d)
99{
100 int err;
101 struct mbus_device *dev = dev_to_mbus(d);
102 struct mbus_driver *drv = drv_to_mbus(dev->dev.driver);
103
104 err = drv->probe(dev);
105 if (!err)
106 if (drv->scan)
107 drv->scan(dev);
108 return err;
109}
110
111static int mbus_dev_remove(struct device *d)
112{
113 struct mbus_device *dev = dev_to_mbus(d);
114 struct mbus_driver *drv = drv_to_mbus(dev->dev.driver);
115
116 drv->remove(dev);
117 return 0;
118}
119
120static struct bus_type mic_bus = {
121 .name = "mic_bus",
122 .match = mbus_dev_match,
123 .dev_groups = mbus_dev_groups,
124 .uevent = mbus_uevent,
125 .probe = mbus_dev_probe,
126 .remove = mbus_dev_remove,
127};
128
129int mbus_register_driver(struct mbus_driver *driver)
130{
131 driver->driver.bus = &mic_bus;
132 return driver_register(&driver->driver);
133}
134EXPORT_SYMBOL_GPL(mbus_register_driver);
135
136void mbus_unregister_driver(struct mbus_driver *driver)
137{
138 driver_unregister(&driver->driver);
139}
140EXPORT_SYMBOL_GPL(mbus_unregister_driver);
141
142static void mbus_release_dev(struct device *d)
143{
144 struct mbus_device *mbdev = dev_to_mbus(d);
145 kfree(mbdev);
146}
147
148struct mbus_device *
149mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
150 struct mbus_hw_ops *hw_ops, void __iomem *mmio_va)
151{
152 int ret;
153 struct mbus_device *mbdev;
154
155 mbdev = kzalloc(sizeof(*mbdev), GFP_KERNEL);
156 if (!mbdev)
157 return ERR_PTR(-ENOMEM);
158
159 mbdev->mmio_va = mmio_va;
160 mbdev->dev.parent = pdev;
161 mbdev->id.device = id;
162 mbdev->id.vendor = MBUS_DEV_ANY_ID;
163 mbdev->dev.archdata.dma_ops = dma_ops;
164 mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask;
165 dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64));
166 mbdev->dev.release = mbus_release_dev;
167 mbdev->hw_ops = hw_ops;
168 mbdev->dev.bus = &mic_bus;
169
170 /* Assign a unique device index and hence name. */
171 ret = ida_simple_get(&mbus_index_ida, 0, 0, GFP_KERNEL);
172 if (ret < 0)
173 goto free_mbdev;
174
175 mbdev->index = ret;
176 dev_set_name(&mbdev->dev, "mbus-dev%u", mbdev->index);
177 /*
178 * device_register() causes the bus infrastructure to look for a
179 * matching driver.
180 */
181 ret = device_register(&mbdev->dev);
182 if (ret)
183 goto ida_remove;
184 return mbdev;
185ida_remove:
186 ida_simple_remove(&mbus_index_ida, mbdev->index);
187free_mbdev:
188 kfree(mbdev);
189 return ERR_PTR(ret);
190}
191EXPORT_SYMBOL_GPL(mbus_register_device);
192
193void mbus_unregister_device(struct mbus_device *mbdev)
194{
195 int index = mbdev->index; /* save for after device release */
196
197 device_unregister(&mbdev->dev);
198 ida_simple_remove(&mbus_index_ida, index);
199}
200EXPORT_SYMBOL_GPL(mbus_unregister_device);
201
202static int __init mbus_init(void)
203{
204 return bus_register(&mic_bus);
205}
206
207static void __exit mbus_exit(void)
208{
209 bus_unregister(&mic_bus);
210 ida_destroy(&mbus_index_ida);
211}
212
213core_initcall(mbus_init);
214module_exit(mbus_exit);
215
216MODULE_AUTHOR("Intel Corporation");
217MODULE_DESCRIPTION("Intel(R) MIC Bus driver");
218MODULE_LICENSE("GPL v2");
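
A client driver would attach to this new bus roughly as follows. The mbus_* symbols are the ones introduced above; the "foo" driver, its id table contents, and the void remove() signature are illustrative assumptions:

#include <linux/module.h>
#include <linux/mic_bus.h>

static const struct mbus_device_id foo_id_table[] = {
	{ .device = MBUS_DEV_DMA_MIC, .vendor = MBUS_DEV_ANY_ID },
	{ 0 },				/* sentinel: device == 0 */
};

static int foo_probe(struct mbus_device *mbdev)
{
	/* mbdev->mmio_va and mbdev->hw_ops are available here */
	return 0;
}

static void foo_remove(struct mbus_device *mbdev)
{
}

static struct mbus_driver foo_driver = {
	.driver = {
		.name	= "foo_mbus_client",
		.owner	= THIS_MODULE,
	},
	.id_table	= foo_id_table,
	.probe		= foo_probe,
	.remove		= foo_remove,
};

static int __init foo_init(void)
{
	return mbus_register_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	mbus_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
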
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index d0980ff96833..83819eee553b 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -83,8 +83,8 @@ static int mic_shutdown_init(void)
83 int shutdown_db; 83 int shutdown_db;
84 84
85 shutdown_db = mic_next_card_db(); 85 shutdown_db = mic_next_card_db();
86 shutdown_cookie = mic_request_card_irq(mic_shutdown_isr, 86 shutdown_cookie = mic_request_card_irq(mic_shutdown_isr, NULL,
87 "Shutdown", mdrv, shutdown_db); 87 "Shutdown", mdrv, shutdown_db);
88 if (IS_ERR(shutdown_cookie)) 88 if (IS_ERR(shutdown_cookie))
89 rc = PTR_ERR(shutdown_cookie); 89 rc = PTR_ERR(shutdown_cookie);
90 else 90 else
@@ -136,7 +136,8 @@ static void mic_dp_uninit(void)
136/** 136/**
137 * mic_request_card_irq - request an irq. 137 * mic_request_card_irq - request an irq.
138 * 138 *
139 * @func: The callback function that handles the interrupt. 139 * @handler: interrupt handler passed to request_threaded_irq.
140 * @thread_fn: thread fn. passed to request_threaded_irq.
140 * @name: The ASCII name of the callee requesting the irq. 141 * @name: The ASCII name of the callee requesting the irq.
141 * @data: private data that is returned back when calling the 142 * @data: private data that is returned back when calling the
142 * function handler. 143 * function handler.
@@ -149,17 +150,19 @@ static void mic_dp_uninit(void)
149 * error code. 150 * error code.
150 * 151 *
151 */ 152 */
152struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data), 153struct mic_irq *
153 const char *name, void *data, int index) 154mic_request_card_irq(irq_handler_t handler,
155 irq_handler_t thread_fn, const char *name,
156 void *data, int index)
154{ 157{
155 int rc = 0; 158 int rc = 0;
156 unsigned long cookie; 159 unsigned long cookie;
157 struct mic_driver *mdrv = g_drv; 160 struct mic_driver *mdrv = g_drv;
158 161
159 rc = request_irq(mic_db_to_irq(mdrv, index), func, 162 rc = request_threaded_irq(mic_db_to_irq(mdrv, index), handler,
160 0, name, data); 163 thread_fn, 0, name, data);
161 if (rc) { 164 if (rc) {
162 dev_err(mdrv->dev, "request_irq failed rc = %d\n", rc); 165 dev_err(mdrv->dev, "request_threaded_irq failed rc = %d\n", rc);
163 goto err; 166 goto err;
164 } 167 }
165 mdrv->irq_info.irq_usage_count[index]++; 168 mdrv->irq_info.irq_usage_count[index]++;
@@ -172,9 +175,9 @@ err:
172/** 175/**
173 * mic_free_card_irq - free irq. 176 * mic_free_card_irq - free irq.
174 * 177 *
175 * @cookie: cookie obtained during a successful call to mic_request_irq 178 * @cookie: cookie obtained during a successful call to mic_request_threaded_irq
176 * @data: private data specified by the calling function during the 179 * @data: private data specified by the calling function during the
177 * mic_request_irq 180 * mic_request_threaded_irq
178 * 181 *
179 * returns: none. 182 * returns: none.
180 */ 183 */
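
mic_request_card_irq() is now a thin wrapper around request_threaded_irq(); callers that pass a NULL thread_fn keep the old request_irq() behaviour. A generic sketch of the underlying call, with placeholder names:

#include <linux/interrupt.h>

static irqreturn_t foo_hard_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;		/* defer heavy work to the thread */
}

static irqreturn_t foo_thread_fn(int irq, void *data)
{
	/* runs in process context: may sleep, take mutexes, allocate */
	return IRQ_HANDLED;
}

static int foo_setup_irq(unsigned int irq, void *data)
{
	return request_threaded_irq(irq, foo_hard_handler, foo_thread_fn,
				    0, "foo", data);
}
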
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
index 306f502be95e..844be8fc9b22 100644
--- a/drivers/misc/mic/card/mic_device.h
+++ b/drivers/misc/mic/card/mic_device.h
@@ -30,6 +30,8 @@
30#include <linux/workqueue.h> 30#include <linux/workqueue.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/irqreturn.h> 32#include <linux/irqreturn.h>
33#include <linux/interrupt.h>
34#include <linux/mic_bus.h>
33 35
34/** 36/**
35 * struct mic_intr_info - Contains h/w specific interrupt sources info 37 * struct mic_intr_info - Contains h/w specific interrupt sources info
@@ -70,6 +72,7 @@ struct mic_device {
70 * @hotplug_work: Hot plug work for adding/removing virtio devices. 72 * @hotplug_work: Hot plug work for adding/removing virtio devices.
71 * @irq_info: The OS specific irq information 73 * @irq_info: The OS specific irq information
72 * @intr_info: H/W specific interrupt information. 74 * @intr_info: H/W specific interrupt information.
75 * @dma_mbdev: dma device on the MIC virtual bus.
73 */ 76 */
74struct mic_driver { 77struct mic_driver {
75 char name[20]; 78 char name[20];
@@ -80,6 +83,7 @@ struct mic_driver {
80 struct work_struct hotplug_work; 83 struct work_struct hotplug_work;
81 struct mic_irq_info irq_info; 84 struct mic_irq_info irq_info;
82 struct mic_intr_info intr_info; 85 struct mic_intr_info intr_info;
86 struct mbus_device *dma_mbdev;
83}; 87};
84 88
85/** 89/**
@@ -116,8 +120,9 @@ mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
116int mic_driver_init(struct mic_driver *mdrv); 120int mic_driver_init(struct mic_driver *mdrv);
117void mic_driver_uninit(struct mic_driver *mdrv); 121void mic_driver_uninit(struct mic_driver *mdrv);
118int mic_next_card_db(void); 122int mic_next_card_db(void);
119struct mic_irq *mic_request_card_irq(irqreturn_t (*func)(int irq, void *data), 123struct mic_irq *
120 const char *name, void *data, int intr_src); 124mic_request_card_irq(irq_handler_t handler, irq_handler_t thread_fn,
125 const char *name, void *data, int intr_src);
121void mic_free_card_irq(struct mic_irq *cookie, void *data); 126void mic_free_card_irq(struct mic_irq *cookie, void *data);
122u32 mic_read_spad(struct mic_device *mdev, unsigned int idx); 127u32 mic_read_spad(struct mic_device *mdev, unsigned int idx);
123void mic_send_intr(struct mic_device *mdev, int doorbell); 128void mic_send_intr(struct mic_device *mdev, int doorbell);
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
index 653799b96bfa..f14b60080c21 100644
--- a/drivers/misc/mic/card/mic_virtio.c
+++ b/drivers/misc/mic/card/mic_virtio.c
@@ -417,7 +417,7 @@ static int mic_add_device(struct mic_device_desc __iomem *d,
417 417
418 virtio_db = mic_next_card_db(); 418 virtio_db = mic_next_card_db();
419 mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler, 419 mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler,
420 "virtio intr", mvdev, virtio_db); 420 NULL, "virtio intr", mvdev, virtio_db);
421 if (IS_ERR(mvdev->virtio_cookie)) { 421 if (IS_ERR(mvdev->virtio_cookie)) {
422 ret = PTR_ERR(mvdev->virtio_cookie); 422 ret = PTR_ERR(mvdev->virtio_cookie);
423 goto kfree; 423 goto kfree;
@@ -606,8 +606,9 @@ int mic_devices_init(struct mic_driver *mdrv)
606 mic_scan_devices(mdrv, !REMOVE_DEVICES); 606 mic_scan_devices(mdrv, !REMOVE_DEVICES);
607 607
608 config_db = mic_next_card_db(); 608 config_db = mic_next_card_db();
609 virtio_config_cookie = mic_request_card_irq(mic_extint_handler, 609 virtio_config_cookie = mic_request_card_irq(mic_extint_handler, NULL,
610 "virtio_config_intr", mdrv, config_db); 610 "virtio_config_intr", mdrv,
611 config_db);
611 if (IS_ERR(virtio_config_cookie)) { 612 if (IS_ERR(virtio_config_cookie)) {
612 rc = PTR_ERR(virtio_config_cookie); 613 rc = PTR_ERR(virtio_config_cookie);
613 goto exit; 614 goto exit;
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index 2868945c9a4d..9d57545d64f6 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -148,6 +148,47 @@ void mic_card_unmap(struct mic_device *mdev, void __iomem *addr)
148 iounmap(addr); 148 iounmap(addr);
149} 149}
150 150
151static inline struct mic_driver *mbdev_to_mdrv(struct mbus_device *mbdev)
152{
153 return dev_get_drvdata(mbdev->dev.parent);
154}
155
156static struct mic_irq *
157_mic_request_threaded_irq(struct mbus_device *mbdev,
158 irq_handler_t handler, irq_handler_t thread_fn,
159 const char *name, void *data, int intr_src)
160{
161 int rc = 0;
162 unsigned int irq = intr_src;
163 unsigned long cookie = irq;
164
165 rc = request_threaded_irq(irq, handler, thread_fn, 0, name, data);
166 if (rc) {
167 dev_err(mbdev_to_mdrv(mbdev)->dev,
168 "request_threaded_irq failed rc = %d\n", rc);
169 return ERR_PTR(rc);
170 }
171 return (struct mic_irq *)cookie;
172}
173
174static void _mic_free_irq(struct mbus_device *mbdev,
175 struct mic_irq *cookie, void *data)
176{
177 unsigned long irq = (unsigned long)cookie;
178 free_irq(irq, data);
179}
180
181static void _mic_ack_interrupt(struct mbus_device *mbdev, int num)
182{
183 mic_ack_interrupt(&mbdev_to_mdrv(mbdev)->mdev);
184}
185
186static struct mbus_hw_ops mbus_hw_ops = {
187 .request_threaded_irq = _mic_request_threaded_irq,
188 .free_irq = _mic_free_irq,
189 .ack_interrupt = _mic_ack_interrupt,
190};
191
151static int __init mic_probe(struct platform_device *pdev) 192static int __init mic_probe(struct platform_device *pdev)
152{ 193{
153 struct mic_driver *mdrv = &g_drv; 194 struct mic_driver *mdrv = &g_drv;
@@ -159,32 +200,41 @@ static int __init mic_probe(struct platform_device *pdev)
159 200
160 mdev->mmio.pa = MIC_X100_MMIO_BASE; 201 mdev->mmio.pa = MIC_X100_MMIO_BASE;
161 mdev->mmio.len = MIC_X100_MMIO_LEN; 202 mdev->mmio.len = MIC_X100_MMIO_LEN;
162 mdev->mmio.va = ioremap(MIC_X100_MMIO_BASE, MIC_X100_MMIO_LEN); 203 mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
204 MIC_X100_MMIO_LEN);
163 if (!mdev->mmio.va) { 205 if (!mdev->mmio.va) {
164 dev_err(&pdev->dev, "Cannot remap MMIO BAR\n"); 206 dev_err(&pdev->dev, "Cannot remap MMIO BAR\n");
165 rc = -EIO; 207 rc = -EIO;
166 goto done; 208 goto done;
167 } 209 }
168 mic_hw_intr_init(mdrv); 210 mic_hw_intr_init(mdrv);
211 platform_set_drvdata(pdev, mdrv);
212 mdrv->dma_mbdev = mbus_register_device(mdrv->dev, MBUS_DEV_DMA_MIC,
213 NULL, &mbus_hw_ops,
214 mdrv->mdev.mmio.va);
215 if (IS_ERR(mdrv->dma_mbdev)) {
216 rc = PTR_ERR(mdrv->dma_mbdev);
217 dev_err(&pdev->dev, "mbus_add_device failed rc %d\n", rc);
218 goto done;
219 }
169 rc = mic_driver_init(mdrv); 220 rc = mic_driver_init(mdrv);
170 if (rc) { 221 if (rc) {
171 dev_err(&pdev->dev, "mic_driver_init failed rc %d\n", rc); 222 dev_err(&pdev->dev, "mic_driver_init failed rc %d\n", rc);
172 goto iounmap; 223 goto remove_dma;
173 } 224 }
174done: 225done:
175 return rc; 226 return rc;
176iounmap: 227remove_dma:
177 iounmap(mdev->mmio.va); 228 mbus_unregister_device(mdrv->dma_mbdev);
178 return rc; 229 return rc;
179} 230}
180 231
181static int mic_remove(struct platform_device *pdev) 232static int mic_remove(struct platform_device *pdev)
182{ 233{
183 struct mic_driver *mdrv = &g_drv; 234 struct mic_driver *mdrv = &g_drv;
184 struct mic_device *mdev = &mdrv->mdev;
185 235
186 mic_driver_uninit(mdrv); 236 mic_driver_uninit(mdrv);
187 iounmap(mdev->mmio.va); 237 mbus_unregister_device(mdrv->dma_mbdev);
188 return 0; 238 return 0;
189} 239}
190 240
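
The ioremap()/iounmap() pair is replaced with a device-managed mapping above, which is why the iounmap error label becomes remove_dma and the explicit unmap in mic_remove() disappears. A minimal sketch of the devm pattern (the address and length are placeholders):

#include <linux/device.h>
#include <linux/io.h>

static void __iomem *foo_map_mmio(struct device *dev)
{
	/* released automatically when the driver detaches */
	void __iomem *va = devm_ioremap(dev, 0xfee00000, 0x10000);

	if (!va)
		dev_err(dev, "cannot remap MMIO BAR\n");
	return va;
}
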
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index b75c6b5cc20f..ff2b0fb1a6be 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -23,11 +23,70 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24 24
25#include <linux/mic_common.h> 25#include <linux/mic_common.h>
26#include <linux/mic_bus.h>
26#include "../common/mic_dev.h" 27#include "../common/mic_dev.h"
27#include "mic_device.h" 28#include "mic_device.h"
28#include "mic_smpt.h" 29#include "mic_smpt.h"
29#include "mic_virtio.h" 30#include "mic_virtio.h"
30 31
32static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
33{
34 return dev_get_drvdata(mbdev->dev.parent);
35}
36
37static dma_addr_t
38mic_dma_map_page(struct device *dev, struct page *page,
39 unsigned long offset, size_t size, enum dma_data_direction dir,
40 struct dma_attrs *attrs)
41{
42 void *va = phys_to_virt(page_to_phys(page)) + offset;
43 struct mic_device *mdev = dev_get_drvdata(dev->parent);
44
45 return mic_map_single(mdev, va, size);
46}
47
48static void
49mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
50 size_t size, enum dma_data_direction dir,
51 struct dma_attrs *attrs)
52{
53 struct mic_device *mdev = dev_get_drvdata(dev->parent);
54 mic_unmap_single(mdev, dma_addr, size);
55}
56
57static struct dma_map_ops mic_dma_ops = {
58 .map_page = mic_dma_map_page,
59 .unmap_page = mic_dma_unmap_page,
60};
61
62static struct mic_irq *
63_mic_request_threaded_irq(struct mbus_device *mbdev,
64 irq_handler_t handler, irq_handler_t thread_fn,
65 const char *name, void *data, int intr_src)
66{
67 return mic_request_threaded_irq(mbdev_to_mdev(mbdev), handler,
68 thread_fn, name, data,
69 intr_src, MIC_INTR_DMA);
70}
71
72static void _mic_free_irq(struct mbus_device *mbdev,
73 struct mic_irq *cookie, void *data)
74{
75 return mic_free_irq(mbdev_to_mdev(mbdev), cookie, data);
76}
77
78static void _mic_ack_interrupt(struct mbus_device *mbdev, int num)
79{
80 struct mic_device *mdev = mbdev_to_mdev(mbdev);
81 mdev->ops->intr_workarounds(mdev);
82}
83
84static struct mbus_hw_ops mbus_hw_ops = {
85 .request_threaded_irq = _mic_request_threaded_irq,
86 .free_irq = _mic_free_irq,
87 .ack_interrupt = _mic_ack_interrupt,
88};
89
31/** 90/**
32 * mic_reset - Reset the MIC device. 91 * mic_reset - Reset the MIC device.
33 * @mdev: pointer to mic_device instance 92 * @mdev: pointer to mic_device instance
@@ -95,9 +154,21 @@ retry:
95 */ 154 */
96 goto retry; 155 goto retry;
97 } 156 }
157 mdev->dma_mbdev = mbus_register_device(mdev->sdev->parent,
158 MBUS_DEV_DMA_HOST, &mic_dma_ops,
159 &mbus_hw_ops, mdev->mmio.va);
160 if (IS_ERR(mdev->dma_mbdev)) {
161 rc = PTR_ERR(mdev->dma_mbdev);
162 goto unlock_ret;
163 }
164 mdev->dma_ch = mic_request_dma_chan(mdev);
165 if (!mdev->dma_ch) {
166 rc = -ENXIO;
167 goto dma_remove;
168 }
98 rc = mdev->ops->load_mic_fw(mdev, buf); 169 rc = mdev->ops->load_mic_fw(mdev, buf);
99 if (rc) 170 if (rc)
100 goto unlock_ret; 171 goto dma_release;
101 mic_smpt_restore(mdev); 172 mic_smpt_restore(mdev);
102 mic_intr_restore(mdev); 173 mic_intr_restore(mdev);
103 mdev->intr_ops->enable_interrupts(mdev); 174 mdev->intr_ops->enable_interrupts(mdev);
@@ -105,6 +176,11 @@ retry:
105 mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32); 176 mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
106 mdev->ops->send_firmware_intr(mdev); 177 mdev->ops->send_firmware_intr(mdev);
107 mic_set_state(mdev, MIC_ONLINE); 178 mic_set_state(mdev, MIC_ONLINE);
179 goto unlock_ret;
180dma_release:
181 dma_release_channel(mdev->dma_ch);
182dma_remove:
183 mbus_unregister_device(mdev->dma_mbdev);
108unlock_ret: 184unlock_ret:
109 mutex_unlock(&mdev->mic_mutex); 185 mutex_unlock(&mdev->mic_mutex);
110 return rc; 186 return rc;
@@ -122,6 +198,11 @@ void mic_stop(struct mic_device *mdev, bool force)
122 mutex_lock(&mdev->mic_mutex); 198 mutex_lock(&mdev->mic_mutex);
123 if (MIC_OFFLINE != mdev->state || force) { 199 if (MIC_OFFLINE != mdev->state || force) {
124 mic_virtio_reset_devices(mdev); 200 mic_virtio_reset_devices(mdev);
201 if (mdev->dma_ch) {
202 dma_release_channel(mdev->dma_ch);
203 mdev->dma_ch = NULL;
204 }
205 mbus_unregister_device(mdev->dma_mbdev);
125 mic_bootparam_init(mdev); 206 mic_bootparam_init(mdev);
126 mic_reset(mdev); 207 mic_reset(mdev);
127 if (MIC_RESET_FAILED == mdev->state) 208 if (MIC_RESET_FAILED == mdev->state)
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 0398c696d257..016bd15a7bd1 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -25,6 +25,8 @@
25#include <linux/idr.h> 25#include <linux/idr.h>
26#include <linux/notifier.h> 26#include <linux/notifier.h>
27#include <linux/irqreturn.h> 27#include <linux/irqreturn.h>
28#include <linux/dmaengine.h>
29#include <linux/mic_bus.h>
28 30
29#include "mic_intr.h" 31#include "mic_intr.h"
30 32
@@ -87,6 +89,8 @@ enum mic_stepping {
87 * @cdev: Character device for MIC. 89 * @cdev: Character device for MIC.
88 * @vdev_list: list of virtio devices. 90 * @vdev_list: list of virtio devices.
89 * @pm_notifier: Handles PM notifications from the OS. 91 * @pm_notifier: Handles PM notifications from the OS.
92 * @dma_mbdev: MIC BUS DMA device.
93 * @dma_ch: DMA channel reserved by this driver for use by virtio devices.
90 */ 94 */
91struct mic_device { 95struct mic_device {
92 struct mic_mw mmio; 96 struct mic_mw mmio;
@@ -124,6 +128,8 @@ struct mic_device {
124 struct cdev cdev; 128 struct cdev cdev;
125 struct list_head vdev_list; 129 struct list_head vdev_list;
126 struct notifier_block pm_notifier; 130 struct notifier_block pm_notifier;
131 struct mbus_device *dma_mbdev;
132 struct dma_chan *dma_ch;
127}; 133};
128 134
129/** 135/**
@@ -144,6 +150,7 @@ struct mic_device {
144 * @load_mic_fw: Load firmware segments required to boot the card 150 * @load_mic_fw: Load firmware segments required to boot the card
145 * into card memory. This includes the kernel, command line, ramdisk etc. 151 * into card memory. This includes the kernel, command line, ramdisk etc.
146 * @get_postcode: Get post code status from firmware. 152 * @get_postcode: Get post code status from firmware.
153 * @dma_filter: DMA filter function to be used.
147 */ 154 */
148struct mic_hw_ops { 155struct mic_hw_ops {
149 u8 aper_bar; 156 u8 aper_bar;
@@ -159,6 +166,7 @@ struct mic_hw_ops {
159 void (*send_firmware_intr)(struct mic_device *mdev); 166 void (*send_firmware_intr)(struct mic_device *mdev);
160 int (*load_mic_fw)(struct mic_device *mdev, const char *buf); 167 int (*load_mic_fw)(struct mic_device *mdev, const char *buf);
161 u32 (*get_postcode)(struct mic_device *mdev); 168 u32 (*get_postcode)(struct mic_device *mdev);
169 bool (*dma_filter)(struct dma_chan *chan, void *param);
162}; 170};
163 171
164/** 172/**
@@ -187,6 +195,22 @@ mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
187 iowrite32(val, mw->va + offset); 195 iowrite32(val, mw->va + offset);
188} 196}
189 197
198static inline struct dma_chan *mic_request_dma_chan(struct mic_device *mdev)
199{
200 dma_cap_mask_t mask;
201 struct dma_chan *chan;
202
203 dma_cap_zero(mask);
204 dma_cap_set(DMA_MEMCPY, mask);
205 chan = dma_request_channel(mask, mdev->ops->dma_filter,
206 mdev->sdev->parent);
207 if (chan)
208 return chan;
209 dev_err(mdev->sdev->parent, "%s %d unable to acquire channel\n",
210 __func__, __LINE__);
211 return NULL;
212}
213
190void mic_sysfs_init(struct mic_device *mdev); 214void mic_sysfs_init(struct mic_device *mdev);
191int mic_start(struct mic_device *mdev, const char *buf); 215int mic_start(struct mic_device *mdev, const char *buf);
192void mic_stop(struct mic_device *mdev, bool force); 216void mic_stop(struct mic_device *mdev, bool force);
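
mic_request_dma_chan() above reserves a memcpy-capable channel through the dmaengine filter mechanism. A hedged sketch of the same pattern; the filter criterion and names are illustrative, not the driver's actual dma_filter implementation:

#include <linux/dmaengine.h>

static bool foo_dma_filter(struct dma_chan *chan, void *param)
{
	/* pick the channel whose dmaengine device matches @param */
	return chan->device->dev == (struct device *)param;
}

static struct dma_chan *foo_request_memcpy_chan(struct device *dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	return dma_request_channel(mask, foo_dma_filter, dev);
}
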
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
index dbc5afde1392..d686f2846ac7 100644
--- a/drivers/misc/mic/host/mic_intr.c
+++ b/drivers/misc/mic/host/mic_intr.c
@@ -24,28 +24,29 @@
24#include "../common/mic_dev.h" 24#include "../common/mic_dev.h"
25#include "mic_device.h" 25#include "mic_device.h"
26 26
27/* 27static irqreturn_t mic_thread_fn(int irq, void *dev)
28 * mic_invoke_callback - Invoke callback functions registered for
29 * the corresponding source id.
30 *
31 * @mdev: pointer to the mic_device instance
32 * @idx: The interrupt source id.
33 *
34 * Returns none.
35 */
36static inline void mic_invoke_callback(struct mic_device *mdev, int idx)
37{ 28{
29 struct mic_device *mdev = dev;
30 struct mic_intr_info *intr_info = mdev->intr_info;
31 struct mic_irq_info *irq_info = &mdev->irq_info;
38 struct mic_intr_cb *intr_cb; 32 struct mic_intr_cb *intr_cb;
39 struct pci_dev *pdev = container_of(mdev->sdev->parent, 33 struct pci_dev *pdev = container_of(mdev->sdev->parent,
40 struct pci_dev, dev); 34 struct pci_dev, dev);
35 int i;
41 36
42 spin_lock(&mdev->irq_info.mic_intr_lock); 37 spin_lock(&irq_info->mic_thread_lock);
43 list_for_each_entry(intr_cb, &mdev->irq_info.cb_list[idx], list) 38 for (i = intr_info->intr_start_idx[MIC_INTR_DB];
44 if (intr_cb->func) 39 i < intr_info->intr_len[MIC_INTR_DB]; i++)
45 intr_cb->func(pdev->irq, intr_cb->data); 40 if (test_and_clear_bit(i, &irq_info->mask)) {
46 spin_unlock(&mdev->irq_info.mic_intr_lock); 41 list_for_each_entry(intr_cb, &irq_info->cb_list[i],
42 list)
43 if (intr_cb->thread_fn)
44 intr_cb->thread_fn(pdev->irq,
45 intr_cb->data);
46 }
47 spin_unlock(&irq_info->mic_thread_lock);
48 return IRQ_HANDLED;
47} 49}
48
49/** 50/**
50 * mic_interrupt - Generic interrupt handler for 51 * mic_interrupt - Generic interrupt handler for
51 * MSI and INTx based interrupts. 52 * MSI and INTx based interrupts.
@@ -53,7 +54,11 @@ static inline void mic_invoke_callback(struct mic_device *mdev, int idx)
53static irqreturn_t mic_interrupt(int irq, void *dev) 54static irqreturn_t mic_interrupt(int irq, void *dev)
54{ 55{
55 struct mic_device *mdev = dev; 56 struct mic_device *mdev = dev;
56 struct mic_intr_info *info = mdev->intr_info; 57 struct mic_intr_info *intr_info = mdev->intr_info;
58 struct mic_irq_info *irq_info = &mdev->irq_info;
59 struct mic_intr_cb *intr_cb;
60 struct pci_dev *pdev = container_of(mdev->sdev->parent,
61 struct pci_dev, dev);
57 u32 mask; 62 u32 mask;
58 int i; 63 int i;
59 64
@@ -61,12 +66,19 @@ static irqreturn_t mic_interrupt(int irq, void *dev)
61 if (!mask) 66 if (!mask)
62 return IRQ_NONE; 67 return IRQ_NONE;
63 68
64 for (i = info->intr_start_idx[MIC_INTR_DB]; 69 spin_lock(&irq_info->mic_intr_lock);
65 i < info->intr_len[MIC_INTR_DB]; i++) 70 for (i = intr_info->intr_start_idx[MIC_INTR_DB];
66 if (mask & BIT(i)) 71 i < intr_info->intr_len[MIC_INTR_DB]; i++)
67 mic_invoke_callback(mdev, i); 72 if (mask & BIT(i)) {
68 73 list_for_each_entry(intr_cb, &irq_info->cb_list[i],
69 return IRQ_HANDLED; 74 list)
75 if (intr_cb->handler)
76 intr_cb->handler(pdev->irq,
77 intr_cb->data);
78 set_bit(i, &irq_info->mask);
79 }
80 spin_unlock(&irq_info->mic_intr_lock);
81 return IRQ_WAKE_THREAD;
70} 82}
71 83
72/* Return the interrupt offset from the index. Index is 0 based. */ 84/* Return the interrupt offset from the index. Index is 0 based. */
@@ -99,14 +111,15 @@ static struct msix_entry *mic_get_available_vector(struct mic_device *mdev)
99 * 111 *
100 * @mdev: pointer to the mic_device instance 112 * @mdev: pointer to the mic_device instance
101 * @idx: The source id to be registered. 113 * @idx: The source id to be registered.
102 * @func: The function to be called when the source id receives 114 * @handler: The function to be called when the source id receives
103 * the interrupt. 115 * the interrupt.
116 * @thread_fn: thread fn. corresponding to the handler
104 * @data: Private data of the requester. 117 * @data: Private data of the requester.
105 * Return the callback structure that was registered or an 118 * Return the callback structure that was registered or an
106 * appropriate error on failure. 119 * appropriate error on failure.
107 */ 120 */
108static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev, 121static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
109 u8 idx, irqreturn_t (*func) (int irq, void *dev), 122 u8 idx, irq_handler_t handler, irq_handler_t thread_fn,
110 void *data) 123 void *data)
111{ 124{
112 struct mic_intr_cb *intr_cb; 125 struct mic_intr_cb *intr_cb;
@@ -117,7 +130,8 @@ static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
117 if (!intr_cb) 130 if (!intr_cb)
118 return ERR_PTR(-ENOMEM); 131 return ERR_PTR(-ENOMEM);
119 132
120 intr_cb->func = func; 133 intr_cb->handler = handler;
134 intr_cb->thread_fn = thread_fn;
121 intr_cb->data = data; 135 intr_cb->data = data;
122 intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida, 136 intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida,
123 0, 0, GFP_KERNEL); 137 0, 0, GFP_KERNEL);
@@ -126,9 +140,11 @@ static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
126 goto ida_fail; 140 goto ida_fail;
127 } 141 }
128 142
143 spin_lock(&mdev->irq_info.mic_thread_lock);
129 spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags); 144 spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
130 list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]); 145 list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]);
131 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags); 146 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
147 spin_unlock(&mdev->irq_info.mic_thread_lock);
132 148
133 return intr_cb; 149 return intr_cb;
134ida_fail: 150ida_fail:
@@ -152,8 +168,9 @@ static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
152 unsigned long flags; 168 unsigned long flags;
153 int i; 169 int i;
154 170
171 spin_lock(&mdev->irq_info.mic_thread_lock);
172 spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
155 for (i = 0; i < MIC_NUM_OFFSETS; i++) { 173 for (i = 0; i < MIC_NUM_OFFSETS; i++) {
156 spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
157 list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) { 174 list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
158 intr_cb = list_entry(pos, struct mic_intr_cb, list); 175 intr_cb = list_entry(pos, struct mic_intr_cb, list);
159 if (intr_cb->cb_id == idx) { 176 if (intr_cb->cb_id == idx) {
@@ -163,11 +180,13 @@ static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
163 kfree(intr_cb); 180 kfree(intr_cb);
164 spin_unlock_irqrestore( 181 spin_unlock_irqrestore(
165 &mdev->irq_info.mic_intr_lock, flags); 182 &mdev->irq_info.mic_intr_lock, flags);
183 spin_unlock(&mdev->irq_info.mic_thread_lock);
166 return i; 184 return i;
167 } 185 }
168 } 186 }
169 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
170 } 187 }
188 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
189 spin_unlock(&mdev->irq_info.mic_thread_lock);
171 return MIC_NUM_OFFSETS; 190 return MIC_NUM_OFFSETS;
172} 191}
173 192
@@ -242,6 +261,7 @@ static int mic_setup_callbacks(struct mic_device *mdev)
242 INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]); 261 INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]);
243 ida_init(&mdev->irq_info.cb_ida); 262 ida_init(&mdev->irq_info.cb_ida);
244 spin_lock_init(&mdev->irq_info.mic_intr_lock); 263 spin_lock_init(&mdev->irq_info.mic_intr_lock);
264 spin_lock_init(&mdev->irq_info.mic_thread_lock);
245 return 0; 265 return 0;
246} 266}
247 267
@@ -258,14 +278,12 @@ static void mic_release_callbacks(struct mic_device *mdev)
258 struct mic_intr_cb *intr_cb; 278 struct mic_intr_cb *intr_cb;
259 int i; 279 int i;
260 280
281 spin_lock(&mdev->irq_info.mic_thread_lock);
282 spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
261 for (i = 0; i < MIC_NUM_OFFSETS; i++) { 283 for (i = 0; i < MIC_NUM_OFFSETS; i++) {
262 spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
263 284
264 if (list_empty(&mdev->irq_info.cb_list[i])) { 285 if (list_empty(&mdev->irq_info.cb_list[i]))
265 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock,
266 flags);
267 break; 286 break;
268 }
269 287
270 list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) { 288 list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
271 intr_cb = list_entry(pos, struct mic_intr_cb, list); 289 intr_cb = list_entry(pos, struct mic_intr_cb, list);
@@ -274,8 +292,9 @@ static void mic_release_callbacks(struct mic_device *mdev)
274 intr_cb->cb_id); 292 intr_cb->cb_id);
275 kfree(intr_cb); 293 kfree(intr_cb);
276 } 294 }
277 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
278 } 295 }
296 spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
297 spin_unlock(&mdev->irq_info.mic_thread_lock);
279 ida_destroy(&mdev->irq_info.cb_ida); 298 ida_destroy(&mdev->irq_info.cb_ida);
280 kfree(mdev->irq_info.cb_list); 299 kfree(mdev->irq_info.cb_list);
281} 300}
@@ -313,7 +332,8 @@ static int mic_setup_msi(struct mic_device *mdev, struct pci_dev *pdev)
313 goto err_nomem2; 332 goto err_nomem2;
314 } 333 }
315 334
316 rc = request_irq(pdev->irq, mic_interrupt, 0 , "mic-msi", mdev); 335 rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
336 0, "mic-msi", mdev);
317 if (rc) { 337 if (rc) {
318 dev_err(&pdev->dev, "Error allocating MSI interrupt\n"); 338 dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
319 goto err_irq_req_fail; 339 goto err_irq_req_fail;
@@ -353,8 +373,8 @@ static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
353 goto err_nomem; 373 goto err_nomem;
354 } 374 }
355 375
356 rc = request_irq(pdev->irq, mic_interrupt, 376 rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
357 IRQF_SHARED, "mic-intx", mdev); 377 IRQF_SHARED, "mic-intx", mdev);
358 if (rc) 378 if (rc)
359 goto err; 379 goto err;
360 380
@@ -391,13 +411,14 @@ int mic_next_db(struct mic_device *mdev)
391#define MK_COOKIE(x, y) ((x) | (y) << COOKIE_ID_SHIFT) 411#define MK_COOKIE(x, y) ((x) | (y) << COOKIE_ID_SHIFT)
392 412
393/** 413/**
394 * mic_request_irq - request an irq. mic_mutex needs 414 * mic_request_threaded_irq - request an irq. mic_mutex needs
395 * to be held before calling this function. 415 * to be held before calling this function.
396 * 416 *
397 * @mdev: pointer to mic_device instance 417 * @mdev: pointer to mic_device instance
398 * @func: The callback function that handles the interrupt. 418 * @handler: The callback function that handles the interrupt.
399 * The function needs to call ack_interrupts 419 * The function needs to call ack_interrupts
400 * (mdev->ops->ack_interrupt(mdev)) when handling the interrupts. 420 * (mdev->ops->ack_interrupt(mdev)) when handling the interrupts.
421 * @thread_fn: thread fn required by request_threaded_irq.
401 * @name: The ASCII name of the callee requesting the irq. 422 * @name: The ASCII name of the callee requesting the irq.
402 * @data: private data that is returned back when calling the 423 * @data: private data that is returned back when calling the
403 * function handler. 424 * function handler.
@@ -412,10 +433,11 @@ int mic_next_db(struct mic_device *mdev)
412 * error code. 433 * error code.
413 * 434 *
414 */ 435 */
415struct mic_irq *mic_request_irq(struct mic_device *mdev, 436struct mic_irq *
416 irqreturn_t (*func)(int irq, void *dev), 437mic_request_threaded_irq(struct mic_device *mdev,
417 const char *name, void *data, int intr_src, 438 irq_handler_t handler, irq_handler_t thread_fn,
418 enum mic_intr_type type) 439 const char *name, void *data, int intr_src,
440 enum mic_intr_type type)
419{ 441{
420 u16 offset; 442 u16 offset;
421 int rc = 0; 443 int rc = 0;
@@ -444,7 +466,8 @@ struct mic_irq *mic_request_irq(struct mic_device *mdev,
444 goto err; 466 goto err;
445 } 467 }
446 468
447 rc = request_irq(msix->vector, func, 0, name, data); 469 rc = request_threaded_irq(msix->vector, handler, thread_fn,
470 0, name, data);
448 if (rc) { 471 if (rc) {
449 dev_dbg(mdev->sdev->parent, 472 dev_dbg(mdev->sdev->parent,
450 "request irq failed rc = %d\n", rc); 473 "request irq failed rc = %d\n", rc);
@@ -458,8 +481,8 @@ struct mic_irq *mic_request_irq(struct mic_device *mdev,
458 dev_dbg(mdev->sdev->parent, "irq: %d assigned for src: %d\n", 481 dev_dbg(mdev->sdev->parent, "irq: %d assigned for src: %d\n",
459 msix->vector, intr_src); 482 msix->vector, intr_src);
460 } else { 483 } else {
461 intr_cb = mic_register_intr_callback(mdev, 484 intr_cb = mic_register_intr_callback(mdev, offset, handler,
462 offset, func, data); 485 thread_fn, data);
463 if (IS_ERR(intr_cb)) { 486 if (IS_ERR(intr_cb)) {
464 dev_err(mdev->sdev->parent, 487 dev_err(mdev->sdev->parent,
465 "No available callback entries for use\n"); 488 "No available callback entries for use\n");
@@ -487,9 +510,9 @@ err:
487 * needs to be held before calling this function. 510 * needs to be held before calling this function.
488 * 511 *
489 * @mdev: pointer to mic_device instance 512 * @mdev: pointer to mic_device instance
490 * @cookie: cookie obtained during a successful call to mic_request_irq 513 * @cookie: cookie obtained during a successful call to mic_request_threaded_irq
491 * @data: private data specified by the calling function during the 514 * @data: private data specified by the calling function during the
492 * mic_request_irq 515 * mic_request_threaded_irq
493 * 516 *
494 * returns: none. 517 * returns: none.
495 */ 518 */
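The conversions above replace request_irq() with request_threaded_irq(), splitting each MIC interrupt into a hard handler that runs in interrupt context and an optional thread_fn that runs in a sleepable kernel thread. A minimal sketch of that core-kernel pattern, with hypothetical names rather than the driver's actual handlers:

#include <linux/interrupt.h>

/* Hard handler: interrupt context, must not sleep; do the minimum and defer. */
static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	/* ack/quiesce the hardware here */
	return IRQ_WAKE_THREAD;	/* ask the core to run demo_thread() */
}

/* Threaded handler: process context, may sleep (mutexes, DMA waits, ...). */
static irqreturn_t demo_thread(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* err = request_threaded_irq(irq, demo_hardirq, demo_thread, 0, "demo", dev);
 * Passing NULL as thread_fn keeps the plain request_irq() behaviour; the
 * mic_main.c and mic_virtio.c hunks below do exactly that for their doorbells. */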
diff --git a/drivers/misc/mic/host/mic_intr.h b/drivers/misc/mic/host/mic_intr.h
index 6091aa97e116..9f783d4ad7f1 100644
--- a/drivers/misc/mic/host/mic_intr.h
+++ b/drivers/misc/mic/host/mic_intr.h
@@ -21,12 +21,15 @@
21#ifndef _MIC_INTR_H_ 21#ifndef _MIC_INTR_H_
22#define _MIC_INTR_H_ 22#define _MIC_INTR_H_
23 23
24#include <linux/bitops.h>
25#include <linux/interrupt.h>
24/* 26/*
25 * The minimum number of msix vectors required for normal operation. 27 * The minimum number of msix vectors required for normal operation.
26 * 3 for virtio network, console and block devices. 28 * 3 for virtio network, console and block devices.
27 * 1 for card shutdown notifications. 29 * 1 for card shutdown notifications.
30 * 4 for host owned DMA channels.
28 */ 31 */
29#define MIC_MIN_MSIX 4 32#define MIC_MIN_MSIX 8
30#define MIC_NUM_OFFSETS 32 33#define MIC_NUM_OFFSETS 32
31 34
32/** 35/**
@@ -68,7 +71,11 @@ struct mic_intr_info {
68 * @num_vectors: The number of MSI/MSI-x vectors that have been allocated. 71 * @num_vectors: The number of MSI/MSI-x vectors that have been allocated.
69 * @cb_ida: callback ID allocator to track the callbacks registered. 72 * @cb_ida: callback ID allocator to track the callbacks registered.
70 * @mic_intr_lock: spinlock to protect the interrupt callback list. 73 * @mic_intr_lock: spinlock to protect the interrupt callback list.
74 * @mic_thread_lock: spinlock to protect the thread callback list.
75 * This lock is used to protect against thread_fn while
 76 * mic_intr_lock is used to protect against the interrupt handler.
71 * @cb_list: Array of callback lists one for each source. 77 * @cb_list: Array of callback lists one for each source.
78 * @mask: Mask used by the main thread fn to call the underlying thread fns.
72 */ 79 */
73struct mic_irq_info { 80struct mic_irq_info {
74 int next_avail_src; 81 int next_avail_src;
@@ -77,19 +84,23 @@ struct mic_irq_info {
77 u16 num_vectors; 84 u16 num_vectors;
78 struct ida cb_ida; 85 struct ida cb_ida;
79 spinlock_t mic_intr_lock; 86 spinlock_t mic_intr_lock;
87 spinlock_t mic_thread_lock;
80 struct list_head *cb_list; 88 struct list_head *cb_list;
89 unsigned long mask;
81}; 90};
82 91
83/** 92/**
84 * struct mic_intr_cb - Interrupt callback structure. 93 * struct mic_intr_cb - Interrupt callback structure.
85 * 94 *
86 * @func: The callback function 95 * @handler: The callback function
96 * @thread_fn: The thread_fn.
87 * @data: Private data of the requester. 97 * @data: Private data of the requester.
88 * @cb_id: The callback id. Identifies this callback. 98 * @cb_id: The callback id. Identifies this callback.
89 * @list: list head pointing to the next callback structure. 99 * @list: list head pointing to the next callback structure.
90 */ 100 */
91struct mic_intr_cb { 101struct mic_intr_cb {
92 irqreturn_t (*func) (int irq, void *data); 102 irq_handler_t handler;
103 irq_handler_t thread_fn;
93 void *data; 104 void *data;
94 int cb_id; 105 int cb_id;
95 struct list_head list; 106 struct list_head list;
@@ -124,11 +135,11 @@ struct mic_hw_intr_ops {
124}; 135};
125 136
126int mic_next_db(struct mic_device *mdev); 137int mic_next_db(struct mic_device *mdev);
127struct mic_irq *mic_request_irq(struct mic_device *mdev, 138struct mic_irq *
128 irqreturn_t (*func)(int irq, void *data), 139mic_request_threaded_irq(struct mic_device *mdev,
129 const char *name, void *data, int intr_src, 140 irq_handler_t handler, irq_handler_t thread_fn,
130 enum mic_intr_type type); 141 const char *name, void *data, int intr_src,
131 142 enum mic_intr_type type);
132void mic_free_irq(struct mic_device *mdev, 143void mic_free_irq(struct mic_device *mdev,
133 struct mic_irq *cookie, void *data); 144 struct mic_irq *cookie, void *data);
134int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev); 145int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
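The new @mask and @mic_thread_lock fields suggest a two-stage dispatch: the hard handler records which doorbell sources fired, and the driver's common thread fn later walks the set bits and invokes each registered thread_fn. The snippet below is only an illustration of that bookkeeping (everything except the header's field names is hypothetical, not the driver's actual code):

#include <linux/bitops.h>
#include <linux/interrupt.h>

#define DEMO_NUM_SRCS 32	/* mirrors MIC_NUM_OFFSETS */

struct demo_irq_info {
	unsigned long mask;	/* one bit per interrupt source */
};

static irqreturn_t demo_hardirq(int irq, void *data)
{
	struct demo_irq_info *info = data;

	set_bit(0, &info->mask);	/* source id would come from hardware */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *data)
{
	struct demo_irq_info *info = data;
	int src;

	for_each_set_bit(src, &info->mask, DEMO_NUM_SRCS)
		if (test_and_clear_bit(src, &info->mask))
			;	/* call the thread_fn registered for this source */
	return IRQ_HANDLED;
}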
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index c04a021e20c7..ab37a3117d23 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -38,7 +38,7 @@
38 38
39static const char mic_driver_name[] = "mic"; 39static const char mic_driver_name[] = "mic";
40 40
41static DEFINE_PCI_DEVICE_TABLE(mic_pci_tbl) = { 41static const struct pci_device_id mic_pci_tbl[] = {
42 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2250)}, 42 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2250)},
43 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2251)}, 43 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2251)},
44 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2252)}, 44 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2252)},
@@ -389,8 +389,9 @@ static int mic_probe(struct pci_dev *pdev,
389 mutex_lock(&mdev->mic_mutex); 389 mutex_lock(&mdev->mic_mutex);
390 390
391 mdev->shutdown_db = mic_next_db(mdev); 391 mdev->shutdown_db = mic_next_db(mdev);
392 mdev->shutdown_cookie = mic_request_irq(mdev, mic_shutdown_db, 392 mdev->shutdown_cookie = mic_request_threaded_irq(mdev, mic_shutdown_db,
393 "shutdown-interrupt", mdev, mdev->shutdown_db, MIC_INTR_DB); 393 NULL, "shutdown-interrupt", mdev,
394 mdev->shutdown_db, MIC_INTR_DB);
394 if (IS_ERR(mdev->shutdown_cookie)) { 395 if (IS_ERR(mdev->shutdown_cookie)) {
395 rc = PTR_ERR(mdev->shutdown_cookie); 396 rc = PTR_ERR(mdev->shutdown_cookie);
396 mutex_unlock(&mdev->mic_mutex); 397 mutex_unlock(&mdev->mic_mutex);
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 7e1ef0ebbb80..a020e4eb435a 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -21,60 +21,157 @@
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/uaccess.h> 23#include <linux/uaccess.h>
24 24#include <linux/dmaengine.h>
25#include <linux/mic_common.h> 25#include <linux/mic_common.h>
26
26#include "../common/mic_dev.h" 27#include "../common/mic_dev.h"
27#include "mic_device.h" 28#include "mic_device.h"
28#include "mic_smpt.h" 29#include "mic_smpt.h"
29#include "mic_virtio.h" 30#include "mic_virtio.h"
30 31
31/* 32/*
32 * Initiates the copies across the PCIe bus from card memory to 33 * Size of the internal buffer used during DMA's as an intermediate buffer
33 * a user space buffer. 34 * for copy to/from user.
34 */ 35 */
35static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, 36#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
36 void __user *ubuf, size_t len, u64 addr) 37
38static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
39 dma_addr_t src, size_t len)
37{ 40{
38 int err; 41 int err = 0;
39 void __iomem *dbuf = mvdev->mdev->aper.va + addr; 42 struct dma_async_tx_descriptor *tx;
40 /* 43 struct dma_chan *mic_ch = mdev->dma_ch;
41 * We are copying from IO below an should ideally use something 44
42 * like copy_to_user_fromio(..) if it existed. 45 if (!mic_ch) {
43 */ 46 err = -EBUSY;
44 if (copy_to_user(ubuf, (void __force *)dbuf, len)) { 47 goto error;
45 err = -EFAULT; 48 }
46 dev_err(mic_dev(mvdev), "%s %d err %d\n", 49
50 tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
51 DMA_PREP_FENCE);
52 if (!tx) {
53 err = -ENOMEM;
54 goto error;
55 } else {
56 dma_cookie_t cookie = tx->tx_submit(tx);
57
58 err = dma_submit_error(cookie);
59 if (err)
60 goto error;
61 err = dma_sync_wait(mic_ch, cookie);
62 }
63error:
64 if (err)
65 dev_err(mdev->sdev->parent, "%s %d err %d\n",
47 __func__, __LINE__, err); 66 __func__, __LINE__, err);
48 goto err; 67 return err;
68}
69
70/*
71 * Initiates the copies across the PCIe bus from card memory to a user
72 * space buffer. When transfers are done using DMA, source/destination
73 * addresses and transfer length must follow the alignment requirements of
74 * the MIC DMA engine.
75 */
76static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
77 size_t len, u64 daddr, size_t dlen,
78 int vr_idx)
79{
80 struct mic_device *mdev = mvdev->mdev;
81 void __iomem *dbuf = mdev->aper.va + daddr;
82 struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
83 size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
84 size_t dma_offset;
85 size_t partlen;
86 int err;
87
88 dma_offset = daddr - round_down(daddr, dma_alignment);
89 daddr -= dma_offset;
90 len += dma_offset;
91
92 while (len) {
93 partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
94
95 err = mic_sync_dma(mdev, mvr->buf_da, daddr,
96 ALIGN(partlen, dma_alignment));
97 if (err)
98 goto err;
99
100 if (copy_to_user(ubuf, mvr->buf + dma_offset,
101 partlen - dma_offset)) {
102 err = -EFAULT;
103 goto err;
104 }
105 daddr += partlen;
106 ubuf += partlen;
107 dbuf += partlen;
108 mvdev->in_bytes_dma += partlen;
109 mvdev->in_bytes += partlen;
110 len -= partlen;
111 dma_offset = 0;
49 } 112 }
50 mvdev->in_bytes += len; 113 return 0;
51 err = 0;
52err: 114err:
115 dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
53 return err; 116 return err;
54} 117}
55 118
56/* 119/*
57 * Initiates copies across the PCIe bus from a user space 120 * Initiates copies across the PCIe bus from a user space buffer to card
58 * buffer to card memory. 121 * memory. When transfers are done using DMA, source/destination addresses
122 * and transfer length must follow the alignment requirements of the MIC
123 * DMA engine.
59 */ 124 */
60static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, 125static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
61 void __user *ubuf, size_t len, u64 addr) 126 size_t len, u64 daddr, size_t dlen,
127 int vr_idx)
62{ 128{
129 struct mic_device *mdev = mvdev->mdev;
130 void __iomem *dbuf = mdev->aper.va + daddr;
131 struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
132 size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
133 size_t partlen;
63 int err; 134 int err;
64 void __iomem *dbuf = mvdev->mdev->aper.va + addr; 135
136 if (daddr & (dma_alignment - 1)) {
137 mvdev->tx_dst_unaligned += len;
138 goto memcpy;
139 } else if (ALIGN(len, dma_alignment) > dlen) {
140 mvdev->tx_len_unaligned += len;
141 goto memcpy;
142 }
143
144 while (len) {
145 partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
146
147 if (copy_from_user(mvr->buf, ubuf, partlen)) {
148 err = -EFAULT;
149 goto err;
150 }
151 err = mic_sync_dma(mdev, daddr, mvr->buf_da,
152 ALIGN(partlen, dma_alignment));
153 if (err)
154 goto err;
155 daddr += partlen;
156 ubuf += partlen;
157 dbuf += partlen;
158 mvdev->out_bytes_dma += partlen;
159 mvdev->out_bytes += partlen;
160 len -= partlen;
161 }
162memcpy:
65 /* 163 /*
66 * We are copying to IO below and should ideally use something 164 * We are copying to IO below and should ideally use something
67 * like copy_from_user_toio(..) if it existed. 165 * like copy_from_user_toio(..) if it existed.
68 */ 166 */
69 if (copy_from_user((void __force *)dbuf, ubuf, len)) { 167 if (copy_from_user((void __force *)dbuf, ubuf, len)) {
70 err = -EFAULT; 168 err = -EFAULT;
71 dev_err(mic_dev(mvdev), "%s %d err %d\n",
72 __func__, __LINE__, err);
73 goto err; 169 goto err;
74 } 170 }
75 mvdev->out_bytes += len; 171 mvdev->out_bytes += len;
76 err = 0; 172 return 0;
77err: 173err:
174 dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
78 return err; 175 return err;
79} 176}
80 177
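The alignment handling in mic_virtio_copy_to_user() is easier to follow with concrete numbers; assume a 64-byte alignment for illustration (the real value is 1 << copy_align of the DMA channel):

	/* daddr = 0x1007, len = 100, dma_alignment = 64 (all illustrative) */
	dma_offset = daddr - round_down(daddr, dma_alignment);	/* 0x1007 - 0x1000 = 7 */
	daddr -= dma_offset;					/* 0x1000 */
	len += dma_offset;					/* 107 */
	/* The DMA moves ALIGN(107, 64) = 128 bytes into the bounce buffer, but
	 * only 107 - 7 = 100 user bytes are copied out, starting at buf + 7. */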
@@ -110,7 +207,8 @@ static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
110 * way to override the VRINGH xfer(..) routines as of v3.10. 207 * way to override the VRINGH xfer(..) routines as of v3.10.
111 */ 208 */
112static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov, 209static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
113 void __user *ubuf, size_t len, bool read, size_t *out_len) 210 void __user *ubuf, size_t len, bool read, int vr_idx,
211 size_t *out_len)
114{ 212{
115 int ret = 0; 213 int ret = 0;
116 size_t partlen, tot_len = 0; 214 size_t partlen, tot_len = 0;
@@ -118,13 +216,15 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
118 while (len && iov->i < iov->used) { 216 while (len && iov->i < iov->used) {
119 partlen = min(iov->iov[iov->i].iov_len, len); 217 partlen = min(iov->iov[iov->i].iov_len, len);
120 if (read) 218 if (read)
121 ret = mic_virtio_copy_to_user(mvdev, 219 ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
122 ubuf, partlen, 220 (u64)iov->iov[iov->i].iov_base,
123 (u64)iov->iov[iov->i].iov_base); 221 iov->iov[iov->i].iov_len,
222 vr_idx);
124 else 223 else
125 ret = mic_virtio_copy_from_user(mvdev, 224 ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
126 ubuf, partlen, 225 (u64)iov->iov[iov->i].iov_base,
127 (u64)iov->iov[iov->i].iov_base); 226 iov->iov[iov->i].iov_len,
227 vr_idx);
128 if (ret) { 228 if (ret) {
129 dev_err(mic_dev(mvdev), "%s %d err %d\n", 229 dev_err(mic_dev(mvdev), "%s %d err %d\n",
130 __func__, __LINE__, ret); 230 __func__, __LINE__, ret);
@@ -192,8 +292,8 @@ static int _mic_virtio_copy(struct mic_vdev *mvdev,
192 ubuf = iov.iov_base; 292 ubuf = iov.iov_base;
193 } 293 }
194 /* Issue all the read descriptors first */ 294 /* Issue all the read descriptors first */
195 ret = mic_vringh_copy(mvdev, riov, ubuf, len, 295 ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
196 MIC_VRINGH_READ, &out_len); 296 copy->vr_idx, &out_len);
197 if (ret) { 297 if (ret) {
198 dev_err(mic_dev(mvdev), "%s %d err %d\n", 298 dev_err(mic_dev(mvdev), "%s %d err %d\n",
199 __func__, __LINE__, ret); 299 __func__, __LINE__, ret);
@@ -203,8 +303,8 @@ static int _mic_virtio_copy(struct mic_vdev *mvdev,
203 ubuf += out_len; 303 ubuf += out_len;
204 copy->out_len += out_len; 304 copy->out_len += out_len;
205 /* Issue the write descriptors next */ 305 /* Issue the write descriptors next */
206 ret = mic_vringh_copy(mvdev, wiov, ubuf, len, 306 ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
207 !MIC_VRINGH_READ, &out_len); 307 copy->vr_idx, &out_len);
208 if (ret) { 308 if (ret) {
209 dev_err(mic_dev(mvdev), "%s %d err %d\n", 309 dev_err(mic_dev(mvdev), "%s %d err %d\n",
210 __func__, __LINE__, ret); 310 __func__, __LINE__, ret);
@@ -589,13 +689,19 @@ int mic_virtio_add_device(struct mic_vdev *mvdev,
589 dev_dbg(mdev->sdev->parent, 689 dev_dbg(mdev->sdev->parent,
590 "%s %d index %d va %p info %p vr_size 0x%x\n", 690 "%s %d index %d va %p info %p vr_size 0x%x\n",
591 __func__, __LINE__, i, vr->va, vr->info, vr_size); 691 __func__, __LINE__, i, vr->va, vr->info, vr_size);
692 mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
693 get_order(MIC_INT_DMA_BUF_SIZE));
694 mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
695 MIC_INT_DMA_BUF_SIZE);
592 } 696 }
593 697
594 snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id, 698 snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
595 mvdev->virtio_id); 699 mvdev->virtio_id);
596 mvdev->virtio_db = mic_next_db(mdev); 700 mvdev->virtio_db = mic_next_db(mdev);
597 mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler, 701 mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
598 irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB); 702 mic_virtio_intr_handler,
703 NULL, irqname, mvdev,
704 mvdev->virtio_db, MIC_INTR_DB);
599 if (IS_ERR(mvdev->virtio_cookie)) { 705 if (IS_ERR(mvdev->virtio_cookie)) {
600 ret = PTR_ERR(mvdev->virtio_cookie); 706 ret = PTR_ERR(mvdev->virtio_cookie);
601 dev_dbg(mdev->sdev->parent, "request irq failed\n"); 707 dev_dbg(mdev->sdev->parent, "request irq failed\n");
@@ -671,6 +777,11 @@ skip_hot_remove:
671 vqconfig = mic_vq_config(mvdev->dd); 777 vqconfig = mic_vq_config(mvdev->dd);
672 for (i = 0; i < mvdev->dd->num_vq; i++) { 778 for (i = 0; i < mvdev->dd->num_vq; i++) {
673 struct mic_vringh *mvr = &mvdev->mvr[i]; 779 struct mic_vringh *mvr = &mvdev->mvr[i];
780
781 mic_unmap_single(mvdev->mdev, mvr->buf_da,
782 MIC_INT_DMA_BUF_SIZE);
783 free_pages((unsigned long)mvr->buf,
784 get_order(MIC_INT_DMA_BUF_SIZE));
674 vringh_kiov_cleanup(&mvr->riov); 785 vringh_kiov_cleanup(&mvr->riov);
675 vringh_kiov_cleanup(&mvr->wiov); 786 vringh_kiov_cleanup(&mvr->wiov);
676 mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address), 787 mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
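mic_sync_dma() earlier in this file is a synchronous use of the dmaengine memcpy API: prepare a descriptor, submit it, then poll the cookie. Stripped of MIC specifics it reduces to the sketch below (a generic illustration, not additional driver code):

#include <linux/dmaengine.h>

static int demo_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			   dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_FENCE);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* busy-wait until the engine reports completion (or times out) */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}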
diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/host/mic_virtio.h
index 184f3c84805b..d574efb853d9 100644
--- a/drivers/misc/mic/host/mic_virtio.h
+++ b/drivers/misc/mic/host/mic_virtio.h
@@ -46,18 +46,23 @@
46 * @vrh: The host VRINGH used for accessing the card vrings. 46 * @vrh: The host VRINGH used for accessing the card vrings.
47 * @riov: The VRINGH read kernel IOV. 47 * @riov: The VRINGH read kernel IOV.
48 * @wiov: The VRINGH write kernel IOV. 48 * @wiov: The VRINGH write kernel IOV.
49 * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
50 * @vr_mutex: Mutex for synchronizing access to the VRING. 49 * @vr_mutex: Mutex for synchronizing access to the VRING.
50 * @buf: Temporary kernel buffer used to copy in/out data
51 * from/to the card via DMA.
52 * @buf_da: dma address of buf.
51 * @mvdev: Back pointer to MIC virtio device for vringh_notify(..). 53 * @mvdev: Back pointer to MIC virtio device for vringh_notify(..).
54 * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
52 */ 55 */
53struct mic_vringh { 56struct mic_vringh {
54 struct mic_vring vring; 57 struct mic_vring vring;
55 struct vringh vrh; 58 struct vringh vrh;
56 struct vringh_kiov riov; 59 struct vringh_kiov riov;
57 struct vringh_kiov wiov; 60 struct vringh_kiov wiov;
58 u16 head;
59 struct mutex vr_mutex; 61 struct mutex vr_mutex;
62 void *buf;
63 dma_addr_t buf_da;
60 struct mic_vdev *mvdev; 64 struct mic_vdev *mvdev;
65 u16 head;
61}; 66};
62 67
63/** 68/**
@@ -69,6 +74,14 @@ struct mic_vringh {
69 * @poll_wake - Used for waking up threads blocked in poll. 74 * @poll_wake - Used for waking up threads blocked in poll.
70 * @out_bytes - Debug stats for number of bytes copied from host to card. 75 * @out_bytes - Debug stats for number of bytes copied from host to card.
71 * @in_bytes - Debug stats for number of bytes copied from card to host. 76 * @in_bytes - Debug stats for number of bytes copied from card to host.
77 * @out_bytes_dma - Debug stats for number of bytes copied from host to card
78 * using DMA.
79 * @in_bytes_dma - Debug stats for number of bytes copied from card to host
80 * using DMA.
81 * @tx_len_unaligned - Debug stats for number of bytes copied to the card where
82 * the transfer length did not have the required DMA alignment.
83 * @tx_dst_unaligned - Debug stats for number of bytes copied where the
84 * destination address on the card did not have the required DMA alignment.
72 * @mvr - Store per VRING data structures. 85 * @mvr - Store per VRING data structures.
73 * @virtio_bh_work - Work struct used to schedule virtio bottom half handling. 86 * @virtio_bh_work - Work struct used to schedule virtio bottom half handling.
74 * @dd - Virtio device descriptor. 87 * @dd - Virtio device descriptor.
@@ -84,6 +97,10 @@ struct mic_vdev {
84 int poll_wake; 97 int poll_wake;
85 unsigned long out_bytes; 98 unsigned long out_bytes;
86 unsigned long in_bytes; 99 unsigned long in_bytes;
100 unsigned long out_bytes_dma;
101 unsigned long in_bytes_dma;
102 unsigned long tx_len_unaligned;
103 unsigned long tx_dst_unaligned;
87 struct mic_vringh mvr[MIC_MAX_VRINGS]; 104 struct mic_vringh mvr[MIC_MAX_VRINGS];
88 struct work_struct virtio_bh_work; 105 struct work_struct virtio_bh_work;
89 struct mic_device_desc *dd; 106 struct mic_device_desc *dd;
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 5562fdd3ef4e..b7a21e11dcdf 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -549,6 +549,13 @@ struct mic_smpt_ops mic_x100_smpt_ops = {
549 .set = mic_x100_smpt_set, 549 .set = mic_x100_smpt_set,
550}; 550};
551 551
552static bool mic_x100_dma_filter(struct dma_chan *chan, void *param)
553{
554 if (chan->device->dev->parent == (struct device *)param)
555 return true;
556 return false;
557}
558
552struct mic_hw_ops mic_x100_ops = { 559struct mic_hw_ops mic_x100_ops = {
553 .aper_bar = MIC_X100_APER_BAR, 560 .aper_bar = MIC_X100_APER_BAR,
554 .mmio_bar = MIC_X100_MMIO_BAR, 561 .mmio_bar = MIC_X100_MMIO_BAR,
@@ -563,6 +570,7 @@ struct mic_hw_ops mic_x100_ops = {
563 .send_firmware_intr = mic_x100_send_firmware_intr, 570 .send_firmware_intr = mic_x100_send_firmware_intr,
564 .load_mic_fw = mic_x100_load_firmware, 571 .load_mic_fw = mic_x100_load_firmware,
565 .get_postcode = mic_x100_get_postcode, 572 .get_postcode = mic_x100_get_postcode,
573 .dma_filter = mic_x100_dma_filter,
566}; 574};
567 575
568struct mic_hw_intr_ops mic_x100_intr_ops = { 576struct mic_hw_intr_ops mic_x100_intr_ops = {
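mic_x100_dma_filter() is a standard dmaengine filter: it accepts channels whose DMA device has the given struct device as parent. The call site is not part of this diff, so treat the following as an assumption about how such a filter is typically consumed:

#include <linux/dmaengine.h>

static struct dma_chan *demo_request_chan(struct device *parent)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	/* the filter is called for each candidate channel with "parent" as param */
	return dma_request_channel(mask, mic_x100_dma_filter, parent);
}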
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 9d3dbb28734b..21c2337bad68 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -244,7 +244,8 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
244 if (version & 0x8000) 244 if (version & 0x8000)
245 maj_ver |= 0x0008; 245 maj_ver |= 0x0008;
246 246
247 sprintf(bts_scr_name, "TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver); 247 sprintf(bts_scr_name, "ti-connectivity/TIInit_%d.%d.%d.bts",
248 chip, maj_ver, min_ver);
248 249
249 /* to be accessed later via sysfs entry */ 250 /* to be accessed later via sysfs entry */
250 kim_gdata->version.full = version; 251 kim_gdata->version.full = version;
@@ -287,7 +288,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
287 long len = 0; 288 long len = 0;
288 unsigned char *ptr = NULL; 289 unsigned char *ptr = NULL;
289 unsigned char *action_ptr = NULL; 290 unsigned char *action_ptr = NULL;
290 unsigned char bts_scr_name[30] = { 0 }; /* 30 char long bts scr name? */ 291 unsigned char bts_scr_name[40] = { 0 }; /* 40 char long bts scr name? */
291 int wr_room_space; 292 int wr_room_space;
292 int cmd_size; 293 int cmd_size;
293 unsigned long timeout; 294 unsigned long timeout;
@@ -778,7 +779,7 @@ static int kim_probe(struct platform_device *pdev)
778 pr_info("sysfs entries created\n"); 779 pr_info("sysfs entries created\n");
779 780
780 kim_debugfs_dir = debugfs_create_dir("ti-st", NULL); 781 kim_debugfs_dir = debugfs_create_dir("ti-st", NULL);
781 if (IS_ERR(kim_debugfs_dir)) { 782 if (!kim_debugfs_dir) {
782 pr_err(" debugfs entries creation failed "); 783 pr_err(" debugfs entries creation failed ");
783 err = -EIO; 784 err = -EIO;
784 goto err_debugfs_dir; 785 goto err_debugfs_dir;
@@ -788,7 +789,6 @@ static int kim_probe(struct platform_device *pdev)
788 kim_gdata, &version_debugfs_fops); 789 kim_gdata, &version_debugfs_fops);
789 debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir, 790 debugfs_create_file("protocols", S_IRUGO, kim_debugfs_dir,
790 kim_gdata, &list_debugfs_fops); 791 kim_gdata, &list_debugfs_fops);
791 pr_info(" debugfs entries created ");
792 return 0; 792 return 0;
793 793
794err_debugfs_dir: 794err_debugfs_dir:
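The st_kim change only prefixes the firmware name with the ti-connectivity/ directory; the load itself stays the usual request_firmware() flow, roughly as below (the version numbers are only an example, and dev is the requesting device):

#include <linux/firmware.h>

const struct firmware *fw;
int err;

/* resolved against the standard firmware search path, e.g.
 * /lib/firmware/ti-connectivity/TIInit_7.2.31.bts */
err = request_firmware(&fw, "ti-connectivity/TIInit_7.2.31.bts", dev);
if (!err) {
	/* ... stream fw->data (fw->size bytes) to the chip ... */
	release_firmware(fw);
}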
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 3250fc1df0aa..b3a812384a6f 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -130,7 +130,7 @@ static int vexpress_syscfg_write(void *context, unsigned int index,
130 return vexpress_syscfg_exec(func, index, true, &val); 130 return vexpress_syscfg_exec(func, index, true, &val);
131} 131}
132 132
133struct regmap_config vexpress_syscfg_regmap_config = { 133static struct regmap_config vexpress_syscfg_regmap_config = {
134 .lock = vexpress_config_lock, 134 .lock = vexpress_config_lock,
135 .unlock = vexpress_config_unlock, 135 .unlock = vexpress_config_unlock,
136 .reg_bits = 32, 136 .reg_bits = 32,
@@ -276,7 +276,7 @@ int vexpress_syscfg_device_register(struct platform_device *pdev)
276} 276}
277 277
278 278
279int vexpress_syscfg_probe(struct platform_device *pdev) 279static int vexpress_syscfg_probe(struct platform_device *pdev)
280{ 280{
281 struct vexpress_syscfg *syscfg; 281 struct vexpress_syscfg *syscfg;
282 struct resource *res; 282 struct resource *res;
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index e0d5017785e5..248399a881af 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -748,7 +748,7 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
748 /* The rest are managed resources and will be freed by PCI core */ 748 /* The rest are managed resources and will be freed by PCI core */
749} 749}
750 750
751static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = { 751static const struct pci_device_id vmci_ids[] = {
752 { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), }, 752 { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
753 { 0 }, 753 { 0 },
754}; 754};
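All of the DEFINE_PCI_DEVICE_TABLE removals in this pull are the same mechanical substitution; the macro simply expanded to the const table declaration, so the generated code should be unchanged. For example:

/* before */
static DEFINE_PCI_DEVICE_TABLE(demo_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0, },
};

/* after */
static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0, },
};

/* MODULE_DEVICE_TABLE(pci, demo_ids) is unaffected either way. */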
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3f8e3dbcaa7c..d04c5adafc16 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -582,7 +582,7 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
582 WARN_ONCE(pci_dev->current_state != prev, 582 WARN_ONCE(pci_dev->current_state != prev,
583 "PCI PM: Device state not saved by %pF\n", 583 "PCI PM: Device state not saved by %pF\n",
584 drv->suspend_late); 584 drv->suspend_late);
585 return 0; 585 goto Fixup;
586 } 586 }
587 } 587 }
588 588
@@ -591,6 +591,9 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
591 591
592 pci_pm_set_unknown_state(pci_dev); 592 pci_pm_set_unknown_state(pci_dev);
593 593
594Fixup:
595 pci_fixup_device(pci_fixup_suspend_late, pci_dev);
596
594 return 0; 597 return 0;
595} 598}
596 599
@@ -734,7 +737,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
734 737
735 if (!pm) { 738 if (!pm) {
736 pci_save_state(pci_dev); 739 pci_save_state(pci_dev);
737 return 0; 740 goto Fixup;
738 } 741 }
739 742
740 if (pm->suspend_noirq) { 743 if (pm->suspend_noirq) {
@@ -751,7 +754,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
751 WARN_ONCE(pci_dev->current_state != prev, 754 WARN_ONCE(pci_dev->current_state != prev,
752 "PCI PM: State of device not saved by %pF\n", 755 "PCI PM: State of device not saved by %pF\n",
753 pm->suspend_noirq); 756 pm->suspend_noirq);
754 return 0; 757 goto Fixup;
755 } 758 }
756 } 759 }
757 760
@@ -775,6 +778,9 @@ static int pci_pm_suspend_noirq(struct device *dev)
775 if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) 778 if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
776 pci_write_config_word(pci_dev, PCI_COMMAND, 0); 779 pci_write_config_word(pci_dev, PCI_COMMAND, 0);
777 780
781Fixup:
782 pci_fixup_device(pci_fixup_suspend_late, pci_dev);
783
778 return 0; 784 return 0;
779} 785}
780 786
@@ -999,8 +1005,10 @@ static int pci_pm_poweroff_noirq(struct device *dev)
999 if (pci_has_legacy_pm_support(to_pci_dev(dev))) 1005 if (pci_has_legacy_pm_support(to_pci_dev(dev)))
1000 return pci_legacy_suspend_late(dev, PMSG_HIBERNATE); 1006 return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
1001 1007
1002 if (!drv || !drv->pm) 1008 if (!drv || !drv->pm) {
1009 pci_fixup_device(pci_fixup_suspend_late, pci_dev);
1003 return 0; 1010 return 0;
1011 }
1004 1012
1005 if (drv->pm->poweroff_noirq) { 1013 if (drv->pm->poweroff_noirq) {
1006 int error; 1014 int error;
@@ -1021,6 +1029,8 @@ static int pci_pm_poweroff_noirq(struct device *dev)
1021 if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) 1029 if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
1022 pci_write_config_word(pci_dev, PCI_COMMAND, 0); 1030 pci_write_config_word(pci_dev, PCI_COMMAND, 0);
1023 1031
1032 pci_fixup_device(pci_fixup_suspend_late, pci_dev);
1033
1024 if (pcibios_pm_ops.poweroff_noirq) 1034 if (pcibios_pm_ops.poweroff_noirq)
1025 return pcibios_pm_ops.poweroff_noirq(dev); 1035 return pcibios_pm_ops.poweroff_noirq(dev);
1026 1036
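The pci-driver.c hunks above wire a new pci_fixup_suspend_late pass into the legacy suspend_late, suspend_noirq and poweroff_noirq paths. A quirk opts into that pass with the matching DECLARE macro (the quirks.c diff below adds the first real user); a bare skeleton, with PCI_ANY_ID used purely for illustration:

static void demo_suspend_late_quirk(struct pci_dev *dev)
{
	/* invoked via pci_fixup_device(pci_fixup_suspend_late, dev)
	 * late in the suspend/poweroff sequence */
}
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_ANY_ID, PCI_ANY_ID,
			       demo_suspend_late_quirk);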
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ad566827b547..80c2d014283d 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2986,6 +2986,103 @@ DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
2986DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, 2986DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
2987 quirk_broken_intx_masking); 2987 quirk_broken_intx_masking);
2988 2988
2989#ifdef CONFIG_ACPI
2990/*
2991 * Apple: Shutdown Cactus Ridge Thunderbolt controller.
2992 *
2993 * On Apple hardware the Cactus Ridge Thunderbolt controller needs to be
2994 * shutdown before suspend. Otherwise the native host interface (NHI) will not
2995 * be present after resume if a device was plugged in before suspend.
2996 *
2997 * The thunderbolt controller consists of a pcie switch with downstream
2998 * bridges leading to the NHI and to the tunnel pci bridges.
2999 *
3000 * This quirk cuts power to the whole chip. Therefore we have to apply it
3001 * during suspend_noirq of the upstream bridge.
3002 *
3003 * Power is automagically restored before resume. No action is needed.
3004 */
3005static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
3006{
3007 acpi_handle bridge, SXIO, SXFP, SXLV;
3008
3009 if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
3010 return;
3011 if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
3012 return;
3013 bridge = ACPI_HANDLE(&dev->dev);
3014 if (!bridge)
3015 return;
3016 /*
3017 * SXIO and SXLV are present only on machines requiring this quirk.
3018 * TB bridges in external devices might have the same device id as those
3019 * on the host, but they will not have the associated ACPI methods. This
3020 * implicitly checks that we are at the right bridge.
3021 */
3022 if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
3023 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
3024 || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
3025 return;
3026 dev_info(&dev->dev, "quirk: cutting power to thunderbolt controller...\n");
3027
3028 /* magic sequence */
3029 acpi_execute_simple_method(SXIO, NULL, 1);
3030 acpi_execute_simple_method(SXFP, NULL, 0);
3031 msleep(300);
3032 acpi_execute_simple_method(SXLV, NULL, 0);
3033 acpi_execute_simple_method(SXIO, NULL, 0);
3034 acpi_execute_simple_method(SXLV, NULL, 0);
3035}
3036DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547,
3037 quirk_apple_poweroff_thunderbolt);
3038
3039/*
3040 * Apple: Wait for the thunderbolt controller to reestablish pci tunnels.
3041 *
3042 * During suspend the thunderbolt controller is reset and all pci
3043 * tunnels are lost. The NHI driver will try to reestablish all tunnels
3044 * during resume. We have to manually wait for the NHI since there is
3045 * no parent child relationship between the NHI and the tunneled
3046 * bridges.
3047 */
3048static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
3049{
3050 struct pci_dev *sibling = NULL;
3051 struct pci_dev *nhi = NULL;
3052
3053 if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
3054 return;
3055 if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
3056 return;
3057 /*
3058 * Find the NHI and confirm that we are a bridge on the tb host
3059 * controller and not on a tb endpoint.
3060 */
3061 sibling = pci_get_slot(dev->bus, 0x0);
3062 if (sibling == dev)
3063 goto out; /* we are the downstream bridge to the NHI */
3064 if (!sibling || !sibling->subordinate)
3065 goto out;
3066 nhi = pci_get_slot(sibling->subordinate, 0x0);
3067 if (!nhi)
3068 goto out;
3069 if (nhi->vendor != PCI_VENDOR_ID_INTEL
3070 || (nhi->device != 0x1547 && nhi->device != 0x156c)
3071 || nhi->subsystem_vendor != 0x2222
3072 || nhi->subsystem_device != 0x1111)
3073 goto out;
3074 dev_info(&dev->dev, "quirk: wating for thunderbolt to reestablish pci tunnels...\n");
3075 device_pm_wait_for_dev(&dev->dev, &nhi->dev);
3076out:
3077 pci_dev_put(nhi);
3078 pci_dev_put(sibling);
3079}
3080DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547,
3081 quirk_apple_wait_for_thunderbolt);
3082DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d,
3083 quirk_apple_wait_for_thunderbolt);
3084#endif
3085
2989static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, 3086static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
2990 struct pci_fixup *end) 3087 struct pci_fixup *end)
2991{ 3088{
@@ -3018,6 +3115,8 @@ extern struct pci_fixup __start_pci_fixups_resume_early[];
3018extern struct pci_fixup __end_pci_fixups_resume_early[]; 3115extern struct pci_fixup __end_pci_fixups_resume_early[];
3019extern struct pci_fixup __start_pci_fixups_suspend[]; 3116extern struct pci_fixup __start_pci_fixups_suspend[];
3020extern struct pci_fixup __end_pci_fixups_suspend[]; 3117extern struct pci_fixup __end_pci_fixups_suspend[];
3118extern struct pci_fixup __start_pci_fixups_suspend_late[];
3119extern struct pci_fixup __end_pci_fixups_suspend_late[];
3021 3120
3022static bool pci_apply_fixup_final_quirks; 3121static bool pci_apply_fixup_final_quirks;
3023 3122
@@ -3063,6 +3162,11 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
3063 end = __end_pci_fixups_suspend; 3162 end = __end_pci_fixups_suspend;
3064 break; 3163 break;
3065 3164
3165 case pci_fixup_suspend_late:
3166 start = __start_pci_fixups_suspend_late;
3167 end = __end_pci_fixups_suspend_late;
3168 break;
3169
3066 default: 3170 default:
3067 /* stupid compiler warning, you would think with an enum... */ 3171 /* stupid compiler warning, you would think with an enum... */
3068 return; 3172 return;
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 0c657d6af03d..51cf8083b299 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -202,6 +202,7 @@ config PCMCIA_SA1111
202 depends on ARM && SA1111 && PCMCIA 202 depends on ARM && SA1111 && PCMCIA
203 select PCMCIA_SOC_COMMON 203 select PCMCIA_SOC_COMMON
204 select PCMCIA_SA11XX_BASE if ARCH_SA1100 204 select PCMCIA_SA11XX_BASE if ARCH_SA1100
205 select PCMCIA_PXA2XX if ARCH_LUBBOCK && SA1111
205 help 206 help
206 Say Y here to include support for SA1111-based PCMCIA or CF 207 Say Y here to include support for SA1111-based PCMCIA or CF
207 sockets, found on the Jornada 720, Graphicsmaster and other 208 sockets, found on the Jornada 720, Graphicsmaster and other
@@ -217,7 +218,6 @@ config PCMCIA_PXA2XX
217 || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \ 218 || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \
218 || MACH_VPAC270 || MACH_BALLOON3 || MACH_COLIBRI \ 219 || MACH_VPAC270 || MACH_BALLOON3 || MACH_COLIBRI \
219 || MACH_COLIBRI320 || MACH_H4700) 220 || MACH_COLIBRI320 || MACH_H4700)
220 select PCMCIA_SA1111 if ARCH_LUBBOCK && SA1111
221 select PCMCIA_SOC_COMMON 221 select PCMCIA_SOC_COMMON
222 help 222 help
223 Say Y here to include support for the PXA2xx PCMCIA controller 223 Say Y here to include support for the PXA2xx PCMCIA controller
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 7745b512a87c..fd55a6951402 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -49,6 +49,7 @@ sa1100_cs-y += sa1100_generic.o
49sa1100_cs-$(CONFIG_SA1100_ASSABET) += sa1100_assabet.o 49sa1100_cs-$(CONFIG_SA1100_ASSABET) += sa1100_assabet.o
50sa1100_cs-$(CONFIG_SA1100_CERF) += sa1100_cerf.o 50sa1100_cs-$(CONFIG_SA1100_CERF) += sa1100_cerf.o
51sa1100_cs-$(CONFIG_SA1100_COLLIE) += pxa2xx_sharpsl.o 51sa1100_cs-$(CONFIG_SA1100_COLLIE) += pxa2xx_sharpsl.o
52sa1100_cs-$(CONFIG_SA1100_H3100) += sa1100_h3600.o
52sa1100_cs-$(CONFIG_SA1100_H3600) += sa1100_h3600.o 53sa1100_cs-$(CONFIG_SA1100_H3600) += sa1100_h3600.o
53sa1100_cs-$(CONFIG_SA1100_NANOENGINE) += sa1100_nanoengine.o 54sa1100_cs-$(CONFIG_SA1100_NANOENGINE) += sa1100_nanoengine.o
54sa1100_cs-$(CONFIG_SA1100_SHANNON) += sa1100_shannon.o 55sa1100_cs-$(CONFIG_SA1100_SHANNON) += sa1100_shannon.o
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
index 0c6aac1232fc..0802e0bc7d0c 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.c
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -475,7 +475,7 @@ static void bcm63xx_cb_exit(struct pci_dev *dev)
475 bcm63xx_cb_dev = NULL; 475 bcm63xx_cb_dev = NULL;
476} 476}
477 477
478static DEFINE_PCI_DEVICE_TABLE(bcm63xx_cb_table) = { 478static const struct pci_device_id bcm63xx_cb_table[] = {
479 { 479 {
480 .vendor = PCI_VENDOR_ID_BROADCOM, 480 .vendor = PCI_VENDOR_ID_BROADCOM,
481 .device = BCM6348_CPU_ID, 481 .device = BCM6348_CPU_ID,
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 7d47456429a1..aae7e6df99cd 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -25,7 +25,7 @@
25MODULE_LICENSE("GPL"); 25MODULE_LICENSE("GPL");
26 26
27/* PCI core routines */ 27/* PCI core routines */
28static DEFINE_PCI_DEVICE_TABLE(i82092aa_pci_ids) = { 28static const struct pci_device_id i82092aa_pci_ids[] = {
29 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82092AA_0) }, 29 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82092AA_0) },
30 { } 30 { }
31}; 31};
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 622dd6fe7347..34ace4854dc2 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -764,7 +764,7 @@ static void pd6729_pci_remove(struct pci_dev *dev)
764 kfree(socket); 764 kfree(socket);
765} 765}
766 766
767static DEFINE_PCI_DEVICE_TABLE(pd6729_pci_ids) = { 767static const struct pci_device_id pd6729_pci_ids[] = {
768 { PCI_DEVICE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6729) }, 768 { PCI_DEVICE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6729) },
769 { } 769 { }
770}; 770};
diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c
index 3baa3ef09682..40e040314503 100644
--- a/drivers/pcmcia/sa1111_jornada720.c
+++ b/drivers/pcmcia/sa1111_jornada720.c
@@ -9,6 +9,7 @@
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/io.h>
12 13
13#include <mach/hardware.h> 14#include <mach/hardware.h>
14#include <asm/hardware/sa1111.h> 15#include <asm/hardware/sa1111.h>
@@ -94,6 +95,7 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
94int pcmcia_jornada720_init(struct device *dev) 95int pcmcia_jornada720_init(struct device *dev)
95{ 96{
96 int ret = -ENODEV; 97 int ret = -ENODEV;
98 struct sa1111_dev *sadev = SA1111_DEV(dev);
97 99
98 if (machine_is_jornada720()) { 100 if (machine_is_jornada720()) {
99 unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3; 101 unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
@@ -101,12 +103,12 @@ int pcmcia_jornada720_init(struct device *dev)
101 GRER |= 0x00000002; 103 GRER |= 0x00000002;
102 104
103 /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */ 105 /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
104 sa1111_set_io_dir(dev, pin, 0, 0); 106 sa1111_set_io_dir(sadev, pin, 0, 0);
105 sa1111_set_io(dev, pin, 0); 107 sa1111_set_io(sadev, pin, 0);
106 sa1111_set_sleep_io(dev, pin, 0); 108 sa1111_set_sleep_io(sadev, pin, 0);
107 109
108 sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops); 110 sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
109 ret = sa1111_pcmcia_add(dev, &jornada720_pcmcia_ops, 111 ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
110 sa11xx_drv_pcmcia_add_one); 112 sa11xx_drv_pcmcia_add_one);
111 } 113 }
112 114
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
index d92692056e24..9fb0c3addfd4 100644
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ b/drivers/pcmcia/vrc4173_cardu.c
@@ -563,7 +563,7 @@ static int vrc4173_cardu_setup(char *options)
563 563
564__setup("vrc4173_cardu=", vrc4173_cardu_setup); 564__setup("vrc4173_cardu=", vrc4173_cardu_setup);
565 565
566static DEFINE_PCI_DEVICE_TABLE(vrc4173_cardu_id_table) = { 566static const struct pci_device_id vrc4173_cardu_id_table[] = {
567 { PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NAPCCARD) }, 567 { PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NAPCCARD) },
568 {0, } 568 {0, }
569}; 569};
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 946f90ef6020..8a23ccb41213 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1352,7 +1352,7 @@ static const struct dev_pm_ops yenta_pm_ops = {
1352 .driver_data = CARDBUS_TYPE_##type, \ 1352 .driver_data = CARDBUS_TYPE_##type, \
1353 } 1353 }
1354 1354
1355static DEFINE_PCI_DEVICE_TABLE(yenta_table) = { 1355static const struct pci_device_id yenta_table[] = {
1356 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI), 1356 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI),
1357 1357
1358 /* 1358 /*
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index 653a58b49cdf..c67ff05fc1dd 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -31,6 +31,7 @@
31#include <linux/mfd/max77693.h> 31#include <linux/mfd/max77693.h>
32#include <linux/mfd/max77693-private.h> 32#include <linux/mfd/max77693-private.h>
33#include <linux/regulator/of_regulator.h> 33#include <linux/regulator/of_regulator.h>
34#include <linux/regmap.h>
34 35
35#define CHGIN_ILIM_STEP_20mA 20000 36#define CHGIN_ILIM_STEP_20mA 20000
36 37
@@ -39,9 +40,9 @@
39static int max77693_chg_is_enabled(struct regulator_dev *rdev) 40static int max77693_chg_is_enabled(struct regulator_dev *rdev)
40{ 41{
41 int ret; 42 int ret;
42 u8 val; 43 unsigned int val;
43 44
44 ret = max77693_read_reg(rdev->regmap, rdev->desc->enable_reg, &val); 45 ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
45 if (ret) 46 if (ret)
46 return ret; 47 return ret;
47 48
@@ -57,12 +58,11 @@ static int max77693_chg_get_current_limit(struct regulator_dev *rdev)
57{ 58{
58 unsigned int chg_min_uA = rdev->constraints->min_uA; 59 unsigned int chg_min_uA = rdev->constraints->min_uA;
59 unsigned int chg_max_uA = rdev->constraints->max_uA; 60 unsigned int chg_max_uA = rdev->constraints->max_uA;
60 u8 reg, sel; 61 unsigned int reg, sel;
61 unsigned int val; 62 unsigned int val;
62 int ret; 63 int ret;
63 64
64 ret = max77693_read_reg(rdev->regmap, 65 ret = regmap_read(rdev->regmap, MAX77693_CHG_REG_CHG_CNFG_09, &reg);
65 MAX77693_CHG_REG_CHG_CNFG_09, &reg);
66 if (ret < 0) 66 if (ret < 0)
67 return ret; 67 return ret;
68 68
@@ -96,7 +96,7 @@ static int max77693_chg_set_current_limit(struct regulator_dev *rdev,
96 /* the first four codes for charger current are all 60mA */ 96 /* the first four codes for charger current are all 60mA */
97 sel += 3; 97 sel += 3;
98 98
99 return max77693_write_reg(rdev->regmap, 99 return regmap_write(rdev->regmap,
100 MAX77693_CHG_REG_CHG_CNFG_09, sel); 100 MAX77693_CHG_REG_CHG_CNFG_09, sel);
101} 101}
102/* end of CHARGER regulator ops */ 102/* end of CHARGER regulator ops */
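The regulator changes above swap the MFD's private max77693_read_reg()/max77693_write_reg() helpers for plain regmap calls; regmap_read() fills an unsigned int, which is why the u8 locals change type. The generic shape of the conversion (a sketch, not additional driver code):

#include <linux/regmap.h>

unsigned int val;
int ret;

ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
if (ret)
	return ret;

ret = regmap_write(rdev->regmap, MAX77693_CHG_REG_CHG_CNFG_09, sel);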
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 3b5780710d50..1d92f5103ebf 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -18,7 +18,6 @@
18#include <linux/of_device.h> 18#include <linux/of_device.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/spmi.h> 20#include <linux/spmi.h>
21#include <linux/module.h>
22#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
23 22
24#include <dt-bindings/spmi/spmi.h> 23#include <dt-bindings/spmi/spmi.h>
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
new file mode 100644
index 000000000000..c121acc15bfe
--- /dev/null
+++ b/drivers/thunderbolt/Kconfig
@@ -0,0 +1,13 @@
1menuconfig THUNDERBOLT
2 tristate "Thunderbolt support for Apple devices"
3 depends on PCI
4 select CRC32
5 help
6 Cactus Ridge Thunderbolt Controller driver
7 This driver is required if you want to hotplug Thunderbolt devices on
8 Apple hardware.
9
10 Device chaining is currently not supported.
11
 12	  To compile this driver as a module, choose M here. The module will be
13 called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
new file mode 100644
index 000000000000..5d1053cdfa54
--- /dev/null
+++ b/drivers/thunderbolt/Makefile
@@ -0,0 +1,3 @@
1obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
2thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
3
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
new file mode 100644
index 000000000000..a7b47e7cddbd
--- /dev/null
+++ b/drivers/thunderbolt/cap.c
@@ -0,0 +1,116 @@
1/*
2 * Thunderbolt Cactus Ridge driver - capabilities lookup
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#include <linux/slab.h>
8#include <linux/errno.h>
9
10#include "tb.h"
11
12
13struct tb_cap_any {
14 union {
15 struct tb_cap_basic basic;
16 struct tb_cap_extended_short extended_short;
17 struct tb_cap_extended_long extended_long;
18 };
19} __packed;
20
21static bool tb_cap_is_basic(struct tb_cap_any *cap)
22{
 23	/* basic.cap is u8. This checks only the lower 8 bits of cap. */
24 return cap->basic.cap != 5;
25}
26
27static bool tb_cap_is_long(struct tb_cap_any *cap)
28{
29 return !tb_cap_is_basic(cap)
30 && cap->extended_short.next == 0
31 && cap->extended_short.length == 0;
32}
33
34static enum tb_cap tb_cap(struct tb_cap_any *cap)
35{
36 if (tb_cap_is_basic(cap))
37 return cap->basic.cap;
38 else
39 /* extended_short/long have cap at the same offset. */
40 return cap->extended_short.cap;
41}
42
43static u32 tb_cap_next(struct tb_cap_any *cap, u32 offset)
44{
45 int next;
46 if (offset == 1) {
47 /*
48 * The first pointer is part of the switch header and always
49 * a simple pointer.
50 */
51 next = cap->basic.next;
52 } else {
53 /*
54 * Somehow Intel decided to use 3 different types of capability
55 * headers. It is not like anyone could have predicted that
56 * single byte offsets are not enough...
57 */
58 if (tb_cap_is_basic(cap))
59 next = cap->basic.next;
60 else if (!tb_cap_is_long(cap))
61 next = cap->extended_short.next;
62 else
63 next = cap->extended_long.next;
64 }
65 /*
66 * "Hey, we could terminate some capability lists with a null offset
67 * and others with a pointer to the last element." - "Great idea!"
68 */
69 if (next == offset)
70 return 0;
71 return next;
72}
73
74/**
75 * tb_find_cap() - find a capability
76 *
77 * Return: Returns a positive offset if the capability was found and 0 if not.
78 * Returns an error code on failure.
79 */
80int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap)
81{
82 u32 offset = 1;
83 struct tb_cap_any header;
84 int res;
85 int retries = 10;
86 while (retries--) {
87 res = tb_port_read(port, &header, space, offset, 1);
88 if (res) {
89 /* Intel needs some help with linked lists. */
90 if (space == TB_CFG_PORT && offset == 0xa
91 && port->config.type == TB_TYPE_DP_HDMI_OUT) {
92 offset = 0x39;
93 continue;
94 }
95 return res;
96 }
97 if (offset != 1) {
98 if (tb_cap(&header) == cap)
99 return offset;
100 if (tb_cap_is_long(&header)) {
101 /* tb_cap_extended_long is 2 dwords */
102 res = tb_port_read(port, &header, space,
103 offset, 2);
104 if (res)
105 return res;
106 }
107 }
108 offset = tb_cap_next(&header, offset);
109 if (!offset)
110 return 0;
111 }
112 tb_port_WARN(port,
113 "run out of retries while looking for cap %#x in config space %d, last offset: %#x\n",
114 cap, space, offset);
115 return -EIO;
116}
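tb_find_cap() returns a positive dword offset when the capability exists, 0 when it does not, and a negative errno on I/O failure, so callers need a three-way check. A hypothetical call (the capability constant is a placeholder, not taken from this diff):

int off = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PHY /* placeholder */);
if (off > 0)
	/* capability found at config-space offset "off" */;
else if (off == 0)
	/* port has no such capability */;
else
	return off;	/* error while walking the list */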
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
new file mode 100644
index 000000000000..799634b382c6
--- /dev/null
+++ b/drivers/thunderbolt/ctl.c
@@ -0,0 +1,731 @@
1/*
2 * Thunderbolt Cactus Ridge driver - control channel and configuration commands
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#include <linux/crc32.h>
8#include <linux/slab.h>
9#include <linux/pci.h>
10#include <linux/dmapool.h>
11#include <linux/workqueue.h>
12#include <linux/kfifo.h>
13
14#include "ctl.h"
15
16
17struct ctl_pkg {
18 struct tb_ctl *ctl;
19 void *buffer;
20 struct ring_frame frame;
21};
22
23#define TB_CTL_RX_PKG_COUNT 10
24
25/**
 26 * struct tb_ctl - thunderbolt control channel
27 */
28struct tb_ctl {
29 struct tb_nhi *nhi;
30 struct tb_ring *tx;
31 struct tb_ring *rx;
32
33 struct dma_pool *frame_pool;
34 struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
35 DECLARE_KFIFO(response_fifo, struct ctl_pkg*, 16);
36 struct completion response_ready;
37
38 hotplug_cb callback;
39 void *callback_data;
40};
41
42
43#define tb_ctl_WARN(ctl, format, arg...) \
44 dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
45
46#define tb_ctl_err(ctl, format, arg...) \
47 dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
48
49#define tb_ctl_warn(ctl, format, arg...) \
50 dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
51
52#define tb_ctl_info(ctl, format, arg...) \
53 dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
54
55
56/* configuration packets definitions */
57
58enum tb_cfg_pkg_type {
59 TB_CFG_PKG_READ = 1,
60 TB_CFG_PKG_WRITE = 2,
61 TB_CFG_PKG_ERROR = 3,
62 TB_CFG_PKG_NOTIFY_ACK = 4,
63 TB_CFG_PKG_EVENT = 5,
64 TB_CFG_PKG_XDOMAIN_REQ = 6,
65 TB_CFG_PKG_XDOMAIN_RESP = 7,
66 TB_CFG_PKG_OVERRIDE = 8,
67 TB_CFG_PKG_RESET = 9,
68 TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
69};
70
71/* common header */
72struct tb_cfg_header {
73 u32 route_hi:22;
74 u32 unknown:10; /* highest order bit is set on replies */
75 u32 route_lo;
76} __packed;
77
78/* additional header for read/write packets */
79struct tb_cfg_address {
80 u32 offset:13; /* in dwords */
81 u32 length:6; /* in dwords */
82 u32 port:6;
83 enum tb_cfg_space space:2;
84 u32 seq:2; /* sequence number */
85 u32 zero:3;
86} __packed;
87
88/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
89struct cfg_read_pkg {
90 struct tb_cfg_header header;
91 struct tb_cfg_address addr;
92} __packed;
93
94/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
95struct cfg_write_pkg {
96 struct tb_cfg_header header;
97 struct tb_cfg_address addr;
98 u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
99} __packed;
100
101/* TB_CFG_PKG_ERROR */
102struct cfg_error_pkg {
103 struct tb_cfg_header header;
104 enum tb_cfg_error error:4;
105 u32 zero1:4;
106 u32 port:6;
107 u32 zero2:2; /* Both should be zero, still they are different fields. */
108 u32 zero3:16;
109} __packed;
110
111/* TB_CFG_PKG_EVENT */
112struct cfg_event_pkg {
113 struct tb_cfg_header header;
114 u32 port:6;
115 u32 zero:25;
116 bool unplug:1;
117} __packed;
118
119/* TB_CFG_PKG_RESET */
120struct cfg_reset_pkg {
121 struct tb_cfg_header header;
122} __packed;
123
124/* TB_CFG_PKG_PREPARE_TO_SLEEP */
125struct cfg_pts_pkg {
126 struct tb_cfg_header header;
127 u32 data;
128} __packed;
129
130
131/* utility functions */
132
133static u64 get_route(struct tb_cfg_header header)
134{
135 return (u64) header.route_hi << 32 | header.route_lo;
136}
137
138static struct tb_cfg_header make_header(u64 route)
139{
140 struct tb_cfg_header header = {
141 .route_hi = route >> 32,
142 .route_lo = route,
143 };
144 /* check for overflow, route_hi is not 32 bits! */
145 WARN_ON(get_route(header) != route);
146 return header;
147}
148
149static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
150 u64 route)
151{
152 struct tb_cfg_header *header = pkg->buffer;
153
154 /* check frame, TODO: frame flags */
155 if (WARN(len != pkg->frame.size,
156 "wrong framesize (expected %#x, got %#x)\n",
157 len, pkg->frame.size))
158 return -EIO;
159 if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
160 type, pkg->frame.eof))
161 return -EIO;
162 if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
163 pkg->frame.sof))
164 return -EIO;
165
166 /* check header */
167 if (WARN(header->unknown != 1 << 9,
168 "header->unknown is %#x\n", header->unknown))
169 return -EIO;
170 if (WARN(route != get_route(*header),
171 "wrong route (expected %llx, got %llx)",
172 route, get_route(*header)))
173 return -EIO;
174 return 0;
175}
176
177static int check_config_address(struct tb_cfg_address addr,
178 enum tb_cfg_space space, u32 offset,
179 u32 length)
180{
181 if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
182 return -EIO;
183 if (WARN(space != addr.space, "wrong space (expected %x, got %x\n)",
184 space, addr.space))
185 return -EIO;
186 if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x\n)",
187 offset, addr.offset))
188 return -EIO;
 189	if (WARN(length != addr.length, "wrong length (expected %x, got %x\n)",
190 length, addr.length))
191 return -EIO;
192 if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq))
193 return -EIO;
194 /*
195 * We cannot check addr->port as it is set to the upstream port of the
196 * sender.
197 */
198 return 0;
199}
200
201static struct tb_cfg_result decode_error(struct ctl_pkg *response)
202{
203 struct cfg_error_pkg *pkg = response->buffer;
204 struct tb_cfg_result res = { 0 };
205 res.response_route = get_route(pkg->header);
206 res.response_port = 0;
207 res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
208 get_route(pkg->header));
209 if (res.err)
210 return res;
211
212 WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
 213	WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
 214	WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
215 res.err = 1;
216 res.tb_error = pkg->error;
217 res.response_port = pkg->port;
218 return res;
219
220}
221
222static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
223 enum tb_cfg_pkg_type type, u64 route)
224{
225 struct tb_cfg_header *header = pkg->buffer;
226 struct tb_cfg_result res = { 0 };
227
228 if (pkg->frame.eof == TB_CFG_PKG_ERROR)
229 return decode_error(pkg);
230
231 res.response_port = 0; /* will be updated later for cfg_read/write */
232 res.response_route = get_route(*header);
233 res.err = check_header(pkg, len, type, route);
234 return res;
235}
236
237static void tb_cfg_print_error(struct tb_ctl *ctl,
238 const struct tb_cfg_result *res)
239{
240 WARN_ON(res->err != 1);
241 switch (res->tb_error) {
242 case TB_CFG_ERROR_PORT_NOT_CONNECTED:
243 /* Port is not connected. This can happen during surprise
244 * removal. Do not warn. */
245 return;
246 case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
247 /*
248 * Invalid cfg_space/offset/length combination in
249 * cfg_read/cfg_write.
250 */
251 tb_ctl_WARN(ctl,
252 "CFG_ERROR(%llx:%x): Invalid config space of offset\n",
253 res->response_route, res->response_port);
254 return;
255 case TB_CFG_ERROR_NO_SUCH_PORT:
256 /*
257 * - The route contains a non-existent port.
258 * - The route contains a non-PHY port (e.g. PCIe).
259 * - The port in cfg_read/cfg_write does not exist.
260 */
261 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
262 res->response_route, res->response_port);
263 return;
264 case TB_CFG_ERROR_LOOP:
265 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
266 res->response_route, res->response_port);
267 return;
268 default:
269 /* 5,6,7,9 and 11 are also valid error codes */
270 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
271 res->response_route, res->response_port);
272 return;
273 }
274}
275
276static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len)
277{
278 int i;
279 for (i = 0; i < len; i++)
280 dst[i] = cpu_to_be32(src[i]);
281}
282
283static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
284{
285 int i;
286 for (i = 0; i < len; i++)
287 dst[i] = be32_to_cpu(src[i]);
288}
289
290static __be32 tb_crc(void *data, size_t len)
291{
292 return cpu_to_be32(~__crc32c_le(~0, data, len));
293}
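For reference, the checksum convention used below: the payload is converted to big endian dword by dword and a big-endian CRC32C of that converted payload is appended as the final dword (see tb_ctl_tx() and tb_ctl_rx_callback()). A minimal illustrative check, assuming a buffer laid out that way (not part of the driver):

/* Illustrative only: verify the trailing checksum of a frame buffer. */
static bool tb_ctl_example_checksum_ok(void *buffer, size_t payload_len)
{
	__be32 stored = *(__be32 *)(buffer + payload_len);

	return stored == tb_crc(buffer, payload_len);
}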
294
295static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
296{
297 if (pkg) {
298 dma_pool_free(pkg->ctl->frame_pool,
299 pkg->buffer, pkg->frame.buffer_phy);
300 kfree(pkg);
301 }
302}
303
304static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
305{
306 struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
307 if (!pkg)
308 return NULL;
309 pkg->ctl = ctl;
310 pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
311 &pkg->frame.buffer_phy);
312 if (!pkg->buffer) {
313 kfree(pkg);
314 return NULL;
315 }
316 return pkg;
317}
318
319
320/* RX/TX handling */
321
322static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
323 bool canceled)
324{
325 struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
326 tb_ctl_pkg_free(pkg);
327}
328
329/**
330 * tb_ctl_tx() - transmit a packet on the control channel
331 *
332 * len must be a multiple of four.
333 *
334 * Return: Returns 0 on success or an error code on failure.
335 */
336static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len,
337 enum tb_cfg_pkg_type type)
338{
339 int res;
340 struct ctl_pkg *pkg;
341 if (len % 4 != 0) { /* required for le->be conversion */
342 tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
343 return -EINVAL;
344 }
345 if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
346 tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
347 len, TB_FRAME_SIZE - 4);
348 return -EINVAL;
349 }
350 pkg = tb_ctl_pkg_alloc(ctl);
351 if (!pkg)
352 return -ENOMEM;
353 pkg->frame.callback = tb_ctl_tx_callback;
354 pkg->frame.size = len + 4;
355 pkg->frame.sof = type;
356 pkg->frame.eof = type;
357 cpu_to_be32_array(pkg->buffer, data, len / 4);
358 *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
359
360 res = ring_tx(ctl->tx, &pkg->frame);
361 if (res) /* ring is stopped */
362 tb_ctl_pkg_free(pkg);
363 return res;
364}
365
366/**
367 * tb_ctl_handle_plug_event() - acknowledge a plug event, invoke ctl->callback
368 */
369static void tb_ctl_handle_plug_event(struct tb_ctl *ctl,
370 struct ctl_pkg *response)
371{
372 struct cfg_event_pkg *pkg = response->buffer;
373 u64 route = get_route(pkg->header);
374
375 if (check_header(response, sizeof(*pkg), TB_CFG_PKG_EVENT, route)) {
376 tb_ctl_warn(ctl, "malformed TB_CFG_PKG_EVENT\n");
377 return;
378 }
379
380 if (tb_cfg_error(ctl, route, pkg->port, TB_CFG_ERROR_ACK_PLUG_EVENT))
381 tb_ctl_warn(ctl, "could not ack plug event on %llx:%x\n",
382 route, pkg->port);
383 WARN(pkg->zero, "pkg->zero is %#x\n", pkg->zero);
384 ctl->callback(ctl->callback_data, route, pkg->port, pkg->unplug);
385}
386
387static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
388{
389 ring_rx(pkg->ctl->rx, &pkg->frame); /*
390 * We ignore failures during stop.
391 * All rx packets are referenced
392 * from ctl->rx_packets, so we do
393 * not lose them.
394 */
395}
396
397static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
398 bool canceled)
399{
400 struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
401
402 if (canceled)
403 return; /*
404 * ring is stopped, packet is referenced from
405 * ctl->rx_packets.
406 */
407
408 if (frame->size < 4 || frame->size % 4 != 0) {
409 tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
410 frame->size);
411 goto rx;
412 }
413
414 frame->size -= 4; /* remove checksum */
415 if (*(__be32 *) (pkg->buffer + frame->size)
416 != tb_crc(pkg->buffer, frame->size)) {
417 tb_ctl_err(pkg->ctl,
418 "RX: checksum mismatch, dropping packet\n");
419 goto rx;
420 }
421 be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);
422
423 if (frame->eof == TB_CFG_PKG_EVENT) {
424 tb_ctl_handle_plug_event(pkg->ctl, pkg);
425 goto rx;
426 }
427 if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) {
428 tb_ctl_err(pkg->ctl, "RX: fifo is full\n");
429 goto rx;
430 }
431 complete(&pkg->ctl->response_ready);
432 return;
433rx:
434 tb_ctl_rx_submit(pkg);
435}
436
437/**
438 * tb_ctl_rx() - receive a packet from the control channel
439 */
440static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer,
441 size_t length, int timeout_msec,
442 u64 route, enum tb_cfg_pkg_type type)
443{
444 struct tb_cfg_result res;
445 struct ctl_pkg *pkg;
446
447 if (!wait_for_completion_timeout(&ctl->response_ready,
448 msecs_to_jiffies(timeout_msec))) {
449 tb_ctl_WARN(ctl, "RX: timeout\n");
450 return (struct tb_cfg_result) { .err = -ETIMEDOUT };
451 }
452 if (!kfifo_get(&ctl->response_fifo, &pkg)) {
453 tb_ctl_WARN(ctl, "empty kfifo\n");
454 return (struct tb_cfg_result) { .err = -EIO };
455 }
456
457 res = parse_header(pkg, length, type, route);
458 if (!res.err)
459 memcpy(buffer, pkg->buffer, length);
460 tb_ctl_rx_submit(pkg);
461 return res;
462}
463
464
465/* public interface, alloc/start/stop/free */
466
467/**
468 * tb_ctl_alloc() - allocate a control channel
469 *
470 * cb will be invoked once for every hot plug event.
471 *
472 * Return: Returns a pointer on success or NULL on failure.
473 */
474struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
475{
476 int i;
477 struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
478 if (!ctl)
479 return NULL;
480 ctl->nhi = nhi;
481 ctl->callback = cb;
482 ctl->callback_data = cb_data;
483
484 init_completion(&ctl->response_ready);
485 INIT_KFIFO(ctl->response_fifo);
486 ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
487 TB_FRAME_SIZE, 4, 0);
488 if (!ctl->frame_pool)
489 goto err;
490
491 ctl->tx = ring_alloc_tx(nhi, 0, 10);
492 if (!ctl->tx)
493 goto err;
494
495 ctl->rx = ring_alloc_rx(nhi, 0, 10);
496 if (!ctl->rx)
497 goto err;
498
499 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
500 ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
501 if (!ctl->rx_packets[i])
502 goto err;
503 ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
504 }
505
506 tb_ctl_info(ctl, "control channel created\n");
507 return ctl;
508err:
509 tb_ctl_free(ctl);
510 return NULL;
511}
512
513/**
514 * tb_ctl_free() - free a control channel
515 *
516 * Must be called after tb_ctl_stop.
517 *
518 * Must NOT be called from ctl->callback.
519 */
520void tb_ctl_free(struct tb_ctl *ctl)
521{
522 int i;
523 if (ctl->rx)
524 ring_free(ctl->rx);
525 if (ctl->tx)
526 ring_free(ctl->tx);
527
528 /* free RX packets */
529 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
530 tb_ctl_pkg_free(ctl->rx_packets[i]);
531
532
533 if (ctl->frame_pool)
534 dma_pool_destroy(ctl->frame_pool);
535 kfree(ctl);
536}
537
538/**
539 * tb_ctl_start() - start/resume the control channel
540 */
541void tb_ctl_start(struct tb_ctl *ctl)
542{
543 int i;
544 tb_ctl_info(ctl, "control channel starting...\n");
545 ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
546 ring_start(ctl->rx);
547 for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
548 tb_ctl_rx_submit(ctl->rx_packets[i]);
549}
550
551/**
552 * tb_ctl_stop() - pause the control channel
553 *
554 * All invocations of ctl->callback will have finished after this method
555 * returns.
556 *
557 * Must NOT be called from ctl->callback.
558 */
559void tb_ctl_stop(struct tb_ctl *ctl)
560{
561 ring_stop(ctl->rx);
562 ring_stop(ctl->tx);
563
564 if (!kfifo_is_empty(&ctl->response_fifo))
565 tb_ctl_WARN(ctl, "dangling response in response_fifo\n");
566 kfifo_reset(&ctl->response_fifo);
567 tb_ctl_info(ctl, "control channel stopped\n");
568}
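Taken together with tb_ctl_alloc()/tb_ctl_free(), this completes the control channel lifecycle. A minimal usage sketch (illustrative only, not part of the driver; assumes a valid struct tb_nhi and a hotplug handler matching the hotplug_cb typedef in ctl.h):

static void example_hotplug(void *data, u64 route, u8 port, bool unplug)
{
	/* react to the hotplug event, e.g. scan or remove a switch */
}

static int example_ctl_lifecycle(struct tb_nhi *nhi)
{
	struct tb_ctl *ctl = tb_ctl_alloc(nhi, example_hotplug, NULL);

	if (!ctl)
		return -ENOMEM;
	tb_ctl_start(ctl);
	/* ... issue tb_cfg_read()/tb_cfg_write() commands ... */
	tb_ctl_stop(ctl);
	tb_ctl_free(ctl);
	return 0;
}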
569
570/* public interface, commands */
571
572/**
573 * tb_cfg_error() - send error packet
574 *
575 * Return: Returns 0 on success or an error code on failure.
576 */
577int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
578 enum tb_cfg_error error)
579{
580 struct cfg_error_pkg pkg = {
581 .header = make_header(route),
582 .port = port,
583 .error = error,
584 };
585 tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
586 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
587}
588
589/**
590 * tb_cfg_reset() - send a reset packet and wait for a response
591 *
592 * If the switch at route is incorrectly configured then we will not receive a
593 * reply (even though the switch will reset). The caller should check for
594 * -ETIMEDOUT and attempt to reconfigure the switch.
595 */
596struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
597 int timeout_msec)
598{
599 int err;
600 struct cfg_reset_pkg request = { .header = make_header(route) };
601 struct tb_cfg_header reply;
602
603 err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET);
604 if (err)
605 return (struct tb_cfg_result) { .err = err };
606
607 return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
608 TB_CFG_PKG_RESET);
609}
610
611/**
612 * tb_cfg_read_raw() - read from config space into buffer
613 *
614 * Offset and length are in dwords.
615 */
616struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
617 u64 route, u32 port, enum tb_cfg_space space,
618 u32 offset, u32 length, int timeout_msec)
619{
620 struct tb_cfg_result res = { 0 };
621 struct cfg_read_pkg request = {
622 .header = make_header(route),
623 .addr = {
624 .port = port,
625 .space = space,
626 .offset = offset,
627 .length = length,
628 },
629 };
630 struct cfg_write_pkg reply;
631
632 res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ);
633 if (res.err)
634 return res;
635
636 res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route,
637 TB_CFG_PKG_READ);
638 if (res.err)
639 return res;
640
641 res.response_port = reply.addr.port;
642 res.err = check_config_address(reply.addr, space, offset, length);
643 if (!res.err)
644 memcpy(buffer, &reply.data, 4 * length);
645 return res;
646}
647
648/**
649 * tb_cfg_write_raw() - write from buffer into config space
650 *
651 * Offset and length are in dwords.
652 */
653struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
654 u64 route, u32 port, enum tb_cfg_space space,
655 u32 offset, u32 length, int timeout_msec)
656{
657 struct tb_cfg_result res = { 0 };
658 struct cfg_write_pkg request = {
659 .header = make_header(route),
660 .addr = {
661 .port = port,
662 .space = space,
663 .offset = offset,
664 .length = length,
665 },
666 };
667 struct cfg_read_pkg reply;
668
669 memcpy(&request.data, buffer, length * 4);
670
671 res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE);
672 if (res.err)
673 return res;
674
675 res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
676 TB_CFG_PKG_WRITE);
677 if (res.err)
678 return res;
679
680 res.response_port = reply.addr.port;
681 res.err = check_config_address(reply.addr, space, offset, length);
682 return res;
683}
684
685int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
686 enum tb_cfg_space space, u32 offset, u32 length)
687{
688 struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
689 space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
690 if (res.err == 1) {
691 tb_cfg_print_error(ctl, &res);
692 return -EIO;
693 }
694 WARN(res.err, "tb_cfg_read: %d\n", res.err);
695 return res.err;
696}
697
698int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
699 enum tb_cfg_space space, u32 offset, u32 length)
700{
701 struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
702 space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
703 if (res.err == 1) {
704 tb_cfg_print_error(ctl, &res);
705 return -EIO;
706 }
707 WARN(res.err, "tb_cfg_write: %d\n", res.err);
708 return res.err;
709}
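As a usage sketch for the two convenience wrappers above (illustrative only; assumes a started control channel and a switch present at the given route):

static int example_read_first_dword(struct tb_ctl *ctl, u64 route)
{
	u32 value;
	int res = tb_cfg_read(ctl, &value, route, 0, TB_CFG_SWITCH, 0, 1);

	if (res)
		return res;
	/* value now holds dword 0 of the switch config space */
	return 0;
}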
710
711/**
712 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
713 *
714 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
715 * returns the port number from which the reply originated.
716 *
717 * Return: Returns the upstream port number on success or an error code on
718 * failure.
719 */
720int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
721{
722 u32 dummy;
723 struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
724 TB_CFG_SWITCH, 0, 1,
725 TB_CFG_DEFAULT_TIMEOUT);
726 if (res.err == 1)
727 return -EIO;
728 if (res.err)
729 return res.err;
730 return res.response_port;
731}
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
new file mode 100644
index 000000000000..ba87d6e731dd
--- /dev/null
+++ b/drivers/thunderbolt/ctl.h
@@ -0,0 +1,75 @@
1/*
2 * Thunderbolt Cactus Ridge driver - control channel and configuration commands
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#ifndef _TB_CFG
8#define _TB_CFG
9
10#include "nhi.h"
11
12/* control channel */
13struct tb_ctl;
14
15typedef void (*hotplug_cb)(void *data, u64 route, u8 port, bool unplug);
16
17struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data);
18void tb_ctl_start(struct tb_ctl *ctl);
19void tb_ctl_stop(struct tb_ctl *ctl);
20void tb_ctl_free(struct tb_ctl *ctl);
21
22/* configuration commands */
23
24#define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */
25
26enum tb_cfg_space {
27 TB_CFG_HOPS = 0,
28 TB_CFG_PORT = 1,
29 TB_CFG_SWITCH = 2,
30 TB_CFG_COUNTERS = 3,
31};
32
33enum tb_cfg_error {
34 TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
35 TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
36 TB_CFG_ERROR_NO_SUCH_PORT = 4,
37 TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */
38 TB_CFG_ERROR_LOOP = 8,
39};
40
41struct tb_cfg_result {
42 u64 response_route;
43 u32 response_port; /*
44 * If err = 1 then this is the port that sent the
45 * error.
46 * If err = 0 and if this was a cfg_read/write then
47 * this is the upstream port of the responding
48 * switch.
49 * Otherwise the field is set to zero.
50 */
51 int err; /* negative errors, 0 for success, 1 for tb errors */
52 enum tb_cfg_error tb_error; /* valid if err == 1 */
53};
54
55
56int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
57 enum tb_cfg_error error);
58struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
59 int timeout_msec);
60struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
61 u64 route, u32 port,
62 enum tb_cfg_space space, u32 offset,
63 u32 length, int timeout_msec);
64struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
65 u64 route, u32 port,
66 enum tb_cfg_space space, u32 offset,
67 u32 length, int timeout_msec);
68int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
69 enum tb_cfg_space space, u32 offset, u32 length);
70int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
71 enum tb_cfg_space space, u32 offset, u32 length);
72int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
73
74
75#endif
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
new file mode 100644
index 000000000000..0dde34e3a7c5
--- /dev/null
+++ b/drivers/thunderbolt/eeprom.c
@@ -0,0 +1,449 @@
1/*
2 * Thunderbolt Cactus Ridge driver - eeprom access
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#include <linux/crc32.h>
8#include <linux/slab.h>
9#include "tb.h"
10
11/**
12 * tb_eeprom_ctl_write() - write control word
13 */
14static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
15{
16 return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
17}
18
19/**
20 * tb_eeprom_ctl_read() - read control word
21 */
22static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
23{
24 return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
25}
26
27enum tb_eeprom_transfer {
28 TB_EEPROM_IN,
29 TB_EEPROM_OUT,
30};
31
32/**
33 * tb_eeprom_active - enable rom access
34 *
35 * WARNING: Always disable access after usage. Otherwise the controller will
36 * fail to reprobe.
37 */
38static int tb_eeprom_active(struct tb_switch *sw, bool enable)
39{
40 struct tb_eeprom_ctl ctl;
41 int res = tb_eeprom_ctl_read(sw, &ctl);
42 if (res)
43 return res;
44 if (enable) {
45 ctl.access_high = 1;
46 res = tb_eeprom_ctl_write(sw, &ctl);
47 if (res)
48 return res;
49 ctl.access_low = 0;
50 return tb_eeprom_ctl_write(sw, &ctl);
51 } else {
52 ctl.access_low = 1;
53 res = tb_eeprom_ctl_write(sw, &ctl);
54 if (res)
55 return res;
56 ctl.access_high = 0;
57 return tb_eeprom_ctl_write(sw, &ctl);
58 }
59}
60
61/**
62 * tb_eeprom_transfer - transfer one bit
63 *
64 * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
65 * If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
66 */
67static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
68 enum tb_eeprom_transfer direction)
69{
70 int res;
71 if (direction == TB_EEPROM_OUT) {
72 res = tb_eeprom_ctl_write(sw, ctl);
73 if (res)
74 return res;
75 }
76 ctl->clock = 1;
77 res = tb_eeprom_ctl_write(sw, ctl);
78 if (res)
79 return res;
80 if (direction == TB_EEPROM_IN) {
81 res = tb_eeprom_ctl_read(sw, ctl);
82 if (res)
83 return res;
84 }
85 ctl->clock = 0;
86 return tb_eeprom_ctl_write(sw, ctl);
87}
88
89/**
90 * tb_eeprom_out - write one byte to the bus
91 */
92static int tb_eeprom_out(struct tb_switch *sw, u8 val)
93{
94 struct tb_eeprom_ctl ctl;
95 int i;
96 int res = tb_eeprom_ctl_read(sw, &ctl);
97 if (res)
98 return res;
99 for (i = 0; i < 8; i++) {
100 ctl.data_out = val & 0x80;
101 res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
102 if (res)
103 return res;
104 val <<= 1;
105 }
106 return 0;
107}
108
109/**
110 * tb_eeprom_in - read one byte from the bus
111 */
112static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
113{
114 struct tb_eeprom_ctl ctl;
115 int i;
116 int res = tb_eeprom_ctl_read(sw, &ctl);
117 if (res)
118 return res;
119 *val = 0;
120 for (i = 0; i < 8; i++) {
121 *val <<= 1;
122 res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
123 if (res)
124 return res;
125 *val |= ctl.data_in;
126 }
127 return 0;
128}
129
130/**
131 * tb_eeprom_read_n - read count bytes from offset into val
132 */
133static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
134 size_t count)
135{
136 int i, res;
137 res = tb_eeprom_active(sw, true);
138 if (res)
139 return res;
140 res = tb_eeprom_out(sw, 3);
141 if (res)
142 return res;
143 res = tb_eeprom_out(sw, offset >> 8);
144 if (res)
145 return res;
146 res = tb_eeprom_out(sw, offset);
147 if (res)
148 return res;
149 for (i = 0; i < count; i++) {
150 res = tb_eeprom_in(sw, val + i);
151 if (res)
152 return res;
153 }
154 return tb_eeprom_active(sw, false);
155}
156
157static u8 tb_crc8(u8 *data, int len)
158{
159 int i, j;
160 u8 val = 0xff;
161 for (i = 0; i < len; i++) {
162 val ^= data[i];
163 for (j = 0; j < 8; j++)
164 val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
165 }
166 return val;
167}
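tb_crc8() is a CRC-8 with polynomial 0x07 and initial value 0xff; the DROM stores this checksum of the 8 uid bytes in byte 0. An illustrative check mirroring tb_drom_read_uid_only() below (the 9-byte layout is the one read there):

/* Illustrative only: data[0] holds the crc8, data[1..8] hold the uid. */
static bool example_uid_crc_ok(u8 *data)
{
	return tb_crc8(data + 1, 8) == data[0];
}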
168
169static u32 tb_crc32(void *data, size_t len)
170{
171 return ~__crc32c_le(~0, data, len);
172}
173
174#define TB_DROM_DATA_START 13
175struct tb_drom_header {
176 /* BYTE 0 */
177 u8 uid_crc8; /* checksum for uid */
178 /* BYTES 1-8 */
179 u64 uid;
180 /* BYTES 9-12 */
181 u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
182 /* BYTE 13 */
183 u8 device_rom_revision; /* should be <= 1 */
184 u16 data_len:10;
185 u8 __unknown1:6;
186 /* BYTES 16-21 */
187 u16 vendor_id;
188 u16 model_id;
189 u8 model_rev;
190 u8 eeprom_rev;
191} __packed;
192
193enum tb_drom_entry_type {
194 /* force unsigned to prevent "one-bit signed bitfield" warning */
195 TB_DROM_ENTRY_GENERIC = 0U,
196 TB_DROM_ENTRY_PORT,
197};
198
199struct tb_drom_entry_header {
200 u8 len;
201 u8 index:6;
202 bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
203 enum tb_drom_entry_type type:1;
204} __packed;
205
206struct tb_drom_entry_port {
207 /* BYTES 0-1 */
208 struct tb_drom_entry_header header;
209 /* BYTE 2 */
210 u8 dual_link_port_rid:4;
211 u8 link_nr:1;
212 u8 unknown1:2;
213 bool has_dual_link_port:1;
214
215 /* BYTE 3 */
216 u8 dual_link_port_nr:6;
217 u8 unknown2:2;
218
219 /* BYTES 4 - 5 TODO decode */
220 u8 micro2:4;
221 u8 micro1:4;
222 u8 micro3;
223
224 /* BYTES 5-6, TODO: verify (find hardware that has these set) */
225 u8 peer_port_rid:4;
226 u8 unknown3:3;
227 bool has_peer_port:1;
228 u8 peer_port_nr:6;
229 u8 unknown4:2;
230} __packed;
231
232
233/**
234 * tb_eeprom_get_drom_offset - get drom offset within eeprom
235 */
236static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
237{
238 struct tb_cap_plug_events cap;
239 int res;
240 if (!sw->cap_plug_events) {
241 tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
242 return -ENOSYS;
243 }
244 res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
245 sizeof(cap) / 4);
246 if (res)
247 return res;
248
249 if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
250 tb_sw_warn(sw, "no NVM\n");
251 return -ENOSYS;
252 }
253
254 if (cap.drom_offset > 0xffff) {
255 tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
256 cap.drom_offset);
257 return -ENXIO;
258 }
259 *offset = cap.drom_offset;
260 return 0;
261}
262
263/**
264 * tb_drom_read_uid_only - read uid directly from drom
265 *
266 * Does not use the cached copy in sw->drom. Used during resume to check switch
267 * identity.
268 */
269int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
270{
271 u8 data[9];
272 u16 drom_offset;
273 u8 crc;
274 int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
275 if (res)
276 return res;
277
278 /* read uid */
279 res = tb_eeprom_read_n(sw, drom_offset, data, 9);
280 if (res)
281 return res;
282
283 crc = tb_crc8(data + 1, 8);
284 if (crc != data[0]) {
285 tb_sw_warn(sw, "uid crc8 missmatch (expected: %#x, got: %#x)\n",
286 data[0], crc);
287 return -EIO;
288 }
289
290 *uid = *(u64 *)(data+1);
291 return 0;
292}
293
294static void tb_drom_parse_port_entry(struct tb_port *port,
295 struct tb_drom_entry_port *entry)
296{
297 port->link_nr = entry->link_nr;
298 if (entry->has_dual_link_port)
299 port->dual_link_port =
300 &port->sw->ports[entry->dual_link_port_nr];
301}
302
303static int tb_drom_parse_entry(struct tb_switch *sw,
304 struct tb_drom_entry_header *header)
305{
306 struct tb_port *port;
307 int res;
308 enum tb_port_type type;
309
310 if (header->type != TB_DROM_ENTRY_PORT)
311 return 0;
312
313 port = &sw->ports[header->index];
314 port->disabled = header->port_disabled;
315 if (port->disabled)
316 return 0;
317
318 res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
319 if (res)
320 return res;
321 type &= 0xffffff;
322
323 if (type == TB_TYPE_PORT) {
324 struct tb_drom_entry_port *entry = (void *) header;
325 if (header->len != sizeof(*entry)) {
326 tb_sw_warn(sw,
327 "port entry has size %#x (expected %#zx)\n",
328 header->len, sizeof(struct tb_drom_entry_port));
329 return -EIO;
330 }
331 tb_drom_parse_port_entry(port, entry);
332 }
333 return 0;
334}
335
336/**
337 * tb_drom_parse_entries - parse the linked list of drom entries
338 *
339 * Drom must have been copied to sw->drom.
340 */
341static int tb_drom_parse_entries(struct tb_switch *sw)
342{
343 struct tb_drom_header *header = (void *) sw->drom;
344 u16 pos = sizeof(*header);
345 u16 drom_size = header->data_len + TB_DROM_DATA_START;
346
347 while (pos < drom_size) {
348 struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
349 if (pos + 1 == drom_size || pos + entry->len > drom_size
350 || !entry->len) {
351 tb_sw_warn(sw, "drom buffer overrun, aborting\n");
352 return -EIO;
353 }
354
355 tb_drom_parse_entry(sw, entry);
356
357 pos += entry->len;
358 }
359 return 0;
360}
361
362/**
363 * tb_drom_read - copy drom to sw->drom and parse it
364 */
365int tb_drom_read(struct tb_switch *sw)
366{
367 u16 drom_offset;
368 u16 size;
369 u32 crc;
370 struct tb_drom_header *header;
371 int res;
372 if (sw->drom)
373 return 0;
374
375 if (tb_route(sw) == 0) {
376 /*
377 * The root switch contains only a dummy drom (header only,
378 * no entries). Hardcode the configuration here.
379 */
380 tb_drom_read_uid_only(sw, &sw->uid);
381
382 sw->ports[1].link_nr = 0;
383 sw->ports[2].link_nr = 1;
384 sw->ports[1].dual_link_port = &sw->ports[2];
385 sw->ports[2].dual_link_port = &sw->ports[1];
386
387 sw->ports[3].link_nr = 0;
388 sw->ports[4].link_nr = 1;
389 sw->ports[3].dual_link_port = &sw->ports[4];
390 sw->ports[4].dual_link_port = &sw->ports[3];
391 return 0;
392 }
393
394 res = tb_eeprom_get_drom_offset(sw, &drom_offset);
395 if (res)
396 return res;
397
398 res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
399 if (res)
400 return res;
401 size &= 0x3ff;
402 size += TB_DROM_DATA_START;
403 tb_sw_info(sw, "reading drom (length: %#x)\n", size);
404 if (size < sizeof(*header)) {
405 tb_sw_warn(sw, "drom too small, aborting\n");
406 return -EIO;
407 }
408
409 sw->drom = kzalloc(size, GFP_KERNEL);
410 if (!sw->drom)
411 return -ENOMEM;
412 res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
413 if (res)
414 goto err;
415
416 header = (void *) sw->drom;
417
418 if (header->data_len + TB_DROM_DATA_START != size) {
419 tb_sw_warn(sw, "drom size mismatch, aborting\n");
420 goto err;
421 }
422
423 crc = tb_crc8((u8 *) &header->uid, 8);
424 if (crc != header->uid_crc8) {
425 tb_sw_warn(sw,
426 "drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
427 header->uid_crc8, crc);
428 goto err;
429 }
430 sw->uid = header->uid;
431
432 crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
433 if (crc != header->data_crc32) {
434 tb_sw_warn(sw,
435 "drom data crc32 mismatch (expected: %#x, got: %#x), aborting\n",
436 header->data_crc32, crc);
437 goto err;
438 }
439
440 if (header->device_rom_revision > 1)
441 tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
442 header->device_rom_revision);
443
444 return tb_drom_parse_entries(sw);
445err:
446 kfree(sw->drom);
447 sw->drom = NULL;
448 return -EIO;
449}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
new file mode 100644
index 000000000000..c68fe1222c16
--- /dev/null
+++ b/drivers/thunderbolt/nhi.c
@@ -0,0 +1,675 @@
1/*
2 * Thunderbolt Cactus Ridge driver - NHI driver
3 *
4 * The NHI (native host interface) is the pci device that allows us to send and
5 * receive frames from the thunderbolt bus.
6 *
7 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
8 */
9
10#include <linux/pm_runtime.h>
11#include <linux/slab.h>
12#include <linux/errno.h>
13#include <linux/pci.h>
14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/dmi.h>
17
18#include "nhi.h"
19#include "nhi_regs.h"
20#include "tb.h"
21
22#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
23
24
25static int ring_interrupt_index(struct tb_ring *ring)
26{
27 int bit = ring->hop;
28 if (!ring->is_tx)
29 bit += ring->nhi->hop_count;
30 return bit;
31}
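A worked example of this index mapping (hop_count of 12 assumed for illustration, the value nhi_probe() below expects):

/*
 * Example: TX ring 3 -> bit 3, its RX counterpart -> bit 3 + 12 = 15.
 * Both fall into the first 32-bit register (bit / 32 == 0) at
 * REG_RING_INTERRUPT_BASE, with masks 1 << 3 and 1 << 15 respectively
 * in ring_interrupt_active() below.
 */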
32
33/**
34 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
35 *
36 * ring->nhi->lock must be held.
37 */
38static void ring_interrupt_active(struct tb_ring *ring, bool active)
39{
40 int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;
41 int bit = ring_interrupt_index(ring) & 31;
42 int mask = 1 << bit;
43 u32 old, new;
44 old = ioread32(ring->nhi->iobase + reg);
45 if (active)
46 new = old | mask;
47 else
48 new = old & ~mask;
49
50 dev_info(&ring->nhi->pdev->dev,
51 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
52 active ? "enabling" : "disabling", reg, bit, old, new);
53
54 if (new == old)
55 dev_WARN(&ring->nhi->pdev->dev,
56 "interrupt for %s %d is already %s\n",
57 RING_TYPE(ring), ring->hop,
58 active ? "enabled" : "disabled");
59 iowrite32(new, ring->nhi->iobase + reg);
60}
61
62/**
63 * nhi_disable_interrupts() - disable interrupts for all rings
64 *
65 * Use only during init and shutdown.
66 */
67static void nhi_disable_interrupts(struct tb_nhi *nhi)
68{
69 int i = 0;
70 /* disable interrupts */
71 for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
72 iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
73
74 /* clear interrupt status bits */
75 for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
76 ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
77}
78
79/* ring helper methods */
80
81static void __iomem *ring_desc_base(struct tb_ring *ring)
82{
83 void __iomem *io = ring->nhi->iobase;
84 io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
85 io += ring->hop * 16;
86 return io;
87}
88
89static void __iomem *ring_options_base(struct tb_ring *ring)
90{
91 void __iomem *io = ring->nhi->iobase;
92 io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
93 io += ring->hop * 32;
94 return io;
95}
96
97static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
98{
99 iowrite16(value, ring_desc_base(ring) + offset);
100}
101
102static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
103{
104 iowrite32(value, ring_desc_base(ring) + offset);
105}
106
107static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
108{
109 iowrite32(value, ring_desc_base(ring) + offset);
110 iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
111}
112
113static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
114{
115 iowrite32(value, ring_options_base(ring) + offset);
116}
117
118static bool ring_full(struct tb_ring *ring)
119{
120 return ((ring->head + 1) % ring->size) == ring->tail;
121}
122
123static bool ring_empty(struct tb_ring *ring)
124{
125 return ring->head == ring->tail;
126}
127
128/**
129 * ring_write_descriptors() - post frames from ring->queue to the controller
130 *
131 * ring->lock is held.
132 */
133static void ring_write_descriptors(struct tb_ring *ring)
134{
135 struct ring_frame *frame, *n;
136 struct ring_desc *descriptor;
137 list_for_each_entry_safe(frame, n, &ring->queue, list) {
138 if (ring_full(ring))
139 break;
140 list_move_tail(&frame->list, &ring->in_flight);
141 descriptor = &ring->descriptors[ring->head];
142 descriptor->phys = frame->buffer_phy;
143 descriptor->time = 0;
144 descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
145 if (ring->is_tx) {
146 descriptor->length = frame->size;
147 descriptor->eof = frame->eof;
148 descriptor->sof = frame->sof;
149 }
150 ring->head = (ring->head + 1) % ring->size;
151 ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
152 }
153}
154
155/**
156 * ring_work() - progress completed frames
157 *
158 * If the ring is shutting down then all frames are marked as canceled and
159 * their callbacks are invoked.
160 *
161 * Otherwise we collect all completed frames from the ring buffer, write new
162 * frames to the ring buffer and invoke the callbacks for the completed frames.
163 */
164static void ring_work(struct work_struct *work)
165{
166 struct tb_ring *ring = container_of(work, typeof(*ring), work);
167 struct ring_frame *frame;
168 bool canceled = false;
169 LIST_HEAD(done);
170 mutex_lock(&ring->lock);
171
172 if (!ring->running) {
173 /* Move all frames to done and mark them as canceled. */
174 list_splice_tail_init(&ring->in_flight, &done);
175 list_splice_tail_init(&ring->queue, &done);
176 canceled = true;
177 goto invoke_callback;
178 }
179
180 while (!ring_empty(ring)) {
181 if (!(ring->descriptors[ring->tail].flags
182 & RING_DESC_COMPLETED))
183 break;
184 frame = list_first_entry(&ring->in_flight, typeof(*frame),
185 list);
186 list_move_tail(&frame->list, &done);
187 if (!ring->is_tx) {
188 frame->size = ring->descriptors[ring->tail].length;
189 frame->eof = ring->descriptors[ring->tail].eof;
190 frame->sof = ring->descriptors[ring->tail].sof;
191 frame->flags = ring->descriptors[ring->tail].flags;
192 if (frame->sof != 0)
193 dev_WARN(&ring->nhi->pdev->dev,
194 "%s %d got unexpected SOF: %#x\n",
195 RING_TYPE(ring), ring->hop,
196 frame->sof);
197 /*
198 * known flags:
199 * raw not enabled, interrupt not set: 0x2=0010
200 * raw enabled: 0xa=1010
201 * raw not enabled: 0xb=1011
202 * partial frame (>MAX_FRAME_SIZE): 0xe=1110
203 */
204 if (frame->flags != 0xa)
205 dev_WARN(&ring->nhi->pdev->dev,
206 "%s %d got unexpected flags: %#x\n",
207 RING_TYPE(ring), ring->hop,
208 frame->flags);
209 }
210 ring->tail = (ring->tail + 1) % ring->size;
211 }
212 ring_write_descriptors(ring);
213
214invoke_callback:
215 mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
216 while (!list_empty(&done)) {
217 frame = list_first_entry(&done, typeof(*frame), list);
218 /*
219 * The callback may reenqueue or delete frame.
220 * Do not hold on to it.
221 */
222 list_del_init(&frame->list);
223 frame->callback(ring, frame, canceled);
224 }
225}
226
227int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
228{
229 int ret = 0;
230 mutex_lock(&ring->lock);
231 if (ring->running) {
232 list_add_tail(&frame->list, &ring->queue);
233 ring_write_descriptors(ring);
234 } else {
235 ret = -ESHUTDOWN;
236 }
237 mutex_unlock(&ring->lock);
238 return ret;
239}
240
241static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
242 bool transmit)
243{
244 struct tb_ring *ring = NULL;
245 dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
246 transmit ? "TX" : "RX", hop, size);
247
248 mutex_lock(&nhi->lock);
249 if (hop >= nhi->hop_count) {
250 dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
251 goto err;
252 }
253 if (transmit && nhi->tx_rings[hop]) {
254 dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
255 goto err;
256 } else if (!transmit && nhi->rx_rings[hop]) {
257 dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
258 goto err;
259 }
260 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
261 if (!ring)
262 goto err;
263
264 mutex_init(&ring->lock);
265 INIT_LIST_HEAD(&ring->queue);
266 INIT_LIST_HEAD(&ring->in_flight);
267 INIT_WORK(&ring->work, ring_work);
268
269 ring->nhi = nhi;
270 ring->hop = hop;
271 ring->is_tx = transmit;
272 ring->size = size;
273 ring->head = 0;
274 ring->tail = 0;
275 ring->running = false;
276 ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
277 size * sizeof(*ring->descriptors),
278 &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
279 if (!ring->descriptors)
280 goto err;
281
282 if (transmit)
283 nhi->tx_rings[hop] = ring;
284 else
285 nhi->rx_rings[hop] = ring;
286 mutex_unlock(&nhi->lock);
287 return ring;
288
289err:
290 if (ring)
291 mutex_destroy(&ring->lock);
292 kfree(ring);
293 mutex_unlock(&nhi->lock);
294 return NULL;
295}
296
297struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
298{
299 return ring_alloc(nhi, hop, size, true);
300}
301
302struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
303{
304 return ring_alloc(nhi, hop, size, false);
305}
306
307/**
308 * ring_start() - enable a ring
309 *
310 * Must not be invoked in parallel with ring_stop().
311 */
312void ring_start(struct tb_ring *ring)
313{
314 mutex_lock(&ring->nhi->lock);
315 mutex_lock(&ring->lock);
316 if (ring->running) {
317 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
318 goto err;
319 }
320 dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
321 RING_TYPE(ring), ring->hop);
322
323 ring_iowrite64desc(ring, ring->descriptors_dma, 0);
324 if (ring->is_tx) {
325 ring_iowrite32desc(ring, ring->size, 12);
326 ring_iowrite32options(ring, 0, 4); /* time related? */
327 ring_iowrite32options(ring,
328 RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
329 } else {
330 ring_iowrite32desc(ring,
331 (TB_FRAME_SIZE << 16) | ring->size, 12);
332 ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
333 ring_iowrite32options(ring,
334 RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
335 }
336 ring_interrupt_active(ring, true);
337 ring->running = true;
338err:
339 mutex_unlock(&ring->lock);
340 mutex_unlock(&ring->nhi->lock);
341}
342
343
344/**
345 * ring_stop() - shutdown a ring
346 *
347 * Must not be invoked from a callback.
348 *
349 * This method will disable the ring. Further calls to ring_tx/ring_rx will
350 * return -ESHUTDOWN until ring_stop has been called.
351 *
352 * All enqueued frames will be canceled and their callbacks will be executed
353 * with frame->canceled set to true (on the callback thread). This method
354 * returns only after all callback invocations have finished.
355 */
356void ring_stop(struct tb_ring *ring)
357{
358 mutex_lock(&ring->nhi->lock);
359 mutex_lock(&ring->lock);
360 dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
361 RING_TYPE(ring), ring->hop);
362 if (!ring->running) {
363 dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
364 RING_TYPE(ring), ring->hop);
365 goto err;
366 }
367 ring_interrupt_active(ring, false);
368
369 ring_iowrite32options(ring, 0, 0);
370 ring_iowrite64desc(ring, 0, 0);
371 ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
372 ring_iowrite32desc(ring, 0, 12);
373 ring->head = 0;
374 ring->tail = 0;
375 ring->running = false;
376
377err:
378 mutex_unlock(&ring->lock);
379 mutex_unlock(&ring->nhi->lock);
380
381 /*
382 * schedule ring->work to invoke callbacks on all remaining frames.
383 */
384 schedule_work(&ring->work);
385 flush_work(&ring->work);
386}
387
388/*
389 * ring_free() - free ring
390 *
391 * When this method returns all invocations of ring->callback will have
392 * finished.
393 *
394 * Ring must be stopped.
395 *
396 * Must NOT be called from ring_frame->callback!
397 */
398void ring_free(struct tb_ring *ring)
399{
400 mutex_lock(&ring->nhi->lock);
401 /*
402 * Dissociate the ring from the NHI. This also ensures that
403 * nhi_interrupt_work cannot reschedule ring->work.
404 */
405 if (ring->is_tx)
406 ring->nhi->tx_rings[ring->hop] = NULL;
407 else
408 ring->nhi->rx_rings[ring->hop] = NULL;
409
410 if (ring->running) {
411 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
412 RING_TYPE(ring), ring->hop);
413 }
414
415 dma_free_coherent(&ring->nhi->pdev->dev,
416 ring->size * sizeof(*ring->descriptors),
417 ring->descriptors, ring->descriptors_dma);
418
419 ring->descriptors = NULL;
420 ring->descriptors_dma = 0;
421
422
423 dev_info(&ring->nhi->pdev->dev,
424 "freeing %s %d\n",
425 RING_TYPE(ring),
426 ring->hop);
427
428 mutex_unlock(&ring->nhi->lock);
429 /*
430 * ring->work can no longer be scheduled (it is scheduled only by
431 * nhi_interrupt_work and ring_stop). Wait for it to finish before
432 * freeing the ring.
433 */
434 flush_work(&ring->work);
435 mutex_destroy(&ring->lock);
436 kfree(ring);
437}
438
439static void nhi_interrupt_work(struct work_struct *work)
440{
441 struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
442 int value = 0; /* Suppress uninitialized usage warning. */
443 int bit;
444 int hop = -1;
445 int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
446 struct tb_ring *ring;
447
448 mutex_lock(&nhi->lock);
449
450 /*
451 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
452 * (TX, RX, RX overflow). We iterate over the bits and read new
453 * dwords as required. The registers are cleared on read.
454 */
455 for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
456 if (bit % 32 == 0)
457 value = ioread32(nhi->iobase
458 + REG_RING_NOTIFY_BASE
459 + 4 * (bit / 32));
460 if (++hop == nhi->hop_count) {
461 hop = 0;
462 type++;
463 }
464 if ((value & (1 << (bit % 32))) == 0)
465 continue;
466 if (type == 2) {
467 dev_warn(&nhi->pdev->dev,
468 "RX overflow for ring %d\n",
469 hop);
470 continue;
471 }
472 if (type == 0)
473 ring = nhi->tx_rings[hop];
474 else
475 ring = nhi->rx_rings[hop];
476 if (ring == NULL) {
477 dev_warn(&nhi->pdev->dev,
478 "got interrupt for inactive %s ring %d\n",
479 type ? "RX" : "TX",
480 hop);
481 continue;
482 }
483 /* we do not check ring->running, this is done in ring->work */
484 schedule_work(&ring->work);
485 }
486 mutex_unlock(&nhi->lock);
487}
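A worked example of the bit decoding above (hop_count of 12 assumed for illustration):

/*
 * Example: 3 * 12 = 36 status bits are scanned, so a second dword is
 * read once bit reaches 32. Bit b maps to hop b % 12 and type b / 12;
 * bit 25 therefore reports an RX overflow (type 2) on ring 1.
 */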
488
489static irqreturn_t nhi_msi(int irq, void *data)
490{
491 struct tb_nhi *nhi = data;
492 schedule_work(&nhi->interrupt_work);
493 return IRQ_HANDLED;
494}
495
496static int nhi_suspend_noirq(struct device *dev)
497{
498 struct pci_dev *pdev = to_pci_dev(dev);
499 struct tb *tb = pci_get_drvdata(pdev);
500 thunderbolt_suspend(tb);
501 return 0;
502}
503
504static int nhi_resume_noirq(struct device *dev)
505{
506 struct pci_dev *pdev = to_pci_dev(dev);
507 struct tb *tb = pci_get_drvdata(pdev);
508 thunderbolt_resume(tb);
509 return 0;
510}
511
512static void nhi_shutdown(struct tb_nhi *nhi)
513{
514 int i;
515 dev_info(&nhi->pdev->dev, "shutdown\n");
516
517 for (i = 0; i < nhi->hop_count; i++) {
518 if (nhi->tx_rings[i])
519 dev_WARN(&nhi->pdev->dev,
520 "TX ring %d is still active\n", i);
521 if (nhi->rx_rings[i])
522 dev_WARN(&nhi->pdev->dev,
523 "RX ring %d is still active\n", i);
524 }
525 nhi_disable_interrupts(nhi);
526 /*
527 * We have to release the irq before calling flush_work. Otherwise an
528 * already executing IRQ handler could call schedule_work again.
529 */
530 devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
531 flush_work(&nhi->interrupt_work);
532 mutex_destroy(&nhi->lock);
533}
534
535static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
536{
537 struct tb_nhi *nhi;
538 struct tb *tb;
539 int res;
540
541 res = pcim_enable_device(pdev);
542 if (res) {
543 dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
544 return res;
545 }
546
547 res = pci_enable_msi(pdev);
548 if (res) {
549 dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
550 return res;
551 }
552
553 res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
554 if (res) {
555 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
556 return res;
557 }
558
559 nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
560 if (!nhi)
561 return -ENOMEM;
562
563 nhi->pdev = pdev;
564 /* cannot fail - table is allocated in pcim_iomap_regions */
565 nhi->iobase = pcim_iomap_table(pdev)[0];
566 nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
567 if (nhi->hop_count != 12)
568 dev_warn(&pdev->dev, "unexpected hop count: %d\n",
569 nhi->hop_count);
570 INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
571
572 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
573 sizeof(*nhi->tx_rings), GFP_KERNEL);
574 nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
575 sizeof(*nhi->rx_rings), GFP_KERNEL);
576 if (!nhi->tx_rings || !nhi->rx_rings)
577 return -ENOMEM;
578
579 nhi_disable_interrupts(nhi); /* In case someone left them on. */
580 res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
581 IRQF_NO_SUSPEND, /* must work during _noirq */
582 "thunderbolt", nhi);
583 if (res) {
584 dev_err(&pdev->dev, "request_irq failed, aborting\n");
585 return res;
586 }
587
588 mutex_init(&nhi->lock);
589
590 pci_set_master(pdev);
591
592 /* magic value - clock related? */
593 iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
594
595 dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
596 tb = thunderbolt_alloc_and_start(nhi);
597 if (!tb) {
598 /*
599 * At this point the RX/TX rings might already have been
600 * activated. Do a proper shutdown.
601 */
602 nhi_shutdown(nhi);
603 return -EIO;
604 }
605 pci_set_drvdata(pdev, tb);
606
607 return 0;
608}
609
610static void nhi_remove(struct pci_dev *pdev)
611{
612 struct tb *tb = pci_get_drvdata(pdev);
613 struct tb_nhi *nhi = tb->nhi;
614 thunderbolt_shutdown_and_free(tb);
615 nhi_shutdown(nhi);
616}
617
618/*
619 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
620 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges'
621 * resume_noirq until we are done.
622 */
623static const struct dev_pm_ops nhi_pm_ops = {
624 .suspend_noirq = nhi_suspend_noirq,
625 .resume_noirq = nhi_resume_noirq,
626 .freeze_noirq = nhi_suspend_noirq, /*
627 * we just disable hotplug, the
628 * pci-tunnels stay alive.
629 */
630 .restore_noirq = nhi_resume_noirq,
631};
632
633static struct pci_device_id nhi_ids[] = {
634 /*
635 * We have to specify class, the TB bridges use the same device and
636 * vendor (sub)id.
637 */
638 {
639 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
640 .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
641 .subvendor = 0x2222, .subdevice = 0x1111,
642 },
643 {
644 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
645 .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
646 .subvendor = 0x2222, .subdevice = 0x1111,
647 },
648 { 0,}
649};
650
651MODULE_DEVICE_TABLE(pci, nhi_ids);
652MODULE_LICENSE("GPL");
653
654static struct pci_driver nhi_driver = {
655 .name = "thunderbolt",
656 .id_table = nhi_ids,
657 .probe = nhi_probe,
658 .remove = nhi_remove,
659 .driver.pm = &nhi_pm_ops,
660};
661
662static int __init nhi_init(void)
663{
664 if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
665 return -ENOSYS;
666 return pci_register_driver(&nhi_driver);
667}
668
669static void __exit nhi_unload(void)
670{
671 pci_unregister_driver(&nhi_driver);
672}
673
674module_init(nhi_init);
675module_exit(nhi_unload);
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
new file mode 100644
index 000000000000..317242939b31
--- /dev/null
+++ b/drivers/thunderbolt/nhi.h
@@ -0,0 +1,114 @@
1/*
2 * Thunderbolt Cactus Ridge driver - NHI driver
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#ifndef DSL3510_H_
8#define DSL3510_H_
9
10#include <linux/mutex.h>
11#include <linux/workqueue.h>
12
13/**
14 * struct tb_nhi - thunderbolt native host interface
15 */
16struct tb_nhi {
17 struct mutex lock; /*
18 * Must be held during ring creation/destruction.
19 * Is acquired by interrupt_work when dispatching
20 * interrupts to individual rings.
21 */
22 struct pci_dev *pdev;
23 void __iomem *iobase;
24 struct tb_ring **tx_rings;
25 struct tb_ring **rx_rings;
26 struct work_struct interrupt_work;
27 u32 hop_count; /* Number of rings (end point hops) supported by NHI. */
28};
29
30/**
31 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
32 */
33struct tb_ring {
34 struct mutex lock; /* must be acquired after nhi->lock */
35 struct tb_nhi *nhi;
36 int size;
37 int hop;
38 int head; /* write next descriptor here */
39 int tail; /* complete next descriptor here */
40 struct ring_desc *descriptors;
41 dma_addr_t descriptors_dma;
42 struct list_head queue;
43 struct list_head in_flight;
44 struct work_struct work;
45 bool is_tx:1; /* rx otherwise */
46 bool running:1;
47};
48
49struct ring_frame;
50typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
51
52/**
53 * struct ring_frame - for use with ring_rx/ring_tx
54 */
55struct ring_frame {
56 dma_addr_t buffer_phy;
57 ring_cb callback;
58 struct list_head list;
59 u32 size:12; /* TX: in, RX: out */
60 u32 flags:12; /* RX: out */
61 u32 eof:4; /* TX: in, RX: out */
62 u32 sof:4; /* TX: in, RX: out */
63};
64
65#define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */
66
67struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size);
68struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size);
69void ring_start(struct tb_ring *ring);
70void ring_stop(struct tb_ring *ring);
71void ring_free(struct tb_ring *ring);
72
73int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
74
75/**
76 * ring_rx() - enqueue a frame on an RX ring
77 *
78 * frame->buffer, frame->buffer_phy and frame->callback have to be set. The
79 * buffer must contain at least TB_FRAME_SIZE bytes.
80 *
81 * frame->callback will be invoked with frame->size, frame->flags, frame->eof,
82 * frame->sof set once the frame has been received.
83 *
84 * If ring_stop is called after the packet has been enqueued frame->callback
85 * will be called with canceled set to true.
86 *
87 * Return: Returns -ESHUTDOWN if ring_stop has been called. Zero otherwise.
88 */
89static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
90{
91 WARN_ON(ring->is_tx);
92 return __ring_enqueue(ring, frame);
93}
94
95/**
96 * ring_tx() - enqueue a frame on a TX ring
97 *
98 * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof
99 * and frame->sof have to be set.
100 *
101 * frame->callback will be invoked once the frame has been transmitted.
102 *
103 * If ring_stop is called after the packet has been enqueued frame->callback
104 * will be called with canceled set to true.
105 *
106 * Return: Returns -ESHUTDOWN if ring_stop has been called. Zero otherwise.
107 */
108static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
109{
110 WARN_ON(!ring->is_tx);
111 return __ring_enqueue(ring, frame);
112}
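A minimal enqueue sketch (illustrative only; assumes the caller embeds the ring_frame in its own packet structure, as struct ctl_pkg in ctl.c does, and has already mapped a DMA buffer of at least TB_FRAME_SIZE bytes to buf_phy):

struct example_pkg {
	void *buffer;		/* virtual address, tracked by the caller */
	struct ring_frame frame;
};

static void example_rx_done(struct tb_ring *ring, struct ring_frame *frame,
			    bool canceled)
{
	struct example_pkg *pkg = container_of(frame, struct example_pkg, frame);

	if (canceled)
		return;
	/* frame->size bytes of received data are now valid in pkg->buffer */
}

static int example_submit_rx(struct tb_ring *rx_ring, struct example_pkg *pkg,
			     dma_addr_t buf_phy)
{
	pkg->frame.buffer_phy = buf_phy;
	pkg->frame.callback = example_rx_done;
	return ring_rx(rx_ring, &pkg->frame);
}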
113
114#endif
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
new file mode 100644
index 000000000000..86b996c702a0
--- /dev/null
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -0,0 +1,101 @@
1/*
2 * Thunderbolt Cactus Ridge driver - NHI registers
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#ifndef DSL3510_REGS_H_
8#define DSL3510_REGS_H_
9
10#include <linux/types.h>
11
12enum ring_flags {
13 RING_FLAG_ISOCH_ENABLE = 1 << 27, /* TX only? */
14 RING_FLAG_E2E_FLOW_CONTROL = 1 << 28,
15 RING_FLAG_PCI_NO_SNOOP = 1 << 29,
16 RING_FLAG_RAW = 1 << 30, /* ignore EOF/SOF mask, include checksum */
17 RING_FLAG_ENABLE = 1 << 31,
18};
19
20enum ring_desc_flags {
21 RING_DESC_ISOCH = 0x1, /* TX only? */
22 RING_DESC_COMPLETED = 0x2, /* set by NHI */
23 RING_DESC_POSTED = 0x4, /* always set this */
24 RING_DESC_INTERRUPT = 0x8, /* request an interrupt on completion */
25};
26
27/**
28 * struct ring_desc - TX/RX ring entry
29 *
30 * For TX set length/eof/sof.
31 * For RX length/eof/sof are set by the NHI.
32 */
33struct ring_desc {
34 u64 phys;
35 u32 length:12;
36 u32 eof:4;
37 u32 sof:4;
38 enum ring_desc_flags flags:12;
39 u32 time; /* write zero */
40} __packed;
41
42/* NHI registers in bar 0 */
43
44/*
45 * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
46 * 00: physical pointer to an array of struct ring_desc
47 * 08: ring tail (set by NHI)
48 * 10: ring head (index of first non-posted descriptor)
49 * 12: descriptor count
50 */
51#define REG_TX_RING_BASE 0x00000
52
53/*
54 * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
55 * 00: physical pointer to an array of struct ring_desc
56 * 08: ring head (index of first non-posted descriptor)
57 * 10: ring tail (set by NHI)
58 * 12: descriptor count
59 * 14: max frame sizes (anything larger than 0x100 has no effect)
60 */
61#define REG_RX_RING_BASE 0x08000
62
63/*
64 * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
65 * 00: enum_ring_flags
66 * 04: isoch time stamp ?? (write 0)
67 * ..: unknown
68 */
69#define REG_TX_OPTIONS_BASE 0x19800
70
71/*
72 * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
73 * 00: enum ring_flags
74 * If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to
75 * the corresponding TX hop id.
76 * 04: EOF/SOF mask (ignored for RING_FLAG_RAW rings)
77 * ..: unknown
78 */
79#define REG_RX_OPTIONS_BASE 0x29800
80
81/*
82 * three bitfields: tx, rx, rx overflow
83 * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
84 * cleared on read. New interrupts are fired only after ALL registers have been
85 * read (even those containing only disabled rings).
86 */
87#define REG_RING_NOTIFY_BASE 0x37800
88#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
89
90/*
91 * two bitfields: rx, tx
92 * Both bitfields contains one bit for every hop (REG_HOP_COUNT). To
93 * enable/disable interrupts set/clear the corresponding bits.
94 */
95#define REG_RING_INTERRUPT_BASE 0x38200
96#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
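For a typical hop_count of 12 (the value nhi_probe() warns about otherwise) these evaluate to (31 + 3 * 12) / 32 = 2 notify registers and (31 + 2 * 12) / 32 = 1 interrupt register.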
97
98/* The last 11 bits contain the number of hops supported by the NHI port. */
99#define REG_HOP_COUNT 0x39640
100
101#endif
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
new file mode 100644
index 000000000000..8fcf8a7b6c22
--- /dev/null
+++ b/drivers/thunderbolt/path.c
@@ -0,0 +1,215 @@
1/*
2 * Thunderbolt Cactus Ridge driver - path/tunnel functionality
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#include <linux/slab.h>
8#include <linux/errno.h>
9
10#include "tb.h"
11
12
13static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
14{
15 tb_port_info(port, " Hop through port %d to hop %d (%s)\n",
16 hop->out_port, hop->next_hop,
17 hop->enable ? "enabled" : "disabled");
18 tb_port_info(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
19 hop->weight, hop->priority,
20 hop->initial_credits, hop->drop_packages);
21 tb_port_info(port, " Counter enabled: %d Counter index: %d\n",
22 hop->counter_enable, hop->counter);
23 tb_port_info(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
24 hop->ingress_fc, hop->egress_fc,
25 hop->ingress_shared_buffer, hop->egress_shared_buffer);
26 tb_port_info(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
27 hop->unknown1, hop->unknown2, hop->unknown3);
28}
29
30/**
31 * tb_path_alloc() - allocate a thunderbolt path
32 *
33 * Return: Returns a tb_path on success or NULL on failure.
34 */
35struct tb_path *tb_path_alloc(struct tb *tb, int num_hops)
36{
37 struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL);
38 if (!path)
39 return NULL;
40 path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
41 if (!path->hops) {
42 kfree(path);
43 return NULL;
44 }
45 path->tb = tb;
46 path->path_length = num_hops;
47 return path;
48}
49
50/**
51 * tb_path_free() - free a deactivated path
52 */
53void tb_path_free(struct tb_path *path)
54{
55 if (path->activated) {
56 tb_WARN(path->tb, "trying to free an activated path\n");
57 return;
58 }
59 kfree(path->hops);
60 kfree(path);
61}
62
63static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
64{
65 int i, res;
66 for (i = first_hop; i < path->path_length; i++) {
67 res = tb_port_add_nfc_credits(path->hops[i].in_port,
68 -path->nfc_credits);
69 if (res)
70 tb_port_warn(path->hops[i].in_port,
71 "nfc credits deallocation failed for hop %d\n",
72 i);
73 }
74}
75
76static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
77{
78 int i, res;
79 struct tb_regs_hop hop = { };
80 for (i = first_hop; i < path->path_length; i++) {
81 res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
82 2 * path->hops[i].in_hop_index, 2);
83 if (res)
84 tb_port_warn(path->hops[i].in_port,
85 "hop deactivation failed for hop %d, index %d\n",
86 i, path->hops[i].in_hop_index);
87 }
88}
89
90void tb_path_deactivate(struct tb_path *path)
91{
92 if (!path->activated) {
93 tb_WARN(path->tb, "trying to deactivate an inactive path\n");
94 return;
95 }
96 tb_info(path->tb,
97 "deactivating path from %llx:%x to %llx:%x\n",
98 tb_route(path->hops[0].in_port->sw),
99 path->hops[0].in_port->port,
100 tb_route(path->hops[path->path_length - 1].out_port->sw),
101 path->hops[path->path_length - 1].out_port->port);
102 __tb_path_deactivate_hops(path, 0);
103 __tb_path_deallocate_nfc(path, 0);
104 path->activated = false;
105}
106
107/**
108 * tb_path_activate() - activate a path
109 *
110 * Activate a path starting with the last hop and iterating backwards. The
111 * caller must fill path->hops before calling tb_path_activate().
112 *
113 * Return: Returns 0 on success or an error code on failure.
114 */
115int tb_path_activate(struct tb_path *path)
116{
117 int i, res;
118 enum tb_path_port out_mask, in_mask;
119 if (path->activated) {
120 tb_WARN(path->tb, "trying to activate already activated path\n");
121 return -EINVAL;
122 }
123
124 tb_info(path->tb,
125 "activating path from %llx:%x to %llx:%x\n",
126 tb_route(path->hops[0].in_port->sw),
127 path->hops[0].in_port->port,
128 tb_route(path->hops[path->path_length - 1].out_port->sw),
129 path->hops[path->path_length - 1].out_port->port);
130
131 /* Clear counters. */
132 for (i = path->path_length - 1; i >= 0; i--) {
133 if (path->hops[i].in_counter_index == -1)
134 continue;
135 res = tb_port_clear_counter(path->hops[i].in_port,
136 path->hops[i].in_counter_index);
137 if (res)
138 goto err;
139 }
140
141 /* Add non flow controlled credits. */
142 for (i = path->path_length - 1; i >= 0; i--) {
143 res = tb_port_add_nfc_credits(path->hops[i].in_port,
144 path->nfc_credits);
145 if (res) {
146 __tb_path_deallocate_nfc(path, i);
147 goto err;
148 }
149 }
150
151 /* Activate hops. */
152 for (i = path->path_length - 1; i >= 0; i--) {
153 struct tb_regs_hop hop;
154
155 /* dword 0 */
156 hop.next_hop = path->hops[i].next_hop_index;
157 hop.out_port = path->hops[i].out_port->port;
158 /* TODO: figure out why these are good values */
159 hop.initial_credits = (i == path->path_length - 1) ? 16 : 7;
160 hop.unknown1 = 0;
161 hop.enable = 1;
162
163 /* dword 1 */
164 out_mask = (i == path->path_length - 1) ?
165 TB_PATH_DESTINATION : TB_PATH_INTERNAL;
166 in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
167 hop.weight = path->weight;
168 hop.unknown2 = 0;
169 hop.priority = path->priority;
170 hop.drop_packages = path->drop_packages;
171 hop.counter = path->hops[i].in_counter_index;
172 hop.counter_enable = path->hops[i].in_counter_index != -1;
173 hop.ingress_fc = path->ingress_fc_enable & in_mask;
174 hop.egress_fc = path->egress_fc_enable & out_mask;
175 hop.ingress_shared_buffer = path->ingress_shared_buffer
176 & in_mask;
177 hop.egress_shared_buffer = path->egress_shared_buffer
178 & out_mask;
179 hop.unknown3 = 0;
180
181 tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d\n",
182 i, path->hops[i].in_hop_index);
183 tb_dump_hop(path->hops[i].in_port, &hop);
184 res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
185 2 * path->hops[i].in_hop_index, 2);
186 if (res) {
187 __tb_path_deactivate_hops(path, i);
188 __tb_path_deallocate_nfc(path, 0);
189 goto err;
190 }
191 }
192 path->activated = true;
193 tb_info(path->tb, "path activation complete\n");
194 return 0;
195err:
196 tb_WARN(path->tb, "path activation failed\n");
197 return res;
198}
199
200/**
201 * tb_path_is_invalid() - check whether any ports on the path are invalid
202 *
203 * Return: Returns true if the path is invalid, false otherwise.
204 */
205bool tb_path_is_invalid(struct tb_path *path)
206{
207 int i = 0;
208 for (i = 0; i < path->path_length; i++) {
209 if (path->hops[i].in_port->sw->is_unplugged)
210 return true;
211 if (path->hops[i].out_port->sw->is_unplugged)
212 return true;
213 }
214 return false;
215}
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
new file mode 100644
index 000000000000..aeb982969629
--- /dev/null
+++ b/drivers/thunderbolt/switch.c
@@ -0,0 +1,507 @@
1/*
2 * Thunderbolt Cactus Ridge driver - switch/port utility functions
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#include <linux/delay.h>
8#include <linux/slab.h>
9
10#include "tb.h"
11
12/* port utility functions */
13
14static const char *tb_port_type(struct tb_regs_port_header *port)
15{
16 switch (port->type >> 16) {
17 case 0:
18 switch ((u8) port->type) {
19 case 0:
20 return "Inactive";
21 case 1:
22 return "Port";
23 case 2:
24 return "NHI";
25 default:
26 return "unknown";
27 }
28 case 0x2:
29 return "Ethernet";
30 case 0x8:
31 return "SATA";
32 case 0xe:
33 return "DP/HDMI";
34 case 0x10:
35 return "PCIe";
36 case 0x20:
37 return "USB";
38 default:
39 return "unknown";
40 }
41}
42
43static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
44{
45 tb_info(tb,
46 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
47 port->port_number, port->vendor_id, port->device_id,
48 port->revision, port->thunderbolt_version, tb_port_type(port),
49 port->type);
50 tb_info(tb, " Max hop id (in/out): %d/%d\n",
51 port->max_in_hop_id, port->max_out_hop_id);
52 tb_info(tb, " Max counters: %d\n", port->max_counters);
53 tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits);
54}
55
56/**
57 * tb_port_state() - get connectedness state of a port
58 *
59 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
60 *
61 * Return: Returns an enum tb_port_state on success or an error code on failure.
62 */
63static int tb_port_state(struct tb_port *port)
64{
65 struct tb_cap_phy phy;
66 int res;
67 if (port->cap_phy == 0) {
68 tb_port_WARN(port, "does not have a PHY\n");
69 return -EINVAL;
70 }
71 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
72 if (res)
73 return res;
74 return phy.state;
75}
76
77/**
78 * tb_wait_for_port() - wait for a port to become ready
79 *
80 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
81 * wait_if_unplugged is set then we also wait if the port is in state
82 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
83 * switch resume). Otherwise we only wait if a device is registered but the link
84 * has not yet been established.
85 *
86 * Return: Returns an error code on failure. Returns 0 if the port is not
87 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
88 * if the port is connected and in state TB_PORT_UP.
89 */
90int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
91{
92 int retries = 10;
93 int state;
94 if (!port->cap_phy) {
95 tb_port_WARN(port, "does not have PHY\n");
96 return -EINVAL;
97 }
98 if (tb_is_upstream_port(port)) {
99 tb_port_WARN(port, "is the upstream port\n");
100 return -EINVAL;
101 }
102
103 while (retries--) {
104 state = tb_port_state(port);
105 if (state < 0)
106 return state;
107 if (state == TB_PORT_DISABLED) {
108 tb_port_info(port, "is disabled (state: 0)\n");
109 return 0;
110 }
111 if (state == TB_PORT_UNPLUGGED) {
112 if (wait_if_unplugged) {
113 /* used during resume */
114 tb_port_info(port,
115 "is unplugged (state: 7), retrying...\n");
116 msleep(100);
117 continue;
118 }
119 tb_port_info(port, "is unplugged (state: 7)\n");
120 return 0;
121 }
122 if (state == TB_PORT_UP) {
123 tb_port_info(port,
124 "is connected, link is up (state: 2)\n");
125 return 1;
126 }
127
128 /*
129 * After plug-in the state is TB_PORT_CONNECTING. Give it some
130 * time.
131 */
132 tb_port_info(port,
133 "is connected, link is not up (state: %d), retrying...\n",
134 state);
135 msleep(100);
136 }
137 tb_port_warn(port,
138 "failed to reach state TB_PORT_UP. Ignoring port...\n");
139 return 0;
140}
141
142/**
143 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
144 *
145 * Change the number of NFC credits allocated to @port by @credits. To remove
146 * NFC credits pass a negative amount of credits.
147 *
148 * Return: Returns 0 on success or an error code on failure.
149 */
150int tb_port_add_nfc_credits(struct tb_port *port, int credits)
151{
152 if (credits == 0)
153 return 0;
154 tb_port_info(port,
155 "adding %#x NFC credits (%#x -> %#x)",
156 credits,
157 port->config.nfc_credits,
158 port->config.nfc_credits + credits);
159 port->config.nfc_credits += credits;
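/* nfc_credits is dword 4 of the port config space, hence offset 4 below. */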
160 return tb_port_write(port, &port->config.nfc_credits,
161 TB_CFG_PORT, 4, 1);
162}
163
164/**
165 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
166 *
167 * Return: Returns 0 on success or an error code on failure.
168 */
169int tb_port_clear_counter(struct tb_port *port, int counter)
170{
171 u32 zero[3] = { 0, 0, 0 };
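/* Each counter occupies three dwords in TB_CFG_COUNTERS. */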
172 tb_port_info(port, "clearing counter %d\n", counter);
173 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
174}
175
176/**
177 * tb_init_port() - initialize a port
178 *
179 * This is a helper method for tb_switch_alloc. Does not check or initialize
180 * any downstream switches.
181 *
182 * Return: Returns 0 on success or an error code on failure.
183 */
184static int tb_init_port(struct tb_port *port)
185{
186 int res;
187 int cap;
188
189 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
190 if (res)
191 return res;
192
193 /* Port 0 is the switch itself and has no PHY. */
194 if (port->config.type == TB_TYPE_PORT && port->port != 0) {
195 cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PHY);
196
197 if (cap > 0)
198 port->cap_phy = cap;
199 else
200 tb_port_WARN(port, "non switch port without a PHY\n");
201 }
202
203 tb_dump_port(port->sw->tb, &port->config);
204
205 /* TODO: Read dual link port, DP port and more from EEPROM. */
206 return 0;
207
208}
209
210/* switch utility functions */
211
212static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
213{
214 tb_info(tb,
215 " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
216 sw->vendor_id, sw->device_id, sw->revision,
217 sw->thunderbolt_version);
218 tb_info(tb, " Max Port Number: %d\n", sw->max_port_number);
219 tb_info(tb, " Config:\n");
220 tb_info(tb,
221 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
222 sw->upstream_port_number, sw->depth,
223 (((u64) sw->route_hi) << 32) | sw->route_lo,
224 sw->enabled, sw->plug_events_delay);
225 tb_info(tb,
226 " unknown1: %#x unknown4: %#x\n",
227 sw->__unknown1, sw->__unknown4);
228}
229
230/**
231 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
232 *
233 * Return: Returns 0 on success or an error code on failure.
234 */
235int tb_switch_reset(struct tb *tb, u64 route)
236{
237 struct tb_cfg_result res;
238 struct tb_regs_switch_header header = {
239 header.route_hi = route >> 32,
240 header.route_lo = route,
241 header.enabled = true,
242 };
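/*
 * Only dwords 2 and 3 of the header (route_lo, route_hi + enabled) are
 * written before the reset packet is sent, hence the offset of 2 below.
 */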
243 tb_info(tb, "resetting switch at %llx\n", route);
244 res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
245 0, 2, 2, 2);
246 if (res.err)
247 return res.err;
248 res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
249 if (res.err > 0)
250 return -EIO;
251 return res.err;
252}
253
254struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
255{
256 u8 next_port = route; /*
257 * Routes use a stride of 8 bits,
258 * even though a port index has 6 bits at most.
259 */
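/*
 * Example: route 0x0201 selects the switch behind port 2 of the switch
 * behind port 1 of sw (the low byte is consumed first).
 */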
260 if (route == 0)
261 return sw;
262 if (next_port > sw->config.max_port_number)
263 return NULL;
264 if (tb_is_upstream_port(&sw->ports[next_port]))
265 return NULL;
266 if (!sw->ports[next_port].remote)
267 return NULL;
268 return get_switch_at_route(sw->ports[next_port].remote->sw,
269 route >> TB_ROUTE_SHIFT);
270}
271
272/**
273 * tb_plug_events_active() - enable/disable plug events on a switch
274 *
275 * Also configures a sane plug_events_delay of 255ms.
276 *
277 * Return: Returns 0 on success or an error code on failure.
278 */
279static int tb_plug_events_active(struct tb_switch *sw, bool active)
280{
281 u32 data;
282 int res;
283
284 sw->config.plug_events_delay = 0xff;
285 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
286 if (res)
287 return res;
288
289 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
290 if (res)
291 return res;
292
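/*
 * Clearing bits 2-6 (~0x7c) appears to unmask plug events, while setting
 * them (0x7c) masks them again; bit 2 is re-set for all but a few device ids.
 */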
293 if (active) {
294 data = data & 0xFFFFFF83;
295 switch (sw->config.device_id) {
296 case 0x1513:
297 case 0x151a:
298 case 0x1549:
299 break;
300 default:
301 data |= 4;
302 }
303 } else {
304 data = data | 0x7c;
305 }
306 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
307 sw->cap_plug_events + 1, 1);
308}
309
310
311/**
312 * tb_switch_free() - free a tb_switch and all downstream switches
313 */
314void tb_switch_free(struct tb_switch *sw)
315{
316 int i;
317 /* port 0 is the switch itself and never has a remote */
318 for (i = 1; i <= sw->config.max_port_number; i++) {
319 if (tb_is_upstream_port(&sw->ports[i]))
320 continue;
321 if (sw->ports[i].remote)
322 tb_switch_free(sw->ports[i].remote->sw);
323 sw->ports[i].remote = NULL;
324 }
325
326 if (!sw->is_unplugged)
327 tb_plug_events_active(sw, false);
328
329 kfree(sw->ports);
330 kfree(sw->drom);
331 kfree(sw);
332}
333
334/**
335 * tb_switch_alloc() - allocate and initialize a switch
336 *
337 * Return: Returns a pointer to the new switch or NULL on failure.
338 */
339struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
340{
341 int i;
342 int cap;
343 struct tb_switch *sw;
344 int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
345 if (upstream_port < 0)
346 return NULL;
347
348 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
349 if (!sw)
350 return NULL;
351
352 sw->tb = tb;
353 if (tb_cfg_read(tb->ctl, &sw->config, route, 0, 2, 0, 5))
354 goto err;
355 tb_info(tb,
356 "initializing Switch at %#llx (depth: %d, up port: %d)\n",
357 route, tb_route_length(route), upstream_port);
358 tb_info(tb, "old switch config:\n");
359 tb_dump_switch(tb, &sw->config);
360
361 /* configure switch */
362 sw->config.upstream_port_number = upstream_port;
363 sw->config.depth = tb_route_length(route);
364 sw->config.route_lo = route;
365 sw->config.route_hi = route >> 32;
366 sw->config.enabled = 1;
367 /* from here on we may use the tb_sw_* functions & macros */
368
369 if (sw->config.vendor_id != 0x8086)
370 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
371 sw->config.vendor_id);
372
373 if (sw->config.device_id != 0x1547 && sw->config.device_id != 0x1549)
374 tb_sw_warn(sw, "unsupported switch device id %#x\n",
375 sw->config.device_id);
376
377 /* upload configuration */
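/* Dword 0 holds the vendor/device id; only dwords 1-3 are written back. */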
378 if (tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3))
379 goto err;
380
381 /* initialize ports */
382 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
383 GFP_KERNEL);
384 if (!sw->ports)
385 goto err;
386
387 for (i = 0; i <= sw->config.max_port_number; i++) {
388 /* minimum setup for tb_find_cap and tb_drom_read to work */
389 sw->ports[i].sw = sw;
390 sw->ports[i].port = i;
391 }
392
393 cap = tb_find_cap(&sw->ports[0], TB_CFG_SWITCH, TB_CAP_PLUG_EVENTS);
394 if (cap < 0) {
395 tb_sw_warn(sw, "cannot find TB_CAP_PLUG_EVENTS, aborting\n");
396 goto err;
397 }
398 sw->cap_plug_events = cap;
399
400 /* read drom */
401 if (tb_drom_read(sw))
402 tb_sw_warn(sw, "tb_eeprom_read_rom failed, continuing\n");
403 tb_sw_info(sw, "uid: %#llx\n", sw->uid);
404
405 for (i = 0; i <= sw->config.max_port_number; i++) {
406 if (sw->ports[i].disabled) {
407 tb_port_info(&sw->ports[i], "disabled by eeprom\n");
408 continue;
409 }
410 if (tb_init_port(&sw->ports[i]))
411 goto err;
412 }
413
414 /* TODO: I2C, IECS, link controller */
415
416 if (tb_plug_events_active(sw, true))
417 goto err;
418
419 return sw;
420err:
421 kfree(sw->ports);
422 kfree(sw->drom);
423 kfree(sw);
424 return NULL;
425}
426
427/**
428 * tb_sw_set_unpplugged() - set is_unplugged on switch and downstream switches
429 */
430void tb_sw_set_unpplugged(struct tb_switch *sw)
431{
432 int i;
433 if (sw == sw->tb->root_switch) {
434 tb_sw_WARN(sw, "cannot unplug root switch\n");
435 return;
436 }
437 if (sw->is_unplugged) {
438 tb_sw_WARN(sw, "is_unplugged already set\n");
439 return;
440 }
441 sw->is_unplugged = true;
442 for (i = 0; i <= sw->config.max_port_number; i++) {
443 if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
444 tb_sw_set_unpplugged(sw->ports[i].remote->sw);
445 }
446}
447
448int tb_switch_resume(struct tb_switch *sw)
449{
450 int i, err;
451 u64 uid;
452 tb_sw_info(sw, "resuming switch\n");
453
454 err = tb_drom_read_uid_only(sw, &uid);
455 if (err) {
456 tb_sw_warn(sw, "uid read failed\n");
457 return err;
458 }
459 if (sw->uid != uid) {
460 tb_sw_info(sw,
461 "changed while suspended (uid %#llx -> %#llx)\n",
462 sw->uid, uid);
463 return -ENODEV;
464 }
465
466 /* upload configuration */
467 err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
468 if (err)
469 return err;
470
471 err = tb_plug_events_active(sw, true);
472 if (err)
473 return err;
474
475 /* check for surviving downstream switches */
476 for (i = 1; i <= sw->config.max_port_number; i++) {
477 struct tb_port *port = &sw->ports[i];
478 if (tb_is_upstream_port(port))
479 continue;
480 if (!port->remote)
481 continue;
482 if (tb_wait_for_port(port, true) <= 0
483 || tb_switch_resume(port->remote->sw)) {
484 tb_port_warn(port,
485 "lost during suspend, disconnecting\n");
486 tb_sw_set_unpplugged(port->remote->sw);
487 }
488 }
489 return 0;
490}
491
492void tb_switch_suspend(struct tb_switch *sw)
493{
494 int i, err;
495 err = tb_plug_events_active(sw, false);
496 if (err)
497 return;
498
499 for (i = 1; i <= sw->config.max_port_number; i++) {
500 if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
501 tb_switch_suspend(sw->ports[i].remote->sw);
502 }
503 /*
504 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
505 * effect?
506 */
507}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
new file mode 100644
index 000000000000..d2c3fe346e91
--- /dev/null
+++ b/drivers/thunderbolt/tb.c
@@ -0,0 +1,436 @@
1/*
2 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#include <linux/slab.h>
8#include <linux/errno.h>
9#include <linux/delay.h>
10
11#include "tb.h"
12#include "tb_regs.h"
13#include "tunnel_pci.h"
14
15
16/* enumeration & hot plug handling */
17
18
19static void tb_scan_port(struct tb_port *port);
20
21/**
22 * tb_scan_switch() - scan for and initialize downstream switches
23 */
24static void tb_scan_switch(struct tb_switch *sw)
25{
26 int i;
27 for (i = 1; i <= sw->config.max_port_number; i++)
28 tb_scan_port(&sw->ports[i]);
29}
30
31/**
32 * tb_scan_port() - check for and initialize switches below port
33 */
34static void tb_scan_port(struct tb_port *port)
35{
36 struct tb_switch *sw;
37 if (tb_is_upstream_port(port))
38 return;
39 if (port->config.type != TB_TYPE_PORT)
40 return;
41 if (port->dual_link_port && port->link_nr)
42 return; /*
43 * Downstream switch is reachable through two ports.
44 * Only scan on the primary port (link_nr == 0).
45 */
46 if (tb_wait_for_port(port, false) <= 0)
47 return;
48 if (port->remote) {
49 tb_port_WARN(port, "port already has a remote!\n");
50 return;
51 }
52 sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port));
53 if (!sw)
54 return;
55 port->remote = tb_upstream_port(sw);
56 tb_upstream_port(sw)->remote = port;
57 tb_scan_switch(sw);
58}
59
60/**
61 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
62 */
63static void tb_free_invalid_tunnels(struct tb *tb)
64{
65 struct tb_pci_tunnel *tunnel;
66 struct tb_pci_tunnel *n;
67 list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
68 {
69 if (tb_pci_is_invalid(tunnel)) {
70 tb_pci_deactivate(tunnel);
71 tb_pci_free(tunnel);
72 }
73 }
74}
75
76/**
77 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
78 */
79static void tb_free_unplugged_children(struct tb_switch *sw)
80{
81 int i;
82 for (i = 1; i <= sw->config.max_port_number; i++) {
83 struct tb_port *port = &sw->ports[i];
84 if (tb_is_upstream_port(port))
85 continue;
86 if (!port->remote)
87 continue;
88 if (port->remote->sw->is_unplugged) {
89 tb_switch_free(port->remote->sw);
90 port->remote = NULL;
91 } else {
92 tb_free_unplugged_children(port->remote->sw);
93 }
94 }
95}
96
97
98/**
99 * tb_find_pci_up_port() - return the first PCIe up port on @sw or NULL
100 */
101static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
102{
103 int i;
104 for (i = 1; i <= sw->config.max_port_number; i++)
105 if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
106 return &sw->ports[i];
107 return NULL;
108}
109
110/**
111 * tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
112 */
113static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
114{
115 int i;
116 int cap;
117 int res;
118 int data;
119 for (i = 1; i <= sw->config.max_port_number; i++) {
120 if (tb_is_upstream_port(&sw->ports[i]))
121 continue;
122 if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
123 continue;
124 cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE);
125 if (cap <= 0)
126 continue;
127 res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
128 if (res < 0)
129 continue;
130 if (data & 0x80000000)
131 continue;
132 return &sw->ports[i];
133 }
134 return NULL;
135}
136
137/**
138 * tb_activate_pcie_devices() - scan for and activate PCIe devices
139 *
140 * This method is somewhat ad hoc. For now it only supports one device
141 * per port and only devices at depth 1.
142 */
143static void tb_activate_pcie_devices(struct tb *tb)
144{
145 int i;
146 int cap;
147 u32 data;
148 struct tb_switch *sw;
149 struct tb_port *up_port;
150 struct tb_port *down_port;
151 struct tb_pci_tunnel *tunnel;
152 /* scan for PCIe devices at depth 1 */
153 for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
154 if (tb_is_upstream_port(&tb->root_switch->ports[i]))
155 continue;
156 if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
157 continue;
158 if (!tb->root_switch->ports[i].remote)
159 continue;
160 sw = tb->root_switch->ports[i].remote->sw;
161 up_port = tb_find_pci_up_port(sw);
162 if (!up_port) {
163 tb_sw_info(sw, "no PCIe devices found, aborting\n");
164 continue;
165 }
166
167 /* check whether port is already activated */
168 cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE);
169 if (cap <= 0)
170 continue;
171 if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
172 continue;
173 if (data & 0x80000000) {
174 tb_port_info(up_port,
175 "PCIe port already activated, aborting\n");
176 continue;
177 }
178
179 down_port = tb_find_unused_down_port(tb->root_switch);
180 if (!down_port) {
181 tb_port_info(up_port,
182 "All PCIe down ports are occupied, aborting\n");
183 continue;
184 }
185 tunnel = tb_pci_alloc(tb, up_port, down_port);
186 if (!tunnel) {
187 tb_port_info(up_port,
188 "PCIe tunnel allocation failed, aborting\n");
189 continue;
190 }
191
192 if (tb_pci_activate(tunnel)) {
193 tb_port_info(up_port,
194 "PCIe tunnel activation failed, aborting\n");
195 tb_pci_free(tunnel);
196 }
197
198 }
199}
200
201/* hotplug handling */
202
203struct tb_hotplug_event {
204 struct work_struct work;
205 struct tb *tb;
206 u64 route;
207 u8 port;
208 bool unplug;
209};
210
211/**
212 * tb_handle_hotplug() - handle hotplug event
213 *
214 * Executes on tb->wq.
215 */
216static void tb_handle_hotplug(struct work_struct *work)
217{
218 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
219 struct tb *tb = ev->tb;
220 struct tb_switch *sw;
221 struct tb_port *port;
222 mutex_lock(&tb->lock);
223 if (!tb->hotplug_active)
224 goto out; /* during init, suspend or shutdown */
225
226 sw = get_switch_at_route(tb->root_switch, ev->route);
227 if (!sw) {
228 tb_warn(tb,
229 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
230 ev->route, ev->port, ev->unplug);
231 goto out;
232 }
233 if (ev->port > sw->config.max_port_number) {
234 tb_warn(tb,
235 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
236 ev->route, ev->port, ev->unplug);
237 goto out;
238 }
239 port = &sw->ports[ev->port];
240 if (tb_is_upstream_port(port)) {
241 tb_warn(tb,
242 "hotplug event for upstream port %llx:%x (unplug: %d)\n",
243 ev->route, ev->port, ev->unplug);
244 goto out;
245 }
246 if (ev->unplug) {
247 if (port->remote) {
248 tb_port_info(port, "unplugged\n");
249 tb_sw_set_unpplugged(port->remote->sw);
250 tb_free_invalid_tunnels(tb);
251 tb_switch_free(port->remote->sw);
252 port->remote = NULL;
253 } else {
254 tb_port_info(port,
255 "got unplug event for disconnected port, ignoring\n");
256 }
257 } else if (port->remote) {
258 tb_port_info(port,
259 "got plug event for connected port, ignoring\n");
260 } else {
261 tb_port_info(port, "hotplug: scanning\n");
262 tb_scan_port(port);
263 if (!port->remote) {
264 tb_port_info(port, "hotplug: no switch found\n");
265 } else if (port->remote->sw->config.depth > 1) {
266 tb_sw_warn(port->remote->sw,
267 "hotplug: chaining not supported\n");
268 } else {
269 tb_sw_info(port->remote->sw,
270 "hotplug: activating pcie devices\n");
271 tb_activate_pcie_devices(tb);
272 }
273 }
274out:
275 mutex_unlock(&tb->lock);
276 kfree(ev);
277}
278
279/**
280 * tb_schedule_hotplug_handler() - callback function for the control channel
281 *
282 * Delegates to tb_handle_hotplug.
283 */
284static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
285 bool unplug)
286{
287 struct tb *tb = data;
288 struct tb_hotplug_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);
289 if (!ev)
290 return;
291 INIT_WORK(&ev->work, tb_handle_hotplug);
292 ev->tb = tb;
293 ev->route = route;
294 ev->port = port;
295 ev->unplug = unplug;
296 queue_work(tb->wq, &ev->work);
297}
298
299/**
300 * thunderbolt_shutdown_and_free() - shutdown everything
301 *
302 * Free all switches and the config channel.
303 *
304 * Used in the error path of thunderbolt_alloc_and_start.
305 */
306void thunderbolt_shutdown_and_free(struct tb *tb)
307{
308 struct tb_pci_tunnel *tunnel;
309 struct tb_pci_tunnel *n;
310
311 mutex_lock(&tb->lock);
312
313 /* tunnels are only present after everything has been initialized */
314 list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
315 tb_pci_deactivate(tunnel);
316 tb_pci_free(tunnel);
317 }
318
319 if (tb->root_switch)
320 tb_switch_free(tb->root_switch);
321 tb->root_switch = NULL;
322
323 if (tb->ctl) {
324 tb_ctl_stop(tb->ctl);
325 tb_ctl_free(tb->ctl);
326 }
327 tb->ctl = NULL;
328 tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
329
330 /* allow tb_handle_hotplug to acquire the lock */
331 mutex_unlock(&tb->lock);
332 if (tb->wq) {
333 flush_workqueue(tb->wq);
334 destroy_workqueue(tb->wq);
335 tb->wq = NULL;
336 }
337 mutex_destroy(&tb->lock);
338 kfree(tb);
339}
340
341/**
342 * thunderbolt_alloc_and_start() - setup the thunderbolt bus
343 *
344 * Allocates a tb_cfg control channel, initializes the root switch, enables
345 * plug events and activates pci devices.
346 *
347 * Return: Returns NULL on error.
348 */
349struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
350{
351 struct tb *tb;
352
353 BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
354 BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
355 BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
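/*
 * These structures are copied verbatim to/from the device config spaces,
 * so their sizes must match the register layout exactly.
 */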
356
357 tb = kzalloc(sizeof(*tb), GFP_KERNEL);
358 if (!tb)
359 return NULL;
360
361 tb->nhi = nhi;
362 mutex_init(&tb->lock);
363 mutex_lock(&tb->lock);
364 INIT_LIST_HEAD(&tb->tunnel_list);
365
366 tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
367 if (!tb->wq)
368 goto err_locked;
369
370 tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb);
371 if (!tb->ctl)
372 goto err_locked;
373 /*
374 * tb_schedule_hotplug_handler may be called as soon as the config
375 * channel is started. That's why we have to hold the lock here.
376 */
377 tb_ctl_start(tb->ctl);
378
379 tb->root_switch = tb_switch_alloc(tb, 0);
380 if (!tb->root_switch)
381 goto err_locked;
382
383 /* Full scan to discover devices added before the driver was loaded. */
384 tb_scan_switch(tb->root_switch);
385 tb_activate_pcie_devices(tb);
386
387 /* Allow tb_handle_hotplug to progress events */
388 tb->hotplug_active = true;
389 mutex_unlock(&tb->lock);
390 return tb;
391
392err_locked:
393 mutex_unlock(&tb->lock);
394 thunderbolt_shutdown_and_free(tb);
395 return NULL;
396}
397
398void thunderbolt_suspend(struct tb *tb)
399{
400 tb_info(tb, "suspending...\n");
401 mutex_lock(&tb->lock);
402 tb_switch_suspend(tb->root_switch);
403 tb_ctl_stop(tb->ctl);
404 tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
405 mutex_unlock(&tb->lock);
406 tb_info(tb, "suspend finished\n");
407}
408
409void thunderbolt_resume(struct tb *tb)
410{
411 struct tb_pci_tunnel *tunnel, *n;
412 tb_info(tb, "resuming...\n");
413 mutex_lock(&tb->lock);
414 tb_ctl_start(tb->ctl);
415
416 /* remove any PCIe devices the firmware might have set up */
417 tb_switch_reset(tb, 0);
418
419 tb_switch_resume(tb->root_switch);
420 tb_free_invalid_tunnels(tb);
421 tb_free_unplugged_children(tb->root_switch);
422 list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
423 tb_pci_restart(tunnel);
424 if (!list_empty(&tb->tunnel_list)) {
425 /*
426 * The PCIe links need some time to come up after the tunnels are
427 * restarted; 100ms has proven sufficient in testing.
428 */
429 tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
430 msleep(100);
431 }
432 /* Allow tb_handle_hotplug to progress events */
433 tb->hotplug_active = true;
434 mutex_unlock(&tb->lock);
435 tb_info(tb, "resume finished\n");
436}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
new file mode 100644
index 000000000000..8b0d7cf2b6d6
--- /dev/null
+++ b/drivers/thunderbolt/tb.h
@@ -0,0 +1,271 @@
1/*
2 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#ifndef TB_H_
8#define TB_H_
9
10#include <linux/pci.h>
11
12#include "tb_regs.h"
13#include "ctl.h"
14
15/**
16 * struct tb_switch - a thunderbolt switch
17 */
18struct tb_switch {
19 struct tb_regs_switch_header config;
20 struct tb_port *ports;
21 struct tb *tb;
22 u64 uid;
23 int cap_plug_events; /* offset, zero if not found */
24 bool is_unplugged; /* unplugged, will go away */
25 u8 *drom;
26};
27
28/**
29 * struct tb_port - a thunderbolt port, part of a tb_switch
30 */
31struct tb_port {
32 struct tb_regs_port_header config;
33 struct tb_switch *sw;
34 struct tb_port *remote; /* remote port, NULL if not connected */
35 int cap_phy; /* offset, zero if not found */
36 u8 port; /* port number on switch */
37 bool disabled; /* disabled by eeprom */
38 struct tb_port *dual_link_port;
39 u8 link_nr:1;
40};
41
42/**
43 * struct tb_path_hop - routing information for a tb_path
44 *
45 * Hop configuration is always done on the IN port of a switch.
46 * in_port and out_port have to be on the same switch. Packets arriving on
47 * in_port with "hop" = in_hop_index will be routed through out_port. The
48 * next hop to take (on out_port->remote) is determined by next_hop_index.
49 *
50 * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
51 * port.
52 */
53struct tb_path_hop {
54 struct tb_port *in_port;
55 struct tb_port *out_port;
56 int in_hop_index;
57 int in_counter_index; /* write -1 to disable counters for this hop. */
58 int next_hop_index;
59};
60
61/**
62 * enum tb_path_port - path options mask
63 */
64enum tb_path_port {
65 TB_PATH_NONE = 0,
66 TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */
67 TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */
68 TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */
69 TB_PATH_ALL = 7,
70};
71
72/**
73 * struct tb_path - a unidirectional path between two ports
74 *
75 * A path consists of a number of hops (see tb_path_hop). To establish a PCIe
76 * tunnel two paths have to be created between the two PCIe ports.
77 *
78 */
79struct tb_path {
80 struct tb *tb;
81 int nfc_credits; /* non flow controlled credits */
82 enum tb_path_port ingress_shared_buffer;
83 enum tb_path_port egress_shared_buffer;
84 enum tb_path_port ingress_fc_enable;
85 enum tb_path_port egress_fc_enable;
86
87 int priority:3;
88 int weight:4;
89 bool drop_packages;
90 bool activated;
91 struct tb_path_hop *hops;
92 int path_length; /* number of hops */
93};
94
95
96/**
97 * struct tb - main thunderbolt bus structure
98 */
99struct tb {
100 struct mutex lock; /*
101 * Big lock. Must be held when accessing cfg or
102 * any struct tb_switch / struct tb_port.
103 */
104 struct tb_nhi *nhi;
105 struct tb_ctl *ctl;
106 struct workqueue_struct *wq; /* ordered workqueue for plug events */
107 struct tb_switch *root_switch;
108 struct list_head tunnel_list; /* list of active PCIe tunnels */
109 bool hotplug_active; /*
110 * tb_handle_hotplug will stop progressing plug
111 * events and exit if this is not set (it needs to
112 * acquire the lock one more time). Used to drain
113 * wq after cfg has been paused.
114 */
115
116};
117
118/* helper functions & macros */
119
120/**
121 * tb_upstream_port() - return the upstream port of a switch
122 *
123 * Every switch has an upstream port (for the root switch it is the NHI).
124 *
125 * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
126 * non-root switches (on the NHI port remote is always NULL).
127 *
128 * Return: Returns the upstream port of the switch.
129 */
130static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
131{
132 return &sw->ports[sw->config.upstream_port_number];
133}
134
135static inline u64 tb_route(struct tb_switch *sw)
136{
137 return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
138}
139
140static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
141 enum tb_cfg_space space, u32 offset, u32 length)
142{
143 return tb_cfg_read(sw->tb->ctl,
144 buffer,
145 tb_route(sw),
146 0,
147 space,
148 offset,
149 length);
150}
151
152static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
153 enum tb_cfg_space space, u32 offset, u32 length)
154{
155 return tb_cfg_write(sw->tb->ctl,
156 buffer,
157 tb_route(sw),
158 0,
159 space,
160 offset,
161 length);
162}
163
164static inline int tb_port_read(struct tb_port *port, void *buffer,
165 enum tb_cfg_space space, u32 offset, u32 length)
166{
167 return tb_cfg_read(port->sw->tb->ctl,
168 buffer,
169 tb_route(port->sw),
170 port->port,
171 space,
172 offset,
173 length);
174}
175
176static inline int tb_port_write(struct tb_port *port, void *buffer,
177 enum tb_cfg_space space, u32 offset, u32 length)
178{
179 return tb_cfg_write(port->sw->tb->ctl,
180 buffer,
181 tb_route(port->sw),
182 port->port,
183 space,
184 offset,
185 length);
186}
187
188#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
189#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
190#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
191#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
192
193
194#define __TB_SW_PRINT(level, sw, fmt, arg...) \
195 do { \
196 struct tb_switch *__sw = (sw); \
197 level(__sw->tb, "%llx: " fmt, \
198 tb_route(__sw), ## arg); \
199 } while (0)
200#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
201#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
202#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
203
204
205#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
206 do { \
207 struct tb_port *__port = (_port); \
208 level(__port->sw->tb, "%llx:%x: " fmt, \
209 tb_route(__port->sw), __port->port, ## arg); \
210 } while (0)
211#define tb_port_WARN(port, fmt, arg...) \
212 __TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
213#define tb_port_warn(port, fmt, arg...) \
214 __TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
215#define tb_port_info(port, fmt, arg...) \
216 __TB_PORT_PRINT(tb_info, port, fmt, ##arg)
217
218
219struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
220void thunderbolt_shutdown_and_free(struct tb *tb);
221void thunderbolt_suspend(struct tb *tb);
222void thunderbolt_resume(struct tb *tb);
223
224struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route);
225void tb_switch_free(struct tb_switch *sw);
226void tb_switch_suspend(struct tb_switch *sw);
227int tb_switch_resume(struct tb_switch *sw);
228int tb_switch_reset(struct tb *tb, u64 route);
229void tb_sw_set_unpplugged(struct tb_switch *sw);
230struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
231
232int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
233int tb_port_add_nfc_credits(struct tb_port *port, int credits);
234int tb_port_clear_counter(struct tb_port *port, int counter);
235
236int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap);
237
238struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
239void tb_path_free(struct tb_path *path);
240int tb_path_activate(struct tb_path *path);
241void tb_path_deactivate(struct tb_path *path);
242bool tb_path_is_invalid(struct tb_path *path);
243
244int tb_drom_read(struct tb_switch *sw);
245int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
246
247
248static inline int tb_route_length(u64 route)
249{
250 return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
251}
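/* e.g. tb_route_length(0) == 0 and tb_route_length(0x0503) == 2 */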
252
253static inline bool tb_is_upstream_port(struct tb_port *port)
254{
255 return port == tb_upstream_port(port->sw);
256}
257
258/**
259 * tb_downstream_route() - get route to downstream switch
260 *
261 * Port must not be the upstream port (otherwise a loop is created).
262 *
263 * Return: Returns a route to the switch behind @port.
264 */
265static inline u64 tb_downstream_route(struct tb_port *port)
266{
267 return tb_route(port->sw)
268 | ((u64) port->port << (port->sw->config.depth * 8));
269}
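/*
 * Example: a switch reached through root port 3 has route 0x03; a device
 * behind its port 5 gets route 0x0503 (one byte per depth level).
 */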
270
271#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
new file mode 100644
index 000000000000..6577af75d9dc
--- /dev/null
+++ b/drivers/thunderbolt/tb_regs.h
@@ -0,0 +1,213 @@
1/*
2 * Thunderbolt Cactus Ridge driver - Port/Switch config area registers
3 *
4 * Every thunderbolt device consists (logically) of a switch with multiple
5 * ports. Every port contains up to four config regions (HOPS, PORT, SWITCH,
6 * COUNTERS) which are used to configure the device.
7 *
8 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
9 */
10
11#ifndef _TB_REGS
12#define _TB_REGS
13
14#include <linux/types.h>
15
16
17#define TB_ROUTE_SHIFT 8 /* number of bits in a port entry of a route */
18
19
20/*
21 * TODO: should be 63? But we do not know how to receive frames larger than 256
22 * bytes at the frame level. (header + checksum = 16, 60*4 = 240)
23 */
24#define TB_MAX_CONFIG_RW_LENGTH 60
25
26enum tb_cap {
27 TB_CAP_PHY = 0x0001,
28 TB_CAP_TIME1 = 0x0003,
29 TB_CAP_PCIE = 0x0004,
30 TB_CAP_I2C = 0x0005,
31 TB_CAP_PLUG_EVENTS = 0x0105, /* also EEPROM */
32 TB_CAP_TIME2 = 0x0305,
33 TB_CAL_IECS = 0x0405,
34 TB_CAP_LINK_CONTROLLER = 0x0605, /* also IECS */
35};
36
37enum tb_port_state {
38 TB_PORT_DISABLED = 0, /* tb_cap_phy.disable == 1 */
39 TB_PORT_CONNECTING = 1, /* retry */
40 TB_PORT_UP = 2,
41 TB_PORT_UNPLUGGED = 7,
42};
43
44/* capability headers */
45
46struct tb_cap_basic {
47 u8 next;
48 /* enum tb_cap cap:8; prevent "narrower than values of its type" */
49 u8 cap; /* if cap == 0x05 then we have an extended capability */
50} __packed;
51
52struct tb_cap_extended_short {
53 u8 next; /* if next and length are zero then we have a long cap */
54 enum tb_cap cap:16;
55 u8 length;
56} __packed;
57
58struct tb_cap_extended_long {
59 u8 zero1;
60 enum tb_cap cap:16;
61 u8 zero2;
62 u16 next;
63 u16 length;
64} __packed;
65
66/* capabilities */
67
68struct tb_cap_link_controller {
69 struct tb_cap_extended_long cap_header;
70 u32 count:4; /* number of link controllers */
71 u32 unknown1:4;
72 u32 base_offset:8; /*
73 * offset (into this capability) of the configuration
74 * area of the first link controller
75 */
76 u32 length:12; /* link controller configuration area length */
77 u32 unknown2:4; /* TODO check that length is correct */
78} __packed;
79
80struct tb_cap_phy {
81 struct tb_cap_basic cap_header;
82 u32 unknown1:16;
83 u32 unknown2:14;
84 bool disable:1;
85 u32 unknown3:11;
86 enum tb_port_state state:4;
87 u32 unknown4:2;
88} __packed;
89
90struct tb_eeprom_ctl {
91 bool clock:1; /* send pulse to transfer one bit */
92 bool access_low:1; /* set to 0 before access */
93 bool data_out:1; /* to eeprom */
94 bool data_in:1; /* from eeprom */
95 bool access_high:1; /* set to 1 before access */
96 bool not_present:1; /* should be 0 */
97 bool unknown1:1;
98 bool present:1; /* should be 1 */
99 u32 unknown2:24;
100} __packed;
101
102struct tb_cap_plug_events {
103 struct tb_cap_extended_short cap_header;
104 u32 __unknown1:2;
105 u32 plug_events:5;
106 u32 __unknown2:25;
107 u32 __unknown3;
108 u32 __unknown4;
109 struct tb_eeprom_ctl eeprom_ctl;
110 u32 __unknown5[7];
111 u32 drom_offset; /* 32 bit register, but eeprom addresses are 16 bit */
112} __packed;
113
114/* device headers */
115
116/* Present on port 0 in TB_CFG_SWITCH at address zero. */
117struct tb_regs_switch_header {
118 /* DWORD 0 */
119 u16 vendor_id;
120 u16 device_id;
121 /* DWORD 1 */
122 u32 first_cap_offset:8;
123 u32 upstream_port_number:6;
124 u32 max_port_number:6;
125 u32 depth:3;
126 u32 __unknown1:1;
127 u32 revision:8;
128 /* DWORD 2 */
129 u32 route_lo;
130 /* DWORD 3 */
131 u32 route_hi:31;
132 bool enabled:1;
133 /* DWORD 4 */
134 u32 plug_events_delay:8; /*
135 * RW, pause between plug events in
136 * milliseconds. Writing 0x00 is interpreted
137 * as 255ms.
138 */
139 u32 __unknown4:16;
140 u32 thunderbolt_version:8;
141} __packed;
142
143enum tb_port_type {
144 TB_TYPE_INACTIVE = 0x000000,
145 TB_TYPE_PORT = 0x000001,
146 TB_TYPE_NHI = 0x000002,
147 /* TB_TYPE_ETHERNET = 0x020000, lower order bits are not known */
148 /* TB_TYPE_SATA = 0x080000, lower order bits are not known */
149 TB_TYPE_DP_HDMI_IN = 0x0e0101,
150 TB_TYPE_DP_HDMI_OUT = 0x0e0102,
151 TB_TYPE_PCIE_DOWN = 0x100101,
152 TB_TYPE_PCIE_UP = 0x100102,
153 /* TB_TYPE_USB = 0x200000, lower order bits are not known */
154};
155
156/* Present on every port in TB_CFG_PORT at address zero. */
157struct tb_regs_port_header {
158 /* DWORD 0 */
159 u16 vendor_id;
160 u16 device_id;
161 /* DWORD 1 */
162 u32 first_cap_offset:8;
163 u32 max_counters:11;
164 u32 __unknown1:5;
165 u32 revision:8;
166 /* DWORD 2 */
167 enum tb_port_type type:24;
168 u32 thunderbolt_version:8;
169 /* DWORD 3 */
170 u32 __unknown2:20;
171 u32 port_number:6;
172 u32 __unknown3:6;
173 /* DWORD 4 */
174 u32 nfc_credits;
175 /* DWORD 5 */
176 u32 max_in_hop_id:11;
177 u32 max_out_hop_id:11;
178 u32 __unkown4:10;
179 /* DWORD 6 */
180 u32 __unknown5;
181 /* DWORD 7 */
182 u32 __unknown6;
183
184} __packed;
185
186/* Hop register from TB_CFG_HOPS. 8 bytes per entry. */
187struct tb_regs_hop {
188 /* DWORD 0 */
189 u32 next_hop:11; /*
190 * hop to take after sending the packet through
191 * out_port (on the incoming port of the next switch)
192 */
193 u32 out_port:6; /* next port of the path (on the same switch) */
194 u32 initial_credits:8;
195 u32 unknown1:6; /* set to zero */
196 bool enable:1;
197
198 /* DWORD 1 */
199 u32 weight:4;
200 u32 unknown2:4; /* set to zero */
201 u32 priority:3;
202 bool drop_packages:1;
203 u32 counter:11; /* index into TB_CFG_COUNTERS on this port */
204 bool counter_enable:1;
205 bool ingress_fc:1;
206 bool egress_fc:1;
207 bool ingress_shared_buffer:1;
208 bool egress_shared_buffer:1;
209 u32 unknown3:4; /* set to zero */
210} __packed;
211
212
213#endif
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
new file mode 100644
index 000000000000..baf1cd370446
--- /dev/null
+++ b/drivers/thunderbolt/tunnel_pci.c
@@ -0,0 +1,232 @@
1/*
2 * Thunderbolt Cactus Ridge driver - PCIe tunnel
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#include <linux/slab.h>
8#include <linux/list.h>
9
10#include "tunnel_pci.h"
11#include "tb.h"
12
13#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
14 do { \
15 struct tb_pci_tunnel *__tunnel = (tunnel); \
16 level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt, \
17 tb_route(__tunnel->down_port->sw), \
18 __tunnel->down_port->port, \
19 tb_route(__tunnel->up_port->sw), \
20 __tunnel->up_port->port, \
21 ## arg); \
22 } while (0)
23
24#define tb_tunnel_WARN(tunnel, fmt, arg...) \
25 __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
26#define tb_tunnel_warn(tunnel, fmt, arg...) \
27 __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
28#define tb_tunnel_info(tunnel, fmt, arg...) \
29 __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
30
31static void tb_pci_init_path(struct tb_path *path)
32{
33 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
34 path->egress_shared_buffer = TB_PATH_NONE;
35 path->ingress_fc_enable = TB_PATH_ALL;
36 path->ingress_shared_buffer = TB_PATH_NONE;
37 path->priority = 3;
38 path->weight = 1;
39 path->drop_packages = 0;
40 path->nfc_credits = 0;
41}
42
43/**
44 * tb_pci_alloc() - allocate a pci tunnel
45 *
46 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
47 * TB_TYPE_PCIE_DOWN.
48 *
49 * Currently only paths consisting of two hops are supported (that is, the
50 * ports must be on "adjacent" switches).
51 *
52 * The paths are hard-coded to use hop 8 (the only working hop id available on
53 * my thunderbolt devices). Therefore at most ONE path per device may be
54 * activated.
55 *
56 * Return: Returns a tb_pci_tunnel on success or NULL on failure.
57 */
58struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
59 struct tb_port *down)
60{
61 struct tb_pci_tunnel *tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
62 if (!tunnel)
63 goto err;
64 tunnel->tb = tb;
65 tunnel->down_port = down;
66 tunnel->up_port = up;
67 INIT_LIST_HEAD(&tunnel->list);
68 tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2);
69 if (!tunnel->path_to_up)
70 goto err;
71 tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2);
72 if (!tunnel->path_to_down)
73 goto err;
74 tb_pci_init_path(tunnel->path_to_up);
75 tb_pci_init_path(tunnel->path_to_down);
76
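/*
 * Path to the PCIe up port: first hop on the down port of the parent
 * switch, second hop on the upstream port of the device switch; the path
 * back down is the mirror image. All hops use the hard-coded hop id 8
 * (see the comment above).
 */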
77 tunnel->path_to_up->hops[0].in_port = down;
78 tunnel->path_to_up->hops[0].in_hop_index = 8;
79 tunnel->path_to_up->hops[0].in_counter_index = -1;
80 tunnel->path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote;
81 tunnel->path_to_up->hops[0].next_hop_index = 8;
82
83 tunnel->path_to_up->hops[1].in_port = tb_upstream_port(up->sw);
84 tunnel->path_to_up->hops[1].in_hop_index = 8;
85 tunnel->path_to_up->hops[1].in_counter_index = -1;
86 tunnel->path_to_up->hops[1].out_port = up;
87 tunnel->path_to_up->hops[1].next_hop_index = 8;
88
89 tunnel->path_to_down->hops[0].in_port = up;
90 tunnel->path_to_down->hops[0].in_hop_index = 8;
91 tunnel->path_to_down->hops[0].in_counter_index = -1;
92 tunnel->path_to_down->hops[0].out_port = tb_upstream_port(up->sw);
93 tunnel->path_to_down->hops[0].next_hop_index = 8;
94
95 tunnel->path_to_down->hops[1].in_port =
96 tb_upstream_port(up->sw)->remote;
97 tunnel->path_to_down->hops[1].in_hop_index = 8;
98 tunnel->path_to_down->hops[1].in_counter_index = -1;
99 tunnel->path_to_down->hops[1].out_port = down;
100 tunnel->path_to_down->hops[1].next_hop_index = 8;
101 return tunnel;
102
103err:
104 if (tunnel) {
105 if (tunnel->path_to_down)
106 tb_path_free(tunnel->path_to_down);
107 if (tunnel->path_to_up)
108 tb_path_free(tunnel->path_to_up);
109 kfree(tunnel);
110 }
111 return NULL;
112}
113
114/**
115 * tb_pci_free() - free a tunnel
116 *
117 * The tunnel must have been deactivated.
118 */
119void tb_pci_free(struct tb_pci_tunnel *tunnel)
120{
121 if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
122 tb_tunnel_WARN(tunnel, "trying to free an activated tunnel\n");
123 return;
124 }
125 tb_path_free(tunnel->path_to_up);
126 tb_path_free(tunnel->path_to_down);
127 kfree(tunnel);
128}
129
130/**
131 * tb_pci_is_invalid() - check whether an activated tunnel is still valid
132 */
133bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
134{
135 WARN_ON(!tunnel->path_to_up->activated);
136 WARN_ON(!tunnel->path_to_down->activated);
137
138 return tb_path_is_invalid(tunnel->path_to_up)
139 || tb_path_is_invalid(tunnel->path_to_down);
140}
141
142/**
143 * tb_pci_port_active() - activate/deactivate PCI capability
144 *
145 * Return: Returns 0 on success or an error code on failure.
146 */
147static int tb_pci_port_active(struct tb_port *port, bool active)
148{
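/*
 * Bit 31 of the first dword of the PCIe capability seems to enable the
 * PCIe adapter; tb_activate_pcie_devices() tests the same bit to detect
 * ports that are already active.
 */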
149 u32 word = active ? 0x80000000 : 0x0;
150 int cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PCIE);
151 if (cap <= 0) {
152 tb_port_warn(port, "TB_CAP_PCIE not found: %d\n", cap);
153 return cap ? cap : -ENXIO;
154 }
155 return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
156}
157
158/**
159 * tb_pci_restart() - activate a tunnel after a hardware reset
160 */
161int tb_pci_restart(struct tb_pci_tunnel *tunnel)
162{
163 int res;
164 tunnel->path_to_up->activated = false;
165 tunnel->path_to_down->activated = false;
166
167 tb_tunnel_info(tunnel, "activating\n");
168
169 res = tb_path_activate(tunnel->path_to_up);
170 if (res)
171 goto err;
172 res = tb_path_activate(tunnel->path_to_down);
173 if (res)
174 goto err;
175
176 res = tb_pci_port_active(tunnel->down_port, true);
177 if (res)
178 goto err;
179
180 res = tb_pci_port_active(tunnel->up_port, true);
181 if (res)
182 goto err;
183 return 0;
184err:
185 tb_tunnel_warn(tunnel, "activation failed\n");
186 tb_pci_deactivate(tunnel);
187 return res;
188}
189
190/**
191 * tb_pci_activate() - activate a tunnel
192 *
193 * Return: Returns 0 on success or an error code on failure.
194 */
195int tb_pci_activate(struct tb_pci_tunnel *tunnel)
196{
197 int res;
198 if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
199 tb_tunnel_WARN(tunnel,
200 "trying to activate an already activated tunnel\n");
201 return -EINVAL;
202 }
203
204 res = tb_pci_restart(tunnel);
205 if (res)
206 return res;
207
208 list_add(&tunnel->list, &tunnel->tb->tunnel_list);
209 return 0;
210}
211
212
213
214/**
215 * tb_pci_deactivate() - deactivate a tunnel
216 */
217void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
218{
219 tb_tunnel_info(tunnel, "deactivating\n");
220 /*
221 * TODO: enable reset by writing 0x04000000 to TB_CAP_PCIE + 1 on up
222 * port. Seems to have no effect?
223 */
224 tb_pci_port_active(tunnel->up_port, false);
225 tb_pci_port_active(tunnel->down_port, false);
226 if (tunnel->path_to_down->activated)
227 tb_path_deactivate(tunnel->path_to_down);
228 if (tunnel->path_to_up->activated)
229 tb_path_deactivate(tunnel->path_to_up);
230 list_del_init(&tunnel->list);
231}
232
diff --git a/drivers/thunderbolt/tunnel_pci.h b/drivers/thunderbolt/tunnel_pci.h
new file mode 100644
index 000000000000..a67f93c140fa
--- /dev/null
+++ b/drivers/thunderbolt/tunnel_pci.h
@@ -0,0 +1,30 @@
1/*
2 * Thunderbolt Cactus Ridge driver - PCIe tunnel
3 *
4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
5 */
6
7#ifndef TB_PCI_H_
8#define TB_PCI_H_
9
10#include "tb.h"
11
12struct tb_pci_tunnel {
13 struct tb *tb;
14 struct tb_port *up_port;
15 struct tb_port *down_port;
16 struct tb_path *path_to_up;
17 struct tb_path *path_to_down;
18 struct list_head list;
19};
20
21struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
22 struct tb_port *down);
23void tb_pci_free(struct tb_pci_tunnel *tunnel);
24int tb_pci_activate(struct tb_pci_tunnel *tunnel);
25int tb_pci_restart(struct tb_pci_tunnel *tunnel);
26void tb_pci_deactivate(struct tb_pci_tunnel *tunnel);
27bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel);
28
29#endif
30
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 96c4a19b1918..c28d6e2e3df2 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -91,8 +91,7 @@ static irqreturn_t pruss_handler(int irq, struct uio_info *info)
91 return IRQ_HANDLED; 91 return IRQ_HANDLED;
92} 92}
93 93
94static void pruss_cleanup(struct platform_device *dev, 94static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev)
95 struct uio_pruss_dev *gdev)
96{ 95{
97 int cnt; 96 int cnt;
98 struct uio_info *p = gdev->info; 97 struct uio_info *p = gdev->info;
@@ -103,7 +102,7 @@ static void pruss_cleanup(struct platform_device *dev,
103 } 102 }
104 iounmap(gdev->prussio_vaddr); 103 iounmap(gdev->prussio_vaddr);
105 if (gdev->ddr_vaddr) { 104 if (gdev->ddr_vaddr) {
106 dma_free_coherent(&dev->dev, extram_pool_sz, gdev->ddr_vaddr, 105 dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr,
107 gdev->ddr_paddr); 106 gdev->ddr_paddr);
108 } 107 }
109 if (gdev->sram_vaddr) 108 if (gdev->sram_vaddr)
@@ -115,13 +114,14 @@ static void pruss_cleanup(struct platform_device *dev,
115 kfree(gdev); 114 kfree(gdev);
116} 115}
117 116
118static int pruss_probe(struct platform_device *dev) 117static int pruss_probe(struct platform_device *pdev)
119{ 118{
120 struct uio_info *p; 119 struct uio_info *p;
121 struct uio_pruss_dev *gdev; 120 struct uio_pruss_dev *gdev;
122 struct resource *regs_prussio; 121 struct resource *regs_prussio;
122 struct device *dev = &pdev->dev;
123 int ret = -ENODEV, cnt = 0, len; 123 int ret = -ENODEV, cnt = 0, len;
124 struct uio_pruss_pdata *pdata = dev_get_platdata(&dev->dev); 124 struct uio_pruss_pdata *pdata = dev_get_platdata(dev);
125 125
126 gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL); 126 gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
127 if (!gdev) 127 if (!gdev)
@@ -132,10 +132,11 @@ static int pruss_probe(struct platform_device *dev)
132 kfree(gdev); 132 kfree(gdev);
133 return -ENOMEM; 133 return -ENOMEM;
134 } 134 }
135
135 /* Power on PRU in case its not done as part of boot-loader */ 136 /* Power on PRU in case its not done as part of boot-loader */
136 gdev->pruss_clk = clk_get(&dev->dev, "pruss"); 137 gdev->pruss_clk = clk_get(dev, "pruss");
137 if (IS_ERR(gdev->pruss_clk)) { 138 if (IS_ERR(gdev->pruss_clk)) {
138 dev_err(&dev->dev, "Failed to get clock\n"); 139 dev_err(dev, "Failed to get clock\n");
139 ret = PTR_ERR(gdev->pruss_clk); 140 ret = PTR_ERR(gdev->pruss_clk);
140 kfree(gdev->info); 141 kfree(gdev->info);
141 kfree(gdev); 142 kfree(gdev);
@@ -144,14 +145,14 @@ static int pruss_probe(struct platform_device *dev)
144 clk_enable(gdev->pruss_clk); 145 clk_enable(gdev->pruss_clk);
145 } 146 }
146 147
147 regs_prussio = platform_get_resource(dev, IORESOURCE_MEM, 0); 148 regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
148 if (!regs_prussio) { 149 if (!regs_prussio) {
149 dev_err(&dev->dev, "No PRUSS I/O resource specified\n"); 150 dev_err(dev, "No PRUSS I/O resource specified\n");
150 goto out_free; 151 goto out_free;
151 } 152 }
152 153
153 if (!regs_prussio->start) { 154 if (!regs_prussio->start) {
154 dev_err(&dev->dev, "Invalid memory resource\n"); 155 dev_err(dev, "Invalid memory resource\n");
155 goto out_free; 156 goto out_free;
156 } 157 }
157 158
@@ -161,27 +162,27 @@ static int pruss_probe(struct platform_device *dev)
161 (unsigned long)gen_pool_dma_alloc(gdev->sram_pool, 162 (unsigned long)gen_pool_dma_alloc(gdev->sram_pool,
162 sram_pool_sz, &gdev->sram_paddr); 163 sram_pool_sz, &gdev->sram_paddr);
163 if (!gdev->sram_vaddr) { 164 if (!gdev->sram_vaddr) {
164 dev_err(&dev->dev, "Could not allocate SRAM pool\n"); 165 dev_err(dev, "Could not allocate SRAM pool\n");
165 goto out_free; 166 goto out_free;
166 } 167 }
167 } 168 }
168 169
169 gdev->ddr_vaddr = dma_alloc_coherent(&dev->dev, extram_pool_sz, 170 gdev->ddr_vaddr = dma_alloc_coherent(dev, extram_pool_sz,
170 &(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA); 171 &(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
171 if (!gdev->ddr_vaddr) { 172 if (!gdev->ddr_vaddr) {
172 dev_err(&dev->dev, "Could not allocate external memory\n"); 173 dev_err(dev, "Could not allocate external memory\n");
173 goto out_free; 174 goto out_free;
174 } 175 }
175 176
176 len = resource_size(regs_prussio); 177 len = resource_size(regs_prussio);
177 gdev->prussio_vaddr = ioremap(regs_prussio->start, len); 178 gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
178 if (!gdev->prussio_vaddr) { 179 if (!gdev->prussio_vaddr) {
179 dev_err(&dev->dev, "Can't remap PRUSS I/O address range\n"); 180 dev_err(dev, "Can't remap PRUSS I/O address range\n");
180 goto out_free; 181 goto out_free;
181 } 182 }
182 183
183 gdev->pintc_base = pdata->pintc_base; 184 gdev->pintc_base = pdata->pintc_base;
184 gdev->hostirq_start = platform_get_irq(dev, 0); 185 gdev->hostirq_start = platform_get_irq(pdev, 0);
185 186
186 for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) { 187 for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
187 p->mem[0].addr = regs_prussio->start; 188 p->mem[0].addr = regs_prussio->start;
@@ -204,12 +205,12 @@ static int pruss_probe(struct platform_device *dev)
204 p->handler = pruss_handler; 205 p->handler = pruss_handler;
205 p->priv = gdev; 206 p->priv = gdev;
206 207
207 ret = uio_register_device(&dev->dev, p); 208 ret = uio_register_device(dev, p);
208 if (ret < 0) 209 if (ret < 0)
209 goto out_free; 210 goto out_free;
210 } 211 }
211 212
212 platform_set_drvdata(dev, gdev); 213 platform_set_drvdata(pdev, gdev);
213 return 0; 214 return 0;
214 215
215out_free: 216out_free:
@@ -221,7 +222,7 @@ static int pruss_remove(struct platform_device *dev)
221{ 222{
222 struct uio_pruss_dev *gdev = platform_get_drvdata(dev); 223 struct uio_pruss_dev *gdev = platform_get_drvdata(dev);
223 224
224 pruss_cleanup(dev, gdev); 225 pruss_cleanup(&dev->dev, gdev);
225 return 0; 226 return 0;
226} 227}
227 228
diff --git a/drivers/vme/bridges/vme_ca91cx42.h b/drivers/vme/bridges/vme_ca91cx42.h
index 02a7c794db05..d46b12dc3b82 100644
--- a/drivers/vme/bridges/vme_ca91cx42.h
+++ b/drivers/vme/bridges/vme_ca91cx42.h
@@ -360,7 +360,6 @@ static const int CA91CX42_VSI_TO[] = { VSI0_TO, VSI1_TO, VSI2_TO, VSI3_TO,
360 */ 360 */
361#define CA91CX42_DCTL_L2V (1<<31) 361#define CA91CX42_DCTL_L2V (1<<31)
362#define CA91CX42_DCTL_VDW_M (3<<22) 362#define CA91CX42_DCTL_VDW_M (3<<22)
363#define CA91CX42_DCTL_VDW_M (3<<22)
364#define CA91CX42_DCTL_VDW_D8 0 363#define CA91CX42_DCTL_VDW_D8 0
365#define CA91CX42_DCTL_VDW_D16 (1<<22) 364#define CA91CX42_DCTL_VDW_D16 (1<<22)
366#define CA91CX42_DCTL_VDW_D32 (1<<23) 365#define CA91CX42_DCTL_VDW_D32 (1<<23)
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 02df3b1381d2..e0b8a4bc73df 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -563,7 +563,7 @@ static struct platform_driver ds1wm_driver = {
563 563
564static int __init ds1wm_init(void) 564static int __init ds1wm_init(void)
565{ 565{
566 printk("DS1WM w1 busmaster driver - (c) 2004 Szabolcs Gyurko\n"); 566 pr_info("DS1WM w1 busmaster driver - (c) 2004 Szabolcs Gyurko\n");
567 return platform_driver_register(&ds1wm_driver); 567 return platform_driver_register(&ds1wm_driver);
568} 568}
569 569
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index e033491fe308..e76a9b39abb2 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -226,7 +226,7 @@ static int ds2482_wait_1wire_idle(struct ds2482_data *pdev)
226 } 226 }
227 227
228 if (retries >= DS2482_WAIT_IDLE_TIMEOUT) 228 if (retries >= DS2482_WAIT_IDLE_TIMEOUT)
229 printk(KERN_ERR "%s: timeout on channel %d\n", 229 pr_err("%s: timeout on channel %d\n",
230 __func__, pdev->channel); 230 __func__, pdev->channel);
231 231
232 return temp; 232 return temp;
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 7404ad3062b7..1de6df87bfa3 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -206,7 +206,7 @@ static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
206 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), 206 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
207 CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000); 207 CONTROL_CMD, VENDOR, value, index, NULL, 0, 1000);
208 if (err < 0) { 208 if (err < 0) {
209 printk(KERN_ERR "Failed to send command control message %x.%x: err=%d.\n", 209 pr_err("Failed to send command control message %x.%x: err=%d.\n",
210 value, index, err); 210 value, index, err);
211 return err; 211 return err;
212 } 212 }
@@ -221,7 +221,7 @@ static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index)
221 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), 221 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
222 MODE_CMD, VENDOR, value, index, NULL, 0, 1000); 222 MODE_CMD, VENDOR, value, index, NULL, 0, 1000);
223 if (err < 0) { 223 if (err < 0) {
224 printk(KERN_ERR "Failed to send mode control message %x.%x: err=%d.\n", 224 pr_err("Failed to send mode control message %x.%x: err=%d.\n",
225 value, index, err); 225 value, index, err);
226 return err; 226 return err;
227 } 227 }
@@ -236,7 +236,7 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
236 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), 236 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]),
237 COMM_CMD, VENDOR, value, index, NULL, 0, 1000); 237 COMM_CMD, VENDOR, value, index, NULL, 0, 1000);
238 if (err < 0) { 238 if (err < 0) {
239 printk(KERN_ERR "Failed to send control message %x.%x: err=%d.\n", 239 pr_err("Failed to send control message %x.%x: err=%d.\n",
240 value, index, err); 240 value, index, err);
241 return err; 241 return err;
242 } 242 }
@@ -255,7 +255,8 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
255 err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev, 255 err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
256 dev->ep[EP_STATUS]), buf, size, &count, 100); 256 dev->ep[EP_STATUS]), buf, size, &count, 100);
257 if (err < 0) { 257 if (err < 0) {
258 printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err); 258 pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
259 dev->ep[EP_STATUS], err);
259 return err; 260 return err;
260 } 261 }
261 262
@@ -267,17 +268,17 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
267 268
268static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off) 269static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
269{ 270{
270 printk(KERN_INFO "%45s: %8x\n", str, buf[off]); 271 pr_info("%45s: %8x\n", str, buf[off]);
271} 272}
272 273
273static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count) 274static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
274{ 275{
275 int i; 276 int i;
276 277
277 printk(KERN_INFO "0x%x: count=%d, status: ", dev->ep[EP_STATUS], count); 278 pr_info("0x%x: count=%d, status: ", dev->ep[EP_STATUS], count);
278 for (i=0; i<count; ++i) 279 for (i=0; i<count; ++i)
279 printk("%02x ", buf[i]); 280 pr_info("%02x ", buf[i]);
280 printk(KERN_INFO "\n"); 281 pr_info("\n");
281 282
282 if (count >= 16) { 283 if (count >= 16) {
283 ds_print_msg(buf, "enable flag", 0); 284 ds_print_msg(buf, "enable flag", 0);
@@ -305,21 +306,21 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
305 } 306 }
306 ds_print_msg(buf, "Result Register Value: ", i); 307 ds_print_msg(buf, "Result Register Value: ", i);
307 if (buf[i] & RR_NRS) 308 if (buf[i] & RR_NRS)
308 printk(KERN_INFO "NRS: Reset no presence or ...\n"); 309 pr_info("NRS: Reset no presence or ...\n");
309 if (buf[i] & RR_SH) 310 if (buf[i] & RR_SH)
310 printk(KERN_INFO "SH: short on reset or set path\n"); 311 pr_info("SH: short on reset or set path\n");
311 if (buf[i] & RR_APP) 312 if (buf[i] & RR_APP)
312 printk(KERN_INFO "APP: alarming presence on reset\n"); 313 pr_info("APP: alarming presence on reset\n");
313 if (buf[i] & RR_VPP) 314 if (buf[i] & RR_VPP)
314 printk(KERN_INFO "VPP: 12V expected not seen\n"); 315 pr_info("VPP: 12V expected not seen\n");
315 if (buf[i] & RR_CMP) 316 if (buf[i] & RR_CMP)
316 printk(KERN_INFO "CMP: compare error\n"); 317 pr_info("CMP: compare error\n");
317 if (buf[i] & RR_CRC) 318 if (buf[i] & RR_CRC)
318 printk(KERN_INFO "CRC: CRC error detected\n"); 319 pr_info("CRC: CRC error detected\n");
319 if (buf[i] & RR_RDP) 320 if (buf[i] & RR_RDP)
320 printk(KERN_INFO "RDP: redirected page\n"); 321 pr_info("RDP: redirected page\n");
321 if (buf[i] & RR_EOS) 322 if (buf[i] & RR_EOS)
322 printk(KERN_INFO "EOS: end of search error\n"); 323 pr_info("EOS: end of search error\n");
323 } 324 }
324} 325}
325 326
@@ -330,15 +331,13 @@ static void ds_reset_device(struct ds_device *dev)
330 * the strong pullup. 331 * the strong pullup.
331 */ 332 */
332 if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE)) 333 if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE))
333 printk(KERN_ERR "ds_reset_device: " 334 pr_err("ds_reset_device: Error allowing strong pullup\n");
334 "Error allowing strong pullup\n");
335 /* Chip strong pullup time was cleared. */ 335 /* Chip strong pullup time was cleared. */
336 if (dev->spu_sleep) { 336 if (dev->spu_sleep) {
337 /* lower 4 bits are 0, see ds_set_pullup */ 337 /* lower 4 bits are 0, see ds_set_pullup */
338 u8 del = dev->spu_sleep>>4; 338 u8 del = dev->spu_sleep>>4;
339 if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del)) 339 if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del))
340 printk(KERN_ERR "ds_reset_device: " 340 pr_err("ds_reset_device: Error setting duration\n");
341 "Error setting duration\n");
342 } 341 }
343} 342}
344 343
@@ -363,7 +362,7 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
363 u8 buf[ST_SIZE]; 362 u8 buf[ST_SIZE];
364 int count; 363 int count;
365 364
366 printk(KERN_INFO "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]); 365 pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
367 usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN])); 366 usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
368 367
369 count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf)); 368 count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
@@ -391,7 +390,7 @@ static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len)
391 count = 0; 390 count = 0;
392 err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000); 391 err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000);
393 if (err < 0) { 392 if (err < 0) {
394 printk(KERN_ERR "Failed to write 1-wire data to ep0x%x: " 393 pr_err("Failed to write 1-wire data to ep0x%x: "
395 "err=%d.\n", dev->ep[EP_DATA_OUT], err); 394 "err=%d.\n", dev->ep[EP_DATA_OUT], err);
396 return err; 395 return err;
397 } 396 }
@@ -475,7 +474,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
475 } while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100); 474 } while (!(st->status & ST_IDLE) && !(err < 0) && ++count < 100);
476 475
477 if (err >= 16 && st->status & ST_EPOF) { 476 if (err >= 16 && st->status & ST_EPOF) {
478 printk(KERN_INFO "Resetting device after ST_EPOF.\n"); 477 pr_info("Resetting device after ST_EPOF.\n");
479 ds_reset_device(dev); 478 ds_reset_device(dev);
480 /* Always dump the device status. */ 479 /* Always dump the device status. */
481 count = 101; 480 count = 101;
@@ -992,7 +991,7 @@ static int ds_probe(struct usb_interface *intf,
992 991
993 dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL); 992 dev = kzalloc(sizeof(struct ds_device), GFP_KERNEL);
994 if (!dev) { 993 if (!dev) {
995 printk(KERN_INFO "Failed to allocate new DS9490R structure.\n"); 994 pr_info("Failed to allocate new DS9490R structure.\n");
996 return -ENOMEM; 995 return -ENOMEM;
997 } 996 }
998 dev->udev = usb_get_dev(udev); 997 dev->udev = usb_get_dev(udev);
@@ -1024,7 +1023,8 @@ static int ds_probe(struct usb_interface *intf,
1024 1023
1025 iface_desc = &intf->altsetting[alt]; 1024 iface_desc = &intf->altsetting[alt];
1026 if (iface_desc->desc.bNumEndpoints != NUM_EP-1) { 1025 if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
1027 printk(KERN_INFO "Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints); 1026 pr_info("Num endpoints=%d. It is not DS9490R.\n",
1027 iface_desc->desc.bNumEndpoints);
1028 err = -EINVAL; 1028 err = -EINVAL;
1029 goto err_out_clear; 1029 goto err_out_clear;
1030 } 1030 }
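
One detail worth noting in the ds_dump_status() hunk above: the continuation printk("%02x ", ...) calls become pr_info(), which starts a new log line per byte. Code that wants to keep appending to one line usually uses pr_cont() for the follow-on pieces; a hedged sketch of that alternative pattern (this is not what the patch itself does):

    #include <linux/printk.h>

    static void dump_bytes(const unsigned char *buf, int count)
    {
    	int i;

    	pr_info("status:");			/* opens the line with a log level */
    	for (i = 0; i < count; i++)
    		pr_cont(" %02x", buf[i]);	/* continuation, no new prefix */
    	pr_cont("\n");
    }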
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a5df5e89d456..da3d0f0ad63c 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -15,16 +15,13 @@
15#include <linux/clk.h> 15#include <linux/clk.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/jiffies.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
20 21
21#include "../w1.h" 22#include "../w1.h"
22#include "../w1_int.h" 23#include "../w1_int.h"
23 24
24/* According to the mx27 Datasheet the reset procedure should take up to about
25 * 1350us. We set the timeout to 500*100us = 50ms for sure */
26#define MXC_W1_RESET_TIMEOUT 500
27
28/* 25/*
29 * MXC W1 Register offsets 26 * MXC W1 Register offsets
30 */ 27 */
@@ -35,6 +32,7 @@
35# define MXC_W1_CONTROL_RPP BIT(7) 32# define MXC_W1_CONTROL_RPP BIT(7)
36#define MXC_W1_TIME_DIVIDER 0x02 33#define MXC_W1_TIME_DIVIDER 0x02
37#define MXC_W1_RESET 0x04 34#define MXC_W1_RESET 0x04
35# define MXC_W1_RESET_RST BIT(0)
38 36
39struct mxc_w1_device { 37struct mxc_w1_device {
40 void __iomem *regs; 38 void __iomem *regs;
@@ -49,24 +47,25 @@ struct mxc_w1_device {
49 */ 47 */
50static u8 mxc_w1_ds2_reset_bus(void *data) 48static u8 mxc_w1_ds2_reset_bus(void *data)
51{ 49{
52 u8 reg_val;
53 unsigned int timeout_cnt = 0;
54 struct mxc_w1_device *dev = data; 50 struct mxc_w1_device *dev = data;
51 unsigned long timeout;
52
53 writeb(MXC_W1_CONTROL_RPP, dev->regs + MXC_W1_CONTROL);
55 54
56 writeb(MXC_W1_CONTROL_RPP, (dev->regs + MXC_W1_CONTROL)); 55 /* Wait for reset sequence 511+512us, use 1500us for sure */
56 timeout = jiffies + usecs_to_jiffies(1500);
57 57
58 while (1) { 58 udelay(511 + 512);
59 reg_val = readb(dev->regs + MXC_W1_CONTROL);
60 59
61 if (!(reg_val & MXC_W1_CONTROL_RPP) || 60 do {
62 timeout_cnt > MXC_W1_RESET_TIMEOUT) 61 u8 ctrl = readb(dev->regs + MXC_W1_CONTROL);
63 break;
64 else
65 timeout_cnt++;
66 62
67 udelay(100); 63 /* PST bit is valid after the RPP bit is self-cleared */
68 } 64 if (!(ctrl & MXC_W1_CONTROL_RPP))
69 return !(reg_val & MXC_W1_CONTROL_PST); 65 return !(ctrl & MXC_W1_CONTROL_PST);
66 } while (time_is_after_jiffies(timeout));
67
68 return 1;
70} 69}
71 70
72/* 71/*
@@ -76,22 +75,25 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
76 */ 75 */
77static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit) 76static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
78{ 77{
79 struct mxc_w1_device *mdev = data; 78 struct mxc_w1_device *dev = data;
80 void __iomem *ctrl_addr = mdev->regs + MXC_W1_CONTROL; 79 unsigned long timeout;
81 unsigned int timeout_cnt = 400; /* Takes max. 120us according to
82 * datasheet.
83 */
84 80
85 writeb(MXC_W1_CONTROL_WR(bit), ctrl_addr); 81 writeb(MXC_W1_CONTROL_WR(bit), dev->regs + MXC_W1_CONTROL);
86 82
87 while (timeout_cnt--) { 83 /* Wait for read/write bit (60us, Max 120us), use 200us for sure */
88 if (!(readb(ctrl_addr) & MXC_W1_CONTROL_WR(bit))) 84 timeout = jiffies + usecs_to_jiffies(200);
89 break;
90 85
91 udelay(1); 86 udelay(60);
92 }
93 87
94 return !!(readb(ctrl_addr) & MXC_W1_CONTROL_RDST); 88 do {
89 u8 ctrl = readb(dev->regs + MXC_W1_CONTROL);
90
91 /* RDST bit is valid after the WR1/RD bit is self-cleared */
92 if (!(ctrl & MXC_W1_CONTROL_WR(bit)))
93 return !!(ctrl & MXC_W1_CONTROL_RDST);
94 } while (time_is_after_jiffies(timeout));
95
96 return 0;
95} 97}
96 98
97static int mxc_w1_probe(struct platform_device *pdev) 99static int mxc_w1_probe(struct platform_device *pdev)
@@ -131,6 +133,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
131 if (err) 133 if (err)
132 return err; 134 return err;
133 135
136 /* Software reset 1-Wire module */
137 writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
138 writeb(0, mdev->regs + MXC_W1_RESET);
139
134 writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER); 140 writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
135 141
136 mdev->bus_master.data = mdev; 142 mdev->bus_master.data = mdev;
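
The mxc_w1 rework above replaces hand-counted udelay() loops with jiffies-based deadlines: delay for the typical completion time first, then poll the control register until the deadline computed with usecs_to_jiffies() passes. A stripped-down sketch of the same pattern against a hypothetical status register:

    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/errno.h>

    /* Poll @reg until @busy_bit clears or ~200us elapse (hypothetical register). */
    static int wait_not_busy(void __iomem *reg, u8 busy_bit)
    {
    	unsigned long timeout = jiffies + usecs_to_jiffies(200);

    	udelay(60);				/* typical completion time */

    	do {
    		if (!(readb(reg) & busy_bit))
    			return 0;
    	} while (time_is_after_jiffies(timeout));

    	return -ETIMEDOUT;
    }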
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 1cdce80b6abf..cfe74d09932e 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -38,6 +38,14 @@ config W1_SLAVE_DS2413
38 Say Y here if you want to use a 1-wire 38 Say Y here if you want to use a 1-wire
39 DS2413 Dual Channel Addressable Switch device support 39 DS2413 Dual Channel Addressable Switch device support
40 40
41config W1_SLAVE_DS2406
42 tristate "Dual Channel Addressable Switch 0x12 family support (DS2406)"
43 select CRC16
44 help
45 Say Y or M here if you want to use a 1-wire
46 DS2406 Dual Channel Addressable Switch. EPROM read/write
47 support for these devices is not implemented.
48
41config W1_SLAVE_DS2423 49config W1_SLAVE_DS2423
42 tristate "Counter 1-wire device (DS2423)" 50 tristate "Counter 1-wire device (DS2423)"
43 select CRC16 51 select CRC16
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 06529f3157ab..1e9989afe7bf 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o 6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
7obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o 7obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
8obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o 8obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
9obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
9obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o 10obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
10obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o 11obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
11obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o 12obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
new file mode 100644
index 000000000000..d488961a8c90
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2406.c
@@ -0,0 +1,168 @@
1/*
2 * w1_ds2406.c - w1 family 12 (DS2406) driver
3 * based on w1_ds2413.c by Mariusz Bialonczyk <manio@skyboo.net>
4 *
5 * Copyright (c) 2014 Scott Alfter <scott@alfter.us>
6 *
7 * This source code is licensed under the GNU General Public License,
8 * Version 2. See the file COPYING for more details.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/device.h>
15#include <linux/types.h>
16#include <linux/delay.h>
17#include <linux/slab.h>
18#include <linux/crc16.h>
19
20#include "../w1.h"
21#include "../w1_int.h"
22#include "../w1_family.h"
23
24MODULE_LICENSE("GPL");
25MODULE_AUTHOR("Scott Alfter <scott@alfter.us>");
26MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO");
27
28#define W1_F12_FUNC_READ_STATUS 0xAA
29#define W1_F12_FUNC_WRITE_STATUS 0x55
30
31static ssize_t w1_f12_read_state(
32 struct file *filp, struct kobject *kobj,
33 struct bin_attribute *bin_attr,
34 char *buf, loff_t off, size_t count)
35{
36 u8 w1_buf[6]={W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0};
37 struct w1_slave *sl = kobj_to_w1_slave(kobj);
38 u16 crc=0;
39 int i;
40 ssize_t rtnval=1;
41
42 if (off != 0)
43 return 0;
44 if (!buf)
45 return -EINVAL;
46
47 mutex_lock(&sl->master->bus_mutex);
48
49 if (w1_reset_select_slave(sl)) {
50 mutex_unlock(&sl->master->bus_mutex);
51 return -EIO;
52 }
53
54 w1_write_block(sl->master, w1_buf, 3);
55 w1_read_block(sl->master, w1_buf+3, 3);
56 for (i=0; i<6; i++)
57 crc=crc16_byte(crc, w1_buf[i]);
58 if (crc==0xb001) /* good read? */
59 *buf=((w1_buf[3]>>5)&3)|0x30;
60 else
61 rtnval=-EIO;
62
63 mutex_unlock(&sl->master->bus_mutex);
64
65 return rtnval;
66}
67
68static ssize_t w1_f12_write_output(
69 struct file *filp, struct kobject *kobj,
70 struct bin_attribute *bin_attr,
71 char *buf, loff_t off, size_t count)
72{
73 struct w1_slave *sl = kobj_to_w1_slave(kobj);
74 u8 w1_buf[6]={W1_F12_FUNC_WRITE_STATUS, 7, 0, 0, 0, 0};
75 u16 crc=0;
76 int i;
77 ssize_t rtnval=1;
78
79 if (count != 1 || off != 0)
80 return -EFAULT;
81
82 mutex_lock(&sl->master->bus_mutex);
83
84 if (w1_reset_select_slave(sl)) {
85 mutex_unlock(&sl->master->bus_mutex);
86 return -EIO;
87 }
88
89 w1_buf[3] = (((*buf)&3)<<5)|0x1F;
90 w1_write_block(sl->master, w1_buf, 4);
91 w1_read_block(sl->master, w1_buf+4, 2);
92 for (i=0; i<6; i++)
93 crc=crc16_byte(crc, w1_buf[i]);
94 if (crc==0xb001) /* good read? */
95 w1_write_8(sl->master, 0xFF);
96 else
97 rtnval=-EIO;
98
99 mutex_unlock(&sl->master->bus_mutex);
100 return rtnval;
101}
102
103#define NB_SYSFS_BIN_FILES 2
104static struct bin_attribute w1_f12_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
105 {
106 .attr = {
107 .name = "state",
108 .mode = S_IRUGO,
109 },
110 .size = 1,
111 .read = w1_f12_read_state,
112 },
113 {
114 .attr = {
115 .name = "output",
116 .mode = S_IRUGO | S_IWUSR | S_IWGRP,
117 },
118 .size = 1,
119 .write = w1_f12_write_output,
120 }
121};
122
123static int w1_f12_add_slave(struct w1_slave *sl)
124{
125 int err = 0;
126 int i;
127
128 for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
129 err = sysfs_create_bin_file(
130 &sl->dev.kobj,
131 &(w1_f12_sysfs_bin_files[i]));
132 if (err)
133 while (--i >= 0)
134 sysfs_remove_bin_file(&sl->dev.kobj,
135 &(w1_f12_sysfs_bin_files[i]));
136 return err;
137}
138
139static void w1_f12_remove_slave(struct w1_slave *sl)
140{
141 int i;
142 for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
143 sysfs_remove_bin_file(&sl->dev.kobj,
144 &(w1_f12_sysfs_bin_files[i]));
145}
146
147static struct w1_family_ops w1_f12_fops = {
148 .add_slave = w1_f12_add_slave,
149 .remove_slave = w1_f12_remove_slave,
150};
151
152static struct w1_family w1_family_12 = {
153 .fid = W1_FAMILY_DS2406,
154 .fops = &w1_f12_fops,
155};
156
157static int __init w1_f12_init(void)
158{
159 return w1_register_family(&w1_family_12);
160}
161
162static void __exit w1_f12_exit(void)
163{
164 w1_unregister_family(&w1_family_12);
165}
166
167module_init(w1_f12_init);
168module_exit(w1_f12_exit);
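
The new DS2406 driver validates each transfer by running every transmitted and received byte, the returned inverted CRC-16 included, through crc16_byte() and comparing against 0xB001: when the block is intact, that fixed residual falls out of the accumulation. A self-contained illustration of the check, separate from the driver's buffers:

    #include <linux/crc16.h>
    #include <linux/types.h>

    /*
     * @buf holds the command/data bytes followed by the inverted CRC-16
     * returned by the slave.  A clean transfer leaves the residual 0xB001.
     */
    static bool w1_block_crc_ok(const u8 *buf, int len)
    {
    	u16 crc = 0;
    	int i;

    	for (i = 0; i < len; i++)
    		crc = crc16_byte(crc, buf[i]);

    	return crc == 0xB001;
    }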
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 65f90dccd60e..d9079d48d112 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -181,8 +181,7 @@ static struct w1_family w1_ds2760_family = {
181 181
182static int __init w1_ds2760_init(void) 182static int __init w1_ds2760_init(void)
183{ 183{
184 printk(KERN_INFO "1-Wire driver for the DS2760 battery monitor " 184 pr_info("1-Wire driver for the DS2760 battery monitor chip - (c) 2004-2005, Szabolcs Gyurko\n");
185 " chip - (c) 2004-2005, Szabolcs Gyurko\n");
186 ida_init(&bat_ida); 185 ida_init(&bat_ida);
187 return w1_register_family(&w1_ds2760_family); 186 return w1_register_family(&w1_ds2760_family);
188} 187}
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 5d7341520544..592f7edc671e 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1162,28 +1162,26 @@ static int __init w1_init(void)
1162{ 1162{
1163 int retval; 1163 int retval;
1164 1164
1165 printk(KERN_INFO "Driver for 1-wire Dallas network protocol.\n"); 1165 pr_info("Driver for 1-wire Dallas network protocol.\n");
1166 1166
1167 w1_init_netlink(); 1167 w1_init_netlink();
1168 1168
1169 retval = bus_register(&w1_bus_type); 1169 retval = bus_register(&w1_bus_type);
1170 if (retval) { 1170 if (retval) {
1171 printk(KERN_ERR "Failed to register bus. err=%d.\n", retval); 1171 pr_err("Failed to register bus. err=%d.\n", retval);
1172 goto err_out_exit_init; 1172 goto err_out_exit_init;
1173 } 1173 }
1174 1174
1175 retval = driver_register(&w1_master_driver); 1175 retval = driver_register(&w1_master_driver);
1176 if (retval) { 1176 if (retval) {
1177 printk(KERN_ERR 1177 pr_err("Failed to register master driver. err=%d.\n",
1178 "Failed to register master driver. err=%d.\n",
1179 retval); 1178 retval);
1180 goto err_out_bus_unregister; 1179 goto err_out_bus_unregister;
1181 } 1180 }
1182 1181
1183 retval = driver_register(&w1_slave_driver); 1182 retval = driver_register(&w1_slave_driver);
1184 if (retval) { 1183 if (retval) {
1185 printk(KERN_ERR 1184 pr_err("Failed to register slave driver. err=%d.\n",
1186 "Failed to register slave driver. err=%d.\n",
1187 retval); 1185 retval);
1188 goto err_out_master_unregister; 1186 goto err_out_master_unregister;
1189 } 1187 }
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 3651ec801f45..1dc3051f7d76 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -87,7 +87,7 @@ void w1_unregister_family(struct w1_family *fent)
87 w1_reconnect_slaves(fent, 0); 87 w1_reconnect_slaves(fent, 0);
88 88
89 while (atomic_read(&fent->refcnt)) { 89 while (atomic_read(&fent->refcnt)) {
90 printk(KERN_INFO "Waiting for family %u to become free: refcnt=%d.\n", 90 pr_info("Waiting for family %u to become free: refcnt=%d.\n",
91 fent->fid, atomic_read(&fent->refcnt)); 91 fent->fid, atomic_read(&fent->refcnt));
92 92
93 if (msleep_interruptible(1000)) 93 if (msleep_interruptible(1000))
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 26ca1343055b..0d18365b61ad 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -40,6 +40,7 @@
40#define W1_FAMILY_DS2760 0x30 40#define W1_FAMILY_DS2760 0x30
41#define W1_FAMILY_DS2780 0x32 41#define W1_FAMILY_DS2780 0x32
42#define W1_FAMILY_DS2413 0x3A 42#define W1_FAMILY_DS2413 0x3A
43#define W1_FAMILY_DS2406 0x12
43#define W1_THERM_DS1825 0x3B 44#define W1_THERM_DS1825 0x3B
44#define W1_FAMILY_DS2781 0x3D 45#define W1_FAMILY_DS2781 0x3D
45#define W1_THERM_DS28EA00 0x42 46#define W1_THERM_DS28EA00 0x42
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 728039d2efe1..47249a30eae3 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -38,7 +38,7 @@ module_param_named(search_count, w1_search_count, int, 0);
38static int w1_enable_pullup = 1; 38static int w1_enable_pullup = 1;
39module_param_named(enable_pullup, w1_enable_pullup, int, 0); 39module_param_named(enable_pullup, w1_enable_pullup, int, 0);
40 40
41static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl, 41static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
42 struct device_driver *driver, 42 struct device_driver *driver,
43 struct device *device) 43 struct device *device)
44{ 44{
@@ -50,8 +50,7 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
50 */ 50 */
51 dev = kzalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL); 51 dev = kzalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
52 if (!dev) { 52 if (!dev) {
53 printk(KERN_ERR 53 pr_err("Failed to allocate %zd bytes for new w1 device.\n",
54 "Failed to allocate %zd bytes for new w1 device.\n",
55 sizeof(struct w1_master)); 54 sizeof(struct w1_master));
56 return NULL; 55 return NULL;
57 } 56 }
@@ -91,7 +90,7 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
91 90
92 err = device_register(&dev->dev); 91 err = device_register(&dev->dev);
93 if (err) { 92 if (err) {
94 printk(KERN_ERR "Failed to register master device. err=%d\n", err); 93 pr_err("Failed to register master device. err=%d\n", err);
95 memset(dev, 0, sizeof(struct w1_master)); 94 memset(dev, 0, sizeof(struct w1_master));
96 kfree(dev); 95 kfree(dev);
97 dev = NULL; 96 dev = NULL;
@@ -116,13 +115,13 @@ int w1_add_master_device(struct w1_bus_master *master)
116 struct w1_netlink_msg msg; 115 struct w1_netlink_msg msg;
117 int id, found; 116 int id, found;
118 117
119 /* validate minimum functionality */ 118 /* validate minimum functionality */
120 if (!(master->touch_bit && master->reset_bus) && 119 if (!(master->touch_bit && master->reset_bus) &&
121 !(master->write_bit && master->read_bit) && 120 !(master->write_bit && master->read_bit) &&
122 !(master->write_byte && master->read_byte && master->reset_bus)) { 121 !(master->write_byte && master->read_byte && master->reset_bus)) {
123 printk(KERN_ERR "w1_add_master_device: invalid function set\n"); 122 pr_err("w1_add_master_device: invalid function set\n");
124 return(-EINVAL); 123 return(-EINVAL);
125 } 124 }
126 125
127 /* Lock until the device is added (or not) to w1_masters. */ 126 /* Lock until the device is added (or not) to w1_masters. */
128 mutex_lock(&w1_mlock); 127 mutex_lock(&w1_mlock);
@@ -254,7 +253,7 @@ void w1_remove_master_device(struct w1_bus_master *bm)
254 } 253 }
255 254
256 if (!found) { 255 if (!found) {
257 printk(KERN_ERR "Device doesn't exist.\n"); 256 pr_err("Device doesn't exist.\n");
258 return; 257 return;
259 } 258 }
260 259
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h
index 9c7bd62e6bdc..f9eecff23b8d 100644
--- a/drivers/w1/w1_log.h
+++ b/drivers/w1/w1_log.h
@@ -29,8 +29,8 @@
29#else 29#else
30# define assert(expr) \ 30# define assert(expr) \
31 if(unlikely(!(expr))) { \ 31 if(unlikely(!(expr))) { \
32 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ 32 pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
33 #expr, __FILE__, __func__, __LINE__); \ 33 #expr, __FILE__, __func__, __LINE__); \
34 } 34 }
35#endif 35#endif
36 36
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 351a2978ba72..dd9656237274 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -680,8 +680,7 @@ static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
680 if (sl) 680 if (sl)
681 dev = sl->master; 681 dev = sl->master;
682 } else { 682 } else {
683 printk(KERN_NOTICE 683 pr_notice("%s: cn: %x.%x, wrong type: %u, len: %u.\n",
684 "%s: cn: %x.%x, wrong type: %u, len: %u.\n",
685 __func__, cn->id.idx, cn->id.val, 684 __func__, cn->id.idx, cn->id.val,
686 msg->type, msg->len); 685 msg->type, msg->len);
687 err = -EPROTO; 686 err = -EPROTO;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c1c0b0cf39b4..5ba0360663a7 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -268,6 +268,9 @@
268 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \ 268 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
269 *(.pci_fixup_suspend) \ 269 *(.pci_fixup_suspend) \
270 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \ 270 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
271 VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
272 *(.pci_fixup_suspend_late) \
273 VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
271 } \ 274 } \
272 \ 275 \
273 /* Built-in firmware blobs */ \ 276 /* Built-in firmware blobs */ \
diff --git a/include/linux/extcon/sm5502.h b/include/linux/extcon/sm5502.h
new file mode 100644
index 000000000000..030526bf8d79
--- /dev/null
+++ b/include/linux/extcon/sm5502.h
@@ -0,0 +1,287 @@
1/*
2 * sm5502.h
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef __LINUX_EXTCON_SM5502_H
18#define __LINUX_EXTCON_SM5502_H
19
20enum sm5502_types {
21 TYPE_SM5502,
22};
23
24/* SM5502 registers */
25enum sm5502_reg {
26 SM5502_REG_DEVICE_ID = 0x01,
27 SM5502_REG_CONTROL,
28 SM5502_REG_INT1,
29 SM5502_REG_INT2,
30 SM5502_REG_INTMASK1,
31 SM5502_REG_INTMASK2,
32 SM5502_REG_ADC,
33 SM5502_REG_TIMING_SET1,
34 SM5502_REG_TIMING_SET2,
35 SM5502_REG_DEV_TYPE1,
36 SM5502_REG_DEV_TYPE2,
37 SM5502_REG_BUTTON1,
38 SM5502_REG_BUTTON2,
39 SM5502_REG_CAR_KIT_STATUS,
40 SM5502_REG_RSVD1,
41 SM5502_REG_RSVD2,
42 SM5502_REG_RSVD3,
43 SM5502_REG_RSVD4,
44 SM5502_REG_MANUAL_SW1,
45 SM5502_REG_MANUAL_SW2,
46 SM5502_REG_DEV_TYPE3,
47 SM5502_REG_RSVD5,
48 SM5502_REG_RSVD6,
49 SM5502_REG_RSVD7,
50 SM5502_REG_RSVD8,
51 SM5502_REG_RSVD9,
52 SM5502_REG_RESET,
53 SM5502_REG_RSVD10,
54 SM5502_REG_RESERVED_ID1,
55 SM5502_REG_RSVD11,
56 SM5502_REG_RSVD12,
57 SM5502_REG_RESERVED_ID2,
58 SM5502_REG_RSVD13,
59 SM5502_REG_OCP,
60 SM5502_REG_RSVD14,
61 SM5502_REG_RSVD15,
62 SM5502_REG_RSVD16,
63 SM5502_REG_RSVD17,
64 SM5502_REG_RSVD18,
65 SM5502_REG_RSVD19,
66 SM5502_REG_RSVD20,
67 SM5502_REG_RSVD21,
68 SM5502_REG_RSVD22,
69 SM5502_REG_RSVD23,
70 SM5502_REG_RSVD24,
71 SM5502_REG_RSVD25,
72 SM5502_REG_RSVD26,
73 SM5502_REG_RSVD27,
74 SM5502_REG_RSVD28,
75 SM5502_REG_RSVD29,
76 SM5502_REG_RSVD30,
77 SM5502_REG_RSVD31,
78 SM5502_REG_RSVD32,
79 SM5502_REG_RSVD33,
80 SM5502_REG_RSVD34,
81 SM5502_REG_RSVD35,
82 SM5502_REG_RSVD36,
83 SM5502_REG_RESERVED_ID3,
84
85 SM5502_REG_END,
86};
87
88/* Define SM5502 MASK/SHIFT constant */
89#define SM5502_REG_DEVICE_ID_VENDOR_SHIFT 0
90#define SM5502_REG_DEVICE_ID_VERSION_SHIFT 3
91#define SM5502_REG_DEVICE_ID_VENDOR_MASK (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT)
92#define SM5502_REG_DEVICE_ID_VERSION_MASK (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT)
93
94#define SM5502_REG_CONTROL_MASK_INT_SHIFT 0
95#define SM5502_REG_CONTROL_WAIT_SHIFT 1
96#define SM5502_REG_CONTROL_MANUAL_SW_SHIFT 2
97#define SM5502_REG_CONTROL_RAW_DATA_SHIFT 3
98#define SM5502_REG_CONTROL_SW_OPEN_SHIFT 4
99#define SM5502_REG_CONTROL_MASK_INT_MASK (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT)
100#define SM5502_REG_CONTROL_WAIT_MASK (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT)
101#define SM5502_REG_CONTROL_MANUAL_SW_MASK (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT)
102#define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT)
103#define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT)
104
105#define SM5502_REG_INTM1_ATTACH_SHIFT 0
106#define SM5502_REG_INTM1_DETACH_SHIFT 1
107#define SM5502_REG_INTM1_KP_SHIFT 2
108#define SM5502_REG_INTM1_LKP_SHIFT 3
109#define SM5502_REG_INTM1_LKR_SHIFT 4
110#define SM5502_REG_INTM1_OVP_EVENT_SHIFT 5
111#define SM5502_REG_INTM1_OCP_EVENT_SHIFT 6
112#define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT 7
113#define SM5502_REG_INTM1_ATTACH_MASK (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT)
114#define SM5502_REG_INTM1_DETACH_MASK (0x1 << SM5502_REG_INTM1_DETACH_SHIFT)
115#define SM5502_REG_INTM1_KP_MASK (0x1 << SM5502_REG_INTM1_KP_SHIFT)
116#define SM5502_REG_INTM1_LKP_MASK (0x1 << SM5502_REG_INTM1_LKP_SHIFT)
117#define SM5502_REG_INTM1_LKR_MASK (0x1 << SM5502_REG_INTM1_LKR_SHIFT)
118#define SM5502_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT)
119#define SM5502_REG_INTM1_OCP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT)
120#define SM5502_REG_INTM1_OVP_OCP_DIS_MASK (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT)
121
122#define SM5502_REG_INTM2_VBUS_DET_SHIFT 0
123#define SM5502_REG_INTM2_REV_ACCE_SHIFT 1
124#define SM5502_REG_INTM2_ADC_CHG_SHIFT 2
125#define SM5502_REG_INTM2_STUCK_KEY_SHIFT 3
126#define SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT 4
127#define SM5502_REG_INTM2_MHL_SHIFT 5
128#define SM5502_REG_INTM2_VBUS_DET_MASK (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT)
129#define SM5502_REG_INTM2_REV_ACCE_MASK (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT)
130#define SM5502_REG_INTM2_ADC_CHG_MASK (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT)
131#define SM5502_REG_INTM2_STUCK_KEY_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT)
132#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT)
133#define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT)
134
135#define SM5502_REG_ADC_SHIFT 0
136#define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT)
137
138#define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4
139#define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT)
140#define TIMING_KEY_PRESS_100MS 0x0
141#define TIMING_KEY_PRESS_200MS 0x1
142#define TIMING_KEY_PRESS_300MS 0x2
143#define TIMING_KEY_PRESS_400MS 0x3
144#define TIMING_KEY_PRESS_500MS 0x4
145#define TIMING_KEY_PRESS_600MS 0x5
146#define TIMING_KEY_PRESS_700MS 0x6
147#define TIMING_KEY_PRESS_800MS 0x7
148#define TIMING_KEY_PRESS_900MS 0x8
149#define TIMING_KEY_PRESS_1000MS 0x9
150#define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT 0
151#define SM5502_REG_TIMING_SET1_ADC_DET_MASK (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT)
152#define TIMING_ADC_DET_50MS 0x0
153#define TIMING_ADC_DET_100MS 0x1
154#define TIMING_ADC_DET_150MS 0x2
155#define TIMING_ADC_DET_200MS 0x3
156#define TIMING_ADC_DET_300MS 0x4
157#define TIMING_ADC_DET_400MS 0x5
158#define TIMING_ADC_DET_500MS 0x6
159#define TIMING_ADC_DET_600MS 0x7
160#define TIMING_ADC_DET_700MS 0x8
161#define TIMING_ADC_DET_800MS 0x9
162#define TIMING_ADC_DET_900MS 0xA
163#define TIMING_ADC_DET_1000MS 0xB
164
165#define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT 4
166#define SM5502_REG_TIMING_SET2_SW_WAIT_MASK (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT)
167#define TIMING_SW_WAIT_10MS 0x0
168#define TIMING_SW_WAIT_30MS 0x1
169#define TIMING_SW_WAIT_50MS 0x2
170#define TIMING_SW_WAIT_70MS 0x3
171#define TIMING_SW_WAIT_90MS 0x4
172#define TIMING_SW_WAIT_110MS 0x5
173#define TIMING_SW_WAIT_130MS 0x6
174#define TIMING_SW_WAIT_150MS 0x7
175#define TIMING_SW_WAIT_170MS 0x8
176#define TIMING_SW_WAIT_190MS 0x9
177#define TIMING_SW_WAIT_210MS 0xA
178#define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT 0
179#define SM5502_REG_TIMING_SET2_LONG_KEY_MASK (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT)
180#define TIMING_LONG_KEY_300MS 0x0
181#define TIMING_LONG_KEY_400MS 0x1
182#define TIMING_LONG_KEY_500MS 0x2
183#define TIMING_LONG_KEY_600MS 0x3
184#define TIMING_LONG_KEY_700MS 0x4
185#define TIMING_LONG_KEY_800MS 0x5
186#define TIMING_LONG_KEY_900MS 0x6
187#define TIMING_LONG_KEY_1000MS 0x7
188#define TIMING_LONG_KEY_1100MS 0x8
189#define TIMING_LONG_KEY_1200MS 0x9
190#define TIMING_LONG_KEY_1300MS 0xA
191#define TIMING_LONG_KEY_1400MS 0xB
192#define TIMING_LONG_KEY_1500MS 0xC
193
194#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT 0
195#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT 1
196#define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT 2
197#define SM5502_REG_DEV_TYPE1_UART_SHIFT 3
198#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT 4
199#define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT 5
200#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT 6
201#define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT 7
202#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT)
203#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1__MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT)
204#define SM5502_REG_DEV_TYPE1_USB_SDP_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT)
205#define SM5502_REG_DEV_TYPE1_UART_MASK (0x1 << SM5502_REG_DEV_TYPE1_UART_SHIFT)
206#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT)
207#define SM5502_REG_DEV_TYPE1_USB_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT)
208#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT)
209#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT)
210
211#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0
212#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1
213#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2
214#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT 3
215#define SM5502_REG_DEV_TYPE2_PPD_SHIFT 4
216#define SM5502_REG_DEV_TYPE2_TTY_SHIFT 5
217#define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT 6
218#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT)
219#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT)
220#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT)
221#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT)
222#define SM5502_REG_DEV_TYPE2_PPD_MASK (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT)
223#define SM5502_REG_DEV_TYPE2_TTY_MASK (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT)
224#define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT)
225
226#define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT 0
227#define SM5502_REG_MANUAL_SW1_DP_SHIFT 2
228#define SM5502_REG_MANUAL_SW1_DM_SHIFT 5
229#define SM5502_REG_MANUAL_SW1_VBUSIN_MASK (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT)
230#define SM5502_REG_MANUAL_SW1_DP_MASK (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT)
231#define SM5502_REG_MANUAL_SW1_DM_MASK (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT)
232#define VBUSIN_SWITCH_OPEN 0x0
233#define VBUSIN_SWITCH_VBUSOUT 0x1
234#define VBUSIN_SWITCH_MIC 0x2
235#define VBUSIN_SWITCH_VBUSOUT_WITH_USB 0x3
236#define DM_DP_CON_SWITCH_OPEN 0x0
237#define DM_DP_CON_SWITCH_USB 0x1
238#define DM_DP_CON_SWITCH_AUDIO 0x2
239#define DM_DP_CON_SWITCH_UART 0x3
240#define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
241 | (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
242#define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
243 | (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
244#define DM_DP_SWITCH_AUDIO ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
245 | (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
246#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
247 | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
248
249/* SM5502 Interrupts */
250enum sm5502_irq {
251 /* INT1 */
252 SM5502_IRQ_INT1_ATTACH,
253 SM5502_IRQ_INT1_DETACH,
254 SM5502_IRQ_INT1_KP,
255 SM5502_IRQ_INT1_LKP,
256 SM5502_IRQ_INT1_LKR,
257 SM5502_IRQ_INT1_OVP_EVENT,
258 SM5502_IRQ_INT1_OCP_EVENT,
259 SM5502_IRQ_INT1_OVP_OCP_DIS,
260
261 /* INT2 */
262 SM5502_IRQ_INT2_VBUS_DET,
263 SM5502_IRQ_INT2_REV_ACCE,
264 SM5502_IRQ_INT2_ADC_CHG,
265 SM5502_IRQ_INT2_STUCK_KEY,
266 SM5502_IRQ_INT2_STUCK_KEY_RCV,
267 SM5502_IRQ_INT2_MHL,
268
269 SM5502_IRQ_NUM,
270};
271
272#define SM5502_IRQ_INT1_ATTACH_MASK BIT(0)
273#define SM5502_IRQ_INT1_DETACH_MASK BIT(1)
274#define SM5502_IRQ_INT1_KP_MASK BIT(2)
275#define SM5502_IRQ_INT1_LKP_MASK BIT(3)
276#define SM5502_IRQ_INT1_LKR_MASK BIT(4)
277#define SM5502_IRQ_INT1_OVP_EVENT_MASK BIT(5)
278#define SM5502_IRQ_INT1_OCP_EVENT_MASK BIT(6)
279#define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK BIT(7)
280#define SM5502_IRQ_INT2_VBUS_DET_MASK BIT(0)
281#define SM5502_IRQ_INT2_REV_ACCE_MASK BIT(1)
282#define SM5502_IRQ_INT2_ADC_CHG_MASK BIT(2)
283#define SM5502_IRQ_INT2_STUCK_KEY_MASK BIT(3)
284#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4)
285#define SM5502_IRQ_INT2_MHL_MASK BIT(5)
286
287#endif /* __LINUX_EXTCON_SM5502_H */
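
The new sm5502.h defines paired _SHIFT/_MASK constants for every register field, so a consumer reads the raw register and recovers a field by masking and shifting. A hedged sketch using the ADC field; the regmap handle and helper function are assumptions, not the in-tree extcon-sm5502 code:

    #include <linux/regmap.h>
    #include <linux/extcon/sm5502.h>

    static int sm5502_read_adc(struct regmap *regmap, u8 *adc)
    {
    	unsigned int val;
    	int ret;

    	ret = regmap_read(regmap, SM5502_REG_ADC, &val);
    	if (ret)
    		return ret;

    	*adc = (val & SM5502_REG_ADC_MASK) >> SM5502_REG_ADC_SHIFT;
    	return 0;
    }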
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 12a5c135c746..4578c72c9b86 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -127,6 +127,9 @@ struct arizona_pdata {
127 /** Internal pull on GPIO5 is disabled when used for jack detection */ 127 /** Internal pull on GPIO5 is disabled when used for jack detection */
128 bool jd_gpio5_nopull; 128 bool jd_gpio5_nopull;
129 129
130 /** set to true if jackdet contact opens on insert */
131 bool jd_invert;
132
130 /** Use the headphone detect circuit to identify the accessory */ 133 /** Use the headphone detect circuit to identify the accessory */
131 bool hpdet_acc_id; 134 bool hpdet_acc_id;
132 135
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 3e050b933dd0..c466ff3e16b8 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -262,6 +262,41 @@ enum max77693_irq_source {
262 MAX77693_IRQ_GROUP_NR, 262 MAX77693_IRQ_GROUP_NR,
263}; 263};
264 264
265#define LED_IRQ_FLED2_OPEN BIT(0)
266#define LED_IRQ_FLED2_SHORT BIT(1)
267#define LED_IRQ_FLED1_OPEN BIT(2)
268#define LED_IRQ_FLED1_SHORT BIT(3)
269#define LED_IRQ_MAX_FLASH BIT(4)
270
271#define TOPSYS_IRQ_T120C_INT BIT(0)
272#define TOPSYS_IRQ_T140C_INT BIT(1)
273#define TOPSYS_IRQ_LOWSYS_INT BIT(3)
274
275#define CHG_IRQ_BYP_I BIT(0)
276#define CHG_IRQ_THM_I BIT(2)
277#define CHG_IRQ_BAT_I BIT(3)
278#define CHG_IRQ_CHG_I BIT(4)
279#define CHG_IRQ_CHGIN_I BIT(6)
280
281#define MUIC_IRQ_INT1_ADC BIT(0)
282#define MUIC_IRQ_INT1_ADC_LOW BIT(1)
283#define MUIC_IRQ_INT1_ADC_ERR BIT(2)
284#define MUIC_IRQ_INT1_ADC1K BIT(3)
285
286#define MUIC_IRQ_INT2_CHGTYP BIT(0)
287#define MUIC_IRQ_INT2_CHGDETREUN BIT(1)
288#define MUIC_IRQ_INT2_DCDTMR BIT(2)
289#define MUIC_IRQ_INT2_DXOVP BIT(3)
290#define MUIC_IRQ_INT2_VBVOLT BIT(4)
291#define MUIC_IRQ_INT2_VIDRM BIT(5)
292
293#define MUIC_IRQ_INT3_EOC BIT(0)
294#define MUIC_IRQ_INT3_CGMBC BIT(1)
295#define MUIC_IRQ_INT3_OVP BIT(2)
296#define MUIC_IRQ_INT3_MBCCHG_ERR BIT(3)
297#define MUIC_IRQ_INT3_CHG_ENABLED BIT(4)
298#define MUIC_IRQ_INT3_BAT_DET BIT(5)
299
265enum max77693_irq { 300enum max77693_irq {
266 /* PMIC - FLASH */ 301 /* PMIC - FLASH */
267 MAX77693_LED_IRQ_FLED2_OPEN, 302 MAX77693_LED_IRQ_FLED2_OPEN,
@@ -282,6 +317,10 @@ enum max77693_irq {
282 MAX77693_CHG_IRQ_CHG_I, 317 MAX77693_CHG_IRQ_CHG_I,
283 MAX77693_CHG_IRQ_CHGIN_I, 318 MAX77693_CHG_IRQ_CHGIN_I,
284 319
320 MAX77693_IRQ_NR,
321};
322
323enum max77693_irq_muic {
285 /* MUIC INT1 */ 324 /* MUIC INT1 */
286 MAX77693_MUIC_IRQ_INT1_ADC, 325 MAX77693_MUIC_IRQ_INT1_ADC,
287 MAX77693_MUIC_IRQ_INT1_ADC_LOW, 326 MAX77693_MUIC_IRQ_INT1_ADC_LOW,
@@ -304,7 +343,7 @@ enum max77693_irq {
304 MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, 343 MAX77693_MUIC_IRQ_INT3_CHG_ENABLED,
305 MAX77693_MUIC_IRQ_INT3_BAT_DET, 344 MAX77693_MUIC_IRQ_INT3_BAT_DET,
306 345
307 MAX77693_IRQ_NR, 346 MAX77693_MUIC_IRQ_NR,
308}; 347};
309 348
310struct max77693_dev { 349struct max77693_dev {
@@ -319,7 +358,10 @@ struct max77693_dev {
319 struct regmap *regmap_muic; 358 struct regmap *regmap_muic;
320 struct regmap *regmap_haptic; 359 struct regmap *regmap_haptic;
321 360
322 struct irq_domain *irq_domain; 361 struct regmap_irq_chip_data *irq_data_led;
362 struct regmap_irq_chip_data *irq_data_topsys;
363 struct regmap_irq_chip_data *irq_data_charger;
364 struct regmap_irq_chip_data *irq_data_muic;
323 365
324 int irq; 366 int irq;
325 int irq_gpio; 367 int irq_gpio;
@@ -332,14 +374,6 @@ enum max77693_types {
332 TYPE_MAX77693, 374 TYPE_MAX77693,
333}; 375};
334 376
335extern int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest);
336extern int max77693_bulk_read(struct regmap *map, u8 reg, int count,
337 u8 *buf);
338extern int max77693_write_reg(struct regmap *map, u8 reg, u8 value);
339extern int max77693_bulk_write(struct regmap *map, u8 reg, int count,
340 u8 *buf);
341extern int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask);
342
343extern int max77693_irq_init(struct max77693_dev *max77686); 377extern int max77693_irq_init(struct max77693_dev *max77686);
344extern void max77693_irq_exit(struct max77693_dev *max77686); 378extern void max77693_irq_exit(struct max77693_dev *max77686);
345extern int max77693_irq_resume(struct max77693_dev *max77686); 379extern int max77693_irq_resume(struct max77693_dev *max77686);
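
The max77693-private.h change drops the max77693_read_reg()/write_reg() wrappers and the private irq_domain in favour of plain regmap calls plus per-block regmap_irq_chip_data; sub-drivers then resolve a Linux IRQ number with regmap_irq_get_virq(). A hedged sketch of what a consumer might look like afterwards (the register offset and function are hypothetical, not the actual extcon/charger code):

    #include <linux/regmap.h>
    #include <linux/mfd/max77693-private.h>

    #define EXAMPLE_MUIC_REG_STATUS	0x04	/* hypothetical register offset */

    static int example_init_muic(struct max77693_dev *max77693)
    {
    	unsigned int status;
    	int virq, ret;

    	/* plain regmap access replaces the removed max77693_read_reg() */
    	ret = regmap_read(max77693->regmap_muic, EXAMPLE_MUIC_REG_STATUS, &status);
    	if (ret)
    		return ret;

    	/* map a chip-local interrupt source to a Linux IRQ number */
    	virq = regmap_irq_get_virq(max77693->irq_data_muic,
    				   MAX77693_MUIC_IRQ_INT1_ADC);
    	return virq > 0 ? 0 : -EINVAL;
    }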
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
new file mode 100644
index 000000000000..d5b5f76d57ef
--- /dev/null
+++ b/include/linux/mic_bus.h
@@ -0,0 +1,110 @@
1/*
2 * Intel MIC Platform Software Stack (MPSS)
3 *
4 * Copyright(c) 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Intel MIC Bus driver.
19 *
20 * This implementation is very similar to the the virtio bus driver
21 * implementation @ include/linux/virtio.h.
22 */
23#ifndef _MIC_BUS_H_
24#define _MIC_BUS_H_
25/*
26 * Everything a mbus driver needs to work with any particular mbus
27 * implementation.
28 */
29#include <linux/interrupt.h>
30#include <linux/dma-mapping.h>
31
32struct mbus_device_id {
33 __u32 device;
34 __u32 vendor;
35};
36
37#define MBUS_DEV_DMA_HOST 2
38#define MBUS_DEV_DMA_MIC 3
39#define MBUS_DEV_ANY_ID 0xffffffff
40
41/**
42 * mbus_device - representation of a device using mbus
43 * @mmio_va: virtual address of mmio space
44 * @hw_ops: the hardware ops supported by this device.
45 * @id: the device type identification (used to match it with a driver).
46 * @dev: underlying device.
47 * be used to communicate with.
48 * @index: unique position on the mbus bus
49 */
50struct mbus_device {
51 void __iomem *mmio_va;
52 struct mbus_hw_ops *hw_ops;
53 struct mbus_device_id id;
54 struct device dev;
55 int index;
56};
57
58/**
59 * mbus_driver - operations for a mbus I/O driver
60 * @driver: underlying device driver (populate name and owner).
61 * @id_table: the ids serviced by this driver.
62 * @probe: the function to call when a device is found. Returns 0 or -errno.
63 * @remove: the function to call when a device is removed.
64 */
65struct mbus_driver {
66 struct device_driver driver;
67 const struct mbus_device_id *id_table;
68 int (*probe)(struct mbus_device *dev);
69 void (*scan)(struct mbus_device *dev);
70 void (*remove)(struct mbus_device *dev);
71};
72
73/**
74 * struct mic_irq - opaque pointer used as cookie
75 */
76struct mic_irq;
77
78/**
79 * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus.
80 */
81struct mbus_hw_ops {
82 struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev,
83 irq_handler_t handler,
84 irq_handler_t thread_fn,
85 const char *name, void *data,
86 int intr_src);
87 void (*free_irq)(struct mbus_device *mbdev,
88 struct mic_irq *cookie, void *data);
89 void (*ack_interrupt)(struct mbus_device *mbdev, int num);
90};
91
92struct mbus_device *
93mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
94 struct mbus_hw_ops *hw_ops, void __iomem *mmio_va);
95void mbus_unregister_device(struct mbus_device *mbdev);
96
97int mbus_register_driver(struct mbus_driver *drv);
98void mbus_unregister_driver(struct mbus_driver *drv);
99
100static inline struct mbus_device *dev_to_mbus(struct device *_dev)
101{
102 return container_of(_dev, struct mbus_device, dev);
103}
104
105static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv)
106{
107 return container_of(drv, struct mbus_driver, driver);
108}
109
110#endif /* _MIC_BUS_H */
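
As its header comment says, the new mic_bus.h mirrors the virtio bus model: a device exposes mbus_hw_ops plus an ID, and a client registers an mbus_driver whose probe() is matched against the id_table. A hedged sketch of a minimal client driver against this header (names are illustrative; the real consumer in this pull is the MIC X100 DMA driver):

    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/mic_bus.h>

    static const struct mbus_device_id example_id_table[] = {
    	{ MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID },
    	{ 0 },
    };

    static int example_probe(struct mbus_device *mbdev)
    {
    	/* mbdev->mmio_va and mbdev->hw_ops are ready to use here */
    	dev_info(&mbdev->dev, "mbus device %d bound\n", mbdev->index);
    	return 0;
    }

    static void example_remove(struct mbus_device *mbdev)
    {
    	dev_info(&mbdev->dev, "mbus device removed\n");
    }

    static struct mbus_driver example_driver = {
    	.driver = {
    		.name  = "example_mbus_client",
    		.owner = THIS_MODULE,
    	},
    	.id_table = example_id_table,
    	.probe = example_probe,
    	.remove = example_remove,
    };

    module_driver(example_driver, mbus_register_driver, mbus_unregister_driver);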
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 6ed3647b38df..61978a460841 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1477,8 +1477,9 @@ enum pci_fixup_pass {
1477 pci_fixup_final, /* Final phase of device fixups */ 1477 pci_fixup_final, /* Final phase of device fixups */
1478 pci_fixup_enable, /* pci_enable_device() time */ 1478 pci_fixup_enable, /* pci_enable_device() time */
1479 pci_fixup_resume, /* pci_device_resume() */ 1479 pci_fixup_resume, /* pci_device_resume() */
1480 pci_fixup_suspend, /* pci_device_suspend */ 1480 pci_fixup_suspend, /* pci_device_suspend() */
1481 pci_fixup_resume_early, /* pci_device_resume_early() */ 1481 pci_fixup_resume_early, /* pci_device_resume_early() */
1482 pci_fixup_suspend_late, /* pci_device_suspend_late() */
1482}; 1483};
1483 1484
1484/* Anonymous variables would be nice... */ 1485/* Anonymous variables would be nice... */
@@ -1519,6 +1520,11 @@ enum pci_fixup_pass {
1519 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1520 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1520 suspend##hook, vendor, device, class, \ 1521 suspend##hook, vendor, device, class, \
1521 class_shift, hook) 1522 class_shift, hook)
1523#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
1524 class_shift, hook) \
1525 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1526 suspend_late##hook, vendor, device, \
1527 class, class_shift, hook)
1522 1528
1523#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 1529#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1524 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1530 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
@@ -1544,6 +1550,10 @@ enum pci_fixup_pass {
1544 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1550 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1545 suspend##hook, vendor, device, \ 1551 suspend##hook, vendor, device, \
1546 PCI_ANY_ID, 0, hook) 1552 PCI_ANY_ID, 0, hook)
1553#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
1554 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1555 suspend_late##hook, vendor, device, \
1556 PCI_ANY_ID, 0, hook)
1547 1557
1548#ifdef CONFIG_PCI_QUIRKS 1558#ifdef CONFIG_PCI_QUIRKS
1549void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 1559void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
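
The pci.h hunk, together with the vmlinux.lds.h section added earlier, introduces a pci_fixup_suspend_late pass and matching DECLARE_PCI_FIXUP_SUSPEND_LATE macros. A driver-side quirk using the new pass would look roughly like the sketch below; the vendor/device IDs and body are made up:

    #include <linux/pci.h>

    /* Hypothetical quirk run late in suspend for a made-up device 1234:5678. */
    static void quirk_example_suspend_late(struct pci_dev *dev)
    {
    	dev_info(&dev->dev, "applying late-suspend workaround\n");
    }
    DECLARE_PCI_FIXUP_SUSPEND_LATE(0x1234, 0x5678, quirk_example_suspend_late);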
diff --git a/include/uapi/linux/genwqe/genwqe_card.h b/include/uapi/linux/genwqe/genwqe_card.h
index 795e957bb840..4fc065f29255 100644
--- a/include/uapi/linux/genwqe/genwqe_card.h
+++ b/include/uapi/linux/genwqe/genwqe_card.h
@@ -328,6 +328,7 @@ enum genwqe_card_state {
328 GENWQE_CARD_UNUSED = 0, 328 GENWQE_CARD_UNUSED = 0,
329 GENWQE_CARD_USED = 1, 329 GENWQE_CARD_USED = 1,
330 GENWQE_CARD_FATAL_ERROR = 2, 330 GENWQE_CARD_FATAL_ERROR = 2,
331 GENWQE_CARD_RELOAD_BITSTREAM = 3,
331 GENWQE_CARD_STATE_MAX, 332 GENWQE_CARD_STATE_MAX,
332}; 333};
333 334
diff --git a/include/uapi/linux/i8k.h b/include/uapi/linux/i8k.h
index 1c45ba505115..133d02f03c25 100644
--- a/include/uapi/linux/i8k.h
+++ b/include/uapi/linux/i8k.h
@@ -34,7 +34,8 @@
34#define I8K_FAN_OFF 0 34#define I8K_FAN_OFF 0
35#define I8K_FAN_LOW 1 35#define I8K_FAN_LOW 1
36#define I8K_FAN_HIGH 2 36#define I8K_FAN_HIGH 2
37#define I8K_FAN_MAX I8K_FAN_HIGH 37#define I8K_FAN_TURBO 3
38#define I8K_FAN_MAX I8K_FAN_TURBO
38 39
39#define I8K_VOL_UP 1 40#define I8K_VOL_UP 1
40#define I8K_VOL_DOWN 2 41#define I8K_VOL_DOWN 2
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index fba1c75aa484..8f96b3ee0724 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -88,7 +88,8 @@ static int hv_start_fcopy(struct hv_start_fcopy *smsg)
88 } 88 }
89 } 89 }
90 90
91 target_fd = open(target_fname, O_RDWR | O_CREAT | O_CLOEXEC, 0744); 91 target_fd = open(target_fname,
92 O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, 0744);
92 if (target_fd == -1) { 93 if (target_fd == -1) {
93 syslog(LOG_INFO, "Open Failed: %s", strerror(errno)); 94 syslog(LOG_INFO, "Open Failed: %s", strerror(errno));
94 goto done; 95 goto done;
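
The hv_fcopy change adds O_TRUNC so that copying over an existing, longer file does not leave stale bytes past the new end of data. A small userspace illustration of the flag (the path is made up):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Without O_TRUNC an existing longer file keeps its old tail;
    	 * with O_TRUNC it is cut to zero length before writing. */
    	int fd = open("/tmp/fcopy_example",
    		      O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, 0744);

    	if (fd < 0)
    		return 1;
    	write(fd, "new contents\n", 13);
    	close(fd);
    	return 0;
    }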
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index e66e710cc595..4c2aa357e12f 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -12,6 +12,9 @@ TARGETS += powerpc
12TARGETS += user 12TARGETS += user
13TARGETS += sysctl 13TARGETS += sysctl
14 14
15TARGETS_HOTPLUG = cpu-hotplug
16TARGETS_HOTPLUG += memory-hotplug
17
15all: 18all:
16 for TARGET in $(TARGETS); do \ 19 for TARGET in $(TARGETS); do \
17 make -C $$TARGET; \ 20 make -C $$TARGET; \
@@ -22,6 +25,21 @@ run_tests: all
22 make -C $$TARGET run_tests; \ 25 make -C $$TARGET run_tests; \
23 done; 26 done;
24 27
28hotplug:
29 for TARGET in $(TARGETS_HOTPLUG); do \
30 make -C $$TARGET; \
31 done;
32
33run_hotplug: hotplug
34 for TARGET in $(TARGETS_HOTPLUG); do \
35 make -C $$TARGET run_full_test; \
36 done;
37
38clean_hotplug:
39 for TARGET in $(TARGETS_HOTPLUG); do \
40 make -C $$TARGET clean; \
41 done;
42
25clean: 43clean:
26 for TARGET in $(TARGETS); do \ 44 for TARGET in $(TARGETS); do \
27 make -C $$TARGET clean; \ 45 make -C $$TARGET clean; \
diff --git a/tools/testing/selftests/README.txt b/tools/testing/selftests/README.txt
index 5e2faf9c55d3..2660d5ff9179 100644
--- a/tools/testing/selftests/README.txt
+++ b/tools/testing/selftests/README.txt
@@ -4,8 +4,15 @@ The kernel contains a set of "self tests" under the tools/testing/selftests/
4directory. These are intended to be small unit tests to exercise individual 4directory. These are intended to be small unit tests to exercise individual
5code paths in the kernel. 5code paths in the kernel.
6 6
7Running the selftests 7On some systems, hot-plug tests could hang forever waiting for cpu and
8===================== 8memory to be ready to be offlined. A special hot-plug target is created
9to run full range of hot-plug tests. In default mode, hot-plug tests run
10in safe mode with a limited scope. In limited mode, cpu-hotplug test is
11run on a single cpu as opposed to all hotplug capable cpus, and memory
12hotplug test is run on 2% of hotplug capable memory instead of 10%.
13
14Running the selftests (hotplug tests are run in limited mode)
15=============================================================
9 16
10To build the tests: 17To build the tests:
11 18
@@ -18,14 +25,26 @@ To run the tests:
18 25
19- note that some tests will require root privileges. 26- note that some tests will require root privileges.
20 27
21 28To run only tests targeted for a single subsystem: (including
22To run only tests targetted for a single subsystem: 29hotplug targets in limited mode)
23 30
24 $ make -C tools/testing/selftests TARGETS=cpu-hotplug run_tests 31 $ make -C tools/testing/selftests TARGETS=cpu-hotplug run_tests
25 32
26See the top-level tools/testing/selftests/Makefile for the list of all possible 33See the top-level tools/testing/selftests/Makefile for the list of all possible
27targets. 34targets.
28 35
36Running the full range hotplug selftests
37========================================
38
39To build the tests:
40
41 $ make -C tools/testing/selftests hotplug
42
43To run the tests:
44
45 $ make -C tools/testing/selftests run_hotplug
46
47- note that some tests will require root privileges.
29 48
30Contributing new tests 49Contributing new tests
31====================== 50======================
diff --git a/tools/testing/selftests/cpu-hotplug/Makefile b/tools/testing/selftests/cpu-hotplug/Makefile
index 790c23a9db44..e9c28d8dc84b 100644
--- a/tools/testing/selftests/cpu-hotplug/Makefile
+++ b/tools/testing/selftests/cpu-hotplug/Makefile
@@ -3,4 +3,7 @@ all:
3run_tests: 3run_tests:
4 @/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]" 4 @/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
5 5
6run_full_test:
7 @/bin/bash ./on-off-test.sh -a || echo "cpu-hotplug selftests: [FAIL]"
8
6clean: 9clean:
diff --git a/tools/testing/selftests/cpu-hotplug/on-off-test.sh b/tools/testing/selftests/cpu-hotplug/on-off-test.sh
index bdde7cf428bb..98b1d6565f2c 100644
--- a/tools/testing/selftests/cpu-hotplug/on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/on-off-test.sh
@@ -11,6 +11,8 @@ prerequisite()
11 exit 0 11 exit 0
12 fi 12 fi
13 13
14 taskset -p 01 $$
15
14 SYSFS=`mount -t sysfs | head -1 | awk '{ print $3 }'` 16 SYSFS=`mount -t sysfs | head -1 | awk '{ print $3 }'`
15 17
16 if [ ! -d "$SYSFS" ]; then 18 if [ ! -d "$SYSFS" ]; then
@@ -22,6 +24,19 @@ prerequisite()
22 echo $msg cpu hotplug is not supported >&2 24 echo $msg cpu hotplug is not supported >&2
23 exit 0 25 exit 0
24 fi 26 fi
27
28 echo "CPU online/offline summary:"
29 online_cpus=`cat $SYSFS/devices/system/cpu/online`
30 online_max=${online_cpus##*-}
31 echo -e "\t Cpus in online state: $online_cpus"
32
33 offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
34 if [[ "a$offline_cpus" = "a" ]]; then
35 offline_cpus=0
36 else
37 offline_max=${offline_cpus##*-}
38 fi
39 echo -e "\t Cpus in offline state: $offline_cpus"
25} 40}
26 41
27# 42#
@@ -113,15 +128,25 @@ offline_cpu_expect_fail()
 }
 
 error=-12
+allcpus=0
 priority=0
+online_cpus=0
+online_max=0
+offline_cpus=0
+offline_max=0
 
-while getopts e:hp: opt; do
+while getopts e:ahp: opt; do
 	case $opt in
 	e)
 		error=$OPTARG
 		;;
+	a)
+		allcpus=1
+		;;
 	h)
-		echo "Usage $0 [ -e errno ] [ -p notifier-priority ]"
+		echo "Usage $0 [ -a ] [ -e errno ] [ -p notifier-priority ]"
+		echo -e "\t default offline one cpu"
+		echo -e "\t run with -a option to offline all cpus"
 		exit
 		;;
 	p)
@@ -138,6 +163,29 @@ fi
 prerequisite
 
 #
+# Safe test (default) - offline and online one cpu
+#
+if [ $allcpus -eq 0 ]; then
+	echo "Limited scope test: one hotplug cpu"
+	echo -e "\t (leaves cpu in the original state):"
+	echo -e "\t online to offline to online: cpu $online_max"
+	offline_cpu_expect_success $online_max
+	online_cpu_expect_success $online_max
+
+	if [[ $offline_cpus -gt 0 ]]; then
+		echo -e "\t offline to online to offline: cpu $offline_max"
+		online_cpu_expect_success $offline_max
+		offline_cpu_expect_success $offline_max
+	fi
+	exit 0
+else
+	echo "Full scope test: all hotplug cpus"
+	echo -e "\t online all offline cpus"
+	echo -e "\t offline all online cpus"
+	echo -e "\t online all offline cpus"
+fi
+
+#
 # Online all hot-pluggable CPUs
 #
 for cpu in `hotplaggable_offline_cpus`; do
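The prerequisite() additions above select the test cpu with a ${var##*-} expansion, which strips everything up to the last '-' from the range strings sysfs reports (e.g. 0-3 or 0,2-5). A standalone sketch of that behaviour, using made-up values:

  online_cpus="0-7"                # e.g. contents of /sys/devices/system/cpu/online
  online_max=${online_cpus##*-}    # -> 7, the cpu the limited test will cycle

  offline_cpus="2,4-5"             # lists may mix single cpus and ranges
  offline_max=${offline_cpus##*-}  # -> 5; if no '-' is present the string is returned unchanged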
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
index fa4f1b37e045..dbba4084869c 100644
--- a/tools/testing/selftests/kcmp/kcmp_test.c
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -81,7 +81,7 @@ int main(int argc, char **argv)
 	/* Compare with self */
 	ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
 	if (ret) {
-		printf("FAIL: 0 expected but %li returned (%s)\n",
+		printf("FAIL: 0 expected but %d returned (%s)\n",
 			ret, strerror(errno));
 		ret = -1;
 	} else
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile
index 058c76f5d102..d46b8d489cd2 100644
--- a/tools/testing/selftests/memory-hotplug/Makefile
+++ b/tools/testing/selftests/memory-hotplug/Makefile
@@ -1,6 +1,9 @@
 all:
 
 run_tests:
+	@/bin/bash ./on-off-test.sh -r 2 || echo "memory-hotplug selftests: [FAIL]"
+
+run_full_test:
 	@/bin/bash ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
 
 clean:
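Here the default run_tests target passes -r 2, so the routine run touches only about 2% of hotplug-capable memory (matching the documentation change above), while run_full_test keeps the script's default 10% ratio. A sketch of the equivalent direct invocations, assuming -r selects the percentage as it is used here:

  cd tools/testing/selftests/memory-hotplug
  ./on-off-test.sh -r 2   # limited: offline roughly 2% of hotplug memory
  ./on-off-test.sh        # full test: the script's default ratio (10%)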
diff --git a/tools/testing/selftests/memory-hotplug/on-off-test.sh b/tools/testing/selftests/memory-hotplug/on-off-test.sh
index a2816f631542..6cddde0b96f8 100644
--- a/tools/testing/selftests/memory-hotplug/on-off-test.sh
+++ b/tools/testing/selftests/memory-hotplug/on-off-test.sh
@@ -142,10 +142,16 @@ fi
 
 prerequisite
 
+echo "Test scope: $ratio% hotplug memory"
+echo -e "\t online all hotplug memory in offline state"
+echo -e "\t offline $ratio% hotplug memory in online state"
+echo -e "\t online all hotplug memory in offline state"
+
 #
 # Online all hot-pluggable memory
 #
 for memory in `hotplaggable_offline_memory`; do
+	echo offline-online $memory
 	online_memory_expect_success $memory
 done
 
@@ -154,6 +160,7 @@ done
 #
 for memory in `hotpluggable_online_memory`; do
 	if [ $((RANDOM % 100)) -lt $ratio ]; then
+		echo online-offline $memory
 		offline_memory_expect_success $memory
 	fi
 done
@@ -162,6 +169,7 @@ done
 # Online all hot-pluggable memory again
 #
 for memory in `hotplaggable_offline_memory`; do
+	echo offline-online $memory
 	online_memory_expect_success $memory
 done
 
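The offline pass keeps its existing $((RANDOM % 100)) -lt $ratio test, so each online memory block is offlined with probability ratio/100; the new echo lines only report which blocks were picked. A standalone sketch of that sampling pattern, with hypothetical block names:

	ratio=2
	for memory in memory32 memory33 memory34; do    # hypothetical sysfs block names
		if [ $((RANDOM % 100)) -lt $ratio ]; then
			echo "would offline $memory"    # each block is picked with ~2% probability
		fi
	done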
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
index 218a122c7951..8056e2e68fa4 100644
--- a/tools/testing/selftests/mqueue/Makefile
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -1,6 +1,6 @@
 all:
-	gcc -O2 -lrt mq_open_tests.c -o mq_open_tests
-	gcc -O2 -lrt -lpthread -lpopt -o mq_perf_tests mq_perf_tests.c
+	gcc -O2 mq_open_tests.c -o mq_open_tests -lrt
+	gcc -O2 -o mq_perf_tests mq_perf_tests.c -lrt -lpthread -lpopt
 
 run_tests:
 	@./mq_open_tests /test1 || echo "mq_open_tests: [FAIL]"
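The Makefile change above only reorders the compiler command line: -lrt, -lpthread and -lpopt now follow the source files. Linkers resolve libraries left to right (and may drop unreferenced ones when invoked with --as-needed), so a library named before the objects that use it can leave symbols such as mq_open unresolved. A minimal sketch of the failing versus working order:

  # May fail to link: the library is scanned before any undefined references exist
  gcc -O2 -lrt mq_open_tests.c -o mq_open_tests

  # Links reliably: objects first, then the libraries they depend on
  gcc -O2 mq_open_tests.c -o mq_open_tests -lrt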
diff --git a/tools/testing/selftests/mqueue/mq_open_tests.c b/tools/testing/selftests/mqueue/mq_open_tests.c
index 711cc2923047..9c1a5d359055 100644
--- a/tools/testing/selftests/mqueue/mq_open_tests.c
+++ b/tools/testing/selftests/mqueue/mq_open_tests.c
@@ -80,7 +80,8 @@ void shutdown(int exit_val, char *err_cause, int line_no)
 	if (in_shutdown++)
 		return;
 
-	seteuid(0);
+	if (seteuid(0) == -1)
+		perror("seteuid() failed");
 
 	if (queue != -1)
 		if (mq_close(queue))
@@ -292,8 +293,10 @@ int main(int argc, char *argv[])
 	/* Tell the user our initial state */
 	printf("\nInitial system state:\n");
 	printf("\tUsing queue path:\t\t%s\n", queue_path);
-	printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", saved_limits.rlim_cur);
-	printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", saved_limits.rlim_max);
+	printf("\tRLIMIT_MSGQUEUE(soft):\t\t%ld\n",
+		(long) saved_limits.rlim_cur);
+	printf("\tRLIMIT_MSGQUEUE(hard):\t\t%ld\n",
+		(long) saved_limits.rlim_max);
 	printf("\tMaximum Message Size:\t\t%d\n", saved_max_msgsize);
 	printf("\tMaximum Queue Size:\t\t%d\n", saved_max_msgs);
 	if (default_settings) {
@@ -308,8 +311,8 @@ int main(int argc, char *argv[])
 		validate_current_settings();
 
 	printf("Adjusted system state for testing:\n");
-	printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", cur_limits.rlim_cur);
-	printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", cur_limits.rlim_max);
+	printf("\tRLIMIT_MSGQUEUE(soft):\t\t%ld\n", (long) cur_limits.rlim_cur);
+	printf("\tRLIMIT_MSGQUEUE(hard):\t\t%ld\n", (long) cur_limits.rlim_max);
 	printf("\tMaximum Message Size:\t\t%d\n", cur_max_msgsize);
 	printf("\tMaximum Queue Size:\t\t%d\n", cur_max_msgs);
 	if (default_settings) {
@@ -454,7 +457,12 @@ int main(int argc, char *argv[])
 	else
 		printf("Queue open with total size > 2GB when euid = 0 "
 			"failed:\t\t\tPASS\n");
-	seteuid(99);
+
+	if (seteuid(99) == -1) {
+		perror("seteuid() failed");
+		exit(1);
+	}
+
 	attr.mq_maxmsg = cur_max_msgs;
 	attr.mq_msgsize = cur_max_msgsize;
 	if (test_queue_fail(&attr, &result))
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
index 2fadd4b97045..94dae65eea41 100644
--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -296,9 +296,9 @@ static inline void open_queue(struct mq_attr *attr)
296 printf("\n\tQueue %s created:\n", queue_path); 296 printf("\n\tQueue %s created:\n", queue_path);
297 printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ? 297 printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ?
298 "O_NONBLOCK" : "(null)"); 298 "O_NONBLOCK" : "(null)");
299 printf("\t\tmq_maxmsg:\t\t\t%d\n", result.mq_maxmsg); 299 printf("\t\tmq_maxmsg:\t\t\t%lu\n", result.mq_maxmsg);
300 printf("\t\tmq_msgsize:\t\t\t%d\n", result.mq_msgsize); 300 printf("\t\tmq_msgsize:\t\t\t%lu\n", result.mq_msgsize);
301 printf("\t\tmq_curmsgs:\t\t\t%d\n", result.mq_curmsgs); 301 printf("\t\tmq_curmsgs:\t\t\t%lu\n", result.mq_curmsgs);
302} 302}
303 303
304void *fake_cont_thread(void *arg) 304void *fake_cont_thread(void *arg)
@@ -440,7 +440,7 @@ void *perf_test_thread(void *arg)
 		shutdown(2, "clock_getres()", __LINE__);
 
 	printf("\t\tMax priorities:\t\t\t%d\n", mq_prio_max);
-	printf("\t\tClock resolution:\t\t%d nsec%s\n", res.tv_nsec,
+	printf("\t\tClock resolution:\t\t%lu nsec%s\n", res.tv_nsec,
 		res.tv_nsec > 1 ? "s" : "");
 
 
@@ -454,20 +454,20 @@ void *perf_test_thread(void *arg)
 	recv_total.tv_nsec = 0;
 	for (i = 0; i < TEST1_LOOPS; i++)
 		do_send_recv();
-	printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+	printf("\t\tSend msg:\t\t\t%ld.%lus total time\n",
 		send_total.tv_sec, send_total.tv_nsec);
 	nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
 		send_total.tv_nsec) / TEST1_LOOPS;
-	printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
-	printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+	printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
+	printf("\t\tRecv msg:\t\t\t%ld.%lus total time\n",
 		recv_total.tv_sec, recv_total.tv_nsec);
 	nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
 		recv_total.tv_nsec) / TEST1_LOOPS;
-	printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+	printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
 
 
 	for (cur_test = test2; cur_test->desc != NULL; cur_test++) {
-		printf(cur_test->desc);
+		printf("%s:\n", cur_test->desc);
 		printf("\t\t(%d iterations)\n", TEST2_LOOPS);
 		prio_out = 0;
 		send_total.tv_sec = 0;
@@ -493,16 +493,16 @@ void *perf_test_thread(void *arg)
 			cur_test->func(&prio_out);
 		}
 		printf("done.\n");
-		printf("\t\tSend msg:\t\t\t%d.%ds total time\n",
+		printf("\t\tSend msg:\t\t\t%ld.%lus total time\n",
 			send_total.tv_sec, send_total.tv_nsec);
 		nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
 			send_total.tv_nsec) / TEST2_LOOPS;
-		printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
-		printf("\t\tRecv msg:\t\t\t%d.%ds total time\n",
+		printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
+		printf("\t\tRecv msg:\t\t\t%ld.%lus total time\n",
 			recv_total.tv_sec, recv_total.tv_nsec);
 		nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
 			recv_total.tv_nsec) / TEST2_LOOPS;
-		printf("\t\t\t\t\t\t%d nsec/msg\n", nsec);
+		printf("\t\t\t\t\t\t%lld nsec/msg\n", nsec);
 		printf("\t\tDraining queue...");
 		fflush(stdout);
 		clock_gettime(clock, &start);
@@ -653,8 +653,10 @@ int main(int argc, char *argv[])
 	/* Tell the user our initial state */
 	printf("\nInitial system state:\n");
 	printf("\tUsing queue path:\t\t\t%s\n", queue_path);
-	printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n", saved_limits.rlim_cur);
-	printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n", saved_limits.rlim_max);
+	printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n",
+		(long) saved_limits.rlim_cur);
+	printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n",
+		(long) saved_limits.rlim_max);
 	printf("\tMaximum Message Size:\t\t\t%d\n", saved_max_msgsize);
 	printf("\tMaximum Queue Size:\t\t\t%d\n", saved_max_msgs);
 	printf("\tNice value:\t\t\t\t%d\n", cur_nice);
@@ -667,10 +669,10 @@ int main(int argc, char *argv[])
667 printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n"); 669 printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n");
668 printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n"); 670 printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n");
669 } else { 671 } else {
670 printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%d\n", 672 printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%ld\n",
671 cur_limits.rlim_cur); 673 (long) cur_limits.rlim_cur);
672 printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%d\n", 674 printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%ld\n",
673 cur_limits.rlim_max); 675 (long) cur_limits.rlim_max);
674 } 676 }
675 printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize); 677 printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize);
676 printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs); 678 printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs);