author		Linus Torvalds <torvalds@linux-foundation.org>	2014-01-20 18:48:19 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-20 18:48:19 -0500
commit		9f67627a0fea99b080a190d2d24cc1e2634aa2f7 (patch)
tree		24dcf714a8b502c7ef91086d9eb6164f68c7d52b
parent		82b51734b4f228c76b6064b6e899d9d3d4c17c1a (diff)
parent		6adb8efb024a7e413b93b22848fc13395b1a438a (diff)
Merge tag 'char-misc-3.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver patches from Greg KH:
 "Here's the big char/misc driver patches for 3.14-rc1. Lots of little
  things, and a new "big" driver, genwqe.

  Full details are in the shortlog"

* tag 'char-misc-3.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (90 commits)
  mei: limit the number of consecutive resets
  mei: revamp mei reset state machine
  drivers/char: don't use module_init in non-modular ttyprintk.c
  VMCI: fix error handling path when registering guest driver
  extcon: gpio: Add power resume support
  Documentation: HOWTO: Updates on subsystem trees, patchwork, -next (vs. -mm) in ko_KR
  Documentation: HOWTO: update for 2.6.x -> 3.x versioning in ko_KR
  Documentation: HOWTO: update stable address in ko_KR
  Documentation: HOWTO: update LXR web link in ko_KR
  char: nwbutton: open-code interruptible_sleep_on
  mei: fix syntax in comments and debug output
  mei: nfc: mei_nfc_free has to be called under lock
  mei: use hbm idle state to prevent spurious resets
  mei: do not run reset flow from the interrupt thread
  misc: genwqe: fix return value check in genwqe_device_create()
  GenWQE: Fix warnings for sparc
  GenWQE: Fix compile problems for Alpha
  Documentation/misc-devices/mei/mei-amt-version.c: remove unneeded call of mei_deinit()
  GenWQE: Rework return code for flash-update ioctl
  sgi-xp: open-code interruptible_sleep_on_timeout
  ...
-rw-r--r--	Documentation/ABI/testing/debugfs-driver-genwqe	91
-rw-r--r--	Documentation/ABI/testing/sysfs-driver-genwqe	62
-rw-r--r--	Documentation/HOWTO	4
-rw-r--r--	Documentation/devicetree/bindings/arm/atmel-at91.txt	8
-rw-r--r--	Documentation/devicetree/bindings/extcon/extcon-palmas.txt	6
-rw-r--r--	Documentation/devicetree/bindings/misc/atmel-ssc.txt	5
-rw-r--r--	Documentation/devicetree/bindings/misc/bmp085.txt	4
-rw-r--r--	Documentation/extcon/porting-android-switch-class	9
-rw-r--r--	Documentation/ja_JP/HOWTO	4
-rw-r--r--	Documentation/ko_KR/HOWTO	130
-rw-r--r--	Documentation/misc-devices/mei/mei-amt-version.c	2
-rw-r--r--	Documentation/zh_CN/HOWTO	4
-rw-r--r--	MAINTAINERS	3
-rw-r--r--	drivers/char/agp/amd64-agp.c	2
-rw-r--r--	drivers/char/i8k.c	358
-rw-r--r--	drivers/char/lp.c	2
-rw-r--r--	drivers/char/nwbutton.c	5
-rw-r--r--	drivers/char/ttyprintk.c	2
-rw-r--r--	drivers/extcon/Kconfig	10
-rw-r--r--	drivers/extcon/Makefile	1
-rw-r--r--	drivers/extcon/extcon-arizona.c	73
-rw-r--r--	drivers/extcon/extcon-gpio.c	32
-rw-r--r--	drivers/extcon/extcon-max14577.c	752
-rw-r--r--	drivers/extcon/extcon-palmas.c	17
-rw-r--r--	drivers/hv/hv.c	2
-rw-r--r--	drivers/misc/Kconfig	1
-rw-r--r--	drivers/misc/Makefile	1
-rw-r--r--	drivers/misc/ad525x_dpot.c	4
-rw-r--r--	drivers/misc/bmp085-i2c.c	2
-rw-r--r--	drivers/misc/bmp085-spi.c	2
-rw-r--r--	drivers/misc/bmp085.c	39
-rw-r--r--	drivers/misc/bmp085.h	2
-rw-r--r--	drivers/misc/eeprom/eeprom_93xx46.c	1
-rw-r--r--	drivers/misc/genwqe/Kconfig	13
-rw-r--r--	drivers/misc/genwqe/Makefile	7
-rw-r--r--	drivers/misc/genwqe/card_base.c	1205
-rw-r--r--	drivers/misc/genwqe/card_base.h	557
-rw-r--r--	drivers/misc/genwqe/card_ddcb.c	1376
-rw-r--r--	drivers/misc/genwqe/card_ddcb.h	188
-rw-r--r--	drivers/misc/genwqe/card_debugfs.c	500
-rw-r--r--	drivers/misc/genwqe/card_dev.c	1414
-rw-r--r--	drivers/misc/genwqe/card_sysfs.c	288
-rw-r--r--	drivers/misc/genwqe/card_utils.c	944
-rw-r--r--	drivers/misc/genwqe/genwqe_driver.h	77
-rw-r--r--	drivers/misc/lkdtm.c	7
-rw-r--r--	drivers/misc/mei/amthif.c	6
-rw-r--r--	drivers/misc/mei/client.c	27
-rw-r--r--	drivers/misc/mei/debugfs.c	4
-rw-r--r--	drivers/misc/mei/hbm.c	239
-rw-r--r--	drivers/misc/mei/hbm.h	7
-rw-r--r--	drivers/misc/mei/hw-me.c	40
-rw-r--r--	drivers/misc/mei/hw.h	3
-rw-r--r--	drivers/misc/mei/init.c	278
-rw-r--r--	drivers/misc/mei/interrupt.c	122
-rw-r--r--	drivers/misc/mei/main.c	2
-rw-r--r--	drivers/misc/mei/mei_dev.h	33
-rw-r--r--	drivers/misc/mei/nfc.c	20
-rw-r--r--	drivers/misc/mei/pci-me.c	27
-rw-r--r--	drivers/misc/mei/wd.c	1
-rw-r--r--	drivers/misc/mic/host/mic_device.h	3
-rw-r--r--	drivers/misc/mic/host/mic_main.c	2
-rw-r--r--	drivers/misc/mic/host/mic_virtio.c	2
-rw-r--r--	drivers/misc/mic/host/mic_x100.c	36
-rw-r--r--	drivers/misc/sgi-xp/xpc_channel.c	5
-rw-r--r--	drivers/misc/ti-st/st_core.c	2
-rw-r--r--	drivers/misc/ti-st/st_kim.c	1
-rw-r--r--	drivers/misc/vmw_vmci/vmci_guest.c	10
-rw-r--r--	drivers/parport/parport_pc.c	20
-rw-r--r--	drivers/pcmcia/bfin_cf_pcmcia.c	2
-rw-r--r--	drivers/pcmcia/electra_cf.c	2
-rw-r--r--	drivers/phy/Kconfig	6
-rw-r--r--	drivers/phy/Makefile	1
-rw-r--r--	drivers/phy/phy-core.c	44
-rw-r--r--	drivers/phy/phy-mvebu-sata.c	137
-rw-r--r--	drivers/uio/uio.c	2
-rw-r--r--	drivers/uio/uio_mf624.c	2
-rw-r--r--	drivers/w1/masters/mxc_w1.c	31
-rw-r--r--	include/linux/extcon/extcon-gpio.h	1
-rw-r--r--	include/linux/mfd/arizona/registers.h	9
-rw-r--r--	include/uapi/linux/genwqe/genwqe_card.h	500
-rw-r--r--	tools/hv/hv_kvp_daemon.c	1
-rw-r--r--	tools/hv/hv_vss_daemon.c	1
82 files changed, 9170 insertions, 675 deletions
diff --git a/Documentation/ABI/testing/debugfs-driver-genwqe b/Documentation/ABI/testing/debugfs-driver-genwqe
new file mode 100644
index 000000000000..1c2f25674e8c
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-driver-genwqe
@@ -0,0 +1,91 @@
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/ddcb_info
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    DDCB queue dump used for debugging queueing problems.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/curr_regs
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Dump of the current error registers.
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/curr_dbg_uid0
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Internal chip state of UID0 (unit id 0).
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/curr_dbg_uid1
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Internal chip state of UID1.
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/curr_dbg_uid2
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Internal chip state of UID2.
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/prev_regs
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Dump of the error registers before the last reset of
+                the card occured.
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid0
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Internal chip state of UID0 before card was reset.
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid1
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Internal chip state of UID1 before card was reset.
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/prev_dbg_uid2
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Internal chip state of UID2 before card was reset.
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/info
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Comprehensive summary of bitstream version and software
+                version. Used bitstream and bitstream clocking information.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/err_inject
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Possibility to inject error cases to ensure that the drivers
+                error handling code works well.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/vf<0..14>_jobtimeout_msec
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Default VF timeout 250ms. Testing might require 1000ms.
+                Using 0 will use the cards default value (whatever that is).
+
+                The timeout depends on the max number of available cards
+                in the system and the maximum allowed queue size.
+
+                The driver ensures that the settings are done just before
+                the VFs get enabled. Changing the timeouts in flight is not
+                possible.
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/jobtimer
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Dump job timeout register values for PF and VFs.
+                Only available for PF.
+
+What:           /sys/kernel/debug/genwqe/genwqe<n>_card/queue_working_time
+Date:           Dec 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Dump queue working time register values for PF and VFs.
+                Only available for PF.
diff --git a/Documentation/ABI/testing/sysfs-driver-genwqe b/Documentation/ABI/testing/sysfs-driver-genwqe
new file mode 100644
index 000000000000..1870737a1f5e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-genwqe
@@ -0,0 +1,62 @@
+What:           /sys/class/genwqe/genwqe<n>_card/version
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Unique bitstream identification e.g.
+                '0000000330336283.00000000475a4950'.
+
+What:           /sys/class/genwqe/genwqe<n>_card/appid
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Identifies the currently active card application e.g. 'GZIP'
+                for compression/decompression.
+
+What:           /sys/class/genwqe/genwqe<n>_card/type
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Type of the card e.g. 'GenWQE5-A7'.
+
+What:           /sys/class/genwqe/genwqe<n>_card/curr_bitstream
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Currently active bitstream. 1 is default, 0 is backup.
+
+What:           /sys/class/genwqe/genwqe<n>_card/next_bitstream
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Interface to set the next bitstream to be used.
+
+What:           /sys/class/genwqe/genwqe<n>_card/tempsens
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Interface to read the cards temperature sense register.
+
+What:           /sys/class/genwqe/genwqe<n>_card/freerunning_timer
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Interface to read the cards free running timer.
+                Used for performance and utilization measurements.
+
+What:           /sys/class/genwqe/genwqe<n>_card/queue_working_time
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Interface to read queue working time.
+                Used for performance and utilization measurements.
+
+What:           /sys/class/genwqe/genwqe<n>_card/state
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    State of the card: "unused", "used", "error".
+
+What:           /sys/class/genwqe/genwqe<n>_card/base_clock
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Base clock frequency of the card.
+
+What:           /sys/class/genwqe/genwqe<n>_card/device/sriov_numvfs
+Date:           Oct 2013
+Contact:        haver@linux.vnet.ibm.com
+Description:    Enable VFs (1..15):
+                  sudo sh -c 'echo 15 > \
+                    /sys/bus/pci/devices/0000\:1b\:00.0/sriov_numvfs'
+Disable VFs:
+                  Write a 0 into the same sysfs entry.
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 27faae3e3846..57cf5efb044d 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -112,7 +112,7 @@ required reading:
 
 Other excellent descriptions of how to create patches properly are:
 	"The Perfect Patch"
-		http://kerneltrap.org/node/3737
+		http://www.ozlabs.org/~akpm/stuff/tpp.txt
 	"Linux kernel patch submission format"
 		http://linux.yyz.us/patch-format.html
 
@@ -579,7 +579,7 @@ all time. It should describe the patch completely, containing:
 For more details on what this should all look like, please see the
 ChangeLog section of the document:
 	"The Perfect Patch"
-		http://userweb.kernel.org/~akpm/stuff/tpp.txt
+		http://www.ozlabs.org/~akpm/stuff/tpp.txt
 
 
 
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index 1196290082d1..78530e621a1e 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -20,6 +20,10 @@ TC/TCLIB Timer required properties:
 - interrupts: Should contain all interrupts for the TC block
 	Note that you can specify several interrupt cells if the TC
 	block has one interrupt per channel.
+- clock-names: tuple listing input clock names.
+	Required elements: "t0_clk"
+	Optional elements: "t1_clk", "t2_clk"
+- clocks: phandles to input clocks.
 
24Examples: 28Examples:
25 29
@@ -28,6 +32,8 @@ One interrupt per TC block:
 		compatible = "atmel,at91rm9200-tcb";
 		reg = <0xfff7c000 0x100>;
 		interrupts = <18 4>;
+		clocks = <&tcb0_clk>;
+		clock-names = "t0_clk";
 	};
 
 One interrupt per TC channel in a TC block:
@@ -35,6 +41,8 @@ One interrupt per TC channel in a TC block:
 		compatible = "atmel,at91rm9200-tcb";
 		reg = <0xfffdc000 0x100>;
 		interrupts = <26 4 27 4 28 4>;
+		clocks = <&tcb1_clk>;
+		clock-names = "t0_clk";
 	};
 
 RSTC Reset Controller required properties:
diff --git a/Documentation/devicetree/bindings/extcon/extcon-palmas.txt b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
index 7dab6a8f4a0e..45414bbcd945 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
@@ -2,7 +2,11 @@ EXTCON FOR PALMAS/TWL CHIPS
 
 PALMAS USB COMPARATOR
 Required Properties:
- - compatible : Should be "ti,palmas-usb" or "ti,twl6035-usb"
+ - compatible: should contain one of:
+   * "ti,palmas-usb-vid".
+   * "ti,twl6035-usb-vid".
+   * "ti,palmas-usb" (DEPRECATED - use "ti,palmas-usb-vid").
+   * "ti,twl6035-usb" (DEPRECATED - use "ti,twl6035-usb-vid").
 
 Optional Properties:
  - ti,wakeup : To enable the wakeup comparator in probe
diff --git a/Documentation/devicetree/bindings/misc/atmel-ssc.txt b/Documentation/devicetree/bindings/misc/atmel-ssc.txt
index a45ae08c8ed1..60960b2755f4 100644
--- a/Documentation/devicetree/bindings/misc/atmel-ssc.txt
+++ b/Documentation/devicetree/bindings/misc/atmel-ssc.txt
@@ -6,6 +6,9 @@ Required properties:
 	- atmel,at91sam9g45-ssc: support dma transfer
 - reg: Should contain SSC registers location and length
 - interrupts: Should contain SSC interrupt
+- clock-names: tuple listing input clock names.
+	Required elements: "pclk"
+- clocks: phandles to input clocks.
 
 
 Required properties for devices compatible with "atmel,at91sam9g45-ssc":
@@ -20,6 +23,8 @@ ssc0: ssc@fffbc000 {
 	compatible = "atmel,at91rm9200-ssc";
 	reg = <0xfffbc000 0x4000>;
 	interrupts = <14 4 5>;
+	clocks = <&ssc0_clk>;
+	clock-names = "pclk";
 };
 
 - DMA transfer:
diff --git a/Documentation/devicetree/bindings/misc/bmp085.txt b/Documentation/devicetree/bindings/misc/bmp085.txt
index 91dfda2e4e11..d7a6deb6b21e 100644
--- a/Documentation/devicetree/bindings/misc/bmp085.txt
+++ b/Documentation/devicetree/bindings/misc/bmp085.txt
@@ -8,6 +8,8 @@ Optional properties:
 - temp-measurement-period: temperature measurement period (milliseconds)
 - default-oversampling: default oversampling value to be used at startup,
   value range is 0-3 with rising sensitivity.
+- interrupt-parent: should be the phandle for the interrupt controller
+- interrupts: interrupt mapping for IRQ
 
 Example:
 
@@ -17,4 +19,6 @@ pressure@77 {
 	chip-id = <10>;
 	temp-measurement-period = <100>;
 	default-oversampling = <2>;
+	interrupt-parent = <&gpio0>;
+	interrupts = <25 IRQ_TYPE_EDGE_RISING>;
 };
diff --git a/Documentation/extcon/porting-android-switch-class b/Documentation/extcon/porting-android-switch-class
index 5377f6317961..49c81caef84d 100644
--- a/Documentation/extcon/porting-android-switch-class
+++ b/Documentation/extcon/porting-android-switch-class
@@ -50,7 +50,7 @@ so that they are still compatible with legacy userspace processes.
 	Extcon's extended features for switch device drivers with
 complex features usually required magic numbers in state
 value of switch_dev. With extcon, such magic numbers that
-support multiple cables (
+support multiple cables are no more required or supported.
 
 	1. Define cable names at edev->supported_cable.
 	2. (Recommended) remove print_state callback.
@@ -114,11 +114,8 @@ exclusive, the two cables cannot be in ATTACHED state simulteneously.
 
 ****** ABI Location
 
-	If "CONFIG_ANDROID" is enabled and "CONFIG_ANDROID_SWITCH" is
-disabled, /sys/class/switch/* are created as symbolic links to
-/sys/class/extcon/*. Because CONFIG_ANDROID_SWITCH creates
-/sys/class/switch directory, we disable symboling linking if
-CONFIG_ANDROID_SWITCH is enabled.
+	If "CONFIG_ANDROID" is enabled, /sys/class/switch/* are created
+as symbolic links to /sys/class/extcon/*.
 
 	The two files of switch class, name and state, are provided with
 extcon, too. When the multistate support (STEP 2 of CHAPTER 1.) is
diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO
index 8148a47fc70e..0091a8215ac1 100644
--- a/Documentation/ja_JP/HOWTO
+++ b/Documentation/ja_JP/HOWTO
@@ -149,7 +149,7 @@ we also recommend sending it to linux-api@ver.kernel.org.
 Another well-written description of how to create patches is:
 
 	"The Perfect Patch"
-		http://userweb.kernel.org/~akpm/stuff/tpp.txt
+		http://www.ozlabs.org/~akpm/stuff/tpp.txt
 	"Linux kernel patch submission format"
 		http://linux.yyz.us/patch-format.html
 
@@ -622,7 +622,7 @@ The Linux kernel community does not accept large chunks of code all at once.
 For details on what all of this should look like, please see the
 ChangeLog section of the document:
 	"The Perfect Patch"
-		http://userweb.kernel.org/~akpm/stuff/tpp.txt
+		http://www.ozlabs.org/~akpm/stuff/tpp.txt
 
 All of this is sometimes very hard to do. Practicing these conventions
 perfectly may take years. This is a continuous process of improvement,
diff --git a/Documentation/ko_KR/HOWTO b/Documentation/ko_KR/HOWTO
index 680e64635958..dc2ff8f611e0 100644
--- a/Documentation/ko_KR/HOWTO
+++ b/Documentation/ko_KR/HOWTO
@@ -122,7 +122,7 @@ it is recommended to send man-pages patches to the maintainer at mtk.manpages@gmail.com.
 
 There are other excellent documents on how to create patches properly:
 	"The Perfect Patch"
-		http://userweb.kernel.org/~akpm/stuff/tpp.txt
+		http://www.ozlabs.org/~akpm/stuff/tpp.txt
 	"Linux kernel patch submission format"
 		http://linux.yyz.us/patch-format.html
 
@@ -213,7 +213,7 @@ are generated from Documentation/DocBook/ as PDF, Postscript, HTML, ...
 is the Linux Cross-Reference project, which is a self-referential
 index of the source code rendered as web pages. An up-to-date
 repository of the kernel code can be browsed at:
-	http://users.sosdg.org/~qiyong/lxr/
+	http://lxr.linux.no/+trees
 
 
 The development process
@@ -222,20 +222,20 @@ are generated from Documentation/DocBook/ as PDF, Postscript, HTML, ...
 The Linux kernel development process currently consists of a few
 different main kernel "branches" and lots of subsystem-specific
 kernel branches. The main branches are:
-  - the main 2.6.x kernel tree
-  - the 2.6.x.y stable kernel tree
-  - the 2.6.x -git kernel patches
-  - the 2.6.x -mm kernel patches
+  - the main 3.x kernel tree
+  - the 3.x.y stable kernel tree
+  - the 3.x -git kernel patches
   - subsystem-specific kernel trees and patches
+  - the 3.x -next kernel tree for integration tests
 
-The 2.6.x kernel tree
+The 3.x kernel tree
 ---------------
 
-2.6.x kernels are maintained by Linus Torvalds and can be found in the
-pub/linux/kernel/v2.6/ directory on kernel.org. The development process
+3.x kernels are maintained by Linus Torvalds and can be found in the
+pub/linux/kernel/v3.x/ directory on kernel.org. The development process
 is as follows:
   - As soon as a new kernel is released, a two week window opens during
     which maintainers can submit big diffs to Linus. Such patches have
-    usually already been in the -mm kernel for a few weeks. The
+    usually already been in the -next kernel for a few weeks. The
     preferred way to submit big changes is using git (the kernel's
     source management tool, see http://git.or.cz/ for more information),
     but plain patch files are also fine.
@@ -262,20 +262,20 @@ there is a posting by Andrew Morton on this:
 	released according to the state of the bugs, not according to a
 	preconceived timeline."
 
-The 2.6.x.y stable kernel tree
+The 3.x.y stable kernel tree
 ------------------------
 
-Kernels with four-part version numbers are -stable kernels. They
+Kernels with three-part version numbers are -stable kernels. They
 contain relatively small and critical fixes for security problems or
-significant regressions discovered in a given 2.6.x kernel.
+significant regressions discovered in a given 3.x kernel.
 
 This is the branch recommended for users who want the most recent
 stable kernel and are not interested in helping to test
 development/experimental versions.
 
-If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
+If no 3.x.y kernel is available, then the highest numbered 3.x
 kernel is the current stable kernel.
 
-2.6.x.y kernels are maintained by the "stable" team <stable@kernel.org>
-and are released roughly every other week.
+3.x.y kernels are maintained by the "stable" team <stable@vger.kernel.org>
+and are released roughly every other week.
 
 The file Documentation/stable_kernel_rules.txt in the kernel tree
@@ -283,84 +283,46 @@ there is a posting by Andrew Morton on this:
 explains how the stable release process proceeds.
 
 
-The 2.6.x -git patches
+The 3.x -git patches
 ------------------
 These are daily snapshots of Linus' kernel tree, managed in a git
 repository (hence the name -git). These patches are usually released
 daily and represent the current state of Linus' tree. They are more
 experimental than -rc kernels since they are generated automatically
 without even a cursory look at whether they are sane.
 
-The 2.6.x -mm kernel patches
----------------------
-These are experimental kernel patches released by Andrew Morton. Andrew
-takes all of the other subsystem kernel trees and patches and bundles
-them together, along with many patches taken from the linux-kernel
-mailing list. This tree serves as a proving ground for new features and
-patches. Once a patch has proved its worth in -mm for a while, Andrew
-or the subsystem maintainer pushes it to Linus for inclusion in
-mainline.
-
-It is strongly encouraged that all new patches be tested in the -mm
-tree before they are sent to Linus for inclusion in the main kernel
-tree.
-
-These kernels are not appropriate for use on systems that are supposed
-to be stable, and they are riskier to run than any of the other
-branches.
-
-If you wish to help out with the kernel development process, please
-test and use these kernel releases and provide feedback to the
-linux-kernel mailing list, whether you find problems or everything
-works properly.
-
-These kernels generally contain all of the other experimental patches,
-plus a few changes from the mainline -git kernels available at the time
-of release.
-
-The -mm kernels are not released on a fixed schedule, but usually a few
--mm kernels are released between each pair of -rc kernels (1 to 3 is
-common).
-
 Subsystem-specific kernel trees and patches
 -------------------------------
-Many of the other kernel subsystem developers expose their development
-trees so that others can see what is happening in the different areas
-of the kernel. These trees are merged into the -mm kernel releases as
-described above.
-
-Here is a list of some of the available kernel trees:
-  git trees:
-    - Kbuild development tree, Sam Ravnborg <sam@ravnborg.org>
-      git.kernel.org:/pub/scm/linux/kernel/git/sam/kbuild.git
-    - ACPI development tree, Len Brown <len.brown@intel.com>
-      git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
-    - Block development tree, Jens Axboe <jens.axboe@oracle.com>
-      git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
-    - DRM development tree, Dave Airlie <airlied@linux.ie>
-      git.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6.git
-    - ia64 development tree, Tony Luck <tony.luck@intel.com>
-      git.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git
-    - infiniband, Roland Dreier <rolandd@cisco.com>
-      git.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git
-    - libata, Jeff Garzik <jgarzik@pobox.com>
-      git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git
-    - network drivers, Jeff Garzik <jgarzik@pobox.com>
-      git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git
-    - pcmcia, Dominik Brodowski <linux@dominikbrodowski.net>
-      git.kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git
-    - SCSI, James Bottomley <James.Bottomley@SteelEye.com>
-      git.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
-  quilt trees:
-    - USB, PCI, Driver Core, and I2C, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-      kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
-    - x86-64, partly i386, Andi Kleen <ak@suse.de>
-      ftp.firstfloor.org:/pub/ak/x86_64/quilt/
-
-  Other kernel trees can be found at http://kernel.org/git and in the
-  MAINTAINERS file.
+The maintainers of the various kernel subsystems --- and also many
+kernel subsystem developers --- expose their current state of
+development in source repositories. That way, others can see what is
+happening in the different areas of the kernel. In areas where
+development is rapid, a developer may be asked to base his submissions
+on such a subsystem kernel tree, to avoid conflicts between his
+submission and other already ongoing work.
+
+Most of these repositories are git trees, but there are also patches
+managed by SCMs other than git, or published as quilt series. These
+subsystem repositories are listed in the MAINTAINERS file. Many of
+them can be browsed at http://git.kernel.org.
+
+Before a proposed patch is committed to such a subsystem tree, it is
+subject to review on the mailing lists (see the respective section
+below). For several kernel subsystems, this review process is tracked
+with the tool patchwork. Patchwork offers a web interface which shows
+registered patches, any comments on a patch and its revisions, and
+maintainers can mark patches as under review, accepted, or rejected.
+Most of these patchwork sites are listed at
+http://patchwork.kernel.org/ or http://patchwork.ozlabs.org/.
+
+The 3.x -next kernel tree for integration tests
+-----------------------------------------
+Before updates from subsystem trees are merged into the mainline 3.x
+tree, they need to be integration-tested. For this purpose, a special
+testing repository exists into which virtually all subsystem trees are
+pulled on an almost daily basis:
+	http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
+	http://linux.f-seidel.de/linux-next/pmwiki/
+
+This way, the -next kernel gives a summary outlook on what is expected
+to go into the mainline kernel at the next merge period. Adventurous
+testers are very welcome to runtime-test the -next kernel.
 
 Bug reporting
 ---------
@@ -597,7 +559,7 @@ there may well be a female named Pat. Linux
 
 For more details on what this should look like, see the ChangeLog
 section of the document:
 	"The Perfect Patch"
-		http://userweb.kernel.org/~akpm/stuff/tpp.txt
+		http://www.ozlabs.org/~akpm/stuff/tpp.txt
 
 
 
diff --git a/Documentation/misc-devices/mei/mei-amt-version.c b/Documentation/misc-devices/mei/mei-amt-version.c
index 49e4f770864a..57d0d871dcf7 100644
--- a/Documentation/misc-devices/mei/mei-amt-version.c
+++ b/Documentation/misc-devices/mei/mei-amt-version.c
@@ -115,8 +115,6 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
 	struct mei_client *cl;
 	struct mei_connect_client_data data;
 
-	mei_deinit(me);
-
 	me->verbose = verbose;
 
 	me->fd = open("/dev/mei", O_RDWR);
diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO
index 7fba5aab9ef9..6c914aa87e71 100644
--- a/Documentation/zh_CN/HOWTO
+++ b/Documentation/zh_CN/HOWTO
@@ -112,7 +112,7 @@ The Linux kernel source contains a large amount of documentation, invaluable for learning how to interact with the community.
 
 Other excellent documents on how to generate patches properly include:
 	"The Perfect Patch"
-		http://userweb.kernel.org/~akpm/stuff/tpp.txt
+		http://www.ozlabs.org/~akpm/stuff/tpp.txt
 	"Linux kernel patch submission format"
 		http://linux.yyz.us/patch-format.html
 
@@ -515,7 +515,7 @@ The Linux kernel community does not like to receive large chunks of code all at once. Changes need to be properly
 
 To see what this should look like in practice, see the "ChangeLog" section of the following document:
 	"The Perfect Patch"
-		http://userweb.kernel.org/~akpm/stuff/tpp.txt
+		http://www.ozlabs.org/~akpm/stuff/tpp.txt
 
 
 These things are sometimes hard to do. It may take years to do them all perfectly. This is
diff --git a/MAINTAINERS b/MAINTAINERS
index ce1645ebe70c..12da4885e753 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2619,7 +2619,7 @@ S: Maintained
 F:	drivers/platform/x86/dell-laptop.c
 
 DELL LAPTOP SMM DRIVER
-S:	Orphan
+M:	Guenter Roeck <linux@roeck-us.net>
 F:	drivers/char/i8k.c
 F:	include/uapi/linux/i8k.h
 
@@ -3335,6 +3335,7 @@ EXTERNAL CONNECTOR SUBSYSTEM (EXTCON)
 M:	MyungJoo Ham <myungjoo.ham@samsung.com>
 M:	Chanwoo Choi <cw00.choi@samsung.com>
 L:	linux-kernel@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/extcon.git
 S:	Maintained
 F:	drivers/extcon/
 F:	Documentation/extcon/
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index d79d692d05b8..896413b59aae 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -735,7 +735,7 @@ static struct pci_device_id agp_amd64_pci_table[] = {
 
 MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
 
-static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
+static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
 	{ PCI_DEVICE_CLASS(0, 0) },
 	{ }
 };
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index e6939e13e338..e210f858d3cb 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -1,12 +1,11 @@
 /*
  * i8k.c -- Linux driver for accessing the SMM BIOS on Dell laptops.
- * See http://www.debian.org/~dz/i8k/ for more information
- * and for latest version of this driver.
  *
  * Copyright (C) 2001  Massimo Dal Zotto <dz@debian.org>
  *
  * Hwmon integration:
  * Copyright (C) 2011  Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2013  Guenter Roeck <linux@roeck-us.net>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -19,6 +18,8 @@
  * General Public License for more details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -29,13 +30,12 @@
 #include <linux/mutex.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/sched.h>
 
 #include <linux/i8k.h>
 
-#define I8K_VERSION		"1.14 21/02/2005"
-
 #define I8K_SMM_FN_STATUS	0x0025
 #define I8K_SMM_POWER_STATUS	0x0069
 #define I8K_SMM_SET_FAN		0x01a3
@@ -44,7 +44,6 @@
 #define I8K_SMM_GET_TEMP	0x10a3
 #define I8K_SMM_GET_DELL_SIG1	0xfea3
 #define I8K_SMM_GET_DELL_SIG2	0xffa3
-#define I8K_SMM_BIOS_VERSION	0x00a6
 
 #define I8K_FAN_MULT		30
 #define I8K_MAX_TEMP		127
@@ -64,6 +63,15 @@
 static DEFINE_MUTEX(i8k_mutex);
 static char bios_version[4];
 static struct device *i8k_hwmon_dev;
+static u32 i8k_hwmon_flags;
+static int i8k_fan_mult;
+
+#define I8K_HWMON_HAVE_TEMP1	(1 << 0)
+#define I8K_HWMON_HAVE_TEMP2	(1 << 1)
+#define I8K_HWMON_HAVE_TEMP3	(1 << 2)
+#define I8K_HWMON_HAVE_TEMP4	(1 << 3)
+#define I8K_HWMON_HAVE_FAN1	(1 << 4)
+#define I8K_HWMON_HAVE_FAN2	(1 << 5)
 
 MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
 MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
@@ -103,11 +111,11 @@ static const struct file_operations i8k_fops = {
 
 struct smm_regs {
 	unsigned int eax;
-	unsigned int ebx __attribute__ ((packed));
-	unsigned int ecx __attribute__ ((packed));
-	unsigned int edx __attribute__ ((packed));
-	unsigned int esi __attribute__ ((packed));
-	unsigned int edi __attribute__ ((packed));
+	unsigned int ebx __packed;
+	unsigned int ecx __packed;
+	unsigned int edx __packed;
+	unsigned int esi __packed;
+	unsigned int edi __packed;
 };
 
 static inline const char *i8k_get_dmi_data(int field)
@@ -124,6 +132,17 @@ static int i8k_smm(struct smm_regs *regs)
 {
 	int rc;
 	int eax = regs->eax;
+	cpumask_var_t old_mask;
+
+	/* SMM requires CPU 0 */
+	if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_copy(old_mask, &current->cpus_allowed);
+	set_cpus_allowed_ptr(current, cpumask_of(0));
+	if (smp_processor_id() != 0) {
+		rc = -EBUSY;
+		goto out;
+	}
 
 #if defined(CONFIG_X86_64)
 	asm volatile("pushq %%rax\n\t"
@@ -148,7 +167,7 @@ static int i8k_smm(struct smm_regs *regs)
148 "pushfq\n\t" 167 "pushfq\n\t"
149 "popq %%rax\n\t" 168 "popq %%rax\n\t"
150 "andl $1,%%eax\n" 169 "andl $1,%%eax\n"
151 :"=a"(rc) 170 : "=a"(rc)
152 : "a"(regs) 171 : "a"(regs)
153 : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); 172 : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
154#else 173#else
@@ -174,25 +193,17 @@ static int i8k_smm(struct smm_regs *regs)
174 "lahf\n\t" 193 "lahf\n\t"
175 "shrl $8,%%eax\n\t" 194 "shrl $8,%%eax\n\t"
176 "andl $1,%%eax\n" 195 "andl $1,%%eax\n"
177 :"=a"(rc) 196 : "=a"(rc)
178 : "a"(regs) 197 : "a"(regs)
179 : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); 198 : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
180#endif 199#endif
181 if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax) 200 if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
182 return -EINVAL; 201 rc = -EINVAL;
183 202
184 return 0; 203out:
185} 204 set_cpus_allowed_ptr(current, old_mask);
186 205 free_cpumask_var(old_mask);
187/* 206 return rc;
188 * Read the bios version. Return the version as an integer corresponding
189 * to the ascii value, for example "A17" is returned as 0x00413137.
190 */
191static int i8k_get_bios_version(void)
192{
193 struct smm_regs regs = { .eax = I8K_SMM_BIOS_VERSION, };
194
195 return i8k_smm(&regs) ? : regs.eax;
196} 207}
197 208
198/* 209/*
@@ -203,7 +214,8 @@ static int i8k_get_fn_status(void)
 	struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
 	int rc;
 
-	if ((rc = i8k_smm(&regs)) < 0)
+	rc = i8k_smm(&regs);
+	if (rc < 0)
 		return rc;
 
 	switch ((regs.eax >> I8K_FN_SHIFT) & I8K_FN_MASK) {
@@ -226,7 +238,8 @@ static int i8k_get_power_status(void)
 	struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
 	int rc;
 
-	if ((rc = i8k_smm(&regs)) < 0)
+	rc = i8k_smm(&regs);
+	if (rc < 0)
 		return rc;
 
 	return (regs.eax & 0xff) == I8K_POWER_AC ? I8K_AC : I8K_BATTERY;
@@ -251,7 +264,7 @@ static int i8k_get_fan_speed(int fan)
 	struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
 
 	regs.ebx = fan & 0xff;
-	return i8k_smm(&regs) ? : (regs.eax & 0xffff) * fan_mult;
+	return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
 }
 
 /*
@@ -277,10 +290,11 @@ static int i8k_get_temp(int sensor)
 	int temp;
 
 #ifdef I8K_TEMPERATURE_BUG
-	static int prev;
+	static int prev[4];
 #endif
 	regs.ebx = sensor & 0xff;
-	if ((rc = i8k_smm(&regs)) < 0)
+	rc = i8k_smm(&regs);
+	if (rc < 0)
 		return rc;
 
 	temp = regs.eax & 0xff;
@@ -294,10 +308,10 @@ static int i8k_get_temp(int sensor)
 	 *   # 1003655139 00000054 00005c52
 	 */
 	if (temp > I8K_MAX_TEMP) {
-		temp = prev;
-		prev = I8K_MAX_TEMP;
+		temp = prev[sensor];
+		prev[sensor] = I8K_MAX_TEMP;
 	} else {
-		prev = temp;
+		prev[sensor] = temp;
 	}
 #endif
 
@@ -309,7 +323,8 @@ static int i8k_get_dell_signature(int req_fn)
 	struct smm_regs regs = { .eax = req_fn, };
 	int rc;
 
-	if ((rc = i8k_smm(&regs)) < 0)
+	rc = i8k_smm(&regs);
+	if (rc < 0)
 		return rc;
 
 	return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
@@ -328,12 +343,14 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
 
 	switch (cmd) {
 	case I8K_BIOS_VERSION:
-		val = i8k_get_bios_version();
+		val = (bios_version[0] << 16) |
+				(bios_version[1] << 8) | bios_version[2];
 		break;
 
 	case I8K_MACHINE_ID:
 		memset(buff, 0, 16);
-		strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), sizeof(buff));
+		strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+			sizeof(buff));
 		break;
 
 	case I8K_FN_STATUS:
@@ -470,12 +487,13 @@ static ssize_t i8k_hwmon_show_temp(struct device *dev,
 				   struct device_attribute *devattr,
 				   char *buf)
 {
-	int cpu_temp;
+	int index = to_sensor_dev_attr(devattr)->index;
+	int temp;
 
-	cpu_temp = i8k_get_temp(0);
-	if (cpu_temp < 0)
-		return cpu_temp;
-	return sprintf(buf, "%d\n", cpu_temp * 1000);
+	temp = i8k_get_temp(index);
+	if (temp < 0)
+		return temp;
+	return sprintf(buf, "%d\n", temp * 1000);
 }
 
 static ssize_t i8k_hwmon_show_fan(struct device *dev,
@@ -491,12 +509,44 @@ static ssize_t i8k_hwmon_show_fan(struct device *dev,
 	return sprintf(buf, "%d\n", fan_speed);
 }
 
+static ssize_t i8k_hwmon_show_pwm(struct device *dev,
+				  struct device_attribute *devattr,
+				  char *buf)
+{
+	int index = to_sensor_dev_attr(devattr)->index;
+	int status;
+
+	status = i8k_get_fan_status(index);
+	if (status < 0)
+		return -EIO;
+	return sprintf(buf, "%d\n", clamp_val(status * 128, 0, 255));
+}
+
+static ssize_t i8k_hwmon_set_pwm(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	int index = to_sensor_dev_attr(attr)->index;
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 10, &val);
+	if (err)
+		return err;
+	val = clamp_val(DIV_ROUND_CLOSEST(val, 128), 0, 2);
+
+	mutex_lock(&i8k_mutex);
+	err = i8k_set_fan(index, val);
+	mutex_unlock(&i8k_mutex);
+
+	return err < 0 ? -EIO : count;
+}
+
 static ssize_t i8k_hwmon_show_label(struct device *dev,
 				    struct device_attribute *devattr,
 				    char *buf)
 {
-	static const char *labels[4] = {
-		"i8k",
+	static const char *labels[3] = {
 		"CPU",
 		"Left Fan",
 		"Right Fan",
@@ -506,108 +556,108 @@ static ssize_t i8k_hwmon_show_label(struct device *dev,
 	return sprintf(buf, "%s\n", labels[index]);
 }
 
-static DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, i8k_hwmon_show_temp, NULL, 3);
 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
 			  I8K_FAN_LEFT);
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
+			  i8k_hwmon_set_pwm, I8K_FAN_LEFT);
 static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
 			  I8K_FAN_RIGHT);
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
+			  i8k_hwmon_set_pwm, I8K_FAN_RIGHT);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
+
+static struct attribute *i8k_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,	/* 0 */
+	&sensor_dev_attr_temp1_label.dev_attr.attr,	/* 1 */
+	&sensor_dev_attr_temp2_input.dev_attr.attr,	/* 2 */
+	&sensor_dev_attr_temp3_input.dev_attr.attr,	/* 3 */
+	&sensor_dev_attr_temp4_input.dev_attr.attr,	/* 4 */
+	&sensor_dev_attr_fan1_input.dev_attr.attr,	/* 5 */
+	&sensor_dev_attr_pwm1.dev_attr.attr,		/* 6 */
+	&sensor_dev_attr_fan1_label.dev_attr.attr,	/* 7 */
+	&sensor_dev_attr_fan2_input.dev_attr.attr,	/* 8 */
+	&sensor_dev_attr_pwm2.dev_attr.attr,		/* 9 */
+	&sensor_dev_attr_fan2_label.dev_attr.attr,	/* 10 */
+	NULL
+};
 
-static void i8k_hwmon_remove_files(struct device *dev)
+static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
+			      int index)
 {
-	device_remove_file(dev, &dev_attr_temp1_input);
-	device_remove_file(dev, &sensor_dev_attr_fan1_input.dev_attr);
-	device_remove_file(dev, &sensor_dev_attr_fan2_input.dev_attr);
-	device_remove_file(dev, &sensor_dev_attr_temp1_label.dev_attr);
-	device_remove_file(dev, &sensor_dev_attr_fan1_label.dev_attr);
-	device_remove_file(dev, &sensor_dev_attr_fan2_label.dev_attr);
-	device_remove_file(dev, &sensor_dev_attr_name.dev_attr);
+	if ((index == 0 || index == 1) &&
+	    !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+		return 0;
+	if (index == 2 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP2))
+		return 0;
+	if (index == 3 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP3))
+		return 0;
+	if (index == 4 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP4))
+		return 0;
+	if (index >= 5 && index <= 7 &&
+	    !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
+		return 0;
+	if (index >= 8 && index <= 10 &&
+	    !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
+		return 0;
+
+	return attr->mode;
 }
 
+static const struct attribute_group i8k_group = {
+	.attrs = i8k_attrs,
+	.is_visible = i8k_is_visible,
+};
+__ATTRIBUTE_GROUPS(i8k);
+
 static int __init i8k_init_hwmon(void)
 {
 	int err;
 
-	i8k_hwmon_dev = hwmon_device_register(NULL);
-	if (IS_ERR(i8k_hwmon_dev)) {
-		err = PTR_ERR(i8k_hwmon_dev);
-		i8k_hwmon_dev = NULL;
-		printk(KERN_ERR "i8k: hwmon registration failed (%d)\n", err);
-		return err;
-	}
-
-	/* Required name attribute */
-	err = device_create_file(i8k_hwmon_dev,
-				 &sensor_dev_attr_name.dev_attr);
-	if (err)
-		goto exit_unregister;
+	i8k_hwmon_flags = 0;
 
 	/* CPU temperature attributes, if temperature reading is OK */
 	err = i8k_get_temp(0);
-	if (err < 0) {
-		dev_dbg(i8k_hwmon_dev,
-			"Not creating temperature attributes (%d)\n", err);
-	} else {
-		err = device_create_file(i8k_hwmon_dev, &dev_attr_temp1_input);
-		if (err)
-			goto exit_remove_files;
-		err = device_create_file(i8k_hwmon_dev,
-					 &sensor_dev_attr_temp1_label.dev_attr);
-		if (err)
-			goto exit_remove_files;
-	}
+	if (err >= 0)
+		i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP1;
+	/* check for additional temperature sensors */
+	err = i8k_get_temp(1);
+	if (err >= 0)
+		i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP2;
+	err = i8k_get_temp(2);
+	if (err >= 0)
+		i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP3;
+	err = i8k_get_temp(3);
+	if (err >= 0)
+		i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
 
 	/* Left fan attributes, if left fan is present */
 	err = i8k_get_fan_status(I8K_FAN_LEFT);
-	if (err < 0) {
-		dev_dbg(i8k_hwmon_dev,
-			"Not creating %s fan attributes (%d)\n", "left", err);
-	} else {
-		err = device_create_file(i8k_hwmon_dev,
-					 &sensor_dev_attr_fan1_input.dev_attr);
-		if (err)
-			goto exit_remove_files;
-		err = device_create_file(i8k_hwmon_dev,
-					 &sensor_dev_attr_fan1_label.dev_attr);
-		if (err)
-			goto exit_remove_files;
-	}
+	if (err >= 0)
+		i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
 
 	/* Right fan attributes, if right fan is present */
 	err = i8k_get_fan_status(I8K_FAN_RIGHT);
-	if (err < 0) {
-		dev_dbg(i8k_hwmon_dev,
-			"Not creating %s fan attributes (%d)\n", "right", err);
-	} else {
-		err = device_create_file(i8k_hwmon_dev,
-					 &sensor_dev_attr_fan2_input.dev_attr);
-		if (err)
-			goto exit_remove_files;
-		err = device_create_file(i8k_hwmon_dev,
-					 &sensor_dev_attr_fan2_label.dev_attr);
-		if (err)
-			goto exit_remove_files;
-	}
+	if (err >= 0)
+		i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
 
+	i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "i8k", NULL,
+							  i8k_groups);
+	if (IS_ERR(i8k_hwmon_dev)) {
+		err = PTR_ERR(i8k_hwmon_dev);
+		i8k_hwmon_dev = NULL;
+		pr_err("hwmon registration failed (%d)\n", err);
+		return err;
+	}
 	return 0;
-
- exit_remove_files:
-	i8k_hwmon_remove_files(i8k_hwmon_dev);
- exit_unregister:
-	hwmon_device_unregister(i8k_hwmon_dev);
-	return err;
 }
 
-static void __exit i8k_exit_hwmon(void)
-{
-	i8k_hwmon_remove_files(i8k_hwmon_dev);
-	hwmon_device_unregister(i8k_hwmon_dev);
-}
-
-static struct dmi_system_id __initdata i8k_dmi_table[] = {
+static struct dmi_system_id i8k_dmi_table[] __initdata = {
 	{
 		.ident = "Dell Inspiron",
 		.matches = {
@@ -671,7 +721,23 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
 		},
 	},
-	{ }
+	{
+		.ident = "Dell Studio",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Studio"),
+		},
+		.driver_data = (void *)1,	/* fan multiplier override */
+	},
+	{
+		.ident = "Dell XPS M140",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"),
+		},
+		.driver_data = (void *)1,	/* fan multiplier override */
+	},
+	{ }
 };
 
 /*
@@ -679,8 +745,7 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
  */
 static int __init i8k_probe(void)
 {
-	char buff[4];
-	int version;
+	const struct dmi_system_id *id;
 
 	/*
 	 * Get DMI information
@@ -689,49 +754,30 @@ static int __init i8k_probe(void)
 	if (!ignore_dmi && !force)
 		return -ENODEV;
 
-	printk(KERN_INFO "i8k: not running on a supported Dell system.\n");
-	printk(KERN_INFO "i8k: vendor=%s, model=%s, version=%s\n",
+	pr_info("not running on a supported Dell system.\n");
+	pr_info("vendor=%s, model=%s, version=%s\n",
 		i8k_get_dmi_data(DMI_SYS_VENDOR),
 		i8k_get_dmi_data(DMI_PRODUCT_NAME),
 		i8k_get_dmi_data(DMI_BIOS_VERSION));
 	}
 
-	strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), sizeof(bios_version));
+	strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
+		sizeof(bios_version));
 
 	/*
 	 * Get SMM Dell signature
 	 */
 	if (i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG1) &&
 	    i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG2)) {
-		printk(KERN_ERR "i8k: unable to get SMM Dell signature\n");
+		pr_err("unable to get SMM Dell signature\n");
 		if (!force)
 			return -ENODEV;
 	}
 
-	/*
-	 * Get SMM BIOS version.
-	 */
-	version = i8k_get_bios_version();
-	if (version <= 0) {
-		printk(KERN_WARNING "i8k: unable to get SMM BIOS version\n");
-	} else {
-		buff[0] = (version >> 16) & 0xff;
-		buff[1] = (version >> 8) & 0xff;
-		buff[2] = (version) & 0xff;
-		buff[3] = '\0';
-		/*
-		 * If DMI BIOS version is unknown use SMM BIOS version.
-		 */
-		if (!dmi_get_system_info(DMI_BIOS_VERSION))
-			strlcpy(bios_version, buff, sizeof(bios_version));
-
-		/*
-		 * Check if the two versions match.
-		 */
-		if (strncmp(buff, bios_version, sizeof(bios_version)) != 0)
-			printk(KERN_WARNING "i8k: BIOS version mismatch: %s != %s\n",
-				buff, bios_version);
-	}
+	i8k_fan_mult = fan_mult;
+	id = dmi_first_match(i8k_dmi_table);
+	if (id && fan_mult == I8K_FAN_MULT && id->driver_data)
+		i8k_fan_mult = (unsigned long)id->driver_data;
 
 	return 0;
 }
@@ -754,10 +800,6 @@ static int __init i8k_init(void)
 	if (err)
 		goto exit_remove_proc;
 
-	printk(KERN_INFO
-	       "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n",
-	       I8K_VERSION);
-
 	return 0;
 
 exit_remove_proc:
@@ -767,7 +809,7 @@ static int __init i8k_init(void)
 
 static void __exit i8k_exit(void)
 {
-	i8k_exit_hwmon();
+	hwmon_device_unregister(i8k_hwmon_dev);
 	remove_proc_entry("i8k", NULL);
 }
 
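
The probe rework above drops the SMM BIOS-version cross-check and instead consults the DMI table: dmi_first_match() returns the first matching i8k_dmi_table entry, and a non-NULL .driver_data requests a fan-multiplier override unless the user already set fan_mult explicitly. A minimal sketch of the same table-plus-quirk pattern (all names here are illustrative, not from the patch):

#include <linux/dmi.h>

static const struct dmi_system_id example_dmi_table[] = {
	{
		.ident = "Some Model",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Some Product"),
		},
		.driver_data = (void *)2,	/* per-model quirk value */
	},
	{ }	/* terminator */
};

static unsigned long example_quirk(void)
{
	const struct dmi_system_id *id = dmi_first_match(example_dmi_table);

	/* fall back to a default when no entry matches */
	return (id && id->driver_data) ? (unsigned long)id->driver_data : 1;
}
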
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 0913d79424d3..c4094c4e22c1 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -587,6 +587,8 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
 			return -ENODEV;
 		switch ( cmd ) {
 			case LPTIME:
+				if (arg > UINT_MAX / HZ)
+					return -EINVAL;
 				LP_TIME(minor) = arg * HZ/100;
 				break;
 			case LPCHAR:
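
The added LPTIME check guards the arg * HZ multiplication: any arg larger than UINT_MAX / HZ would wrap around before the division by 100 and silently store a bogus timeout. The same pre-multiplication overflow test in isolation (a sketch; the function name is illustrative):

#include <limits.h>

/* a * b wraps in unsigned arithmetic exactly when a > UINT_MAX / b */
static int scale_centiseconds(unsigned int arg, unsigned int hz,
			      unsigned int *jiffies_out)
{
	if (arg > UINT_MAX / hz)
		return -1;		/* would overflow before the /100 */
	*jiffies_out = arg * hz / 100;
	return 0;
}
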
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 1fd00dc06897..76c490fa0511 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -168,7 +168,10 @@ static irqreturn_t button_handler (int irq, void *dev_id)
 static int button_read (struct file *filp, char __user *buffer,
 			size_t count, loff_t *ppos)
 {
-	interruptible_sleep_on (&button_wait_queue);
+	DEFINE_WAIT(wait);
+	prepare_to_wait(&button_wait_queue, &wait, TASK_INTERRUPTIBLE);
+	schedule();
+	finish_wait(&button_wait_queue, &wait);
 	return (copy_to_user (buffer, &button_output_buffer, bcount))
 		? -EFAULT : bcount;
 }
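
interruptible_sleep_on() was inherently racy and scheduled for removal, so the driver now open-codes the same unconditional interruptible sleep. Note that, like the original, it still sleeps without testing a condition first; a driver with a condition to check would normally use wait_event_interruptible() instead. The open-coded pattern in isolation (a sketch):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);

static void example_wait_for_wakeup(void)
{
	DEFINE_WAIT(wait);

	/* Queue ourselves before sleeping so a wake_up() issued between
	 * these two calls is not lost. */
	prepare_to_wait(&example_waitq, &wait, TASK_INTERRUPTIBLE);
	schedule();
	finish_wait(&example_waitq, &wait);
}
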
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index d5d2e4a985aa..daea84c41743 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -216,4 +216,4 @@ error:
 	ttyprintk_driver = NULL;
 	return ret;
 }
-module_init(ttyprintk_init);
+device_initcall(ttyprintk_init);
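
For built-in code, module_init() and device_initcall() expand to the same initcall; spelling it device_initcall() documents that ttyprintk cannot be built as a module. The pattern, as a sketch:

#include <linux/init.h>

static int __init example_init(void)
{
	return 0;
}
/* Prefer device_initcall() over module_init() in non-modular code;
 * both register the function at the same initcall level. */
device_initcall(example_init);
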
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index f1d54a3985bd..bdb5a00f1dfa 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -31,6 +31,16 @@ config EXTCON_ADC_JACK
 	help
 	  Say Y here to enable extcon device driver based on ADC values.
 
+config EXTCON_MAX14577
+	tristate "MAX14577 EXTCON Support"
+	depends on MFD_MAX14577
+	select IRQ_DOMAIN
+	select REGMAP_I2C
+	help
+	  If you say yes here you get support for the MUIC device of
+	  Maxim MAX14577 PMIC. The MAX14577 MUIC is a USB port accessory
+	  detector and switch.
+
 config EXTCON_MAX77693
 	tristate "MAX77693 EXTCON Support"
 	depends on MFD_MAX77693 && INPUT
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 759fdae46f95..43eccc0e3448 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_OF_EXTCON) += of_extcon.o
 obj-$(CONFIG_EXTCON)		+= extcon-class.o
 obj-$(CONFIG_EXTCON_GPIO)	+= extcon-gpio.o
 obj-$(CONFIG_EXTCON_ADC_JACK)	+= extcon-adc-jack.o
+obj-$(CONFIG_EXTCON_MAX14577)	+= extcon-max14577.o
 obj-$(CONFIG_EXTCON_MAX77693)	+= extcon-max77693.o
 obj-$(CONFIG_EXTCON_MAX8997)	+= extcon-max8997.o
 obj-$(CONFIG_EXTCON_ARIZONA)	+= extcon-arizona.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index a287cece0593..c20602f601ee 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -44,6 +44,15 @@
 #define HPDET_DEBOUNCE 500
 #define DEFAULT_MICD_TIMEOUT 2000
 
+#define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \
+			 ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \
+			 ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \
+			 ARIZONA_MICD_LVL_7)
+
+#define MICD_LVL_0_TO_7 (ARIZONA_MICD_LVL_0 | MICD_LVL_1_TO_7)
+
+#define MICD_LVL_0_TO_8 (MICD_LVL_0_TO_7 | ARIZONA_MICD_LVL_8)
+
 struct arizona_extcon_info {
 	struct device *dev;
 	struct arizona *arizona;
@@ -426,26 +435,15 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
 	}
 
 	val &= ARIZONA_HP_LVL_B_MASK;
+	/* Convert to ohms, the value is in 0.5 ohm increments */
+	val /= 2;
 
 	regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
 		    &range);
 	range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
 		   >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
 
-	/* Skip up or down a range? */
-	if (range && (val < arizona_hpdet_c_ranges[range].min)) {
-		range--;
-		dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
-			arizona_hpdet_c_ranges[range].min,
-			arizona_hpdet_c_ranges[range].max);
-		regmap_update_bits(arizona->regmap,
-				   ARIZONA_HEADPHONE_DETECT_1,
-				   ARIZONA_HP_IMPEDANCE_RANGE_MASK,
-				   range <<
-				   ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
-		return -EAGAIN;
-	}
-
+	/* Skip up a range, or report? */
 	if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 &&
 	    (val >= arizona_hpdet_c_ranges[range].max)) {
 		range++;
@@ -459,6 +457,12 @@ static int arizona_hpdet_read(struct arizona_extcon_info *info)
 				   ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
 			return -EAGAIN;
 		}
+
+		if (range && (val < arizona_hpdet_c_ranges[range].min)) {
+			dev_dbg(arizona->dev, "Reporting range boundary %d\n",
+				arizona_hpdet_c_ranges[range].min);
+			val = arizona_hpdet_c_ranges[range].min;
+		}
 	}
 
 	dev_dbg(arizona->dev, "HP impedance %d ohms\n", val);
@@ -594,9 +598,15 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
 		dev_err(arizona->dev, "Failed to report HP/line: %d\n",
 			ret);
 
+done:
+	/* Reset back to starting range */
+	regmap_update_bits(arizona->regmap,
+			   ARIZONA_HEADPHONE_DETECT_1,
+			   ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
+			   0);
+
 	arizona_extcon_do_magic(info, 0);
 
-done:
 	if (id_gpio)
 		gpio_set_value_cansleep(id_gpio, 0);
 
@@ -765,7 +775,20 @@ static void arizona_micd_detect(struct work_struct *work)
 
 	mutex_lock(&info->lock);
 
-	for (i = 0; i < 10 && !(val & 0x7fc); i++) {
+	/* If the cable was removed while measuring ignore the result */
+	ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
+	if (ret < 0) {
+		dev_err(arizona->dev, "Failed to check cable state: %d\n",
+			ret);
+		mutex_unlock(&info->lock);
+		return;
+	} else if (!ret) {
+		dev_dbg(arizona->dev, "Ignoring MICDET for removed cable\n");
+		mutex_unlock(&info->lock);
+		return;
+	}
+
+	for (i = 0; i < 10 && !(val & MICD_LVL_0_TO_8); i++) {
 		ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
 		if (ret != 0) {
 			dev_err(arizona->dev,
@@ -784,7 +807,7 @@ static void arizona_micd_detect(struct work_struct *work)
 		}
 	}
 
-	if (i == 10 && !(val & 0x7fc)) {
+	if (i == 10 && !(val & MICD_LVL_0_TO_8)) {
 		dev_err(arizona->dev, "Failed to get valid MICDET value\n");
 		mutex_unlock(&info->lock);
 		return;
@@ -798,7 +821,7 @@ static void arizona_micd_detect(struct work_struct *work)
 	}
 
 	/* If we got a high impedance we should have a headset, report it. */
-	if (info->detecting && (val & 0x400)) {
+	if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
 		arizona_identify_headphone(info);
 
 		ret = extcon_update_state(&info->edev,
@@ -827,7 +850,7 @@ static void arizona_micd_detect(struct work_struct *work)
 	 * plain headphones. If both polarities report a low
 	 * impedance then give up and report headphones.
 	 */
-	if (info->detecting && (val & 0x3f8)) {
+	if (info->detecting && (val & MICD_LVL_1_TO_7)) {
 		if (info->jack_flips >= info->micd_num_modes * 10) {
 			dev_dbg(arizona->dev, "Detected HP/line\n");
 			arizona_identify_headphone(info);
@@ -851,7 +874,7 @@ static void arizona_micd_detect(struct work_struct *work)
 	 * If we're still detecting and we detect a short then we've
 	 * got a headphone. Otherwise it's a button press.
 	 */
-	if (val & 0x3fc) {
+	if (val & MICD_LVL_0_TO_7) {
 		if (info->mic) {
 			dev_dbg(arizona->dev, "Mic button detected\n");
 
@@ -1126,6 +1149,16 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 			break;
 		}
 		break;
+	case WM5110:
+		switch (arizona->rev) {
+		case 0 ... 2:
+			break;
+		default:
+			info->micd_clamp = true;
+			info->hpdet_ip = 2;
+			break;
+		}
+		break;
 	default:
 		break;
 	}
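
The MICD_LVL_* masks above name the magic constants they replace. Assuming the ARIZONA_MICD_LVL_n bits are consecutive single-bit flags starting at bit 2 (which is what the replaced constants imply), the old and new spellings are bit-for-bit identical:

#define EX_MICD_LVL(n)	(1u << ((n) + 2))	/* assumed: LVL_0 at bit 2 */

/* Recomputing the replaced constants from that assumption:
 *   LVL_1..7     -> bits 3..9  == 0x3f8  (old button-press mask)
 *   LVL_0..7     -> bits 2..9  == 0x3fc  (old short-circuit mask)
 *   LVL_0..8     -> bits 2..10 == 0x7fc  (old "any level" mask)
 *   EX_MICD_LVL(8) -> bit 10   == 0x400  (old high-impedance bit)
 */
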
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 7e0dff58e494..a63a6b21c9ad 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -40,6 +40,7 @@ struct gpio_extcon_data {
 	int irq;
 	struct delayed_work work;
 	unsigned long debounce_jiffies;
+	bool check_on_resume;
 };
 
 static void gpio_extcon_work(struct work_struct *work)
@@ -103,8 +104,15 @@ static int gpio_extcon_probe(struct platform_device *pdev)
 	extcon_data->gpio_active_low = pdata->gpio_active_low;
 	extcon_data->state_on = pdata->state_on;
 	extcon_data->state_off = pdata->state_off;
+	extcon_data->check_on_resume = pdata->check_on_resume;
 	if (pdata->state_on && pdata->state_off)
 		extcon_data->edev.print_state = extcon_gpio_print_state;
+
+	ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
+				    pdev->name);
+	if (ret < 0)
+		return ret;
+
 	if (pdata->debounce) {
 		ret = gpio_set_debounce(extcon_data->gpio,
 					pdata->debounce * 1000);
@@ -117,11 +125,6 @@ static int gpio_extcon_probe(struct platform_device *pdev)
 	if (ret < 0)
 		return ret;
 
-	ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
-				    pdev->name);
-	if (ret < 0)
-		goto err;
-
 	INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work);
 
 	extcon_data->irq = gpio_to_irq(extcon_data->gpio);
@@ -159,12 +162,31 @@ static int gpio_extcon_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int gpio_extcon_resume(struct device *dev)
+{
+	struct gpio_extcon_data *extcon_data;
+
+	extcon_data = dev_get_drvdata(dev);
+	if (extcon_data->check_on_resume)
+		queue_delayed_work(system_power_efficient_wq,
+			&extcon_data->work, extcon_data->debounce_jiffies);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops gpio_extcon_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(NULL, gpio_extcon_resume)
+};
+
 static struct platform_driver gpio_extcon_driver = {
 	.probe		= gpio_extcon_probe,
 	.remove		= gpio_extcon_remove,
 	.driver		= {
 		.name	= "extcon-gpio",
 		.owner	= THIS_MODULE,
+		.pm	= &gpio_extcon_pm_ops,
 	},
 };
 
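
A board whose cable state can change while the system sleeps sets the new check_on_resume flag in its platform data; the resume hook then simply requeues the existing debounce work. A hypothetical board-file sketch (the struct and fields match what the probe code above reads; the header path and GPIO number are assumptions):

#include <linux/extcon/extcon-gpio.h>

static struct gpio_extcon_platform_data example_jack_pdata = {
	.name		 = "headset",
	.gpio		 = 42,		/* illustrative GPIO number */
	.debounce	 = 20,		/* ms, forwarded to gpio_set_debounce() */
	.check_on_resume = true,	/* re-poll the pin after resume */
};
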
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
new file mode 100644
index 000000000000..3846941801b8
--- /dev/null
+++ b/drivers/extcon/extcon-max14577.c
@@ -0,0 +1,752 @@
1/*
2 * extcon-max14577.c - MAX14577 extcon driver to support MAX14577 MUIC
3 *
4 * Copyright (C) 2013 Samsung Electronics
5 * Chanwoo Choi <cw00.choi@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/i2c.h>
21#include <linux/interrupt.h>
22#include <linux/platform_device.h>
23#include <linux/mfd/max14577.h>
24#include <linux/mfd/max14577-private.h>
25#include <linux/extcon.h>
26
27#define DEV_NAME "max14577-muic"
28#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
29
30enum max14577_muic_adc_debounce_time {
31 ADC_DEBOUNCE_TIME_5MS = 0,
32 ADC_DEBOUNCE_TIME_10MS,
33 ADC_DEBOUNCE_TIME_25MS,
34 ADC_DEBOUNCE_TIME_38_62MS,
35};
36
37enum max14577_muic_status {
38 MAX14577_MUIC_STATUS1 = 0,
39 MAX14577_MUIC_STATUS2 = 1,
40 MAX14577_MUIC_STATUS_END,
41};
42
43struct max14577_muic_info {
44 struct device *dev;
45 struct max14577 *max14577;
46 struct extcon_dev *edev;
47 int prev_cable_type;
48 int prev_chg_type;
49 u8 status[MAX14577_MUIC_STATUS_END];
50
51 bool irq_adc;
52 bool irq_chg;
53 struct work_struct irq_work;
54 struct mutex mutex;
55
56 /*
57	 * Use a delayed workqueue to detect the cable state and then
58	 * notify it to the notifiee/platform through a uevent. After
59	 * the platform has finished booting, the extcon provider driver
60	 * should notify the cable state to the upper layer.
61 */
62 struct delayed_work wq_detcable;
63
64 /*
65	 * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB:
66	 * the h/w path of COMP2/COMN1 in the CONTROL1 register.
67 */
68 int path_usb;
69 int path_uart;
70};
71
72enum max14577_muic_cable_group {
73 MAX14577_CABLE_GROUP_ADC = 0,
74 MAX14577_CABLE_GROUP_CHG,
75};
76
77/**
78 * struct max14577_muic_irq
79 * @irq: the index into the irq list of the MUIC device
80 * @name: the name of the irq
81 * @virq: the virtual irq used by the irq domain
82 */
83struct max14577_muic_irq {
84 unsigned int irq;
85 const char *name;
86 unsigned int virq;
87};
88
89static struct max14577_muic_irq muic_irqs[] = {
90 { MAX14577_IRQ_INT1_ADC, "muic-ADC" },
91 { MAX14577_IRQ_INT1_ADCLOW, "muic-ADCLOW" },
92 { MAX14577_IRQ_INT1_ADCERR, "muic-ADCError" },
93 { MAX14577_IRQ_INT2_CHGTYP, "muic-CHGTYP" },
94 { MAX14577_IRQ_INT2_CHGDETRUN, "muic-CHGDETRUN" },
95 { MAX14577_IRQ_INT2_DCDTMR, "muic-DCDTMR" },
96 { MAX14577_IRQ_INT2_DBCHG, "muic-DBCHG" },
97 { MAX14577_IRQ_INT2_VBVOLT, "muic-VBVOLT" },
98};
99
100/* Define supported accessory type */
101enum max14577_muic_acc_type {
102 MAX14577_MUIC_ADC_GROUND = 0x0,
103 MAX14577_MUIC_ADC_SEND_END_BUTTON,
104 MAX14577_MUIC_ADC_REMOTE_S1_BUTTON,
105 MAX14577_MUIC_ADC_REMOTE_S2_BUTTON,
106 MAX14577_MUIC_ADC_REMOTE_S3_BUTTON,
107 MAX14577_MUIC_ADC_REMOTE_S4_BUTTON,
108 MAX14577_MUIC_ADC_REMOTE_S5_BUTTON,
109 MAX14577_MUIC_ADC_REMOTE_S6_BUTTON,
110 MAX14577_MUIC_ADC_REMOTE_S7_BUTTON,
111 MAX14577_MUIC_ADC_REMOTE_S8_BUTTON,
112 MAX14577_MUIC_ADC_REMOTE_S9_BUTTON,
113 MAX14577_MUIC_ADC_REMOTE_S10_BUTTON,
114 MAX14577_MUIC_ADC_REMOTE_S11_BUTTON,
115 MAX14577_MUIC_ADC_REMOTE_S12_BUTTON,
116 MAX14577_MUIC_ADC_RESERVED_ACC_1,
117 MAX14577_MUIC_ADC_RESERVED_ACC_2,
118 MAX14577_MUIC_ADC_RESERVED_ACC_3,
119 MAX14577_MUIC_ADC_RESERVED_ACC_4,
120 MAX14577_MUIC_ADC_RESERVED_ACC_5,
121 MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2,
122 MAX14577_MUIC_ADC_PHONE_POWERED_DEV,
123 MAX14577_MUIC_ADC_TTY_CONVERTER,
124 MAX14577_MUIC_ADC_UART_CABLE,
125 MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG,
126 MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF,
127 MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON,
128 MAX14577_MUIC_ADC_AV_CABLE_NOLOAD,
129 MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG,
130 MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF,
131 MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON,
132 MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1, /* with Remote and Simple Ctrl */
133 MAX14577_MUIC_ADC_OPEN,
134};
135
136/* The max14577 MUIC device supports the below list of accessories (external connectors) */
137enum {
138 EXTCON_CABLE_USB = 0,
139 EXTCON_CABLE_TA,
140 EXTCON_CABLE_FAST_CHARGER,
141 EXTCON_CABLE_SLOW_CHARGER,
142 EXTCON_CABLE_CHARGE_DOWNSTREAM,
143 EXTCON_CABLE_JIG_USB_ON,
144 EXTCON_CABLE_JIG_USB_OFF,
145 EXTCON_CABLE_JIG_UART_OFF,
146 EXTCON_CABLE_JIG_UART_ON,
147
148 _EXTCON_CABLE_NUM,
149};
150
151static const char *max14577_extcon_cable[] = {
152 [EXTCON_CABLE_USB] = "USB",
153 [EXTCON_CABLE_TA] = "TA",
154 [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
155 [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
156 [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
157 [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
158 [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
159 [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
160 [EXTCON_CABLE_JIG_UART_ON] = "JIG-UART-ON",
161
162 NULL,
163};
164
165/*
166 * max14577_muic_set_debounce_time - Set the debounce time of ADC
167 * @info: the instance including private data of max14577 MUIC
168 * @time: the debounce time of ADC
169 */
170static int max14577_muic_set_debounce_time(struct max14577_muic_info *info,
171 enum max14577_muic_adc_debounce_time time)
172{
173 u8 ret;
174
175 switch (time) {
176 case ADC_DEBOUNCE_TIME_5MS:
177 case ADC_DEBOUNCE_TIME_10MS:
178 case ADC_DEBOUNCE_TIME_25MS:
179 case ADC_DEBOUNCE_TIME_38_62MS:
180 ret = max14577_update_reg(info->max14577->regmap,
181 MAX14577_MUIC_REG_CONTROL3,
182 CTRL3_ADCDBSET_MASK,
183 time << CTRL3_ADCDBSET_SHIFT);
184 if (ret) {
185 dev_err(info->dev, "failed to set ADC debounce time\n");
186 return ret;
187 }
188 break;
189 default:
190 dev_err(info->dev, "invalid ADC debounce time\n");
191 return -EINVAL;
192 }
193
194 return 0;
195}
196
197/*
198 * max14577_muic_set_path - Set hardware line according to attached cable
199 * @info: the instance including private data of max14577 MUIC
200 * @value: the path according to attached cable
201 * @attached: the state of cable (true:attached, false:detached)
202 *
203 * The max14577 MUIC device shares its external H/W lines among a variety
204 * of cables, so this function sets the internal path of the H/W lines
205 * according to the type of attached cable.
206 */
207static int max14577_muic_set_path(struct max14577_muic_info *info,
208 u8 val, bool attached)
209{
210 int ret = 0;
211 u8 ctrl1, ctrl2 = 0;
212
213 /* Set open state to path before changing hw path */
214 ret = max14577_update_reg(info->max14577->regmap,
215 MAX14577_MUIC_REG_CONTROL1,
216 CLEAR_IDBEN_MICEN_MASK, CTRL1_SW_OPEN);
217 if (ret < 0) {
218 dev_err(info->dev, "failed to update MUIC register\n");
219 return ret;
220 }
221
222 if (attached)
223 ctrl1 = val;
224 else
225 ctrl1 = CTRL1_SW_OPEN;
226
227 ret = max14577_update_reg(info->max14577->regmap,
228 MAX14577_MUIC_REG_CONTROL1,
229 CLEAR_IDBEN_MICEN_MASK, ctrl1);
230 if (ret < 0) {
231 dev_err(info->dev, "failed to update MUIC register\n");
232 return ret;
233 }
234
235 if (attached)
236 ctrl2 |= CTRL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
237 else
238 ctrl2 |= CTRL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
239
240 ret = max14577_update_reg(info->max14577->regmap,
241 MAX14577_REG_CONTROL2,
242 CTRL2_LOWPWR_MASK | CTRL2_CPEN_MASK, ctrl2);
243 if (ret < 0) {
244 dev_err(info->dev, "failed to update MUIC register\n");
245 return ret;
246 }
247
248 dev_dbg(info->dev,
249 "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
250 ctrl1, ctrl2, attached ? "attached" : "detached");
251
252 return 0;
253}
254
255/*
256 * max14577_muic_get_cable_type - Return cable type and check cable state
257 * @info: the instance including private data of max14577 MUIC
258 * @group: the path according to attached cable
259 * @attached: store cable state and return
260 *
261 * This function checks whether the cable state is attached or detached,
262 * and then determines the precise cable type according to the cable group:
263 * - MAX14577_CABLE_GROUP_ADC
264 * - MAX14577_CABLE_GROUP_CHG
265 */
266static int max14577_muic_get_cable_type(struct max14577_muic_info *info,
267 enum max14577_muic_cable_group group, bool *attached)
268{
269 int cable_type = 0;
270 int adc;
271 int chg_type;
272
273 switch (group) {
274 case MAX14577_CABLE_GROUP_ADC:
275 /*
276 * Read ADC value to check cable type and decide cable state
277 * according to cable type
278 */
279 adc = info->status[MAX14577_MUIC_STATUS1] & STATUS1_ADC_MASK;
280 adc >>= STATUS1_ADC_SHIFT;
281
282 /*
283 * Check current cable state/cable type and store cable type
284 * (info->prev_cable_type) for handling cable when cable is
285 * detached.
286 */
287 if (adc == MAX14577_MUIC_ADC_OPEN) {
288 *attached = false;
289
290 cable_type = info->prev_cable_type;
291 info->prev_cable_type = MAX14577_MUIC_ADC_OPEN;
292 } else {
293 *attached = true;
294
295 cable_type = info->prev_cable_type = adc;
296 }
297 break;
298 case MAX14577_CABLE_GROUP_CHG:
299 /*
300 * Read charger type to check cable type and decide cable state
301 * according to type of charger cable.
302 */
303 chg_type = info->status[MAX14577_MUIC_STATUS2] &
304 STATUS2_CHGTYP_MASK;
305 chg_type >>= STATUS2_CHGTYP_SHIFT;
306
307 if (chg_type == MAX14577_CHARGER_TYPE_NONE) {
308 *attached = false;
309
310 cable_type = info->prev_chg_type;
311 info->prev_chg_type = MAX14577_CHARGER_TYPE_NONE;
312 } else {
313 *attached = true;
314
315 /*
316 * Check current cable state/cable type and store cable
317 * type(info->prev_chg_type) for handling cable when
318 * charger cable is detached.
319 */
320 cable_type = info->prev_chg_type = chg_type;
321 }
322
323 break;
324 default:
325 dev_err(info->dev, "Unknown cable group (%d)\n", group);
326 cable_type = -EINVAL;
327 break;
328 }
329
330 return cable_type;
331}
332
333static int max14577_muic_jig_handler(struct max14577_muic_info *info,
334 int cable_type, bool attached)
335{
336 char cable_name[32];
337 int ret = 0;
338 u8 path = CTRL1_SW_OPEN;
339
340 dev_dbg(info->dev,
341 "external connector is %s (adc:0x%02x)\n",
342 attached ? "attached" : "detached", cable_type);
343
344 switch (cable_type) {
345 case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
346 /* PATH:AP_USB */
347 strcpy(cable_name, "JIG-USB-OFF");
348 path = CTRL1_SW_USB;
349 break;
350 case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
351 /* PATH:AP_USB */
352 strcpy(cable_name, "JIG-USB-ON");
353 path = CTRL1_SW_USB;
354 break;
355 case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
356 /* PATH:AP_UART */
357 strcpy(cable_name, "JIG-UART-OFF");
358 path = CTRL1_SW_UART;
359 break;
360 default:
361 dev_err(info->dev, "failed to detect %s jig cable\n",
362 attached ? "attached" : "detached");
363 return -EINVAL;
364 }
365
366 ret = max14577_muic_set_path(info, path, attached);
367 if (ret < 0)
368 return ret;
369
370 extcon_set_cable_state(info->edev, cable_name, attached);
371
372 return 0;
373}
374
375static int max14577_muic_adc_handler(struct max14577_muic_info *info)
376{
377 int cable_type;
378 bool attached;
379 int ret = 0;
380
381 /* Check accessory state which is either detached or attached */
382 cable_type = max14577_muic_get_cable_type(info,
383 MAX14577_CABLE_GROUP_ADC, &attached);
384
385 dev_dbg(info->dev,
386 "external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
387 attached ? "attached" : "detached", cable_type,
388 info->prev_cable_type);
389
390 switch (cable_type) {
391 case MAX14577_MUIC_ADC_FACTORY_MODE_USB_OFF:
392 case MAX14577_MUIC_ADC_FACTORY_MODE_USB_ON:
393 case MAX14577_MUIC_ADC_FACTORY_MODE_UART_OFF:
394 /* JIG */
395 ret = max14577_muic_jig_handler(info, cable_type, attached);
396 if (ret < 0)
397 return ret;
398 break;
399 case MAX14577_MUIC_ADC_GROUND:
400 case MAX14577_MUIC_ADC_SEND_END_BUTTON:
401 case MAX14577_MUIC_ADC_REMOTE_S1_BUTTON:
402 case MAX14577_MUIC_ADC_REMOTE_S2_BUTTON:
403 case MAX14577_MUIC_ADC_REMOTE_S3_BUTTON:
404 case MAX14577_MUIC_ADC_REMOTE_S4_BUTTON:
405 case MAX14577_MUIC_ADC_REMOTE_S5_BUTTON:
406 case MAX14577_MUIC_ADC_REMOTE_S6_BUTTON:
407 case MAX14577_MUIC_ADC_REMOTE_S7_BUTTON:
408 case MAX14577_MUIC_ADC_REMOTE_S8_BUTTON:
409 case MAX14577_MUIC_ADC_REMOTE_S9_BUTTON:
410 case MAX14577_MUIC_ADC_REMOTE_S10_BUTTON:
411 case MAX14577_MUIC_ADC_REMOTE_S11_BUTTON:
412 case MAX14577_MUIC_ADC_REMOTE_S12_BUTTON:
413 case MAX14577_MUIC_ADC_RESERVED_ACC_1:
414 case MAX14577_MUIC_ADC_RESERVED_ACC_2:
415 case MAX14577_MUIC_ADC_RESERVED_ACC_3:
416 case MAX14577_MUIC_ADC_RESERVED_ACC_4:
417 case MAX14577_MUIC_ADC_RESERVED_ACC_5:
418 case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE2:
419 case MAX14577_MUIC_ADC_PHONE_POWERED_DEV:
420 case MAX14577_MUIC_ADC_TTY_CONVERTER:
421 case MAX14577_MUIC_ADC_UART_CABLE:
422 case MAX14577_MUIC_ADC_CEA936A_TYPE1_CHG:
423 case MAX14577_MUIC_ADC_AV_CABLE_NOLOAD:
424 case MAX14577_MUIC_ADC_CEA936A_TYPE2_CHG:
425 case MAX14577_MUIC_ADC_FACTORY_MODE_UART_ON:
426 case MAX14577_MUIC_ADC_AUDIO_DEVICE_TYPE1:
427 /*
428 * This accessory isn't used in the general case. If it is
429 * specially needed to detect an additional accessory, proper
430 * operation should be implemented for when it is attached/detached.
431 */
432 dev_info(info->dev,
433 "accessory is %s but it isn't used (adc:0x%x)\n",
434 attached ? "attached" : "detached", cable_type);
435 return -EAGAIN;
436 default:
437 dev_err(info->dev,
438 "failed to detect %s accessory (adc:0x%x)\n",
439 attached ? "attached" : "detached", cable_type);
440 return -EINVAL;
441 }
442
443 return 0;
444}
445
446static int max14577_muic_chg_handler(struct max14577_muic_info *info)
447{
448 int chg_type;
449 bool attached;
450 int ret = 0;
451
452 chg_type = max14577_muic_get_cable_type(info,
453 MAX14577_CABLE_GROUP_CHG, &attached);
454
455 dev_dbg(info->dev,
456 "external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
457 attached ? "attached" : "detached",
458 chg_type, info->prev_chg_type);
459
460 switch (chg_type) {
461 case MAX14577_CHARGER_TYPE_USB:
462 /* PATH:AP_USB */
463 ret = max14577_muic_set_path(info, info->path_usb, attached);
464 if (ret < 0)
465 return ret;
466
467 extcon_set_cable_state(info->edev, "USB", attached);
468 break;
469 case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
470 extcon_set_cable_state(info->edev, "TA", attached);
471 break;
472 case MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT:
473 extcon_set_cable_state(info->edev,
474 "Charge-downstream", attached);
475 break;
476 case MAX14577_CHARGER_TYPE_SPECIAL_500MA:
477 extcon_set_cable_state(info->edev, "Slow-charger", attached);
478 break;
479 case MAX14577_CHARGER_TYPE_SPECIAL_1A:
480 extcon_set_cable_state(info->edev, "Fast-charger", attached);
481 break;
482 case MAX14577_CHARGER_TYPE_NONE:
483 case MAX14577_CHARGER_TYPE_DEAD_BATTERY:
484 break;
485 default:
486 dev_err(info->dev,
487 "failed to detect %s accessory (chg_type:0x%x)\n",
488 attached ? "attached" : "detached", chg_type);
489 return -EINVAL;
490 }
491
492 return 0;
493}
494
495static void max14577_muic_irq_work(struct work_struct *work)
496{
497 struct max14577_muic_info *info = container_of(work,
498 struct max14577_muic_info, irq_work);
499 int ret = 0;
500
501 if (!info->edev)
502 return;
503
504 mutex_lock(&info->mutex);
505
506 ret = max14577_bulk_read(info->max14577->regmap,
507 MAX14577_MUIC_REG_STATUS1, info->status, 2);
508 if (ret) {
509 dev_err(info->dev, "failed to read MUIC register\n");
510 mutex_unlock(&info->mutex);
511 return;
512 }
513
514 if (info->irq_adc) {
515 ret = max14577_muic_adc_handler(info);
516 info->irq_adc = false;
517 }
518 if (info->irq_chg) {
519 ret = max14577_muic_chg_handler(info);
520 info->irq_chg = false;
521 }
522
523 if (ret < 0)
524 dev_err(info->dev, "failed to handle MUIC interrupt\n");
525
526 mutex_unlock(&info->mutex);
527
528 return;
529}
530
531static irqreturn_t max14577_muic_irq_handler(int irq, void *data)
532{
533 struct max14577_muic_info *info = data;
534 int i, irq_type = -1;
535
536 /*
537	 * We may be called multiple times for different nested IRQs,
538	 * including simultaneous changes in INT1_ADC and INT2_CHGTYP.
539	 * However, we only need to know whether it was ADC, charger,
540	 * or both interrupts, so decode the IRQ and set the proper flags.
541 */
542 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
543 if (irq == muic_irqs[i].virq)
544 irq_type = muic_irqs[i].irq;
545
546 switch (irq_type) {
547 case MAX14577_IRQ_INT1_ADC:
548 case MAX14577_IRQ_INT1_ADCLOW:
549 case MAX14577_IRQ_INT1_ADCERR:
550		/* Handle all accessories except for the
551		   charger type of accessory */
552 info->irq_adc = true;
553 break;
554 case MAX14577_IRQ_INT2_CHGTYP:
555 case MAX14577_IRQ_INT2_CHGDETRUN:
556 case MAX14577_IRQ_INT2_DCDTMR:
557 case MAX14577_IRQ_INT2_DBCHG:
558 case MAX14577_IRQ_INT2_VBVOLT:
559 /* Handle charger accessory */
560 info->irq_chg = true;
561 break;
562 default:
563 dev_err(info->dev, "muic interrupt: irq %d occurred, skipped\n",
564 irq_type);
565 return IRQ_HANDLED;
566 }
567 schedule_work(&info->irq_work);
568
569 return IRQ_HANDLED;
570}
571
572static int max14577_muic_detect_accessory(struct max14577_muic_info *info)
573{
574 int ret = 0;
575 int adc;
576 int chg_type;
577 bool attached;
578
579 mutex_lock(&info->mutex);
580
581 /* Read STATUSx register to detect accessory */
582 ret = max14577_bulk_read(info->max14577->regmap,
583 MAX14577_MUIC_REG_STATUS1, info->status, 2);
584 if (ret) {
585 dev_err(info->dev, "failed to read MUIC register\n");
586 mutex_unlock(&info->mutex);
587 return ret;
588 }
589
590 adc = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_ADC,
591 &attached);
592 if (attached && adc != MAX14577_MUIC_ADC_OPEN) {
593 ret = max14577_muic_adc_handler(info);
594 if (ret < 0) {
595 dev_err(info->dev, "Cannot detect accessory\n");
596 mutex_unlock(&info->mutex);
597 return ret;
598 }
599 }
600
601 chg_type = max14577_muic_get_cable_type(info, MAX14577_CABLE_GROUP_CHG,
602 &attached);
603 if (attached && chg_type != MAX14577_CHARGER_TYPE_NONE) {
604 ret = max14577_muic_chg_handler(info);
605 if (ret < 0) {
606 dev_err(info->dev, "Cannot detect charger accessory\n");
607 mutex_unlock(&info->mutex);
608 return ret;
609 }
610 }
611
612 mutex_unlock(&info->mutex);
613
614 return 0;
615}
616
617static void max14577_muic_detect_cable_wq(struct work_struct *work)
618{
619 struct max14577_muic_info *info = container_of(to_delayed_work(work),
620 struct max14577_muic_info, wq_detcable);
621
622 max14577_muic_detect_accessory(info);
623}
624
625static int max14577_muic_probe(struct platform_device *pdev)
626{
627 struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
628 struct max14577_muic_info *info;
629 int delay_jiffies;
630 int ret;
631 int i;
632 u8 id;
633
634 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
635 if (!info) {
636 dev_err(&pdev->dev, "failed to allocate memory\n");
637 return -ENOMEM;
638 }
639 info->dev = &pdev->dev;
640 info->max14577 = max14577;
641
642 platform_set_drvdata(pdev, info);
643 mutex_init(&info->mutex);
644
645 INIT_WORK(&info->irq_work, max14577_muic_irq_work);
646
647 /* Support irq domain for max14577 MUIC device */
648 for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
649 struct max14577_muic_irq *muic_irq = &muic_irqs[i];
650 unsigned int virq = 0;
651
652 virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq);
653 if (!virq)
654 return -EINVAL;
655 muic_irq->virq = virq;
656
657 ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
658 max14577_muic_irq_handler,
659 IRQF_NO_SUSPEND,
660 muic_irq->name, info);
661 if (ret) {
662 dev_err(&pdev->dev,
663 "failed: irq request (IRQ: %d,"
664				" error: %d)\n",
665 muic_irq->irq, ret);
666 return ret;
667 }
668 }
669
670 /* Initialize extcon device */
671 info->edev = devm_kzalloc(&pdev->dev, sizeof(*info->edev), GFP_KERNEL);
672 if (!info->edev) {
673 dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
674 return -ENOMEM;
675 }
676 info->edev->name = DEV_NAME;
677 info->edev->supported_cable = max14577_extcon_cable;
678 ret = extcon_dev_register(info->edev);
679 if (ret) {
680 dev_err(&pdev->dev, "failed to register extcon device\n");
681 return ret;
682 }
683
684 /* Default h/w line path */
685 info->path_usb = CTRL1_SW_USB;
686 info->path_uart = CTRL1_SW_UART;
687 delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
688
689 /* Set initial path for UART */
690 max14577_muic_set_path(info, info->path_uart, true);
691
692	/* Check the revision number of the MUIC device */
693 ret = max14577_read_reg(info->max14577->regmap,
694 MAX14577_REG_DEVICEID, &id);
695 if (ret < 0) {
696 dev_err(&pdev->dev, "failed to read revision number\n");
697 goto err_extcon;
698 }
699 dev_info(info->dev, "device ID : 0x%x\n", id);
700
701 /* Set ADC debounce time */
702 max14577_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
703
704 /*
705	 * Detect the accessory after the platform has completed initialization.
706	 *
707	 * - Use a delayed workqueue to detect the cable state and then
708	 *   notify it to the notifiee/platform through a uevent. After
709	 *   the platform has finished booting, the extcon provider driver
710	 *   should notify the cable state to the upper layer.
711 */
712 INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq);
713 ret = queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
714 delay_jiffies);
715 if (ret < 0) {
716 dev_err(&pdev->dev,
717 "failed to schedule delayed work for cable detect\n");
718 goto err_extcon;
719 }
720
721 return ret;
722
723err_extcon:
724 extcon_dev_unregister(info->edev);
725 return ret;
726}
727
728static int max14577_muic_remove(struct platform_device *pdev)
729{
730 struct max14577_muic_info *info = platform_get_drvdata(pdev);
731
732 cancel_work_sync(&info->irq_work);
733 extcon_dev_unregister(info->edev);
734
735 return 0;
736}
737
738static struct platform_driver max14577_muic_driver = {
739 .driver = {
740 .name = DEV_NAME,
741 .owner = THIS_MODULE,
742 },
743 .probe = max14577_muic_probe,
744 .remove = max14577_muic_remove,
745};
746
747module_platform_driver(max14577_muic_driver);
748
749MODULE_DESCRIPTION("MAXIM 14577 Extcon driver");
750MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
751MODULE_LICENSE("GPL");
752MODULE_ALIAS("platform:extcon-max14577");
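
On the consumer side, another driver can subscribe to one of the cable names this MUIC exports ("USB", "TA", ...) through the extcon notifier API of this kernel generation. A hedged sketch; the extcon device name is DEV_NAME from the driver above, everything else is illustrative:

#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static struct extcon_specific_cable_nb usb_cable;

static int example_usb_event(struct notifier_block *nb,
			     unsigned long attached, void *ptr)
{
	pr_info("USB cable %s\n", attached ? "attached" : "detached");
	return NOTIFY_OK;
}

static struct notifier_block example_usb_nb = {
	.notifier_call = example_usb_event,
};

static int example_subscribe(void)
{
	/* Matches the provider by extcon device name and cable string. */
	return extcon_register_interest(&usb_cable, "max14577-muic",
					"USB", &example_usb_nb);
}
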
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 6c91976dd823..2aea4bcdd7f3 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -78,20 +78,24 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
 
 static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
 {
-	unsigned int set;
+	unsigned int set, id_src;
 	struct palmas_usb *palmas_usb = _palmas_usb;
 
 	palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
 		PALMAS_USB_ID_INT_LATCH_SET, &set);
+	palmas_read(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
+		PALMAS_USB_ID_INT_SRC, &id_src);
 
-	if (set & PALMAS_USB_ID_INT_SRC_ID_GND) {
+	if ((set & PALMAS_USB_ID_INT_SRC_ID_GND) &&
+	    (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
 		palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
 			PALMAS_USB_ID_INT_LATCH_CLR,
 			PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
 		palmas_usb->linkstat = PALMAS_USB_STATE_ID;
 		extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true);
 		dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
-	} else if (set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) {
+	} else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
+		   (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
 		palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
 			PALMAS_USB_ID_INT_LATCH_CLR,
 			PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
@@ -103,6 +107,11 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
 		palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
 		extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false);
 		dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+	} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
+		   (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
+		palmas_usb->linkstat = PALMAS_USB_STATE_ID;
+		extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true);
+		dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
 	}
 
 	return IRQ_HANDLED;
@@ -269,7 +278,9 @@ static const struct dev_pm_ops palmas_pm_ops = {
 
 static struct of_device_id of_palmas_match_tbl[] = {
 	{ .compatible = "ti,palmas-usb", },
+	{ .compatible = "ti,palmas-usb-vid", },
 	{ .compatible = "ti,twl6035-usb", },
+	{ .compatible = "ti,twl6035-usb-vid", },
 	{ /* end */ }
 };
 
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index f0c5e07c25ec..bcb49502c3bf 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -301,7 +301,7 @@ err:
 	return -ENOMEM;
 }
 
-void hv_synic_free_cpu(int cpu)
+static void hv_synic_free_cpu(int cpu)
 {
 	kfree(hv_context.event_dpc[cpu]);
 	if (hv_context.synic_event_page[cpu])
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a3e291d0df9a..6cb388e8fb7d 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -525,4 +525,5 @@ source "drivers/misc/altera-stapl/Kconfig"
 source "drivers/misc/mei/Kconfig"
 source "drivers/misc/vmw_vmci/Kconfig"
 source "drivers/misc/mic/Kconfig"
+source "drivers/misc/genwqe/Kconfig"
 endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f45473e68bf7..99b9424ce31d 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,3 +53,4 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
 obj-$(CONFIG_LATTICE_ECP3_CONFIG)	+= lattice-ecp3-config.o
 obj-$(CONFIG_SRAM)			+= sram.o
 obj-y					+= mic/
+obj-$(CONFIG_GENWQE)			+= genwqe/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 0daadcf1ed7a..d3eee113baeb 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -641,7 +641,7 @@ static const struct attribute_group ad525x_group_commands = {
 	.attrs = ad525x_attributes_commands,
 };
 
-int ad_dpot_add_files(struct device *dev,
+static int ad_dpot_add_files(struct device *dev,
 		unsigned features, unsigned rdac)
 {
 	int err = sysfs_create_file(&dev->kobj,
@@ -666,7 +666,7 @@ int ad_dpot_add_files(struct device *dev,
 	return err;
 }
 
-inline void ad_dpot_remove_files(struct device *dev,
+static inline void ad_dpot_remove_files(struct device *dev,
 		unsigned features, unsigned rdac)
 {
 	sysfs_remove_file(&dev->kobj,
diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c
index 3abfcecf8424..a7c16295b816 100644
--- a/drivers/misc/bmp085-i2c.c
+++ b/drivers/misc/bmp085-i2c.c
@@ -49,7 +49,7 @@ static int bmp085_i2c_probe(struct i2c_client *client,
 		return err;
 	}
 
-	return bmp085_probe(&client->dev, regmap);
+	return bmp085_probe(&client->dev, regmap, client->irq);
 }
 
 static int bmp085_i2c_remove(struct i2c_client *client)
diff --git a/drivers/misc/bmp085-spi.c b/drivers/misc/bmp085-spi.c
index d6a52659cf24..864ecac32373 100644
--- a/drivers/misc/bmp085-spi.c
+++ b/drivers/misc/bmp085-spi.c
@@ -41,7 +41,7 @@ static int bmp085_spi_probe(struct spi_device *client)
 		return err;
 	}
 
-	return bmp085_probe(&client->dev, regmap);
+	return bmp085_probe(&client->dev, regmap, client->irq);
 }
 
 static int bmp085_spi_remove(struct spi_device *client)
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 2704d885a9b3..820e53d0048f 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -49,9 +49,11 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/delay.h>
 #include <linux/of.h>
 #include "bmp085.h"
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/gpio.h>
 
 #define BMP085_CHIP_ID			0x55
 #define BMP085_CALIBRATION_DATA_START	0xAA
@@ -84,8 +86,19 @@ struct bmp085_data {
 	unsigned long last_temp_measurement;
 	u8 chip_id;
 	s32 b6; /* calculated temperature correction coefficient */
+	int irq;
+	struct completion done;
 };
 
+static irqreturn_t bmp085_eoc_isr(int irq, void *devid)
+{
+	struct bmp085_data *data = devid;
+
+	complete(&data->done);
+
+	return IRQ_HANDLED;
+}
+
 static s32 bmp085_read_calibration_data(struct bmp085_data *data)
 {
 	u16 tmp[BMP085_CALIBRATION_DATA_LENGTH];
@@ -116,6 +129,9 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
 	s32 status;
 
 	mutex_lock(&data->lock);
+
+	init_completion(&data->done);
+
 	status = regmap_write(data->regmap, BMP085_CTRL_REG,
 			      BMP085_TEMP_MEASUREMENT);
 	if (status < 0) {
@@ -123,7 +139,8 @@ static s32 bmp085_update_raw_temperature(struct bmp085_data *data)
 			"Error while requesting temperature measurement.\n");
 		goto exit;
 	}
-	msleep(BMP085_TEMP_CONVERSION_TIME);
+	wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies(
+					    BMP085_TEMP_CONVERSION_TIME));
 
 	status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,
 				  &tmp, sizeof(tmp));
@@ -147,6 +164,9 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
 	s32 status;
 
 	mutex_lock(&data->lock);
+
+	init_completion(&data->done);
+
 	status = regmap_write(data->regmap, BMP085_CTRL_REG,
 			      BMP085_PRESSURE_MEASUREMENT +
 			      (data->oversampling_setting << 6));
@@ -157,8 +177,8 @@ static s32 bmp085_update_raw_pressure(struct bmp085_data *data)
 	}
 
 	/* wait for the end of conversion */
-	msleep(2+(3 << data->oversampling_setting));
-
+	wait_for_completion_timeout(&data->done, 1 + msecs_to_jiffies(
+					2+(3 << data->oversampling_setting)));
 	/* copy data into a u32 (4 bytes), but skip the first byte. */
 	status = regmap_bulk_read(data->regmap, BMP085_CONVERSION_REGISTER_MSB,
 				  ((u8 *)&tmp)+1, 3);
@@ -420,7 +440,7 @@ struct regmap_config bmp085_regmap_config = {
 };
 EXPORT_SYMBOL_GPL(bmp085_regmap_config);
 
-int bmp085_probe(struct device *dev, struct regmap *regmap)
+int bmp085_probe(struct device *dev, struct regmap *regmap, int irq)
 {
 	struct bmp085_data *data;
 	int err = 0;
@@ -434,6 +454,15 @@ int bmp085_probe(struct device *dev, struct regmap *regmap)
 	dev_set_drvdata(dev, data);
 	data->dev = dev;
 	data->regmap = regmap;
+	data->irq = irq;
+
+	if (data->irq > 0) {
+		err = devm_request_irq(dev, data->irq, bmp085_eoc_isr,
+				       IRQF_TRIGGER_RISING, "bmp085",
+				       data);
+		if (err < 0)
+			goto exit_free;
+	}
 
 	/* Initialize the BMP085 chip */
 	err = bmp085_init_client(data);
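
The bmp085 conversion above swaps fixed msleep() delays for an end-of-conversion interrupt with a timeout fallback: the ISR completes data->done, and wait_for_completion_timeout() returns early on the interrupt or, on chips without the EOC line wired up, simply expires after the worst-case conversion time. The core of the pattern, condensed into a sketch:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct example_sensor {
	struct completion done;
	int irq;			/* <= 0 when no EOC line is wired */
};

static irqreturn_t example_eoc_isr(int irq, void *devid)
{
	struct example_sensor *s = devid;

	complete(&s->done);		/* conversion finished early */
	return IRQ_HANDLED;
}

static void example_measure(struct example_sensor *s, unsigned int worst_ms)
{
	init_completion(&s->done);
	/* ... trigger the measurement on the device here ... */

	/* Wakes on the EOC interrupt if present, otherwise waits out
	 * the worst-case conversion time, as the driver above does. */
	wait_for_completion_timeout(&s->done,
				    1 + msecs_to_jiffies(worst_ms));
}
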
diff --git a/drivers/misc/bmp085.h b/drivers/misc/bmp085.h
index 2b8f615bca92..8b8e3b1f5ca5 100644
--- a/drivers/misc/bmp085.h
+++ b/drivers/misc/bmp085.h
@@ -26,7 +26,7 @@
 
 extern struct regmap_config bmp085_regmap_config;
 
-int bmp085_probe(struct device *dev, struct regmap *regmap);
+int bmp085_probe(struct device *dev, struct regmap *regmap, int irq);
 int bmp085_remove(struct device *dev);
 int bmp085_detect(struct device *dev);
 
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 3a015abb444a..78e55b501c94 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -378,7 +378,6 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
 	device_remove_file(&spi->dev, &dev_attr_erase);
 
 	sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin);
-	spi_set_drvdata(spi, NULL);
 	kfree(edev);
 	return 0;
 }
diff --git a/drivers/misc/genwqe/Kconfig b/drivers/misc/genwqe/Kconfig
new file mode 100644
index 000000000000..6069d8cd79d7
--- /dev/null
+++ b/drivers/misc/genwqe/Kconfig
@@ -0,0 +1,13 @@
1#
2# IBM Accelerator Family 'GenWQE'
3#
4
5menuconfig GENWQE
6 tristate "GenWQE PCIe Accelerator"
7 depends on PCI && 64BIT
8 select CRC_ITU_T
9 default n
10 help
11 Enables PCIe card driver for IBM GenWQE accelerators.
12 The user-space interface is described in
13 include/linux/genwqe/genwqe_card.h.
diff --git a/drivers/misc/genwqe/Makefile b/drivers/misc/genwqe/Makefile
new file mode 100644
index 000000000000..98a2b4f0b18b
--- /dev/null
+++ b/drivers/misc/genwqe/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for GenWQE driver
3#
4
5obj-$(CONFIG_GENWQE) := genwqe_card.o
6genwqe_card-objs := card_base.o card_dev.o card_ddcb.o card_sysfs.o \
7 card_debugfs.o card_utils.o
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
new file mode 100644
index 000000000000..74d51c9bb858
--- /dev/null
+++ b/drivers/misc/genwqe/card_base.c
@@ -0,0 +1,1205 @@
1/**
2 * IBM Accelerator Family 'GenWQE'
3 *
4 * (C) Copyright IBM Corp. 2013
5 *
6 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
7 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
8 * Author: Michael Jung <mijung@de.ibm.com>
9 * Author: Michael Ruettger <michael@ibmra.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License (version 2 only)
13 * as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21/*
22 * Module initialization and PCIe setup. Card health monitoring and
23 * recovery functionality. Character device creation and deletion are
24 * controlled from here.
25 */
26
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/pci.h>
30#include <linux/err.h>
31#include <linux/aer.h>
32#include <linux/string.h>
33#include <linux/sched.h>
34#include <linux/wait.h>
35#include <linux/delay.h>
36#include <linux/dma-mapping.h>
37#include <linux/module.h>
38#include <linux/notifier.h>
39#include <linux/device.h>
40#include <linux/log2.h>
41#include <linux/genwqe/genwqe_card.h>
42
43#include "card_base.h"
44#include "card_ddcb.h"
45
46MODULE_AUTHOR("Frank Haverkamp <haver@linux.vnet.ibm.com>");
47MODULE_AUTHOR("Michael Ruettger <michael@ibmra.de>");
48MODULE_AUTHOR("Joerg-Stephan Vogt <jsvogt@de.ibm.com>");
49MODULE_AUTHOR("Michael Jung <mijung@de.ibm.com>");
50
51MODULE_DESCRIPTION("GenWQE Card");
52MODULE_VERSION(DRV_VERS_STRING);
53MODULE_LICENSE("GPL");
54
55static char genwqe_driver_name[] = GENWQE_DEVNAME;
56static struct class *class_genwqe;
57static struct dentry *debugfs_genwqe;
58static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX];
59
60/* PCI structure for identifying device by PCI vendor and device ID */
61static DEFINE_PCI_DEVICE_TABLE(genwqe_device_table) = {
62 { .vendor = PCI_VENDOR_ID_IBM,
63 .device = PCI_DEVICE_GENWQE,
64 .subvendor = PCI_SUBVENDOR_ID_IBM,
65 .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
66 .class = (PCI_CLASSCODE_GENWQE5 << 8),
67 .class_mask = ~0,
68 .driver_data = 0 },
69
70 /* Initial SR-IOV bring-up image */
71 { .vendor = PCI_VENDOR_ID_IBM,
72 .device = PCI_DEVICE_GENWQE,
73 .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
74 .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
75 .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
76 .class_mask = ~0,
77 .driver_data = 0 },
78
79 { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */
80 .device = 0x0000, /* VF Device ID */
81 .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
82 .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_SRIOV,
83 .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
84 .class_mask = ~0,
85 .driver_data = 0 },
86
87 /* Fixed up image */
88 { .vendor = PCI_VENDOR_ID_IBM,
89 .device = PCI_DEVICE_GENWQE,
90 .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
91 .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
92 .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
93 .class_mask = ~0,
94 .driver_data = 0 },
95
96 { .vendor = PCI_VENDOR_ID_IBM, /* VF Vendor ID */
97 .device = 0x0000, /* VF Device ID */
98 .subvendor = PCI_SUBVENDOR_ID_IBM_SRIOV,
99 .subdevice = PCI_SUBSYSTEM_ID_GENWQE5,
100 .class = (PCI_CLASSCODE_GENWQE5_SRIOV << 8),
101 .class_mask = ~0,
102 .driver_data = 0 },
103
104 /* Even one more ... */
105 { .vendor = PCI_VENDOR_ID_IBM,
106 .device = PCI_DEVICE_GENWQE,
107 .subvendor = PCI_SUBVENDOR_ID_IBM,
108 .subdevice = PCI_SUBSYSTEM_ID_GENWQE5_NEW,
109 .class = (PCI_CLASSCODE_GENWQE5 << 8),
110 .class_mask = ~0,
111 .driver_data = 0 },
112
113 { 0, } /* 0 terminated list. */
114};
115
116MODULE_DEVICE_TABLE(pci, genwqe_device_table);
117
118/**
119 * genwqe_dev_alloc() - Create and prepare a new card descriptor
120 *
121 * Return: Pointer to card descriptor, or ERR_PTR(err) on error
122 */
123static struct genwqe_dev *genwqe_dev_alloc(void)
124{
125 unsigned int i = 0, j;
126 struct genwqe_dev *cd;
127
128 for (i = 0; i < GENWQE_CARD_NO_MAX; i++) {
129 if (genwqe_devices[i] == NULL)
130 break;
131 }
132 if (i >= GENWQE_CARD_NO_MAX)
133 return ERR_PTR(-ENODEV);
134
135 cd = kzalloc(sizeof(struct genwqe_dev), GFP_KERNEL);
136 if (!cd)
137 return ERR_PTR(-ENOMEM);
138
139 cd->card_idx = i;
140 cd->class_genwqe = class_genwqe;
141 cd->debugfs_genwqe = debugfs_genwqe;
142
143 init_waitqueue_head(&cd->queue_waitq);
144
145 spin_lock_init(&cd->file_lock);
146 INIT_LIST_HEAD(&cd->file_list);
147
148 cd->card_state = GENWQE_CARD_UNUSED;
149 spin_lock_init(&cd->print_lock);
150
151 cd->ddcb_software_timeout = genwqe_ddcb_software_timeout;
152 cd->kill_timeout = genwqe_kill_timeout;
153
154 for (j = 0; j < GENWQE_MAX_VFS; j++)
155 cd->vf_jobtimeout_msec[j] = genwqe_vf_jobtimeout_msec;
156
157 genwqe_devices[i] = cd;
158 return cd;
159}
160
161static void genwqe_dev_free(struct genwqe_dev *cd)
162{
163 if (!cd)
164 return;
165
166 genwqe_devices[cd->card_idx] = NULL;
167 kfree(cd);
168}
169
170/**
171 * genwqe_bus_reset() - Card recovery
172 *
173 * pci_reset_function() will recover the device and ensure that its
174 * registers are accessible again when it completes successfully. If
175 * not, the card will stay dead and its registers will remain
176 * inaccessible.
177 */
178static int genwqe_bus_reset(struct genwqe_dev *cd)
179{
180 int bars, rc = 0;
181 struct pci_dev *pci_dev = cd->pci_dev;
182 void __iomem *mmio;
183
184 if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
185 return -EIO;
186
187 mmio = cd->mmio;
188 cd->mmio = NULL;
189 pci_iounmap(pci_dev, mmio);
190
191 bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
192 pci_release_selected_regions(pci_dev, bars);
193
194 /*
195 * Firmware/BIOS might change memory mapping during bus reset.
196	 * Settings like bus-mastering enable, ... are backed up and
197	 * restored by pci_reset_function().
198 */
199 dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
200 rc = pci_reset_function(pci_dev);
201 if (rc) {
202 dev_err(&pci_dev->dev,
203 "[%s] err: failed reset func (rc %d)\n", __func__, rc);
204 return rc;
205 }
206 dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);
207
208 /*
209 * Here is the right spot to clear the register read
210 * failure. pci_bus_reset() does this job in real systems.
211 */
212 cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
213 GENWQE_INJECT_GFIR_FATAL |
214 GENWQE_INJECT_GFIR_INFO);
215
216 rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
217 if (rc) {
218 dev_err(&pci_dev->dev,
219 "[%s] err: request bars failed (%d)\n", __func__, rc);
220 return -EIO;
221 }
222
223 cd->mmio = pci_iomap(pci_dev, 0, 0);
224 if (cd->mmio == NULL) {
225 dev_err(&pci_dev->dev,
226 "[%s] err: mapping BAR0 failed\n", __func__);
227 return -ENOMEM;
228 }
229 return 0;
230}
231
232/*
233 * Hardware circumvention section. Certain bitstreams in our test-lab
234 * had different kinds of problems. Here is where we adjust those
 235 * bitstreams to function well with this version of our device driver.
 236 *
 237 * These circumventions are applied to the physical function only.
 238 * The magic numbers below identify development/manufacturing
 239 * versions of the bitstream used on the card.
240 *
241 * Turn off error reporting for old/manufacturing images.
242 */
243
244bool genwqe_need_err_masking(struct genwqe_dev *cd)
245{
246 return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
247}
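
/*
 * Worked example for the version check above (the register value is
 * hypothetical; only the 0xFFFF0 mask and the 0x32170 threshold are
 * taken from the code): for cd->slu_unitcfg = 0x...00032150,
 * (slu_unitcfg & 0xFFFF0ull) yields 0x32150 < 0x32170, so such an
 * image would get its error reporting masked.
 */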
248
249static void genwqe_tweak_hardware(struct genwqe_dev *cd)
250{
251 struct pci_dev *pci_dev = cd->pci_dev;
252
253 /* Mask FIRs for development images */
254 if (((cd->slu_unitcfg & 0xFFFF0ull) >= 0x32000ull) &&
255 ((cd->slu_unitcfg & 0xFFFF0ull) <= 0x33250ull)) {
256 dev_warn(&pci_dev->dev,
257 "FIRs masked due to bitstream %016llx.%016llx\n",
258 cd->slu_unitcfg, cd->app_unitcfg);
259
260 __genwqe_writeq(cd, IO_APP_SEC_LEM_DEBUG_OVR,
261 0xFFFFFFFFFFFFFFFFull);
262
263 __genwqe_writeq(cd, IO_APP_ERR_ACT_MASK,
264 0x0000000000000000ull);
265 }
266}
267
268/**
 269 * genwqe_recovery_on_fatal_gfir_required() - Version dependent actions
270 *
271 * Bitstreams older than 2013-02-17 have a bug where fatal GFIRs must
272 * be ignored. This is e.g. true for the bitstream we gave to the card
273 * manufacturer, but also for some old bitstreams we released to our
274 * test-lab.
275 */
276int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd)
277{
278 return (cd->slu_unitcfg & 0xFFFF0ull) >= 0x32170ull;
279}
280
281int genwqe_flash_readback_fails(struct genwqe_dev *cd)
282{
283 return (cd->slu_unitcfg & 0xFFFF0ull) < 0x32170ull;
284}
285
286/**
287 * genwqe_T_psec() - Calculate PF/VF timeout register content
288 *
289 * Note: From a design perspective it turned out to be a bad idea to
 290 * use codes here to specify the frequency/speed values. An old
 291 * driver cannot understand new codes and is therefore always a
 292 * problem. It is better to measure the value or to put the
293 * speed/frequency directly into a register which is always a valid
294 * value for old as well as for new software.
295 */
296/* T = 1/f */
297static int genwqe_T_psec(struct genwqe_dev *cd)
298{
299 u16 speed; /* 1/f -> 250, 200, 166, 175 */
300 static const int T[] = { 4000, 5000, 6000, 5714 };
301
302 speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
303 if (speed >= ARRAY_SIZE(T))
304 return -1; /* illegal value */
305
306 return T[speed];
307}
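
/*
 * Worked mapping for the table above (derived from the comments in
 * this file, not from hardware documentation): speed code 0 selects
 * 250 MHz, hence T = 1/f = 4000 ps; code 2 selects 166 MHz, i.e.
 * T ~= 6024 ps, stored as the 6000 ps ("6ns") value used in the
 * timeout comments below. Codes 4..15 have no table entry and make
 * the function return -1.
 */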
308
309/**
310 * genwqe_setup_pf_jtimer() - Setup PF hardware timeouts for DDCB execution
311 *
312 * Do this _after_ card_reset() is called. Otherwise the values will
313 * vanish. The settings need to be done when the queues are inactive.
314 *
315 * The max. timeout value is 2^(10+x) * T (6ns for 166MHz) * 15/16.
316 * The min. timeout value is 2^(10+x) * T (6ns for 166MHz) * 14/16.
317 */
318static bool genwqe_setup_pf_jtimer(struct genwqe_dev *cd)
319{
320 u32 T = genwqe_T_psec(cd);
321 u64 x;
322
323 if (genwqe_pf_jobtimeout_msec == 0)
324 return false;
325
326 /* PF: large value needed, flash update 2sec per block */
327 x = ilog2(genwqe_pf_jobtimeout_msec *
328 16000000000uL/(T * 15)) - 10;
329
330 genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
331 0xff00 | (x & 0xff), 0);
332 return true;
333}
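
/*
 * Worked example for the exponent computation above, using the module
 * defaults (illustrative only): with genwqe_pf_jobtimeout_msec = 8000
 * and T = 6000 ps (166 MHz), solving
 *
 *   timeout = 2^(10+x) * T * 15/16
 *
 * for x gives
 *
 *   x = ilog2(8000 * 16000000000 / (6000 * 15)) - 10
 *     = ilog2(1422222222) - 10 = 30 - 10 = 20,
 *
 * so 0xff00 | 20 would be written to IO_SLC_VF_APPJOB_TIMEOUT.
 */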
334
335/**
336 * genwqe_setup_vf_jtimer() - Setup VF hardware timeouts for DDCB execution
337 */
338static bool genwqe_setup_vf_jtimer(struct genwqe_dev *cd)
339{
340 struct pci_dev *pci_dev = cd->pci_dev;
341 unsigned int vf;
342 u32 T = genwqe_T_psec(cd);
343 u64 x;
344
345 for (vf = 0; vf < pci_sriov_get_totalvfs(pci_dev); vf++) {
346
347 if (cd->vf_jobtimeout_msec[vf] == 0)
348 continue;
349
350 x = ilog2(cd->vf_jobtimeout_msec[vf] *
351 16000000000uL/(T * 15)) - 10;
352
353 genwqe_write_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
354 0xff00 | (x & 0xff), vf + 1);
355 }
356 return true;
357}
358
359static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd)
360{
361 unsigned int type, e = 0;
362
363 for (type = 0; type < GENWQE_DBG_UNITS; type++) {
364 switch (type) {
365 case GENWQE_DBG_UNIT0:
366 e = genwqe_ffdc_buff_size(cd, 0);
367 break;
368 case GENWQE_DBG_UNIT1:
369 e = genwqe_ffdc_buff_size(cd, 1);
370 break;
371 case GENWQE_DBG_UNIT2:
372 e = genwqe_ffdc_buff_size(cd, 2);
373 break;
374 case GENWQE_DBG_REGS:
375 e = GENWQE_FFDC_REGS;
376 break;
377 }
378
379 /* currently support only the debug units mentioned here */
380 cd->ffdc[type].entries = e;
381 cd->ffdc[type].regs = kmalloc(e * sizeof(struct genwqe_reg),
382 GFP_KERNEL);
383 /*
 384 * regs == NULL is ok; the code using it treats this as "no regs"
 385 * and printing a warning is ok in this case.
386 */
387 }
388 return 0;
389}
390
391static void genwqe_ffdc_buffs_free(struct genwqe_dev *cd)
392{
393 unsigned int type;
394
395 for (type = 0; type < GENWQE_DBG_UNITS; type++) {
396 kfree(cd->ffdc[type].regs);
397 cd->ffdc[type].regs = NULL;
398 }
399}
400
401static int genwqe_read_ids(struct genwqe_dev *cd)
402{
403 int err = 0;
404 int slu_id;
405 struct pci_dev *pci_dev = cd->pci_dev;
406
407 cd->slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
408 if (cd->slu_unitcfg == IO_ILLEGAL_VALUE) {
409 dev_err(&pci_dev->dev,
410 "err: SLUID=%016llx\n", cd->slu_unitcfg);
411 err = -EIO;
412 goto out_err;
413 }
414
415 slu_id = genwqe_get_slu_id(cd);
416 if (slu_id < GENWQE_SLU_ARCH_REQ || slu_id == 0xff) {
417 dev_err(&pci_dev->dev,
418 "err: incompatible SLU Architecture %u\n", slu_id);
419 err = -ENOENT;
420 goto out_err;
421 }
422
423 cd->app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
424 if (cd->app_unitcfg == IO_ILLEGAL_VALUE) {
425 dev_err(&pci_dev->dev,
426 "err: APPID=%016llx\n", cd->app_unitcfg);
427 err = -EIO;
428 goto out_err;
429 }
430 genwqe_read_app_id(cd, cd->app_name, sizeof(cd->app_name));
431
432 /*
433 * Is access to all registers possible? If we are a VF the
434 * answer is obvious. If we run fully virtualized, we need to
435 * check if we can access all registers. If we do not have
436 * full access we will cause an UR and some informational FIRs
437 * in the PF, but that should not harm.
438 */
439 if (pci_dev->is_virtfn)
440 cd->is_privileged = 0;
441 else
442 cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
443 != IO_ILLEGAL_VALUE);
444
445 out_err:
446 return err;
447}
448
449static int genwqe_start(struct genwqe_dev *cd)
450{
451 int err;
452 struct pci_dev *pci_dev = cd->pci_dev;
453
454 err = genwqe_read_ids(cd);
455 if (err)
456 return err;
457
458 if (genwqe_is_privileged(cd)) {
459 /* do this after the tweaks. alloc fail is acceptable */
460 genwqe_ffdc_buffs_alloc(cd);
461 genwqe_stop_traps(cd);
462
463 /* Collect registers e.g. FIRs, UNITIDs, traces ... */
464 genwqe_read_ffdc_regs(cd, cd->ffdc[GENWQE_DBG_REGS].regs,
465 cd->ffdc[GENWQE_DBG_REGS].entries, 0);
466
467 genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT0,
468 cd->ffdc[GENWQE_DBG_UNIT0].regs,
469 cd->ffdc[GENWQE_DBG_UNIT0].entries);
470
471 genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT1,
472 cd->ffdc[GENWQE_DBG_UNIT1].regs,
473 cd->ffdc[GENWQE_DBG_UNIT1].entries);
474
475 genwqe_ffdc_buff_read(cd, GENWQE_DBG_UNIT2,
476 cd->ffdc[GENWQE_DBG_UNIT2].regs,
477 cd->ffdc[GENWQE_DBG_UNIT2].entries);
478
479 genwqe_start_traps(cd);
480
481 if (cd->card_state == GENWQE_CARD_FATAL_ERROR) {
482 dev_warn(&pci_dev->dev,
483 "[%s] chip reload/recovery!\n", __func__);
484
485 /*
486 * Stealth Mode: Reload chip on either hot
487 * reset or PERST.
488 */
489 cd->softreset = 0x7Cull;
490 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET,
491 cd->softreset);
492
493 err = genwqe_bus_reset(cd);
494 if (err != 0) {
495 dev_err(&pci_dev->dev,
496 "[%s] err: bus reset failed!\n",
497 __func__);
498 goto out;
499 }
500
501 /*
502 * Re-read the IDs because
503 * it could happen that the bitstream load
504 * failed!
505 */
506 err = genwqe_read_ids(cd);
507 if (err)
508 goto out;
509 }
510 }
511
512 err = genwqe_setup_service_layer(cd); /* does a reset to the card */
513 if (err != 0) {
514 dev_err(&pci_dev->dev,
 515 "[%s] err: could not setup service layer!\n", __func__);
516 err = -ENODEV;
517 goto out;
518 }
519
520 if (genwqe_is_privileged(cd)) { /* code is running _after_ reset */
521 genwqe_tweak_hardware(cd);
522
523 genwqe_setup_pf_jtimer(cd);
524 genwqe_setup_vf_jtimer(cd);
525 }
526
527 err = genwqe_device_create(cd);
528 if (err < 0) {
529 dev_err(&pci_dev->dev,
530 "err: chdev init failed! (err=%d)\n", err);
531 goto out_release_service_layer;
532 }
533 return 0;
534
535 out_release_service_layer:
536 genwqe_release_service_layer(cd);
537 out:
538 if (genwqe_is_privileged(cd))
539 genwqe_ffdc_buffs_free(cd);
540 return -EIO;
541}
542
543/**
544 * genwqe_stop() - Stop card operation
545 *
546 * Recovery notes:
547 * As long as genwqe_thread runs we might access registers during
 548 * error data capture. The same is true for the genwqe_health_thread.
 549 * When genwqe_bus_reset() fails this function might be called twice:
550 * first by the genwqe_health_thread() and later by genwqe_remove() to
551 * unbind the device. We must be able to survive that.
552 *
553 * This function must be robust enough to be called twice.
554 */
555static int genwqe_stop(struct genwqe_dev *cd)
556{
557 genwqe_finish_queue(cd); /* no register access */
558 genwqe_device_remove(cd); /* device removed, procs killed */
559 genwqe_release_service_layer(cd); /* here genwqe_thread is stopped */
560
561 if (genwqe_is_privileged(cd)) {
562 pci_disable_sriov(cd->pci_dev); /* access pci config space */
563 genwqe_ffdc_buffs_free(cd);
564 }
565
566 return 0;
567}
568
569/**
570 * genwqe_recover_card() - Try to recover the card if it is possible
571 *
572 * If fatal_err is set no register access is possible anymore. It is
573 * likely that genwqe_start fails in that situation. Proper error
574 * handling is required in this case.
575 *
576 * genwqe_bus_reset() will cause the pci code to call genwqe_remove()
577 * and later genwqe_probe() for all virtual functions.
578 */
579static int genwqe_recover_card(struct genwqe_dev *cd, int fatal_err)
580{
581 int rc;
582 struct pci_dev *pci_dev = cd->pci_dev;
583
584 genwqe_stop(cd);
585
586 /*
587 * Make sure chip is not reloaded to maintain FFDC. Write SLU
588 * Reset Register, CPLDReset field to 0.
589 */
590 if (!fatal_err) {
591 cd->softreset = 0x70ull;
592 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
593 }
594
595 rc = genwqe_bus_reset(cd);
596 if (rc != 0) {
597 dev_err(&pci_dev->dev,
598 "[%s] err: card recovery impossible!\n", __func__);
599 return rc;
600 }
601
602 rc = genwqe_start(cd);
603 if (rc < 0) {
604 dev_err(&pci_dev->dev,
605 "[%s] err: failed to launch device!\n", __func__);
606 return rc;
607 }
608 return 0;
609}
610
611static int genwqe_health_check_cond(struct genwqe_dev *cd, u64 *gfir)
612{
613 *gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
614 return (*gfir & GFIR_ERR_TRIGGER) &&
615 genwqe_recovery_on_fatal_gfir_required(cd);
616}
617
618/**
619 * genwqe_fir_checking() - Check the fault isolation registers of the card
620 *
 621 * If this code works ok, it can be tried out with the genwqe_poke tool:
622 * sudo ./tools/genwqe_poke 0x8 0xfefefefefef
623 *
624 * Now the relevant FIRs/sFIRs should be printed out and the driver should
 625 * invoke recovery (devices are removed and re-added).
626 */
627static u64 genwqe_fir_checking(struct genwqe_dev *cd)
628{
629 int j, iterations = 0;
630 u64 mask, fir, fec, uid, gfir, gfir_masked, sfir, sfec;
631 u32 fir_addr, fir_clr_addr, fec_addr, sfir_addr, sfec_addr;
632 struct pci_dev *pci_dev = cd->pci_dev;
633
634 healthMonitor:
635 iterations++;
636 if (iterations > 16) {
637 dev_err(&pci_dev->dev, "* exit looping after %d times\n",
638 iterations);
639 goto fatal_error;
640 }
641
642 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
643 if (gfir != 0x0)
644 dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n",
645 IO_SLC_CFGREG_GFIR, gfir);
646 if (gfir == IO_ILLEGAL_VALUE)
647 goto fatal_error;
648
649 /*
 650 * Avoid printing when no GFIR bit is on; this prevents continuous
 651 * printout e.g. for the following bug:
 652 * FIR set without a 2ndary FIR/FIR cannot be cleared
 653 * Comment out the following 'if' to get the prints:
654 */
655 if (gfir == 0)
656 return 0;
657
658 gfir_masked = gfir & GFIR_ERR_TRIGGER; /* fatal errors */
659
660 for (uid = 0; uid < GENWQE_MAX_UNITS; uid++) { /* 0..2 in zEDC */
661
662 /* read the primary FIR (pfir) */
663 fir_addr = (uid << 24) + 0x08;
664 fir = __genwqe_readq(cd, fir_addr);
665 if (fir == 0x0)
666 continue; /* no error in this unit */
667
668 dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fir_addr, fir);
669 if (fir == IO_ILLEGAL_VALUE)
670 goto fatal_error;
671
672 /* read primary FEC */
673 fec_addr = (uid << 24) + 0x18;
674 fec = __genwqe_readq(cd, fec_addr);
675
676 dev_err(&pci_dev->dev, "* 0x%08x 0x%016llx\n", fec_addr, fec);
677 if (fec == IO_ILLEGAL_VALUE)
678 goto fatal_error;
679
680 for (j = 0, mask = 1ULL; j < 64; j++, mask <<= 1) {
681
682 /* secondary fir empty, skip it */
683 if ((fir & mask) == 0x0)
684 continue;
685
686 sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
687 sfir = __genwqe_readq(cd, sfir_addr);
688
689 if (sfir == IO_ILLEGAL_VALUE)
690 goto fatal_error;
691 dev_err(&pci_dev->dev,
692 "* 0x%08x 0x%016llx\n", sfir_addr, sfir);
693
694 sfec_addr = (uid << 24) + 0x300 + 0x08 * j;
695 sfec = __genwqe_readq(cd, sfec_addr);
696
697 if (sfec == IO_ILLEGAL_VALUE)
698 goto fatal_error;
699 dev_err(&pci_dev->dev,
700 "* 0x%08x 0x%016llx\n", sfec_addr, sfec);
701
702 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
703 if (gfir == IO_ILLEGAL_VALUE)
704 goto fatal_error;
705
706 /* gfir turned on during routine! get out and
707 start over. */
708 if ((gfir_masked == 0x0) &&
709 (gfir & GFIR_ERR_TRIGGER)) {
710 goto healthMonitor;
711 }
712
713 /* do not clear if we entered with a fatal gfir */
714 if (gfir_masked == 0x0) {
715
716 /* NEW clear by mask the logged bits */
717 sfir_addr = (uid << 24) + 0x100 + 0x08 * j;
718 __genwqe_writeq(cd, sfir_addr, sfir);
719
720 dev_dbg(&pci_dev->dev,
721 "[HM] Clearing 2ndary FIR 0x%08x "
722 "with 0x%016llx\n", sfir_addr, sfir);
723
724 /*
 725 * Note, these cannot be error-FIRs
726 * since gfir_masked is 0 after sfir
727 * was read. Also, it is safe to do
728 * this write if sfir=0. Still need to
729 * clear the primary. This just means
730 * there is no secondary FIR.
731 */
732
733 /* clear by mask the logged bit. */
734 fir_clr_addr = (uid << 24) + 0x10;
735 __genwqe_writeq(cd, fir_clr_addr, mask);
736
737 dev_dbg(&pci_dev->dev,
738 "[HM] Clearing primary FIR 0x%08x "
739 "with 0x%016llx\n", fir_clr_addr,
740 mask);
741 }
742 }
743 }
744 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
745 if (gfir == IO_ILLEGAL_VALUE)
746 goto fatal_error;
747
748 if ((gfir_masked == 0x0) && (gfir & GFIR_ERR_TRIGGER)) {
749 /*
750 * Check once more that it didn't go on after all the
751 * FIRS were cleared.
752 */
753 dev_dbg(&pci_dev->dev, "ACK! Another FIR! Recursing %d!\n",
754 iterations);
755 goto healthMonitor;
756 }
757 return gfir_masked;
758
759 fatal_error:
760 return IO_ILLEGAL_VALUE;
761}
762
763/**
764 * genwqe_health_thread() - Health checking thread
765 *
766 * This thread is only started for the PF of the card.
767 *
768 * This thread monitors the health of the card. A critical situation
769 * is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In
 770 * this case the card needs to be recovered from outside. Writing to
771 * registers will very likely not work either.
772 *
773 * This thread must only exit if kthread_should_stop() becomes true.
774 *
775 * Condition for the health-thread to trigger:
776 * a) when a kthread_stop() request comes in or
 777 * b) a critical GFIR occurred
778 *
 779 * Informational GFIRs are checked and potentially printed every
 780 * health_check_interval seconds.
781 */
782static int genwqe_health_thread(void *data)
783{
784 int rc, should_stop = 0;
785 struct genwqe_dev *cd = data;
786 struct pci_dev *pci_dev = cd->pci_dev;
787 u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg;
788
789 while (!kthread_should_stop()) {
790 rc = wait_event_interruptible_timeout(cd->health_waitq,
791 (genwqe_health_check_cond(cd, &gfir) ||
792 (should_stop = kthread_should_stop())),
793 genwqe_health_check_interval * HZ);
794
795 if (should_stop)
796 break;
797
798 if (gfir == IO_ILLEGAL_VALUE) {
799 dev_err(&pci_dev->dev,
800 "[%s] GFIR=%016llx\n", __func__, gfir);
801 goto fatal_error;
802 }
803
804 slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG);
805 if (slu_unitcfg == IO_ILLEGAL_VALUE) {
806 dev_err(&pci_dev->dev,
807 "[%s] SLU_UNITCFG=%016llx\n",
808 __func__, slu_unitcfg);
809 goto fatal_error;
810 }
811
812 app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG);
813 if (app_unitcfg == IO_ILLEGAL_VALUE) {
814 dev_err(&pci_dev->dev,
815 "[%s] APP_UNITCFG=%016llx\n",
816 __func__, app_unitcfg);
817 goto fatal_error;
818 }
819
820 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
821 if (gfir == IO_ILLEGAL_VALUE) {
822 dev_err(&pci_dev->dev,
823 "[%s] %s: GFIR=%016llx\n", __func__,
824 (gfir & GFIR_ERR_TRIGGER) ? "err" : "info",
825 gfir);
826 goto fatal_error;
827 }
828
829 gfir_masked = genwqe_fir_checking(cd);
830 if (gfir_masked == IO_ILLEGAL_VALUE)
831 goto fatal_error;
832
833 /*
834 * GFIR ErrorTrigger bits set => reset the card!
835 * Never do this for old/manufacturing images!
836 */
837 if ((gfir_masked) && !cd->skip_recovery &&
838 genwqe_recovery_on_fatal_gfir_required(cd)) {
839
840 cd->card_state = GENWQE_CARD_FATAL_ERROR;
841
842 rc = genwqe_recover_card(cd, 0);
843 if (rc < 0) {
844 /* FIXME Card is unusable and needs unbind! */
845 goto fatal_error;
846 }
847 }
848
849 cd->last_gfir = gfir;
850 cond_resched();
851 }
852
853 return 0;
854
855 fatal_error:
856 dev_err(&pci_dev->dev,
857 "[%s] card unusable. Please trigger unbind!\n", __func__);
858
859 /* Bring down logical devices to inform user space via udev remove. */
860 cd->card_state = GENWQE_CARD_FATAL_ERROR;
861 genwqe_stop(cd);
862
 863 /* genwqe_bus_reset() failed. Now wait for genwqe_remove(). */
864 while (!kthread_should_stop())
865 cond_resched();
866
867 return -EIO;
868}
869
870static int genwqe_health_check_start(struct genwqe_dev *cd)
871{
872 int rc;
873
874 if (genwqe_health_check_interval <= 0)
875 return 0; /* valid for disabling the service */
876
877 /* moved before request_irq() */
878 /* init_waitqueue_head(&cd->health_waitq); */
879
880 cd->health_thread = kthread_run(genwqe_health_thread, cd,
881 GENWQE_DEVNAME "%d_health",
882 cd->card_idx);
883 if (IS_ERR(cd->health_thread)) {
884 rc = PTR_ERR(cd->health_thread);
885 cd->health_thread = NULL;
886 return rc;
887 }
888 return 0;
889}
890
891static int genwqe_health_thread_running(struct genwqe_dev *cd)
892{
893 return cd->health_thread != NULL;
894}
895
896static int genwqe_health_check_stop(struct genwqe_dev *cd)
897{
898 int rc;
899
900 if (!genwqe_health_thread_running(cd))
901 return -EIO;
902
903 rc = kthread_stop(cd->health_thread);
904 cd->health_thread = NULL;
905 return 0;
906}
907
908/**
909 * genwqe_pci_setup() - Allocate PCIe related resources for our card
910 */
911static int genwqe_pci_setup(struct genwqe_dev *cd)
912{
913 int err, bars;
914 struct pci_dev *pci_dev = cd->pci_dev;
915
916 bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
917 err = pci_enable_device_mem(pci_dev);
918 if (err) {
919 dev_err(&pci_dev->dev,
920 "err: failed to enable pci memory (err=%d)\n", err);
921 goto err_out;
922 }
923
924 /* Reserve PCI I/O and memory resources */
925 err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
926 if (err) {
927 dev_err(&pci_dev->dev,
928 "[%s] err: request bars failed (%d)\n", __func__, err);
929 err = -EIO;
930 goto err_disable_device;
931 }
932
933 /* check for 64-bit DMA address supported (DAC) */
934 if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
935 err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
936 if (err) {
937 dev_err(&pci_dev->dev,
938 "err: DMA64 consistent mask error\n");
939 err = -EIO;
940 goto out_release_resources;
941 }
942 /* check for 32-bit DMA address supported (SAC) */
943 } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
944 err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
945 if (err) {
946 dev_err(&pci_dev->dev,
947 "err: DMA32 consistent mask error\n");
948 err = -EIO;
949 goto out_release_resources;
950 }
951 } else {
952 dev_err(&pci_dev->dev,
953 "err: neither DMA32 nor DMA64 supported\n");
954 err = -EIO;
955 goto out_release_resources;
956 }
957
958 pci_set_master(pci_dev);
959 pci_enable_pcie_error_reporting(pci_dev);
960
961 /* request complete BAR-0 space (length = 0) */
962 cd->mmio_len = pci_resource_len(pci_dev, 0);
963 cd->mmio = pci_iomap(pci_dev, 0, 0);
964 if (cd->mmio == NULL) {
965 dev_err(&pci_dev->dev,
966 "[%s] err: mapping BAR0 failed\n", __func__);
967 err = -ENOMEM;
968 goto out_release_resources;
969 }
970
971 cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);
972
973 err = genwqe_read_ids(cd);
974 if (err)
975 goto out_iounmap;
976
977 return 0;
978
979 out_iounmap:
980 pci_iounmap(pci_dev, cd->mmio);
981 out_release_resources:
982 pci_release_selected_regions(pci_dev, bars);
983 err_disable_device:
984 pci_disable_device(pci_dev);
985 err_out:
986 return err;
987}
988
989/**
990 * genwqe_pci_remove() - Free PCIe related resources for our card
991 */
992static void genwqe_pci_remove(struct genwqe_dev *cd)
993{
994 int bars;
995 struct pci_dev *pci_dev = cd->pci_dev;
996
997 if (cd->mmio)
998 pci_iounmap(pci_dev, cd->mmio);
999
1000 bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
1001 pci_release_selected_regions(pci_dev, bars);
1002 pci_disable_device(pci_dev);
1003}
1004
1005/**
1006 * genwqe_probe() - Device initialization
 1007 * @pci_dev: PCI device information struct
1008 *
1009 * Callable for multiple cards. This function is called on bind.
1010 *
1011 * Return: 0 if succeeded, < 0 when failed
1012 */
1013static int genwqe_probe(struct pci_dev *pci_dev,
1014 const struct pci_device_id *id)
1015{
1016 int err;
1017 struct genwqe_dev *cd;
1018
1019 genwqe_init_crc32();
1020
1021 cd = genwqe_dev_alloc();
1022 if (IS_ERR(cd)) {
1023 dev_err(&pci_dev->dev, "err: could not alloc mem (err=%d)!\n",
1024 (int)PTR_ERR(cd));
1025 return PTR_ERR(cd);
1026 }
1027
1028 dev_set_drvdata(&pci_dev->dev, cd);
1029 cd->pci_dev = pci_dev;
1030
1031 err = genwqe_pci_setup(cd);
1032 if (err < 0) {
1033 dev_err(&pci_dev->dev,
1034 "err: problems with PCI setup (err=%d)\n", err);
1035 goto out_free_dev;
1036 }
1037
1038 err = genwqe_start(cd);
1039 if (err < 0) {
1040 dev_err(&pci_dev->dev,
1041 "err: cannot start card services! (err=%d)\n", err);
1042 goto out_pci_remove;
1043 }
1044
1045 if (genwqe_is_privileged(cd)) {
1046 err = genwqe_health_check_start(cd);
1047 if (err < 0) {
1048 dev_err(&pci_dev->dev,
1049 "err: cannot start health checking! "
1050 "(err=%d)\n", err);
1051 goto out_stop_services;
1052 }
1053 }
1054 return 0;
1055
1056 out_stop_services:
1057 genwqe_stop(cd);
1058 out_pci_remove:
1059 genwqe_pci_remove(cd);
1060 out_free_dev:
1061 genwqe_dev_free(cd);
1062 return err;
1063}
1064
1065/**
 1066 * genwqe_remove() - Called when device is removed (hot-pluggable)
 1067 *
 1068 * Or when the driver is unloaded or an unbind is done.
1069 */
1070static void genwqe_remove(struct pci_dev *pci_dev)
1071{
1072 struct genwqe_dev *cd = dev_get_drvdata(&pci_dev->dev);
1073
1074 genwqe_health_check_stop(cd);
1075
1076 /*
1077 * genwqe_stop() must survive if it is called twice
1078 * sequentially. This happens when the health thread calls it
1079 * and fails on genwqe_bus_reset().
1080 */
1081 genwqe_stop(cd);
1082 genwqe_pci_remove(cd);
1083 genwqe_dev_free(cd);
1084}
1085
1086/*
1087 * genwqe_err_error_detected() - Error detection callback
1088 *
1089 * This callback is called by the PCI subsystem whenever a PCI bus
1090 * error is detected.
1091 */
1092static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev,
1093 enum pci_channel_state state)
1094{
1095 struct genwqe_dev *cd;
1096
1097 dev_err(&pci_dev->dev, "[%s] state=%d\n", __func__, state);
1098
1099 if (pci_dev == NULL)
1100 return PCI_ERS_RESULT_NEED_RESET;
1101
1102 cd = dev_get_drvdata(&pci_dev->dev);
1103 if (cd == NULL)
1104 return PCI_ERS_RESULT_NEED_RESET;
1105
1106 switch (state) {
1107 case pci_channel_io_normal:
1108 return PCI_ERS_RESULT_CAN_RECOVER;
1109 case pci_channel_io_frozen:
1110 return PCI_ERS_RESULT_NEED_RESET;
1111 case pci_channel_io_perm_failure:
1112 return PCI_ERS_RESULT_DISCONNECT;
1113 }
1114
1115 return PCI_ERS_RESULT_NEED_RESET;
1116}
1117
1118static pci_ers_result_t genwqe_err_result_none(struct pci_dev *dev)
1119{
1120 return PCI_ERS_RESULT_NONE;
1121}
1122
1123static void genwqe_err_resume(struct pci_dev *dev)
1124{
1125}
1126
1127static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
1128{
1129 struct genwqe_dev *cd = dev_get_drvdata(&dev->dev);
1130
1131 if (numvfs > 0) {
1132 genwqe_setup_vf_jtimer(cd);
1133 pci_enable_sriov(dev, numvfs);
1134 return numvfs;
1135 }
1136 if (numvfs == 0) {
1137 pci_disable_sriov(dev);
1138 return 0;
1139 }
1140 return 0;
1141}
1142
1143static struct pci_error_handlers genwqe_err_handler = {
1144 .error_detected = genwqe_err_error_detected,
1145 .mmio_enabled = genwqe_err_result_none,
1146 .link_reset = genwqe_err_result_none,
1147 .slot_reset = genwqe_err_result_none,
1148 .resume = genwqe_err_resume,
1149};
1150
1151static struct pci_driver genwqe_driver = {
1152 .name = genwqe_driver_name,
1153 .id_table = genwqe_device_table,
1154 .probe = genwqe_probe,
1155 .remove = genwqe_remove,
1156 .sriov_configure = genwqe_sriov_configure,
1157 .err_handler = &genwqe_err_handler,
1158};
1159
1160/**
1161 * genwqe_init_module() - Driver registration and initialization
1162 */
1163static int __init genwqe_init_module(void)
1164{
1165 int rc;
1166
1167 class_genwqe = class_create(THIS_MODULE, GENWQE_DEVNAME);
1168 if (IS_ERR(class_genwqe)) {
1169 pr_err("[%s] create class failed\n", __func__);
1170 return -ENOMEM;
1171 }
1172
1173 debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL);
1174 if (!debugfs_genwqe) {
1175 rc = -ENOMEM;
1176 goto err_out;
1177 }
1178
1179 rc = pci_register_driver(&genwqe_driver);
1180 if (rc != 0) {
1181 pr_err("[%s] pci_reg_driver (rc=%d)\n", __func__, rc);
1182 goto err_out0;
1183 }
1184
1185 return rc;
1186
1187 err_out0:
1188 debugfs_remove(debugfs_genwqe);
1189 err_out:
1190 class_destroy(class_genwqe);
1191 return rc;
1192}
1193
1194/**
1195 * genwqe_exit_module() - Driver exit
1196 */
1197static void __exit genwqe_exit_module(void)
1198{
1199 pci_unregister_driver(&genwqe_driver);
1200 debugfs_remove(debugfs_genwqe);
1201 class_destroy(class_genwqe);
1202}
1203
1204module_init(genwqe_init_module);
1205module_exit(genwqe_exit_module);
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
new file mode 100644
index 000000000000..5e4dbd21f89a
--- /dev/null
+++ b/drivers/misc/genwqe/card_base.h
@@ -0,0 +1,557 @@
1#ifndef __CARD_BASE_H__
2#define __CARD_BASE_H__
3
4/**
5 * IBM Accelerator Family 'GenWQE'
6 *
7 * (C) Copyright IBM Corp. 2013
8 *
9 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
10 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
11 * Author: Michael Jung <mijung@de.ibm.com>
12 * Author: Michael Ruettger <michael@ibmra.de>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License (version 2 only)
16 * as published by the Free Software Foundation.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 */
23
24/*
25 * Interfaces within the GenWQE module. Defines genwqe_card and
26 * ddcb_queue as well as ddcb_requ.
27 */
28
29#include <linux/kernel.h>
30#include <linux/types.h>
31#include <linux/cdev.h>
32#include <linux/stringify.h>
33#include <linux/pci.h>
34#include <linux/semaphore.h>
35#include <linux/uaccess.h>
36#include <linux/io.h>
37#include <linux/version.h>
38#include <linux/debugfs.h>
39#include <linux/slab.h>
40
41#include <linux/genwqe/genwqe_card.h>
42#include "genwqe_driver.h"
43
44#define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */
45#define GENWQE_FLAG_MSI_ENABLED (1 << 0)
46
47#define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */
48#define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */
49#define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS)
50
51/* Compile parameters, some of them appear in debugfs for later adjustment */
52#define genwqe_ddcb_max 32 /* DDCBs on the work-queue */
53#define genwqe_polling_enabled 0 /* in case of irqs not working */
54#define genwqe_ddcb_software_timeout 10 /* timeout per DDCB in seconds */
55#define genwqe_kill_timeout 8 /* time until process gets killed */
56#define genwqe_vf_jobtimeout_msec 250 /* 250 msec */
57#define genwqe_pf_jobtimeout_msec 8000 /* 8 sec should be ok */
58#define genwqe_health_check_interval 4 /* <= 0: disabled */
59
60/* Sysfs attribute groups used when we create the genwqe device */
61extern const struct attribute_group *genwqe_attribute_groups[];
62
63/*
64 * Config space for Genwqe5 A7:
65 * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00
66 * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00
67 * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04]
68 * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00
69 */
70#define PCI_DEVICE_GENWQE 0x044b /* Genwqe DeviceID */
71
72#define PCI_SUBSYSTEM_ID_GENWQE5 0x035f /* Genwqe A5 Subsystem-ID */
73#define PCI_SUBSYSTEM_ID_GENWQE5_NEW 0x044b /* Genwqe A5 Subsystem-ID */
74#define PCI_CLASSCODE_GENWQE5 0x1200 /* UNKNOWN */
75
76#define PCI_SUBVENDOR_ID_IBM_SRIOV 0x0000
77#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000 /* Genwqe A5 Subsystem-ID */
78#define PCI_CLASSCODE_GENWQE5_SRIOV 0x1200 /* UNKNOWN */
79
80#define GENWQE_SLU_ARCH_REQ 2 /* Required SLU architecture level */
81
82/**
83 * struct genwqe_reg - Genwqe data dump functionality
84 */
85struct genwqe_reg {
86 u32 addr;
87 u32 idx;
88 u64 val;
89};
90
91/*
92 * enum genwqe_dbg_type - Specify chip unit to dump/debug
93 */
94enum genwqe_dbg_type {
95 GENWQE_DBG_UNIT0 = 0, /* captured before prev errs cleared */
96 GENWQE_DBG_UNIT1 = 1,
97 GENWQE_DBG_UNIT2 = 2,
98 GENWQE_DBG_UNIT3 = 3,
99 GENWQE_DBG_UNIT4 = 4,
100 GENWQE_DBG_UNIT5 = 5,
101 GENWQE_DBG_UNIT6 = 6,
102 GENWQE_DBG_UNIT7 = 7,
103 GENWQE_DBG_REGS = 8,
104 GENWQE_DBG_DMA = 9,
105 GENWQE_DBG_UNITS = 10, /* max number of possible debug units */
106};
107
108/* Software error injection to simulate card failures */
109#define GENWQE_INJECT_HARDWARE_FAILURE 0x00000001 /* injects -1 reg reads */
110#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */
111#define GENWQE_INJECT_GFIR_FATAL 0x00000004 /* GFIR = 0x0000ffff */
112#define GENWQE_INJECT_GFIR_INFO 0x00000008 /* GFIR = 0xffff0000 */
113
114/*
115 * Genwqe card description and management data.
116 *
117 * Error-handling in case of card malfunction
118 * ------------------------------------------
119 *
120 * If the card is detected to be defective the outside environment
121 * will cause the PCI layer to call deinit (the cleanup function for
 122 * probe). This has the same effect as doing an unbind/bind operation
123 * on the card.
124 *
125 * The genwqe card driver implements a health checking thread which
 126 * verifies the card function. If this detects a problem the card's
 127 * device is shut down and restarted again, along with a reset of
128 * the card and queue.
129 *
130 * All functions accessing the card device return either -EIO or -ENODEV
131 * code to indicate the malfunction to the user. The user has to close
132 * the file descriptor and open a new one, once the card becomes
133 * available again.
134 *
 135 * If the open file descriptor is set up to receive SIGIO, the signal is
 136 * generated for the application which has to provide a handler to
 137 * react on it. If the application does not close the open
 138 * file descriptor a SIGKILL is sent to enforce freeing the card's
139 * resources.
140 *
141 * I did not find a different way to prevent kernel problems due to
 142 * reference counters for the card's character devices getting out of
143 * sync. The character device deallocation does not block, even if
144 * there is still an open file descriptor pending. If this pending
145 * descriptor is closed, the data structures used by the character
 146 * device are reinstantiated, which will lead to the reference counter
147 * dropping below the allowed values.
148 *
149 * Card recovery
150 * -------------
151 *
152 * To test the internal driver recovery the following command can be used:
153 * sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
154 */
155
156
157/**
 158 * enum dma_mapping_type - Mapping type definition
 159 *
 160 * To avoid memcpying data around we use user memory directly. To do
161 * this we need to pin/swap-in the memory and request a DMA address
162 * for it.
163 */
164enum dma_mapping_type {
 165 GENWQE_MAPPING_RAW = 0, /* contiguous memory buffer */
166 GENWQE_MAPPING_SGL_TEMP, /* sglist dynamically used */
167 GENWQE_MAPPING_SGL_PINNED, /* sglist used with pinning */
168};
169
170/**
171 * struct dma_mapping - Information about memory mappings done by the driver
172 */
173struct dma_mapping {
174 enum dma_mapping_type type;
175
176 void *u_vaddr; /* user-space vaddr/non-aligned */
177 void *k_vaddr; /* kernel-space vaddr/non-aligned */
178 dma_addr_t dma_addr; /* physical DMA address */
179
180 struct page **page_list; /* list of pages used by user buff */
181 dma_addr_t *dma_list; /* list of dma addresses per page */
182 unsigned int nr_pages; /* number of pages */
183 unsigned int size; /* size in bytes */
184
185 struct list_head card_list; /* list of usr_maps for card */
186 struct list_head pin_list; /* list of pinned memory for dev */
187};
188
189static inline void genwqe_mapping_init(struct dma_mapping *m,
190 enum dma_mapping_type type)
191{
192 memset(m, 0, sizeof(*m));
193 m->type = type;
194}
195
196/**
197 * struct ddcb_queue - DDCB queue data
198 * @ddcb_max: Number of DDCBs on the queue
199 * @ddcb_next: Next free DDCB
200 * @ddcb_act: Next DDCB supposed to finish
201 * @ddcb_seq: Sequence number of last DDCB
202 * @ddcbs_in_flight: Currently enqueued DDCBs
203 * @ddcbs_completed: Number of already completed DDCBs
204 * @busy: Number of -EBUSY returns
205 * @ddcb_daddr: DMA address of first DDCB in the queue
206 * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue
207 * @ddcb_req: Associated requests (one per DDCB)
208 * @ddcb_waitqs: Associated wait queues (one per DDCB)
209 * @ddcb_lock: Lock to protect queuing operations
210 * @ddcb_waitq: Wait on next DDCB finishing
211 */
212
213struct ddcb_queue {
214 int ddcb_max; /* amount of DDCBs */
215 int ddcb_next; /* next available DDCB num */
216 int ddcb_act; /* DDCB to be processed */
217 u16 ddcb_seq; /* slc seq num */
218 unsigned int ddcbs_in_flight; /* number of ddcbs in processing */
219 unsigned int ddcbs_completed;
220 unsigned int ddcbs_max_in_flight;
221 unsigned int busy; /* how many times -EBUSY? */
222
223 dma_addr_t ddcb_daddr; /* DMA address */
224 struct ddcb *ddcb_vaddr; /* kernel virtual addr for DDCBs */
225 struct ddcb_requ **ddcb_req; /* ddcb processing parameter */
226 wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */
227
228 spinlock_t ddcb_lock; /* exclusive access to queue */
229 wait_queue_head_t ddcb_waitq; /* wait for ddcb processing */
230
231 /* registers or the respective queue to be used */
232 u32 IO_QUEUE_CONFIG;
233 u32 IO_QUEUE_STATUS;
234 u32 IO_QUEUE_SEGMENT;
235 u32 IO_QUEUE_INITSQN;
236 u32 IO_QUEUE_WRAP;
237 u32 IO_QUEUE_OFFSET;
238 u32 IO_QUEUE_WTIME;
239 u32 IO_QUEUE_ERRCNTS;
240 u32 IO_QUEUE_LRW;
241};
242
243/*
244 * GFIR, SLU_UNITCFG, APP_UNITCFG
245 * 8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC.
246 */
247#define GENWQE_FFDC_REGS (3 + (8 * (2 + 2 * 64)))
248
249struct genwqe_ffdc {
250 unsigned int entries;
251 struct genwqe_reg *regs;
252};
253
254/**
255 * struct genwqe_dev - GenWQE device information
256 * @card_state: Card operation state, see above
257 * @ffdc: First Failure Data Capture buffers for each unit
258 * @card_thread: Working thread to operate the DDCB queue
259 * @card_waitq: Wait queue used in card_thread
260 * @queue: DDCB queue
261 * @health_thread: Card monitoring thread (only for PFs)
262 * @health_waitq: Wait queue used in health_thread
263 * @pci_dev: Associated PCI device (function)
264 * @mmio: Base address of 64-bit register space
265 * @mmio_len: Length of register area
266 * @file_lock: Lock to protect access to file_list
267 * @file_list: List of all processes with open GenWQE file descriptors
268 *
269 * This struct contains all information needed to communicate with a
270 * GenWQE card. It is initialized when a GenWQE device is found and
271 * destroyed when it goes away. It holds data to maintain the queue as
272 * well as data needed to feed the user interfaces.
273 */
274struct genwqe_dev {
275 enum genwqe_card_state card_state;
276 spinlock_t print_lock;
277
278 int card_idx; /* card index 0..CARD_NO_MAX-1 */
279 u64 flags; /* general flags */
280
281 /* FFDC data gathering */
282 struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];
283
284 /* DDCB workqueue */
285 struct task_struct *card_thread;
286 wait_queue_head_t queue_waitq;
287 struct ddcb_queue queue; /* genwqe DDCB queue */
288 unsigned int irqs_processed;
289
290 /* Card health checking thread */
291 struct task_struct *health_thread;
292 wait_queue_head_t health_waitq;
293
294 /* char device */
295 dev_t devnum_genwqe; /* major/minor num card */
296 struct class *class_genwqe; /* reference to class object */
297 struct device *dev; /* for device creation */
298 struct cdev cdev_genwqe; /* char device for card */
299
300 struct dentry *debugfs_root; /* debugfs card root directory */
301 struct dentry *debugfs_genwqe; /* debugfs driver root directory */
302
303 /* pci resources */
304 struct pci_dev *pci_dev; /* PCI device */
305 void __iomem *mmio; /* BAR-0 MMIO start */
306 unsigned long mmio_len;
307 u16 num_vfs;
308 u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
309 int is_privileged; /* access to all regs possible */
310
311 /* config regs which we need often */
312 u64 slu_unitcfg;
313 u64 app_unitcfg;
314 u64 softreset;
315 u64 err_inject;
316 u64 last_gfir;
317 char app_name[5];
318
319 spinlock_t file_lock; /* lock for open files */
320 struct list_head file_list; /* list of open files */
321
322 /* debugfs parameters */
323 int ddcb_software_timeout; /* wait until DDCB times out */
324 int skip_recovery; /* circumvention if recovery fails */
325 int kill_timeout; /* wait after sending SIGKILL */
326};
327
328/**
329 * enum genwqe_requ_state - State of a DDCB execution request
330 */
331enum genwqe_requ_state {
332 GENWQE_REQU_NEW = 0,
333 GENWQE_REQU_ENQUEUED = 1,
334 GENWQE_REQU_TAPPED = 2,
335 GENWQE_REQU_FINISHED = 3,
336 GENWQE_REQU_STATE_MAX,
337};
338
339/**
340 * struct ddcb_requ - Kernel internal representation of the DDCB request
341 * @cmd: User space representation of the DDCB execution request
342 */
343struct ddcb_requ {
344 /* kernel specific content */
345 enum genwqe_requ_state req_state; /* request status */
346 int num; /* ddcb_no for this request */
347 struct ddcb_queue *queue; /* associated queue */
348
349 struct dma_mapping dma_mappings[DDCB_FIXUPS];
350 struct sg_entry *sgl[DDCB_FIXUPS];
351 dma_addr_t sgl_dma_addr[DDCB_FIXUPS];
352 size_t sgl_size[DDCB_FIXUPS];
353
354 /* kernel/user shared content */
 355 struct genwqe_ddcb_cmd cmd; /* user space representation of DDCB */
356 struct genwqe_debug_data debug_data;
357};
358
359/**
360 * struct genwqe_file - Information for open GenWQE devices
361 */
362struct genwqe_file {
363 struct genwqe_dev *cd;
364 struct genwqe_driver *client;
365 struct file *filp;
366
367 struct fasync_struct *async_queue;
368 struct task_struct *owner;
369 struct list_head list; /* entry in list of open files */
370
371 spinlock_t map_lock; /* lock for dma_mappings */
372 struct list_head map_list; /* list of dma_mappings */
373
374 spinlock_t pin_lock; /* lock for pinned memory */
375 struct list_head pin_list; /* list of pinned memory */
376};
377
378int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */
379int genwqe_finish_queue(struct genwqe_dev *cd);
380int genwqe_release_service_layer(struct genwqe_dev *cd);
381
382/**
383 * genwqe_get_slu_id() - Read Service Layer Unit Id
384 * Return: 0x00: Development code
385 * 0x01: SLC1 (old)
386 * 0x02: SLC2 (sept2012)
387 * 0x03: SLC2 (feb2013, generic driver)
388 */
389static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
390{
391 return (int)((cd->slu_unitcfg >> 32) & 0xff);
392}
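
/*
 * Example (hypothetical register value): slu_unitcfg =
 * 0x0000000312345678 has bits 39:32 = 0x03, i.e. the SLC2 (feb2013)
 * service layer supported by this generic driver.
 */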
393
394int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);
395
396u8 genwqe_card_type(struct genwqe_dev *cd);
397int genwqe_card_reset(struct genwqe_dev *cd);
398int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
399void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);
400
401int genwqe_device_create(struct genwqe_dev *cd);
402int genwqe_device_remove(struct genwqe_dev *cd);
403
404/* debugfs */
405int genwqe_init_debugfs(struct genwqe_dev *cd);
406void genqwe_exit_debugfs(struct genwqe_dev *cd);
407
408int genwqe_read_softreset(struct genwqe_dev *cd);
409
410/* Hardware Circumventions */
411int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
412int genwqe_flash_readback_fails(struct genwqe_dev *cd);
413
414/**
415 * genwqe_write_vreg() - Write register in VF window
416 * @cd: genwqe device
417 * @reg: register address
418 * @val: value to write
419 * @func: 0: PF, 1: VF0, ..., 15: VF14
420 */
421int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);
422
423/**
424 * genwqe_read_vreg() - Read register in VF window
425 * @cd: genwqe device
426 * @reg: register address
427 * @func: 0: PF, 1: VF0, ..., 15: VF14
428 *
429 * Return: content of the register
430 */
431u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
432
433/* FFDC Buffer Management */
434int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
435int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
436 struct genwqe_reg *regs, unsigned int max_regs);
437int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
438 unsigned int max_regs, int all);
439int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
440 struct genwqe_reg *regs, unsigned int max_regs);
441
442int genwqe_init_debug_data(struct genwqe_dev *cd,
443 struct genwqe_debug_data *d);
444
445void genwqe_init_crc32(void);
446int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);
447
448/* Memory allocation/deallocation; dma address handling */
449int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
450 void *uaddr, unsigned long size,
451 struct ddcb_requ *req);
452
453int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
454 struct ddcb_requ *req);
455
456struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
457 dma_addr_t *dma_addr, size_t *sgl_size);
458
459void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
460 dma_addr_t dma_addr, size_t size);
461
462int genwqe_setup_sgl(struct genwqe_dev *cd,
463 unsigned long offs,
464 unsigned long size,
465 struct sg_entry *sgl, /* genwqe sgl */
466 dma_addr_t dma_addr, size_t sgl_size,
467 dma_addr_t *dma_list, int page_offs, int num_pages);
468
469int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
470 int size);
471
472static inline bool dma_mapping_used(struct dma_mapping *m)
473{
474 if (!m)
475 return 0;
476 return m->size != 0;
477}
478
479/**
480 * __genwqe_execute_ddcb() - Execute DDCB request with addr translation
481 *
482 * This function will do the address translation changes to the DDCBs
483 * according to the definitions required by the ATS field. It looks up
484 * the memory allocation buffer or does vmap/vunmap for the respective
 485 * user-space buffers, including page pinning and scatter gather list
486 * buildup and teardown.
487 */
488int __genwqe_execute_ddcb(struct genwqe_dev *cd,
489 struct genwqe_ddcb_cmd *cmd);
490
491/**
492 * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
493 *
 494 * This version will not do address translation or any modification of
495 * the DDCB data. It is used e.g. for the MoveFlash DDCB which is
496 * entirely prepared by the driver itself. That means the appropriate
497 * DMA addresses are already in the DDCB and do not need any
498 * modification.
499 */
500int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
501 struct genwqe_ddcb_cmd *cmd);
502
503int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
504int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
505int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
506
507/* register access */
508int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
509u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
510int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
511u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
512
513void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
514 dma_addr_t *dma_handle);
515void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
516 void *vaddr, dma_addr_t dma_handle);
517
518/* Base clock frequency in MHz */
519int genwqe_base_clock_frequency(struct genwqe_dev *cd);
520
521/* Before FFDC is captured the traps should be stopped. */
522void genwqe_stop_traps(struct genwqe_dev *cd);
523void genwqe_start_traps(struct genwqe_dev *cd);
524
525/* Hardware circumvention */
526bool genwqe_need_err_masking(struct genwqe_dev *cd);
527
528/**
529 * genwqe_is_privileged() - Determine operation mode for PCI function
530 *
531 * On Intel with SRIOV support we see:
532 * PF: is_physfn = 1 is_virtfn = 0
533 * VF: is_physfn = 0 is_virtfn = 1
534 *
535 * On Systems with no SRIOV support _and_ virtualized systems we get:
536 * is_physfn = 0 is_virtfn = 0
537 *
538 * Other vendors have individual pci device ids to distinguish between
539 * virtual function drivers and physical function drivers. GenWQE
 540 * unfortunately has just one pci device id for both, VFs and PF.
541 *
542 * The following code is used to distinguish if the card is running in
543 * privileged mode, either as true PF or in a virtualized system with
544 * full register access e.g. currently on PowerPC.
545 *
546 * if (pci_dev->is_virtfn)
547 * cd->is_privileged = 0;
548 * else
549 * cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
550 * != IO_ILLEGAL_VALUE);
551 */
552static inline int genwqe_is_privileged(struct genwqe_dev *cd)
553{
554 return cd->is_privileged;
555}
556
557#endif /* __CARD_BASE_H__ */
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
new file mode 100644
index 000000000000..6f1acc0ccf88
--- /dev/null
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -0,0 +1,1376 @@
1/**
2 * IBM Accelerator Family 'GenWQE'
3 *
4 * (C) Copyright IBM Corp. 2013
5 *
6 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
7 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
8 * Author: Michael Jung <mijung@de.ibm.com>
9 * Author: Michael Ruettger <michael@ibmra.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License (version 2 only)
13 * as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21/*
22 * Device Driver Control Block (DDCB) queue support. Definition of
23 * interrupt handlers for queue support as well as triggering the
24 * health monitor code in case of problems. The current hardware uses
25 * an MSI interrupt which is shared between error handling and
26 * functional code.
27 */
28
29#include <linux/types.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <linux/wait.h>
33#include <linux/pci.h>
34#include <linux/string.h>
35#include <linux/dma-mapping.h>
36#include <linux/delay.h>
37#include <linux/module.h>
38#include <linux/interrupt.h>
39#include <linux/crc-itu-t.h>
40
41#include "card_base.h"
42#include "card_ddcb.h"
43
44/*
45 * N: next DDCB, this is where the next DDCB will be put.
46 * A: active DDCB, this is where the code will look for the next completion.
47 * x: DDCB is enqueued, we are waiting for its completion.
 48 *
49 * Situation (1): Empty queue
50 * +---+---+---+---+---+---+---+---+
51 * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
52 * | | | | | | | | |
53 * +---+---+---+---+---+---+---+---+
54 * A/N
55 * enqueued_ddcbs = A - N = 2 - 2 = 0
56 *
57 * Situation (2): Wrapped, N > A
58 * +---+---+---+---+---+---+---+---+
59 * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
60 * | | | x | x | | | | |
61 * +---+---+---+---+---+---+---+---+
62 * A N
63 * enqueued_ddcbs = N - A = 4 - 2 = 2
64 *
65 * Situation (3): Queue wrapped, A > N
66 * +---+---+---+---+---+---+---+---+
67 * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
68 * | x | x | | | x | x | x | x |
69 * +---+---+---+---+---+---+---+---+
70 * N A
71 * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 2) = 6
72 *
73 * Situation (4a): Queue full N > A
74 * +---+---+---+---+---+---+---+---+
75 * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
76 * | x | x | x | x | x | x | x | |
77 * +---+---+---+---+---+---+---+---+
78 * A N
79 *
80 * enqueued_ddcbs = N - A = 7 - 0 = 7
81 *
 82 * Situation (4b): Queue full A > N
83 * +---+---+---+---+---+---+---+---+
84 * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
85 * | x | x | x | | x | x | x | x |
86 * +---+---+---+---+---+---+---+---+
87 * N A
88 * enqueued_ddcbs = queue_max - (A - N) = 8 - (4 - 3) = 7
89 */
90
91static int queue_empty(struct ddcb_queue *queue)
92{
93 return queue->ddcb_next == queue->ddcb_act;
94}
95
96static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
97{
98 if (queue->ddcb_next >= queue->ddcb_act)
99 return queue->ddcb_next - queue->ddcb_act;
100
101 return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);
102}
103
104static int queue_free_ddcbs(struct ddcb_queue *queue)
105{
106 int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;
107
108 if (WARN_ON_ONCE(free_ddcbs < 0)) { /* must never ever happen! */
109 return 0;
110 }
111 return free_ddcbs;
112}
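
/*
 * The "- 1" above keeps one DDCB slot unused so that a full queue can
 * be told apart from an empty one (ddcb_next == ddcb_act means empty,
 * see queue_empty()). Worked check against Situation (3) above:
 * A = 4, N = 2, ddcb_max = 8 gives enqueued = 8 - (4 - 2) = 6 and
 * thus free = 8 - 6 - 1 = 1.
 */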
113
114/*
115 * Use of the PRIV field in the DDCB for queue debugging:
116 *
117 * (1) Trying to get rid of a DDCB which saw a timeout:
118 * pddcb->priv[6] = 0xcc; # cleared
119 *
120 * (2) Append a DDCB via NEXT bit:
121 * pddcb->priv[7] = 0xaa; # appended
122 *
123 * (3) DDCB needed tapping:
124 * pddcb->priv[7] = 0xbb; # tapped
125 *
126 * (4) DDCB marked as correctly finished:
127 * pddcb->priv[6] = 0xff; # finished
128 */
129
130static inline void ddcb_mark_tapped(struct ddcb *pddcb)
131{
132 pddcb->priv[7] = 0xbb; /* tapped */
133}
134
135static inline void ddcb_mark_appended(struct ddcb *pddcb)
136{
137 pddcb->priv[7] = 0xaa; /* appended */
138}
139
140static inline void ddcb_mark_cleared(struct ddcb *pddcb)
141{
142 pddcb->priv[6] = 0xcc; /* cleared */
143}
144
145static inline void ddcb_mark_finished(struct ddcb *pddcb)
146{
147 pddcb->priv[6] = 0xff; /* finished */
148}
149
150static inline void ddcb_mark_unused(struct ddcb *pddcb)
151{
152 pddcb->priv_64 = cpu_to_be64(0); /* not tapped */
153}
154
155/**
156 * genwqe_crc16() - Generate 16-bit crc as required for DDCBs
157 * @buff: pointer to data buffer
158 * @len: length of data for calculation
159 * @init: initial crc (0xffff at start)
160 *
161 * Polynomial = x^16 + x^12 + x^5 + 1 (0x1021)
162 * Example: 4 bytes 0x01 0x02 0x03 0x04 with init = 0xffff
163 * should result in a crc16 of 0x89c3
164 *
 165 * Return: crc16 checksum in big endian format!
166 */
167static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init)
168{
169 return crc_itu_t(init, buff, len);
170}
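
/*
 * Minimal standalone cross-check of the example above (an
 * illustrative sketch, not driver code; it assumes crc_itu_t()
 * implements the plain, non-reflected 0x1021 polynomial):
 *
 *	u16 crc = 0xffff;
 *	const u8 buf[] = { 0x01, 0x02, 0x03, 0x04 };
 *	unsigned int i, j;
 *
 *	for (i = 0; i < sizeof(buf); i++) {
 *		crc ^= (u16)buf[i] << 8;
 *		for (j = 0; j < 8; j++)
 *			crc = (crc & 0x8000) ?
 *			      (u16)((crc << 1) ^ 0x1021) : (u16)(crc << 1);
 *	}
 *
 * crc now equals the documented 0x89c3.
 */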
171
172static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
173{
174 int i;
175 struct ddcb *pddcb;
176 unsigned long flags;
177 struct pci_dev *pci_dev = cd->pci_dev;
178
179 spin_lock_irqsave(&cd->print_lock, flags);
180
181 dev_info(&pci_dev->dev,
182 "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n",
183 cd->card_idx, queue->ddcb_act, queue->ddcb_next);
184
185 pddcb = queue->ddcb_vaddr;
186 for (i = 0; i < queue->ddcb_max; i++) {
187 dev_err(&pci_dev->dev,
188 " %c %-3d: RETC=%03x SEQ=%04x "
189 "HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n",
190 i == queue->ddcb_act ? '>' : ' ',
191 i,
192 be16_to_cpu(pddcb->retc_16),
193 be16_to_cpu(pddcb->seqnum_16),
194 pddcb->hsi,
195 pddcb->shi,
196 be64_to_cpu(pddcb->priv_64),
197 pddcb->cmd);
198 pddcb++;
199 }
200 spin_unlock_irqrestore(&cd->print_lock, flags);
201}
202
203struct genwqe_ddcb_cmd *ddcb_requ_alloc(void)
204{
205 struct ddcb_requ *req;
206
207 req = kzalloc(sizeof(*req), GFP_ATOMIC);
208 if (!req)
209 return NULL;
210
211 return &req->cmd;
212}
213
214void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd)
215{
216 struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
217 kfree(req);
218}
219
220static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req)
221{
222 return req->req_state;
223}
224
225static inline void ddcb_requ_set_state(struct ddcb_requ *req,
226 enum genwqe_requ_state new_state)
227{
228 req->req_state = new_state;
229}
230
231static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req)
232{
233 return req->cmd.ddata_addr != 0x0;
234}
235
236/**
237 * ddcb_requ_finished() - Returns the hardware state of the associated DDCB
238 * @cd: pointer to genwqe device descriptor
239 * @req: DDCB work request
240 *
 241 * Status of ddcb_requ mirrors this hardware state, but is copied into
 242 * the ddcb_requ by the interrupt/polling function. The lowlevel code
243 * should check the hardware state directly, the higher level code
244 * should check the copy.
245 *
246 * This function will also return true if the state of the queue is
247 * not GENWQE_CARD_USED. This enables us to purge all DDCBs in the
248 * shutdown case.
249 */
250static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
251{
252 return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) ||
253 (cd->card_state != GENWQE_CARD_USED);
254}
255
256/**
257 * enqueue_ddcb() - Enqueue a DDCB
258 * @cd: pointer to genwqe device descriptor
259 * @queue: queue this operation should be done on
260 * @ddcb_no: pointer to ddcb number being tapped
261 *
 262 * Start execution of DDCB by tapping or appending to the queue via NEXT
263 * bit. This is done by an atomic 'compare and swap' instruction and
264 * checking SHI and HSI of the previous DDCB.
265 *
266 * This function must only be called with ddcb_lock held.
267 *
268 * Return: 1 if new DDCB is appended to previous
269 * 2 if DDCB queue is tapped via register/simulation
270 */
271#define RET_DDCB_APPENDED 1
272#define RET_DDCB_TAPPED 2
273
274static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
275 struct ddcb *pddcb, int ddcb_no)
276{
277 unsigned int try;
278 int prev_no;
279 struct ddcb *prev_ddcb;
280 __be32 old, new, icrc_hsi_shi;
281 u64 num;
282
283 /*
284 * For performance checks a Dispatch Timestamp can be put into
285 * DDCB It is supposed to use the SLU's free running counter,
286 * but this requires PCIe cycles.
287 */
288 ddcb_mark_unused(pddcb);
289
290 /* check previous DDCB if already fetched */
291 prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
292 prev_ddcb = &queue->ddcb_vaddr[prev_no];
293
294 /*
295 * It might have happened that the HSI.FETCHED bit is
 296 * set. Retry in this case. Therefore at most two attempts are
 297 * expected.
298 */
299 ddcb_mark_appended(pddcb);
300 for (try = 0; try < 2; try++) {
301 old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
302
303 /* try to append via NEXT bit if prev DDCB is not completed */
304 if ((old & DDCB_COMPLETED_BE32) != 0x00000000)
305 break;
306
307 new = (old | DDCB_NEXT_BE32);
308 icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);
309
310 if (icrc_hsi_shi == old)
311 return RET_DDCB_APPENDED; /* appended to queue */
312 }
313
314 /* Queue must be re-started by updating QUEUE_OFFSET */
315 ddcb_mark_tapped(pddcb);
316 num = (u64)ddcb_no << 8;
317 __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
318
319 return RET_DDCB_TAPPED;
320}
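
/*
 * Caller-side sketch (illustrative): distinguishing the two enqueue
 * outcomes, e.g. for statistics; the counter names are hypothetical.
 *
 *	rc = enqueue_ddcb(cd, queue, pddcb, req->num);
 *	if (rc == RET_DDCB_APPENDED)
 *		stats_appended++;	(linked to prev DDCB via NEXT bit)
 *	else
 *		stats_tapped++;		(queue restarted via QUEUE_OFFSET)
 */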
321
322/**
323 * copy_ddcb_results() - Copy output state from real DDCB to request
324 *
325 * Copy the DDCB ASV to the request struct. No endian conversion
326 * is made, since the data structure in the ASV is still
327 * unknown here.
328 *
329 * This is needed by:
330 * - genwqe_purge_ddcb()
331 * - genwqe_check_ddcb_queue()
332 */
333static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
334{
335 struct ddcb_queue *queue = req->queue;
336 struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];
337
338 memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH);
339
340 /* copy status flags of the variant part */
341 req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16);
342 req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64);
343 req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64);
344
345 req->cmd.attn = be16_to_cpu(pddcb->attn_16);
346 req->cmd.progress = be32_to_cpu(pddcb->progress_32);
347 req->cmd.retc = be16_to_cpu(pddcb->retc_16);
348
349 if (ddcb_requ_collect_debug_data(req)) {
350 int prev_no = (ddcb_no == 0) ?
351 queue->ddcb_max - 1 : ddcb_no - 1;
352 struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no];
353
354 memcpy(&req->debug_data.ddcb_finished, pddcb,
355 sizeof(req->debug_data.ddcb_finished));
356 memcpy(&req->debug_data.ddcb_prev, prev_pddcb,
357 sizeof(req->debug_data.ddcb_prev));
358 }
359}
360
361/**
362 * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
363 * @cd: pointer to genwqe device descriptor
364 *
365 * Return: Number of DDCBs which were finished
366 */
367static int genwqe_check_ddcb_queue(struct genwqe_dev *cd,
368 struct ddcb_queue *queue)
369{
370 unsigned long flags;
371 int ddcbs_finished = 0;
372 struct pci_dev *pci_dev = cd->pci_dev;
373
374 spin_lock_irqsave(&queue->ddcb_lock, flags);
375
376 /* FIXME avoid soft locking CPU */
377 while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {
378
379 struct ddcb *pddcb;
380 struct ddcb_requ *req;
381 u16 vcrc, vcrc_16, retc_16;
382
383 pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
384
385 if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) ==
386 0x00000000)
387 goto go_home; /* not completed, continue waiting */
388
389 /* Note: DDCB could be purged */
390
391 req = queue->ddcb_req[queue->ddcb_act];
392 if (req == NULL) {
393 /* this occurs if DDCB is purged, not an error */
394 /* Move active DDCB further; Nothing to do anymore. */
395 goto pick_next_one;
396 }
397
398 /*
399 * HSI=0x44 (fetched and completed), but RETC is
400 * 0x101, or even worse 0x000.
401 *
402		 * If we see the queue in such an inconsistent state,
403		 * we read the errcnts and the queue status to provide
404		 * a trigger for our PCIe analyzer to stop capturing.
405 */
406 retc_16 = be16_to_cpu(pddcb->retc_16);
407 if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) {
408 u64 errcnts, status;
409 u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;
410
411 errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
412 status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
413
414 dev_err(&pci_dev->dev,
415 "[%s] SEQN=%04x HSI=%02x RETC=%03x "
416 " Q_ERRCNTS=%016llx Q_STATUS=%016llx\n"
417 " DDCB_DMA_ADDR=%016llx\n",
418 __func__, be16_to_cpu(pddcb->seqnum_16),
419 pddcb->hsi, retc_16, errcnts, status,
420 queue->ddcb_daddr + ddcb_offs);
421 }
422
423 copy_ddcb_results(req, queue->ddcb_act);
424 queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */
425
426 dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
427 genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
428
429 ddcb_mark_finished(pddcb);
430
431 /* calculate CRC_16 to see if VCRC is correct */
432 vcrc = genwqe_crc16(pddcb->asv,
433 VCRC_LENGTH(req->cmd.asv_length),
434 0xffff);
435 vcrc_16 = be16_to_cpu(pddcb->vcrc_16);
436 if (vcrc != vcrc_16) {
437 printk_ratelimited(KERN_ERR
438 "%s %s: err: wrong VCRC pre=%02x vcrc_len=%d "
439 "bytes vcrc_data=%04x is not vcrc_card=%04x\n",
440 GENWQE_DEVNAME, dev_name(&pci_dev->dev),
441 pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
442 vcrc, vcrc_16);
443 }
444
445 ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
446 queue->ddcbs_completed++;
447 queue->ddcbs_in_flight--;
448
449 /* wake up process waiting for this DDCB */
450 wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
451
452pick_next_one:
453 queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
454 ddcbs_finished++;
455 }
456
457 go_home:
458 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
459 return ddcbs_finished;
460}
461
462/**
463 * __genwqe_wait_ddcb() - Waits until DDCB is completed
464 * @cd: pointer to genwqe device descriptor
465 * @req: pointer to requested DDCB parameters
466 *
467 * The Service Layer will update the RETC in DDCB when processing is
468 * pending or done.
469 *
470 * Return: > 0 remaining jiffies, DDCB completed
471 * -ETIMEDOUT when timeout
472 * -ERESTARTSYS when ^C
473 * -EINVAL when unknown error condition
474 *
475 * When an error is returned, the caller needs to ensure that
476 * __genwqe_purge_ddcb() is called to get the &req removed from the
477 * queue.
478 */
479int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
480{
481 int rc;
482 unsigned int ddcb_no;
483 struct ddcb_queue *queue;
484 struct pci_dev *pci_dev = cd->pci_dev;
485
486 if (req == NULL)
487 return -EINVAL;
488
489 queue = req->queue;
490 if (queue == NULL)
491 return -EINVAL;
492
493 ddcb_no = req->num;
494 if (ddcb_no >= queue->ddcb_max)
495 return -EINVAL;
496
497 rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
498 ddcb_requ_finished(cd, req),
499 genwqe_ddcb_software_timeout * HZ);
500
501 /*
502 * We need to distinguish 3 cases here:
503 * 1. rc == 0 timeout occurred
504 * 2. rc == -ERESTARTSYS signal received
505 * 3. rc > 0 remaining jiffies condition is true
506 */
507 if (rc == 0) {
508 struct ddcb_queue *queue = req->queue;
509 struct ddcb *pddcb;
510
511 /*
512 * Timeout may be caused by long task switching time.
513 * When timeout happens, check if the request has
514 * meanwhile completed.
515 */
516 genwqe_check_ddcb_queue(cd, req->queue);
517 if (ddcb_requ_finished(cd, req))
518 return rc;
519
520 dev_err(&pci_dev->dev,
521 "[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n",
522 __func__, req->num, rc, ddcb_requ_get_state(req),
523 req);
524 dev_err(&pci_dev->dev,
525 "[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__,
526 __genwqe_readq(cd, queue->IO_QUEUE_STATUS));
527
528 pddcb = &queue->ddcb_vaddr[req->num];
529 genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
530
531 print_ddcb_info(cd, req->queue);
532 return -ETIMEDOUT;
533
534	} else if (rc == -ERESTARTSYS) {
535		/*
536		 * EINTR: Stops the application
537		 * ERESTARTSYS: Restartable system call; called again
538		 */
539		return rc;
540
541 } else if (rc < 0) {
542 dev_err(&pci_dev->dev,
543 "[%s] err: DDCB#%d unknown result (rc=%d) %d!\n",
544 __func__, req->num, rc, ddcb_requ_get_state(req));
545 return -EINVAL;
546 }
547
548	/* Severe error occurred. Driver is forced to stop operation */
549 if (cd->card_state != GENWQE_CARD_USED) {
550 dev_err(&pci_dev->dev,
551 "[%s] err: DDCB#%d forced to stop (rc=%d)\n",
552 __func__, req->num, rc);
553 return -EIO;
554 }
555 return rc;
556}
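
/*
 * Usage sketch (illustrative): the enqueue/wait/purge protocol
 * described above. On any error from __genwqe_wait_ddcb() the caller
 * must purge, so that req really gets removed from ddcb_req[]:
 *
 *	rc = __genwqe_enqueue_ddcb(cd, req);
 *	if (rc != 0)
 *		return rc;
 *	rc = __genwqe_wait_ddcb(cd, req);
 *	if (rc < 0)
 *		__genwqe_purge_ddcb(cd, req);	(mandatory cleanup)
 */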
557
558/**
559 * get_next_ddcb() - Get next available DDCB
560 * @cd: pointer to genwqe device descriptor
561 *
562 * The DDCB's content is completely cleared except for the PRE and
563 * SEQNUM presets. This function must only be called when ddcb_lock is held.
564 *
565 * Return: NULL if no empty DDCB available otherwise ptr to next DDCB.
566 */
567static struct ddcb *get_next_ddcb(struct genwqe_dev *cd,
568 struct ddcb_queue *queue,
569 int *num)
570{
571 u64 *pu64;
572 struct ddcb *pddcb;
573
574 if (queue_free_ddcbs(queue) == 0) /* queue is full */
575 return NULL;
576
577 /* find new ddcb */
578 pddcb = &queue->ddcb_vaddr[queue->ddcb_next];
579
580 /* if it is not completed, we are not allowed to use it */
581 /* barrier(); */
582 if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000)
583 return NULL;
584
585 *num = queue->ddcb_next; /* internal DDCB number */
586 queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;
587
588 /* clear important DDCB fields */
589 pu64 = (u64 *)pddcb;
590 pu64[0] = 0ULL; /* offs 0x00 (ICRC,HSI,SHI,...) */
592	pu64[1] = 0ULL; /* offs 0x08 (ACFUNC,CMD...) */
592
593 /* destroy previous results in ASV */
594 pu64[0x80/8] = 0ULL; /* offs 0x80 (ASV + 0) */
595 pu64[0x88/8] = 0ULL; /* offs 0x88 (ASV + 0x08) */
596 pu64[0x90/8] = 0ULL; /* offs 0x90 (ASV + 0x10) */
597 pu64[0x98/8] = 0ULL; /* offs 0x98 (ASV + 0x18) */
598 pu64[0xd0/8] = 0ULL; /* offs 0xd0 (RETC,ATTN...) */
599
600 pddcb->pre = DDCB_PRESET_PRE; /* 128 */
601 pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
602 return pddcb;
603}
604
605/**
606 * __genwqe_purge_ddcb() - Remove a DDCB from the workqueue
607 * @cd: genwqe device descriptor
608 * @req: DDCB request
609 *
610 * This will fail when the request was already FETCHED. In this case
611 * we need to wait until it is finished. Else the DDCB can be
612 * reused. This function also ensures that the request data structure
613 * is removed from ddcb_req[].
614 *
615 * Do not forget to call this function when __genwqe_wait_ddcb()
616 * fails, such that the request gets really removed from ddcb_req[].
617 *
618 * Return: 0 success
619 */
620int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
621{
622 struct ddcb *pddcb = NULL;
623 unsigned int t;
624 unsigned long flags;
625 struct ddcb_queue *queue = req->queue;
626 struct pci_dev *pci_dev = cd->pci_dev;
627 u64 queue_status;
628 __be32 icrc_hsi_shi = 0x0000;
629 __be32 old, new;
630
632 if (genwqe_ddcb_software_timeout <= 0) {
633 dev_err(&pci_dev->dev,
634 "[%s] err: software timeout is not set!\n", __func__);
635 return -EFAULT;
636 }
637
638 pddcb = &queue->ddcb_vaddr[req->num];
639
640 for (t = 0; t < genwqe_ddcb_software_timeout * 10; t++) {
641
642 spin_lock_irqsave(&queue->ddcb_lock, flags);
643
644 /* Check if req was meanwhile finished */
645 if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED)
646 goto go_home;
647
648 /* try to set PURGE bit if FETCHED/COMPLETED are not set */
649 old = pddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */
650 if ((old & DDCB_FETCHED_BE32) == 0x00000000) {
651
652 new = (old | DDCB_PURGE_BE32);
653 icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32,
654 old, new);
655 if (icrc_hsi_shi == old)
656 goto finish_ddcb;
657 }
658
659 /* normal finish with HSI bit */
660 barrier();
661 icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
662 if (icrc_hsi_shi & DDCB_COMPLETED_BE32)
663 goto finish_ddcb;
664
665 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
666
667 /*
668		 * Here the genwqe_check_ddcb_queue() function will most
669		 * likely discover this DDCB to be finished at some point
670		 * in time. It will mark the req finished and free it up
671		 * in the list.
672 */
673
674 copy_ddcb_results(req, req->num); /* for the failing case */
675 msleep(100); /* sleep for 1/10 second and try again */
676 continue;
677
678finish_ddcb:
679 copy_ddcb_results(req, req->num);
680 ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
681 queue->ddcbs_in_flight--;
682 queue->ddcb_req[req->num] = NULL; /* delete from array */
683 ddcb_mark_cleared(pddcb);
684
685 /* Move active DDCB further; Nothing to do here anymore. */
686
687 /*
688 * We need to ensure that there is at least one free
689 * DDCB in the queue. To do that, we must update
690 * ddcb_act only if the COMPLETED bit is set for the
691		 * DDCB we are working on. Otherwise we treat that DDCB,
692		 * even if we PURGED it, as occupied (the hardware has
693		 * not set the COMPLETED bit yet).
694 */
695 icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
696 if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) &&
697 (queue->ddcb_act == req->num)) {
698 queue->ddcb_act = ((queue->ddcb_act + 1) %
699 queue->ddcb_max);
700 }
701go_home:
702 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
703 return 0;
704 }
705
706 /*
707 * If the card is dead and the queue is forced to stop, we
708 * might see this in the queue status register.
709 */
710 queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);
711
712 dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num);
713 genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
714
715 dev_err(&pci_dev->dev,
716 "[%s] err: DDCB#%d not purged and not completed "
717 "after %d seconds QSTAT=%016llx!!\n",
718 __func__, req->num, genwqe_ddcb_software_timeout,
719 queue_status);
720
721 print_ddcb_info(cd, req->queue);
722
723 return -EFAULT;
724}
725
726int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d)
727{
728 int len;
729 struct pci_dev *pci_dev = cd->pci_dev;
730
731 if (d == NULL) {
732 dev_err(&pci_dev->dev,
733 "[%s] err: invalid memory for debug data!\n",
734 __func__);
735 return -EFAULT;
736 }
737
738 len = sizeof(d->driver_version);
739 snprintf(d->driver_version, len, "%s", DRV_VERS_STRING);
740 d->slu_unitcfg = cd->slu_unitcfg;
741 d->app_unitcfg = cd->app_unitcfg;
742 return 0;
743}
744
745/**
746 * __genwqe_enqueue_ddcb() - Enqueue a DDCB
747 * @cd: pointer to genwqe device descriptor
748 * @req: pointer to DDCB execution request
749 *
750 * Return: 0 if enqueuing succeeded
751 * -EIO if card is unusable/PCIe problems
752 * -EBUSY if enqueuing failed
753 */
754int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
755{
756 struct ddcb *pddcb;
757 unsigned long flags;
758 struct ddcb_queue *queue;
759 struct pci_dev *pci_dev = cd->pci_dev;
760 u16 icrc;
761
762 if (cd->card_state != GENWQE_CARD_USED) {
763 printk_ratelimited(KERN_ERR
764 "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n",
765 GENWQE_DEVNAME, dev_name(&pci_dev->dev),
766 __func__, req->num);
767 return -EIO;
768 }
769
770 queue = req->queue = &cd->queue;
771
772	/* FIXME Circumvention to improve performance when no irq is
773	 * available.
774	 */
775 if (genwqe_polling_enabled)
776 genwqe_check_ddcb_queue(cd, queue);
777
778 /*
779	 * All DDCBs must be processed in successive order. Use a
780	 * lock here to prevent nested DDCB enqueuing.
782 */
783 spin_lock_irqsave(&queue->ddcb_lock, flags);
784
785 pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */
786 if (pddcb == NULL) {
787 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
788 queue->busy++;
789 return -EBUSY;
790 }
791
792 if (queue->ddcb_req[req->num] != NULL) {
793 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
794
795 dev_err(&pci_dev->dev,
796 "[%s] picked DDCB %d with req=%p still in use!!\n",
797 __func__, req->num, req);
798 return -EFAULT;
799 }
800 ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED);
801 queue->ddcb_req[req->num] = req;
802
803 pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts);
804 pddcb->cmd = req->cmd.cmd;
805 pddcb->acfunc = req->cmd.acfunc; /* functional unit */
806
807 /*
808	 * We know that we can get retc 0x104 with CRC errors; do not
809 * stop the queue in those cases for this command. XDIR = 1
810 * does not work for old SLU versions.
811 *
812 * Last bitstream with the old XDIR behavior had SLU_ID
813 * 0x34199.
814 */
815 if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull)
816 pddcb->xdir = 0x1;
817 else
818 pddcb->xdir = 0x0;
819
821 pddcb->psp = (((req->cmd.asiv_length / 8) << 4) |
822 ((req->cmd.asv_length / 8)));
823 pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts);
824
825 /*
826 * If copying the whole DDCB_ASIV_LENGTH is impacting
827 * performance we need to change it to
828 * req->cmd.asiv_length. But simulation benefits from some
829 * non-architectured bits behind the architectured content.
830 *
831 * How much data is copied depends on the availability of the
832 * ATS field, which was introduced late. If the ATS field is
833 * supported ASIV is 8 bytes shorter than it used to be. Since
834 * the ATS field is copied too, the code should do exactly
835 * what it did before, but I wanted to make copying of the ATS
836 * field very explicit.
837 */
838 if (genwqe_get_slu_id(cd) <= 0x2) {
839 memcpy(&pddcb->__asiv[0], /* destination */
840 &req->cmd.__asiv[0], /* source */
841 DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */
842 } else {
843 pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats);
844 memcpy(&pddcb->n.asiv[0], /* destination */
845 &req->cmd.asiv[0], /* source */
846 DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */
847 }
848
849 pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */
850
851 /*
852 * Calculate CRC_16 for corresponding range PSP(7:4). Include
853 * empty 4 bytes prior to the data.
854 */
855 icrc = genwqe_crc16((const u8 *)pddcb,
856 ICRC_LENGTH(req->cmd.asiv_length), 0xffff);
857 pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);
858
859 /* enable DDCB completion irq */
860 if (!genwqe_polling_enabled)
861 pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;
862
863 dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
864 genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));
865
866 if (ddcb_requ_collect_debug_data(req)) {
867 /* use the kernel copy of debug data. copying back to
868 user buffer happens later */
869
870 genwqe_init_debug_data(cd, &req->debug_data);
871 memcpy(&req->debug_data.ddcb_before, pddcb,
872 sizeof(req->debug_data.ddcb_before));
873 }
874
875 enqueue_ddcb(cd, queue, pddcb, req->num);
876 queue->ddcbs_in_flight++;
877
878 if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
879 queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;
880
881 ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
882 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
883 wake_up_interruptible(&cd->queue_waitq);
884
885 return 0;
886}
887
888/**
889 * __genwqe_execute_raw_ddcb() - Setup and execute DDCB
890 * @cd: pointer to genwqe device descriptor
891 * @cmd: user provided DDCB command
892 */
893int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
894 struct genwqe_ddcb_cmd *cmd)
895{
896 int rc = 0;
897 struct pci_dev *pci_dev = cd->pci_dev;
898 struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
899
900 if (cmd->asiv_length > DDCB_ASIV_LENGTH) {
901 dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n",
902 __func__, cmd->asiv_length);
903 return -EINVAL;
904 }
905 if (cmd->asv_length > DDCB_ASV_LENGTH) {
906 dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
907			__func__, cmd->asv_length);
908 return -EINVAL;
909 }
910 rc = __genwqe_enqueue_ddcb(cd, req);
911 if (rc != 0)
912 return rc;
913
914 rc = __genwqe_wait_ddcb(cd, req);
915 if (rc < 0) /* error or signal interrupt */
916 goto err_exit;
917
918 if (ddcb_requ_collect_debug_data(req)) {
919 if (copy_to_user((struct genwqe_debug_data __user *)
920 (unsigned long)cmd->ddata_addr,
921 &req->debug_data,
922 sizeof(struct genwqe_debug_data)))
923 return -EFAULT;
924 }
925
926 /*
927	 * Values higher than 0x102 indicate completion with faults,
928	 * lower values indicate processing faults. Note that the
929	 * DDCB might have been purged, e.g. on Ctrl+C.
930 */
931 if (cmd->retc != DDCB_RETC_COMPLETE) {
932		/* This might happen, e.g. on a flash read, and needs to
933		   be handled by the upper-layer code. */
934 rc = -EBADMSG; /* not processed/error retc */
935 }
936
937 return rc;
938
939 err_exit:
940 __genwqe_purge_ddcb(cd, req);
941
942 if (ddcb_requ_collect_debug_data(req)) {
943 if (copy_to_user((struct genwqe_debug_data __user *)
944 (unsigned long)cmd->ddata_addr,
945 &req->debug_data,
946 sizeof(struct genwqe_debug_data)))
947 return -EFAULT;
948 }
949 return rc;
950}
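
/*
 * Caller-side sketch (illustrative): -EBADMSG only means that retc
 * was not DDCB_RETC_COMPLETE; cmd->retc and the ASV data are still
 * valid, so an upper layer can inspect them. The handler below is
 * hypothetical; 0x104 is the CRC-error retc mentioned above.
 *
 *	rc = __genwqe_execute_raw_ddcb(cd, cmd);
 *	if (rc == -EBADMSG && cmd->retc == 0x104)
 *		return handle_crc_fault(cmd);
 *	return rc;
 */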
951
952/**
953 * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
954 *
955 * We use this as condition for our wait-queue code.
956 */
957static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
958{
959 unsigned long flags;
960 struct ddcb *pddcb;
961 struct ddcb_queue *queue = &cd->queue;
962
963 spin_lock_irqsave(&queue->ddcb_lock, flags);
964
965	if (queue_empty(queue)) { /* empty queue */
966 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
967 return 0;
968 }
969
970 pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
971 if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) { /* ddcb ready */
972 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
973 return 1;
974 }
975
976 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
977 return 0;
978}
979
980/**
981 * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
982 *
983 * Keep track of the number of DDCBs which are currently in the
984 * queue. This is needed for statistics as well as a condition to
985 * decide whether to wait or better do polling when no irqs are available.
986 */
987int genwqe_ddcbs_in_flight(struct genwqe_dev *cd)
988{
989 unsigned long flags;
990 int ddcbs_in_flight = 0;
991 struct ddcb_queue *queue = &cd->queue;
992
993 spin_lock_irqsave(&queue->ddcb_lock, flags);
994 ddcbs_in_flight += queue->ddcbs_in_flight;
995 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
996
997 return ddcbs_in_flight;
998}
999
1000static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
1001{
1002 int rc, i;
1003 struct ddcb *pddcb;
1004 u64 val64;
1005 unsigned int queue_size;
1006 struct pci_dev *pci_dev = cd->pci_dev;
1007
1008 if (genwqe_ddcb_max < 2)
1009 return -EINVAL;
1010
1011 queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
1012
1013 queue->ddcbs_in_flight = 0; /* statistics */
1014 queue->ddcbs_max_in_flight = 0;
1015 queue->ddcbs_completed = 0;
1016 queue->busy = 0;
1017
1018 queue->ddcb_seq = 0x100; /* start sequence number */
1019 queue->ddcb_max = genwqe_ddcb_max; /* module parameter */
1020 queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
1021 &queue->ddcb_daddr);
1022 if (queue->ddcb_vaddr == NULL) {
1023 dev_err(&pci_dev->dev,
1024 "[%s] **err: could not allocate DDCB **\n", __func__);
1025 return -ENOMEM;
1026 }
1027 memset(queue->ddcb_vaddr, 0, queue_size);
1028
1029 queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) *
1030 queue->ddcb_max, GFP_KERNEL);
1031 if (!queue->ddcb_req) {
1032 rc = -ENOMEM;
1033 goto free_ddcbs;
1034 }
1035
1036 queue->ddcb_waitqs = kzalloc(sizeof(wait_queue_head_t) *
1037 queue->ddcb_max, GFP_KERNEL);
1038 if (!queue->ddcb_waitqs) {
1039 rc = -ENOMEM;
1040 goto free_requs;
1041 }
1042
1043 for (i = 0; i < queue->ddcb_max; i++) {
1044 pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */
1045 pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32;
1046 pddcb->retc_16 = cpu_to_be16(0xfff);
1047
1048 queue->ddcb_req[i] = NULL; /* requests */
1049 init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */
1050 }
1051
1052 queue->ddcb_act = 0;
1053 queue->ddcb_next = 0; /* queue is empty */
1054
1055 spin_lock_init(&queue->ddcb_lock);
1056 init_waitqueue_head(&queue->ddcb_waitq);
1057
1058 val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */
1059 __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */
1060 __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
1061 __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
1062 __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64);
1063 return 0;
1064
1065 free_requs:
1066 kfree(queue->ddcb_req);
1067 queue->ddcb_req = NULL;
1068 free_ddcbs:
1069 __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
1070 queue->ddcb_daddr);
1071 queue->ddcb_vaddr = NULL;
1072 queue->ddcb_daddr = 0ull;
1073 return -ENODEV;
1074
1075}
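
/*
 * Worked example (assuming 4 KiB pages and the 256-byte struct ddcb
 * from card_ddcb.h): with genwqe_ddcb_max = 32 the ring takes
 * queue_size = roundup(32 * 256, PAGE_SIZE) = roundup(8192, 4096)
 *            = 8192 bytes, i.e. exactly two pages.
 */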
1076
1077static int ddcb_queue_initialized(struct ddcb_queue *queue)
1078{
1079 return queue->ddcb_vaddr != NULL;
1080}
1081
1082static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
1083{
1084 unsigned int queue_size;
1085
1086 queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);
1087
1088 kfree(queue->ddcb_req);
1089 queue->ddcb_req = NULL;
1090
1091 if (queue->ddcb_vaddr) {
1092 __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
1093 queue->ddcb_daddr);
1094 queue->ddcb_vaddr = NULL;
1095 queue->ddcb_daddr = 0ull;
1096 }
1097}
1098
1099static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
1100{
1101 u64 gfir;
1102 struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
1103 struct pci_dev *pci_dev = cd->pci_dev;
1104
1105 /*
1106 * In case of fatal FIR error the queue is stopped, such that
1107 * we can safely check it without risking anything.
1108 */
1109 cd->irqs_processed++;
1110 wake_up_interruptible(&cd->queue_waitq);
1111
1112 /*
1113 * Checking for errors before kicking the queue might be
1114 * safer, but slower for the good-case ... See above.
1115 */
1116 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
1117 if ((gfir & GFIR_ERR_TRIGGER) != 0x0) {
1118
1119 wake_up_interruptible(&cd->health_waitq);
1120
1121 /*
1122		 * By default GFIRs cause recovery actions. This
1123 * count is just for debug when recovery is masked.
1124 */
1125 printk_ratelimited(KERN_ERR
1126 "%s %s: [%s] GFIR=%016llx\n",
1127 GENWQE_DEVNAME, dev_name(&pci_dev->dev),
1128 __func__, gfir);
1129 }
1130
1131 return IRQ_HANDLED;
1132}
1133
1134static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
1135{
1136 struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
1137
1138 cd->irqs_processed++;
1139 wake_up_interruptible(&cd->queue_waitq);
1140
1141 return IRQ_HANDLED;
1142}
1143
1144/**
1145 * genwqe_card_thread() - Work thread for the DDCB queue
1146 *
1147 * The idea is to check if there are DDCBs in processing. If there are
1148 * some finished DDCBs, we process them and wake up the
1149 * requestors. Otherwise we give other processes time using
1150 * cond_resched().
1151 */
1152static int genwqe_card_thread(void *data)
1153{
1154 int should_stop = 0, rc = 0;
1155 struct genwqe_dev *cd = (struct genwqe_dev *)data;
1156
1157 while (!kthread_should_stop()) {
1158
1159 genwqe_check_ddcb_queue(cd, &cd->queue);
1160
1161 if (genwqe_polling_enabled) {
1162 rc = wait_event_interruptible_timeout(
1163 cd->queue_waitq,
1164 genwqe_ddcbs_in_flight(cd) ||
1165 (should_stop = kthread_should_stop()), 1);
1166 } else {
1167 rc = wait_event_interruptible_timeout(
1168 cd->queue_waitq,
1169 genwqe_next_ddcb_ready(cd) ||
1170 (should_stop = kthread_should_stop()), HZ);
1171 }
1172 if (should_stop)
1173 break;
1174
1175 /*
1176 * Avoid soft lockups on heavy loads; we do not want
1177 * to disable our interrupts.
1178 */
1179 cond_resched();
1180 }
1181 return 0;
1182}
1183
1184/**
1185 * genwqe_setup_service_layer() - Setup DDCB queue
1186 * @cd: pointer to genwqe device descriptor
1187 *
1188 * Allocate DDCBs. Configure Service Layer Controller (SLC).
1189 *
1190 * Return: 0 success
1191 */
1192int genwqe_setup_service_layer(struct genwqe_dev *cd)
1193{
1194 int rc;
1195 struct ddcb_queue *queue;
1196 struct pci_dev *pci_dev = cd->pci_dev;
1197
1198 if (genwqe_is_privileged(cd)) {
1199 rc = genwqe_card_reset(cd);
1200 if (rc < 0) {
1201 dev_err(&pci_dev->dev,
1202 "[%s] err: reset failed.\n", __func__);
1203 return rc;
1204 }
1205 genwqe_read_softreset(cd);
1206 }
1207
1208 queue = &cd->queue;
1209 queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG;
1210 queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS;
1211 queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
1212 queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
1213 queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET;
1214 queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP;
1215 queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME;
1216 queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
1217 queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW;
1218
1219 rc = setup_ddcb_queue(cd, queue);
1220 if (rc != 0) {
1221 rc = -ENODEV;
1222 goto err_out;
1223 }
1224
1225 init_waitqueue_head(&cd->queue_waitq);
1226 cd->card_thread = kthread_run(genwqe_card_thread, cd,
1227 GENWQE_DEVNAME "%d_thread",
1228 cd->card_idx);
1229 if (IS_ERR(cd->card_thread)) {
1230 rc = PTR_ERR(cd->card_thread);
1231 cd->card_thread = NULL;
1232 goto stop_free_queue;
1233 }
1234
1235 rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
1236 if (rc > 0)
1237 rc = genwqe_set_interrupt_capability(cd, rc);
1238 if (rc != 0) {
1239 rc = -ENODEV;
1240 goto stop_kthread;
1241 }
1242
1243 /*
1244 * We must have all wait-queues initialized when we enable the
1245 * interrupts. Otherwise we might crash if we get an early
1246 * irq.
1247 */
1248 init_waitqueue_head(&cd->health_waitq);
1249
1250 if (genwqe_is_privileged(cd)) {
1251 rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED,
1252 GENWQE_DEVNAME, cd);
1253 } else {
1254 rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED,
1255 GENWQE_DEVNAME, cd);
1256 }
1257 if (rc < 0) {
1258 dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq);
1259 goto stop_irq_cap;
1260 }
1261
1262 cd->card_state = GENWQE_CARD_USED;
1263 return 0;
1264
1265 stop_irq_cap:
1266 genwqe_reset_interrupt_capability(cd);
1267 stop_kthread:
1268 kthread_stop(cd->card_thread);
1269 cd->card_thread = NULL;
1270 stop_free_queue:
1271 free_ddcb_queue(cd, queue);
1272 err_out:
1273 return rc;
1274}
1275
1276/**
1277 * queue_wake_up_all() - Handles fatal error case
1278 *
1279 * The PCI device became unusable and we have to stop all pending
1280 * requests as fast as we can. The code after this must purge the
1281 * DDCBs in question and ensure that all mappings are freed.
1282 */
1283static int queue_wake_up_all(struct genwqe_dev *cd)
1284{
1285 unsigned int i;
1286 unsigned long flags;
1287 struct ddcb_queue *queue = &cd->queue;
1288
1289 spin_lock_irqsave(&queue->ddcb_lock, flags);
1290
1291 for (i = 0; i < queue->ddcb_max; i++)
1292		wake_up_interruptible(&queue->ddcb_waitqs[i]);
1293
1294 spin_unlock_irqrestore(&queue->ddcb_lock, flags);
1295
1296 return 0;
1297}
1298
1299/**
1300 * genwqe_finish_queue() - Stop queue operation and drain DDCBs in flight
1301 *
1302 * Relies on the pre-condition that there are no users of the card
1303 * device anymore e.g. with open file-descriptors.
1304 *
1305 * This function must be robust enough to be called twice.
1306 */
1307int genwqe_finish_queue(struct genwqe_dev *cd)
1308{
1309	int i, rc = 0, in_flight;
1310 int waitmax = genwqe_ddcb_software_timeout;
1311 struct pci_dev *pci_dev = cd->pci_dev;
1312 struct ddcb_queue *queue = &cd->queue;
1313
1314 if (!ddcb_queue_initialized(queue))
1315 return 0;
1316
1317 /* Do not wipe out the error state. */
1318 if (cd->card_state == GENWQE_CARD_USED)
1319 cd->card_state = GENWQE_CARD_UNUSED;
1320
1321	/* Wake up all requests in the DDCB queue so that they
1322	   can be removed nicely. */
1323 queue_wake_up_all(cd);
1324
1325 /* We must wait to get rid of the DDCBs in flight */
1326 for (i = 0; i < waitmax; i++) {
1327 in_flight = genwqe_ddcbs_in_flight(cd);
1328
1329 if (in_flight == 0)
1330 break;
1331
1332 dev_dbg(&pci_dev->dev,
1333 " DEBUG [%d/%d] waiting for queue to get empty: "
1334 "%d requests!\n", i, waitmax, in_flight);
1335
1336 /*
1337		 * Severe error situation: The card itself has
1338		 * 16 DDCB queues, each queue has e.g. 32 entries,
1339		 * each DDCB has a hardware timeout of currently 250
1340		 * msec, but the PFs have a hardware timeout of 8 sec
1341 * ... so I take something large.
1342 */
1343 msleep(1000);
1344 }
1345 if (i == waitmax) {
1346 dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n",
1347 __func__);
1348 rc = -EIO;
1349 }
1350 return rc;
1351}
1352
1353/**
1354 * genwqe_release_service_layer() - Shutdown DDCB queue
1355 * @cd: genwqe device descriptor
1356 *
1357 * This function must be robust enough to be called twice.
1358 */
1359int genwqe_release_service_layer(struct genwqe_dev *cd)
1360{
1361 struct pci_dev *pci_dev = cd->pci_dev;
1362
1363 if (!ddcb_queue_initialized(&cd->queue))
1364 return 1;
1365
1366 free_irq(pci_dev->irq, cd);
1367 genwqe_reset_interrupt_capability(cd);
1368
1369 if (cd->card_thread != NULL) {
1370 kthread_stop(cd->card_thread);
1371 cd->card_thread = NULL;
1372 }
1373
1374 free_ddcb_queue(cd, &cd->queue);
1375 return 0;
1376}
diff --git a/drivers/misc/genwqe/card_ddcb.h b/drivers/misc/genwqe/card_ddcb.h
new file mode 100644
index 000000000000..c4f26720753e
--- /dev/null
+++ b/drivers/misc/genwqe/card_ddcb.h
@@ -0,0 +1,188 @@
1#ifndef __CARD_DDCB_H__
2#define __CARD_DDCB_H__
3
4/**
5 * IBM Accelerator Family 'GenWQE'
6 *
7 * (C) Copyright IBM Corp. 2013
8 *
9 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
10 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
11 * Author: Michael Jung <mijung@de.ibm.com>
12 * Author: Michael Ruettger <michael@ibmra.de>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 */
24
25#include <linux/types.h>
26#include <asm/byteorder.h>
27
28#include "genwqe_driver.h"
29#include "card_base.h"
30
31/**
32 * struct ddcb - Device Driver Control Block DDCB
33 * @hsi: Hardware software interlock
34 * @shi: Software hardware interlock. Hsi and shi are used to interlock
35 * software and hardware activities. We are using a compare and
36 * swap operation to ensure that there are no races when
37 * activating new DDCBs on the queue, or when we need to
38 * purge a DDCB from a running queue.
39 * @acfunc: Accelerator function; addresses a unit within the chip
40 * @cmd: Command to work on
41 * @cmdopts_16: Options for the command
42 * @asiv: Input data
43 * @asv: Output data
44 *
45 * The DDCB data format is big endian. Multiple consecutive DDCBs form
46 * a DDCB queue.
47 */
48#define ASIV_LENGTH 104 /* Old specification without ATS field */
49#define ASIV_LENGTH_ATS 96 /* New specification with ATS field */
50#define ASV_LENGTH 64
51
52struct ddcb {
53 union {
54 __be32 icrc_hsi_shi_32; /* iCRC, Hardware/SW interlock */
55 struct {
56 __be16 icrc_16;
57 u8 hsi;
58 u8 shi;
59 };
60 };
61 u8 pre; /* Preamble */
62 u8 xdir; /* Execution Directives */
63 __be16 seqnum_16; /* Sequence Number */
64
65 u8 acfunc; /* Accelerator Function.. */
66 u8 cmd; /* Command. */
67 __be16 cmdopts_16; /* Command Options */
68 u8 sur; /* Status Update Rate */
69 u8 psp; /* Protection Section Pointer */
70 __be16 rsvd_0e_16; /* Reserved invariant */
71
72 __be64 fwiv_64; /* Firmware Invariant. */
73
74 union {
75 struct {
76 __be64 ats_64; /* Address Translation Spec */
77 u8 asiv[ASIV_LENGTH_ATS]; /* New ASIV */
78 } n;
79 u8 __asiv[ASIV_LENGTH]; /* obsolete */
80 };
81 u8 asv[ASV_LENGTH]; /* Appl Spec Variant */
82
83 __be16 rsvd_c0_16; /* Reserved Variant */
84 __be16 vcrc_16; /* Variant CRC */
85 __be32 rsvd_32; /* Reserved unprotected */
86
87 __be64 deque_ts_64; /* Deque Time Stamp. */
88
89 __be16 retc_16; /* Return Code */
90 __be16 attn_16; /* Attention/Extended Error Codes */
91 __be32 progress_32; /* Progress indicator. */
92
93 __be64 cmplt_ts_64; /* Completion Time Stamp. */
94
95 /* The following layout matches the new service layer format */
96 __be32 ibdc_32; /* Inbound Data Count (* 256) */
97 __be32 obdc_32; /* Outbound Data Count (* 256) */
98
99 __be64 rsvd_SLH_64; /* Reserved for hardware */
100 union { /* private data for driver */
101 u8 priv[8];
102 __be64 priv_64;
103 };
104 __be64 disp_ts_64; /* Dispatch TimeStamp */
105} __attribute__((__packed__));
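
/*
 * Layout sanity sketch (illustrative): the fields above add up to a
 * 256-byte DDCB; build-time assertions could pin the offsets that
 * the queue code in card_ddcb.c relies on:
 *
 *	BUILD_BUG_ON(sizeof(struct ddcb) != 256);
 *	BUILD_BUG_ON(offsetof(struct ddcb, asv) != 0x80);
 *	BUILD_BUG_ON(offsetof(struct ddcb, retc_16) != 0xd0);
 */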
106
107/* CRC polynomials for DDCB */
108#define CRC16_POLYNOMIAL 0x1021
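
/*
 * Reference sketch (illustrative, not necessarily the driver's
 * implementation): a plain MSB-first CRC-16 over this polynomial,
 * seeded with 0xffff as the callers in card_ddcb.c do, would be:
 *
 *	static u16 crc16_sketch(const u8 *buf, size_t len, u16 crc)
 *	{
 *		int i;
 *
 *		while (len--) {
 *			crc ^= (u16)(*buf++) << 8;
 *			for (i = 0; i < 8; i++)
 *				crc = (crc & 0x8000) ?
 *				      (crc << 1) ^ CRC16_POLYNOMIAL :
 *				      (crc << 1);
 *		}
 *		return crc;
 *	}
 */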
109
110/*
111 * SHI: Software to Hardware Interlock
112 * This 1 byte field is written by software to interlock the
113 * movement of one queue entry to another with the hardware in the
114 * chip.
115 */
116#define DDCB_SHI_INTR 0x04 /* Bit 2 */
117#define DDCB_SHI_PURGE 0x02 /* Bit 1 */
118#define DDCB_SHI_NEXT 0x01 /* Bit 0 */
119
120/*
121 * HSI: Hardware to Software interlock
122 * This 1 byte field is written by hardware to interlock the movement
123 * of one queue entry to another with the software.
124 */
125#define DDCB_HSI_COMPLETED 0x40 /* Bit 6 */
126#define DDCB_HSI_FETCHED 0x04 /* Bit 2 */
127
128/*
129 * Accessing HSI/SHI is done 32-bit wide
130 * Normally 16-bit access would work too, but on some platforms a
131 * 16-bit compare and swap operation is not supported. Therefore
132 * we switch to 32-bit such that those platforms work too.
133 *
134 * iCRC HSI/SHI
135 */
136#define DDCB_INTR_BE32 cpu_to_be32(0x00000004)
137#define DDCB_PURGE_BE32 cpu_to_be32(0x00000002)
138#define DDCB_NEXT_BE32 cpu_to_be32(0x00000001)
139#define DDCB_COMPLETED_BE32 cpu_to_be32(0x00004000)
140#define DDCB_FETCHED_BE32 cpu_to_be32(0x00000400)
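
/*
 * Correspondence sketch: since icrc_hsi_shi_32 overlays
 * { icrc_16, hsi, shi } in big endian, each 32-bit constant above is
 * just the 8-bit HSI/SHI flag shifted into its byte lane, e.g.:
 *
 *	DDCB_COMPLETED_BE32 == cpu_to_be32(DDCB_HSI_COMPLETED << 8)
 *	DDCB_FETCHED_BE32   == cpu_to_be32(DDCB_HSI_FETCHED << 8)
 *	DDCB_NEXT_BE32      == cpu_to_be32(DDCB_SHI_NEXT)
 */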
141
142/* Definitions of DDCB presets */
143#define DDCB_PRESET_PRE 0x80
144#define ICRC_LENGTH(n) ((n) + 8 + 8 + 8) /* used ASIV + hdr fields */
145#define VCRC_LENGTH(n) ((n)) /* used ASV */
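
/*
 * Worked example: for a pre-ATS command with asiv_length = 104,
 * ICRC_LENGTH(104) = 104 + 24 = 128 bytes, i.e. the iCRC covers the
 * three 8-byte header words plus the whole ASIV; with asv_length = 64,
 * VCRC_LENGTH(64) = 64 bytes, i.e. the vCRC covers the whole ASV.
 */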
146
147/*
148 * Genwqe Scatter Gather list
149 * Each block has up to 8 entries.
150 * The chaining element is element 0 because of prefetching needs.
151 */
152
153/*
154 * 0b0110 Chained descriptor. The descriptor describes the next
155 * descriptor list.
156 */
157#define SG_CHAINED (0x6)
158
159/*
160 * 0b0010 First entry of a descriptor list. Start from a Buffer-Empty
161 * condition.
162 */
163#define SG_DATA (0x2)
164
165/*
166 * 0b0000 Early terminator. This is the last entry on the list
167 * regardless of the length indicated.
168 */
169#define SG_END_LIST (0x0)
170
171/**
172 * struct sg_entry - Scatter gather list entry
173 * @target_addr: Either a dma addr of memory to work on or a
174 *               dma addr of a subsequent sglist block.
175 * @len: Length of the data block.
176 * @flags: See above.
177 *
178 * Depending on the command the GenWQE card can use a scatter gather
179 * list to describe the memory it works on. Always 8 sg_entry's form
180 * a block.
181 */
182struct sg_entry {
183 __be64 target_addr;
184 __be32 len;
185 __be32 flags;
186};
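
/*
 * Building sketch (illustrative): filling one 8-entry block for two
 * data buffers; sgl is assumed to point to a zeroed block and the
 * dma addresses/lengths are hypothetical.
 *
 *	sgl[0].target_addr = cpu_to_be64(daddr0);
 *	sgl[0].len	   = cpu_to_be32(len0);
 *	sgl[0].flags	   = cpu_to_be32(SG_DATA);
 *	sgl[1].target_addr = cpu_to_be64(daddr1);
 *	sgl[1].len	   = cpu_to_be32(len1);
 *	sgl[1].flags	   = cpu_to_be32(SG_DATA);
 *	sgl[2].flags	   = cpu_to_be32(SG_END_LIST);	(early terminator)
 */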
187
188#endif /* __CARD_DDCB_H__ */
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
new file mode 100644
index 000000000000..3bfdc07a7248
--- /dev/null
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -0,0 +1,500 @@
1/**
2 * IBM Accelerator Family 'GenWQE'
3 *
4 * (C) Copyright IBM Corp. 2013
5 *
6 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
7 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
8 * Author: Michael Jung <mijung@de.ibm.com>
9 * Author: Michael Ruettger <michael@ibmra.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License (version 2 only)
13 * as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21/*
22 * Debugfs interfaces for the GenWQE card. Help to debug potential
23 * problems. Dump internal chip state for debugging and failure
24 * determination.
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/init.h>
30#include <linux/debugfs.h>
31#include <linux/seq_file.h>
32#include <linux/uaccess.h>
33
34#include "card_base.h"
35#include "card_ddcb.h"
36
37#define GENWQE_DEBUGFS_RO(_name, _showfn) \
38 static int genwqe_debugfs_##_name##_open(struct inode *inode, \
39 struct file *file) \
40 { \
41 return single_open(file, _showfn, inode->i_private); \
42 } \
43 static const struct file_operations genwqe_##_name##_fops = { \
44 .open = genwqe_debugfs_##_name##_open, \
45 .read = seq_read, \
46 .llseek = seq_lseek, \
47 .release = single_release, \
48 }
49
50static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs,
51 int entries)
52{
53 unsigned int i;
54 u32 v_hi, v_lo;
55
56 for (i = 0; i < entries; i++) {
57 v_hi = (regs[i].val >> 32) & 0xffffffff;
58 v_lo = (regs[i].val) & 0xffffffff;
59
60 seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x EXT_ERR_REC\n",
61 regs[i].addr, regs[i].idx, v_hi, v_lo);
62 }
63}
64
65static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
66{
67 struct genwqe_dev *cd = s->private;
68 int entries;
69 struct genwqe_reg *regs;
70
71 entries = genwqe_ffdc_buff_size(cd, uid);
72 if (entries < 0)
73 return -EINVAL;
74
75 if (entries == 0)
76 return 0;
77
78 regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL);
79 if (regs == NULL)
80 return -ENOMEM;
81
82 genwqe_stop_traps(cd); /* halt the traps while dumping data */
83 genwqe_ffdc_buff_read(cd, uid, regs, entries);
84 genwqe_start_traps(cd);
85
86 dbg_uidn_show(s, regs, entries);
87 kfree(regs);
88 return 0;
89}
90
91static int genwqe_curr_dbg_uid0_show(struct seq_file *s, void *unused)
92{
93 return curr_dbg_uidn_show(s, unused, 0);
94}
95
96GENWQE_DEBUGFS_RO(curr_dbg_uid0, genwqe_curr_dbg_uid0_show);
97
98static int genwqe_curr_dbg_uid1_show(struct seq_file *s, void *unused)
99{
100 return curr_dbg_uidn_show(s, unused, 1);
101}
102
103GENWQE_DEBUGFS_RO(curr_dbg_uid1, genwqe_curr_dbg_uid1_show);
104
105static int genwqe_curr_dbg_uid2_show(struct seq_file *s, void *unused)
106{
107 return curr_dbg_uidn_show(s, unused, 2);
108}
109
110GENWQE_DEBUGFS_RO(curr_dbg_uid2, genwqe_curr_dbg_uid2_show);
111
112static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
113{
114 struct genwqe_dev *cd = s->private;
115
116 dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries);
117 return 0;
118}
119
120static int genwqe_prev_dbg_uid0_show(struct seq_file *s, void *unused)
121{
122 return prev_dbg_uidn_show(s, unused, 0);
123}
124
125GENWQE_DEBUGFS_RO(prev_dbg_uid0, genwqe_prev_dbg_uid0_show);
126
127static int genwqe_prev_dbg_uid1_show(struct seq_file *s, void *unused)
128{
129 return prev_dbg_uidn_show(s, unused, 1);
130}
131
132GENWQE_DEBUGFS_RO(prev_dbg_uid1, genwqe_prev_dbg_uid1_show);
133
134static int genwqe_prev_dbg_uid2_show(struct seq_file *s, void *unused)
135{
136 return prev_dbg_uidn_show(s, unused, 2);
137}
138
139GENWQE_DEBUGFS_RO(prev_dbg_uid2, genwqe_prev_dbg_uid2_show);
140
141static int genwqe_curr_regs_show(struct seq_file *s, void *unused)
142{
143 struct genwqe_dev *cd = s->private;
144 unsigned int i;
145 struct genwqe_reg *regs;
146
147 regs = kcalloc(GENWQE_FFDC_REGS, sizeof(*regs), GFP_KERNEL);
148 if (regs == NULL)
149 return -ENOMEM;
150
151 genwqe_stop_traps(cd);
152 genwqe_read_ffdc_regs(cd, regs, GENWQE_FFDC_REGS, 1);
153 genwqe_start_traps(cd);
154
155 for (i = 0; i < GENWQE_FFDC_REGS; i++) {
156 if (regs[i].addr == 0xffffffff)
157 break; /* invalid entries */
158
159 if (regs[i].val == 0x0ull)
160 continue; /* do not print 0x0 FIRs */
161
162 seq_printf(s, " 0x%08x 0x%016llx\n",
163 regs[i].addr, regs[i].val);
164 }
165 return 0;
166}
167
168GENWQE_DEBUGFS_RO(curr_regs, genwqe_curr_regs_show);
169
170static int genwqe_prev_regs_show(struct seq_file *s, void *unused)
171{
172 struct genwqe_dev *cd = s->private;
173 unsigned int i;
174 struct genwqe_reg *regs = cd->ffdc[GENWQE_DBG_REGS].regs;
175
176 if (regs == NULL)
177 return -EINVAL;
178
179 for (i = 0; i < GENWQE_FFDC_REGS; i++) {
180 if (regs[i].addr == 0xffffffff)
181 break; /* invalid entries */
182
183 if (regs[i].val == 0x0ull)
184 continue; /* do not print 0x0 FIRs */
185
186 seq_printf(s, " 0x%08x 0x%016llx\n",
187 regs[i].addr, regs[i].val);
188 }
189 return 0;
190}
191
192GENWQE_DEBUGFS_RO(prev_regs, genwqe_prev_regs_show);
193
194static int genwqe_jtimer_show(struct seq_file *s, void *unused)
195{
196 struct genwqe_dev *cd = s->private;
197 unsigned int vf_num;
198 u64 jtimer;
199
200 jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT, 0);
201 seq_printf(s, " PF 0x%016llx %d msec\n", jtimer,
202 genwqe_pf_jobtimeout_msec);
203
204 for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
205 jtimer = genwqe_read_vreg(cd, IO_SLC_VF_APPJOB_TIMEOUT,
206 vf_num + 1);
207 seq_printf(s, " VF%-2d 0x%016llx %d msec\n", vf_num, jtimer,
208 cd->vf_jobtimeout_msec[vf_num]);
209 }
210 return 0;
211}
212
213GENWQE_DEBUGFS_RO(jtimer, genwqe_jtimer_show);
214
215static int genwqe_queue_working_time_show(struct seq_file *s, void *unused)
216{
217 struct genwqe_dev *cd = s->private;
218 unsigned int vf_num;
219 u64 t;
220
221 t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, 0);
222 seq_printf(s, " PF 0x%016llx\n", t);
223
224 for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
225 t = genwqe_read_vreg(cd, IO_SLC_VF_QUEUE_WTIME, vf_num + 1);
226 seq_printf(s, " VF%-2d 0x%016llx\n", vf_num, t);
227 }
228 return 0;
229}
230
231GENWQE_DEBUGFS_RO(queue_working_time, genwqe_queue_working_time_show);
232
233static int genwqe_ddcb_info_show(struct seq_file *s, void *unused)
234{
235 struct genwqe_dev *cd = s->private;
236 unsigned int i;
237 struct ddcb_queue *queue;
238 struct ddcb *pddcb;
239
240 queue = &cd->queue;
241 seq_puts(s, "DDCB QUEUE:\n");
242 seq_printf(s, " ddcb_max: %d\n"
243 " ddcb_daddr: %016llx - %016llx\n"
244 " ddcb_vaddr: %016llx\n"
245 " ddcbs_in_flight: %u\n"
246 " ddcbs_max_in_flight: %u\n"
247 " ddcbs_completed: %u\n"
248 " busy: %u\n"
249 " irqs_processed: %u\n",
250 queue->ddcb_max, (long long)queue->ddcb_daddr,
251 (long long)queue->ddcb_daddr +
252 (queue->ddcb_max * DDCB_LENGTH),
253 (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight,
254 queue->ddcbs_max_in_flight, queue->ddcbs_completed,
255 queue->busy, cd->irqs_processed);
256
257 /* Hardware State */
258 seq_printf(s, " 0x%08x 0x%016llx IO_QUEUE_CONFIG\n"
259 " 0x%08x 0x%016llx IO_QUEUE_STATUS\n"
260 " 0x%08x 0x%016llx IO_QUEUE_SEGMENT\n"
261 " 0x%08x 0x%016llx IO_QUEUE_INITSQN\n"
262 " 0x%08x 0x%016llx IO_QUEUE_WRAP\n"
263 " 0x%08x 0x%016llx IO_QUEUE_OFFSET\n"
264 " 0x%08x 0x%016llx IO_QUEUE_WTIME\n"
265 " 0x%08x 0x%016llx IO_QUEUE_ERRCNTS\n"
266 " 0x%08x 0x%016llx IO_QUEUE_LRW\n",
267 queue->IO_QUEUE_CONFIG,
268 __genwqe_readq(cd, queue->IO_QUEUE_CONFIG),
269 queue->IO_QUEUE_STATUS,
270 __genwqe_readq(cd, queue->IO_QUEUE_STATUS),
271 queue->IO_QUEUE_SEGMENT,
272 __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT),
273 queue->IO_QUEUE_INITSQN,
274 __genwqe_readq(cd, queue->IO_QUEUE_INITSQN),
275 queue->IO_QUEUE_WRAP,
276 __genwqe_readq(cd, queue->IO_QUEUE_WRAP),
277 queue->IO_QUEUE_OFFSET,
278 __genwqe_readq(cd, queue->IO_QUEUE_OFFSET),
279 queue->IO_QUEUE_WTIME,
280 __genwqe_readq(cd, queue->IO_QUEUE_WTIME),
281 queue->IO_QUEUE_ERRCNTS,
282 __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS),
283 queue->IO_QUEUE_LRW,
284 __genwqe_readq(cd, queue->IO_QUEUE_LRW));
285
286 seq_printf(s, "DDCB list (ddcb_act=%d/ddcb_next=%d):\n",
287 queue->ddcb_act, queue->ddcb_next);
288
289 pddcb = queue->ddcb_vaddr;
290 for (i = 0; i < queue->ddcb_max; i++) {
291 seq_printf(s, " %-3d: RETC=%03x SEQ=%04x HSI/SHI=%02x/%02x ",
292 i, be16_to_cpu(pddcb->retc_16),
293 be16_to_cpu(pddcb->seqnum_16),
294 pddcb->hsi, pddcb->shi);
295 seq_printf(s, "PRIV=%06llx CMD=%02x\n",
296 be64_to_cpu(pddcb->priv_64), pddcb->cmd);
297 pddcb++;
298 }
299 return 0;
300}
301
302GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show);
303
304static int genwqe_info_show(struct seq_file *s, void *unused)
305{
306 struct genwqe_dev *cd = s->private;
308 u64 app_id, slu_id, bitstream = -1;
309 struct pci_dev *pci_dev = cd->pci_dev;
310
311 slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG);
312 app_id = __genwqe_readq(cd, IO_APP_UNITCFG);
313
314 if (genwqe_is_privileged(cd))
315 bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM);
316
320 seq_printf(s, "%s driver version: %s\n"
321 " Device Name/Type: %s %s CardIdx: %d\n"
322 " SLU/APP Config : 0x%016llx/0x%016llx\n"
323 " Build Date : %u/%x/%u\n"
324 " Base Clock : %u MHz\n"
325 " Arch/SVN Release: %u/%llx\n"
326 " Bitstream : %llx\n",
327 GENWQE_DEVNAME, DRV_VERS_STRING, dev_name(&pci_dev->dev),
328 genwqe_is_privileged(cd) ?
329 "Physical" : "Virtual or no SR-IOV",
330 cd->card_idx, slu_id, app_id,
331 (u16)((slu_id >> 12) & 0x0fLLU), /* month */
332 (u16)((slu_id >> 4) & 0xffLLU), /* day */
333 (u16)((slu_id >> 16) & 0x0fLLU) + 2010, /* year */
334 genwqe_base_clock_frequency(cd),
335 (u16)((slu_id >> 32) & 0xffLLU), slu_id >> 40,
336 bitstream);
337
338 return 0;
339}
340
341GENWQE_DEBUGFS_RO(info, genwqe_info_show);
342
343int genwqe_init_debugfs(struct genwqe_dev *cd)
344{
345 struct dentry *root;
346 struct dentry *file;
347 int ret;
348 char card_name[64];
349 char name[64];
350 unsigned int i;
351
352 sprintf(card_name, "%s%u_card", GENWQE_DEVNAME, cd->card_idx);
353
354 root = debugfs_create_dir(card_name, cd->debugfs_genwqe);
355 if (!root) {
356 ret = -ENOMEM;
357 goto err0;
358 }
359
360 /* non privileged interfaces are done here */
361 file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd,
362 &genwqe_ddcb_info_fops);
363 if (!file) {
364 ret = -ENOMEM;
365 goto err1;
366 }
367
368 file = debugfs_create_file("info", S_IRUGO, root, cd,
369 &genwqe_info_fops);
370 if (!file) {
371 ret = -ENOMEM;
372 goto err1;
373 }
374
375 file = debugfs_create_x64("err_inject", 0666, root, &cd->err_inject);
376 if (!file) {
377 ret = -ENOMEM;
378 goto err1;
379 }
380
381 file = debugfs_create_u32("ddcb_software_timeout", 0666, root,
382 &cd->ddcb_software_timeout);
383 if (!file) {
384 ret = -ENOMEM;
385 goto err1;
386 }
387
388 file = debugfs_create_u32("kill_timeout", 0666, root,
389 &cd->kill_timeout);
390 if (!file) {
391 ret = -ENOMEM;
392 goto err1;
393 }
394
395 /* privileged interfaces follow here */
396 if (!genwqe_is_privileged(cd)) {
397 cd->debugfs_root = root;
398 return 0;
399 }
400
401 file = debugfs_create_file("curr_regs", S_IRUGO, root, cd,
402 &genwqe_curr_regs_fops);
403 if (!file) {
404 ret = -ENOMEM;
405 goto err1;
406 }
407
408 file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd,
409 &genwqe_curr_dbg_uid0_fops);
410 if (!file) {
411 ret = -ENOMEM;
412 goto err1;
413 }
414
415 file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd,
416 &genwqe_curr_dbg_uid1_fops);
417 if (!file) {
418 ret = -ENOMEM;
419 goto err1;
420 }
421
422 file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd,
423 &genwqe_curr_dbg_uid2_fops);
424 if (!file) {
425 ret = -ENOMEM;
426 goto err1;
427 }
428
429 file = debugfs_create_file("prev_regs", S_IRUGO, root, cd,
430 &genwqe_prev_regs_fops);
431 if (!file) {
432 ret = -ENOMEM;
433 goto err1;
434 }
435
436 file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd,
437 &genwqe_prev_dbg_uid0_fops);
438 if (!file) {
439 ret = -ENOMEM;
440 goto err1;
441 }
442
443 file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd,
444 &genwqe_prev_dbg_uid1_fops);
445 if (!file) {
446 ret = -ENOMEM;
447 goto err1;
448 }
449
450 file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd,
451 &genwqe_prev_dbg_uid2_fops);
452 if (!file) {
453 ret = -ENOMEM;
454 goto err1;
455 }
456
457 for (i = 0; i < GENWQE_MAX_VFS; i++) {
458 sprintf(name, "vf%d_jobtimeout_msec", i);
459
460 file = debugfs_create_u32(name, 0666, root,
461 &cd->vf_jobtimeout_msec[i]);
462 if (!file) {
463 ret = -ENOMEM;
464 goto err1;
465 }
466 }
467
468 file = debugfs_create_file("jobtimer", S_IRUGO, root, cd,
469 &genwqe_jtimer_fops);
470 if (!file) {
471 ret = -ENOMEM;
472 goto err1;
473 }
474
475 file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd,
476 &genwqe_queue_working_time_fops);
477 if (!file) {
478 ret = -ENOMEM;
479 goto err1;
480 }
481
482 file = debugfs_create_u32("skip_recovery", 0666, root,
483 &cd->skip_recovery);
484 if (!file) {
485 ret = -ENOMEM;
486 goto err1;
487 }
488
489 cd->debugfs_root = root;
490 return 0;
491err1:
492 debugfs_remove_recursive(root);
493err0:
494 return ret;
495}
496
497void genqwe_exit_debugfs(struct genwqe_dev *cd)
498{
499 debugfs_remove_recursive(cd->debugfs_root);
500}
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
new file mode 100644
index 000000000000..8f8a6b327cdb
--- /dev/null
+++ b/drivers/misc/genwqe/card_dev.c
@@ -0,0 +1,1414 @@
1/**
2 * IBM Accelerator Family 'GenWQE'
3 *
4 * (C) Copyright IBM Corp. 2013
5 *
6 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
7 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
8 * Author: Michael Jung <mijung@de.ibm.com>
9 * Author: Michael Ruettger <michael@ibmra.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License (version 2 only)
13 * as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21/*
22 * Character device representation of the GenWQE device. This allows
23 * user-space applications to communicate with the card.
24 */
25
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/string.h>
31#include <linux/fs.h>
32#include <linux/sched.h>
33#include <linux/wait.h>
34#include <linux/delay.h>
35#include <linux/atomic.h>
36
37#include "card_base.h"
38#include "card_ddcb.h"
39
40static int genwqe_open_files(struct genwqe_dev *cd)
41{
42 int rc;
43 unsigned long flags;
44
45 spin_lock_irqsave(&cd->file_lock, flags);
46 rc = list_empty(&cd->file_list);
47 spin_unlock_irqrestore(&cd->file_lock, flags);
48 return !rc;
49}
50
51static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
52{
53 unsigned long flags;
54
55 cfile->owner = current;
56 spin_lock_irqsave(&cd->file_lock, flags);
57 list_add(&cfile->list, &cd->file_list);
58 spin_unlock_irqrestore(&cd->file_lock, flags);
59}
60
61static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
62{
63 unsigned long flags;
64
65 spin_lock_irqsave(&cd->file_lock, flags);
66 list_del(&cfile->list);
67 spin_unlock_irqrestore(&cd->file_lock, flags);
68
69 return 0;
70}
71
72static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
73{
74 unsigned long flags;
75
76 spin_lock_irqsave(&cfile->pin_lock, flags);
77 list_add(&m->pin_list, &cfile->pin_list);
78 spin_unlock_irqrestore(&cfile->pin_lock, flags);
79}
80
81static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
82{
83 unsigned long flags;
84
85 spin_lock_irqsave(&cfile->pin_lock, flags);
86 list_del(&m->pin_list);
87 spin_unlock_irqrestore(&cfile->pin_lock, flags);
88
89 return 0;
90}
91
92/**
93 * genwqe_search_pin() - Search for the mapping for a userspace address
94 * @cfile: Descriptor of opened file
95 * @u_addr: User virtual address
96 * @size: Size of buffer
97 * @virt_addr: virtual address to be updated, if not NULL
98 *
99 * Return: Pointer to the corresponding mapping; NULL if not found
100 */
101static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
102 unsigned long u_addr,
103 unsigned int size,
104 void **virt_addr)
105{
106 unsigned long flags;
107 struct dma_mapping *m;
108
109 spin_lock_irqsave(&cfile->pin_lock, flags);
110
111 list_for_each_entry(m, &cfile->pin_list, pin_list) {
112 if ((((u64)m->u_vaddr) <= (u_addr)) &&
113 (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
114
115 if (virt_addr)
116 *virt_addr = m->k_vaddr +
117 (u_addr - (u64)m->u_vaddr);
118
119 spin_unlock_irqrestore(&cfile->pin_lock, flags);
120 return m;
121 }
122 }
123 spin_unlock_irqrestore(&cfile->pin_lock, flags);
124 return NULL;
125}
126
127static void __genwqe_add_mapping(struct genwqe_file *cfile,
128 struct dma_mapping *dma_map)
129{
130 unsigned long flags;
131
132 spin_lock_irqsave(&cfile->map_lock, flags);
133 list_add(&dma_map->card_list, &cfile->map_list);
134 spin_unlock_irqrestore(&cfile->map_lock, flags);
135}
136
137static void __genwqe_del_mapping(struct genwqe_file *cfile,
138 struct dma_mapping *dma_map)
139{
140 unsigned long flags;
141
142 spin_lock_irqsave(&cfile->map_lock, flags);
143 list_del(&dma_map->card_list);
144 spin_unlock_irqrestore(&cfile->map_lock, flags);
145}
146
147
148/**
149 * __genwqe_search_mapping() - Search for the mapping for a userspace address
150 * @cfile: descriptor of opened file
151 * @u_addr: user virtual address
152 * @size: size of buffer
153 * @dma_addr: DMA address to be updated, if not NULL
154 * @virt_addr: virtual address to be updated, if not NULL
155 *
156 * Return: Pointer to the corresponding mapping; NULL if not found
155 */
156static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
157 unsigned long u_addr,
158 unsigned int size,
159 dma_addr_t *dma_addr,
160 void **virt_addr)
161{
162 unsigned long flags;
163 struct dma_mapping *m;
164 struct pci_dev *pci_dev = cfile->cd->pci_dev;
165
166 spin_lock_irqsave(&cfile->map_lock, flags);
167 list_for_each_entry(m, &cfile->map_list, card_list) {
168
169 if ((((u64)m->u_vaddr) <= (u_addr)) &&
170 (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
171
172 /* match found: current is as expected and
173 addr is in range */
174 if (dma_addr)
175 *dma_addr = m->dma_addr +
176 (u_addr - (u64)m->u_vaddr);
177
178 if (virt_addr)
179 *virt_addr = m->k_vaddr +
180 (u_addr - (u64)m->u_vaddr);
181
182 spin_unlock_irqrestore(&cfile->map_lock, flags);
183 return m;
184 }
185 }
186 spin_unlock_irqrestore(&cfile->map_lock, flags);
187
188 dev_err(&pci_dev->dev,
189 "[%s] Entry not found: u_addr=%lx, size=%x\n",
190 __func__, u_addr, size);
191
192 return NULL;
193}
194
195static void genwqe_remove_mappings(struct genwqe_file *cfile)
196{
197 int i = 0;
198 struct list_head *node, *next;
199 struct dma_mapping *dma_map;
200 struct genwqe_dev *cd = cfile->cd;
201 struct pci_dev *pci_dev = cfile->cd->pci_dev;
202
203 list_for_each_safe(node, next, &cfile->map_list) {
204 dma_map = list_entry(node, struct dma_mapping, card_list);
205
206 list_del_init(&dma_map->card_list);
207
208 /*
209 * This is really a bug, because those things should
210 * have been already tidied up.
211 *
212 * GENWQE_MAPPING_RAW should have been removed via munmap().
213 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
214 */
215 dev_err(&pci_dev->dev,
216 "[%s] %d. cleanup mapping: u_vaddr=%p "
217 "u_kaddr=%016lx dma_addr=%lx\n", __func__, i++,
218 dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr,
219 (unsigned long)dma_map->dma_addr);
220
221 if (dma_map->type == GENWQE_MAPPING_RAW) {
222 /* we allocated this dynamically */
223 __genwqe_free_consistent(cd, dma_map->size,
224 dma_map->k_vaddr,
225 dma_map->dma_addr);
226 kfree(dma_map);
227 } else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
228 /* we use dma_map statically from the request */
229 genwqe_user_vunmap(cd, dma_map, NULL);
230 }
231 }
232}
233
234static void genwqe_remove_pinnings(struct genwqe_file *cfile)
235{
236 struct list_head *node, *next;
237 struct dma_mapping *dma_map;
238 struct genwqe_dev *cd = cfile->cd;
239
240 list_for_each_safe(node, next, &cfile->pin_list) {
241 dma_map = list_entry(node, struct dma_mapping, pin_list);
242
243 /*
244 * This is not a bug, because a killed process might
245 * not call the unpin ioctl, which is supposed to free
246 * the resources.
247 *
248 * Pinnings are dynamically allocated and need to be
249 * deleted.
250 */
251 list_del_init(&dma_map->pin_list);
252 genwqe_user_vunmap(cd, dma_map, NULL);
253 kfree(dma_map);
254 }
255}
256
257/**
258 * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
259 *
260 * E.g. genwqe_kill_fasync(cd, SIGIO);
261 */
262static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
263{
264 unsigned int files = 0;
265 unsigned long flags;
266 struct genwqe_file *cfile;
267
268 spin_lock_irqsave(&cd->file_lock, flags);
269 list_for_each_entry(cfile, &cd->file_list, list) {
270 if (cfile->async_queue)
271 kill_fasync(&cfile->async_queue, sig, POLL_HUP);
272 files++;
273 }
274 spin_unlock_irqrestore(&cd->file_lock, flags);
275 return files;
276}
277
278static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
279{
280 unsigned int files = 0;
281 unsigned long flags;
282 struct genwqe_file *cfile;
283
284 spin_lock_irqsave(&cd->file_lock, flags);
285 list_for_each_entry(cfile, &cd->file_list, list) {
286 force_sig(sig, cfile->owner);
287 files++;
288 }
289 spin_unlock_irqrestore(&cd->file_lock, flags);
290 return files;
291}
292
293/**
294 * genwqe_open() - file open
295 * @inode: file system information
296 * @filp: file handle
297 *
298 * This function is executed whenever an application calls
299 * open("/dev/genwqe",..).
300 *
301 * Return: 0 if successful or <0 if errors
302 */
303static int genwqe_open(struct inode *inode, struct file *filp)
304{
305 struct genwqe_dev *cd;
306 struct genwqe_file *cfile;
307 struct pci_dev *pci_dev;
308
309 cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
310 if (cfile == NULL)
311 return -ENOMEM;
312
313 cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
314 pci_dev = cd->pci_dev;
315 cfile->cd = cd;
316 cfile->filp = filp;
317 cfile->client = NULL;
318
319 spin_lock_init(&cfile->map_lock); /* list of raw memory allocations */
320 INIT_LIST_HEAD(&cfile->map_list);
321
322 spin_lock_init(&cfile->pin_lock); /* list of user pinned memory */
323 INIT_LIST_HEAD(&cfile->pin_list);
324
325 filp->private_data = cfile;
326
327 genwqe_add_file(cd, cfile);
328 return 0;
329}
330
331/**
332 * genwqe_fasync() - Setup process to receive SIGIO.
333 * @fd: file descriptor
334 * @filp: file handle
335 * @mode: file mode
336 *
337 * Sending a signal works as follows:
338 *
339 * if (cdev->async_queue)
340 * kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
341 *
342 * Some devices also implement asynchronous notification to indicate
343 * when the device can be written; in this case, of course,
344 * kill_fasync must be called with a mode of POLL_OUT.
345 */
346static int genwqe_fasync(int fd, struct file *filp, int mode)
347{
348 struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;
349 return fasync_helper(fd, filp, mode, &cdev->async_queue);
350}
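/*
 * Illustrative sketch (not part of this patch): a userspace process
 * typically registers for SIGIO with the standard fcntl() fasync
 * protocol before the driver can notify it:
 *
 *   signal(SIGIO, my_sigio_handler);
 *   fcntl(fd, F_SETOWN, getpid());
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * Afterwards genwqe_kill_fasync() above can deliver SIGIO to the
 * process, e.g. before the card is stopped or removed.
 */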
351
352
353/**
354 * genwqe_release() - file close
355 * @inode: file system information
356 * @filp: file handle
357 *
358 * This function is executed whenever an application calls 'close(fd_genwqe)'
359 *
360 * Return: always 0
361 */
362static int genwqe_release(struct inode *inode, struct file *filp)
363{
364 struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
365 struct genwqe_dev *cd = cfile->cd;
366
367 /* there must be no entries in these lists! */
368 genwqe_remove_mappings(cfile);
369 genwqe_remove_pinnings(cfile);
370
371 /* remove this filp from the asynchronously notified filp's */
372 genwqe_fasync(-1, filp, 0);
373
374 /*
375 * For this to work we must not release cd when this cfile is
376 * not yet released, otherwise the list entry is invalid,
377 * because the list itself gets reinstantiated!
378 */
379 genwqe_del_file(cd, cfile);
380 kfree(cfile);
381 return 0;
382}
383
384static void genwqe_vma_open(struct vm_area_struct *vma)
385{
386 /* nothing ... */
387}
388
389/**
390 * genwqe_vma_close() - Called each time when vma is unmapped
391 *
392 * Free memory which got allocated by GenWQE mmap().
393 */
394static void genwqe_vma_close(struct vm_area_struct *vma)
395{
396 unsigned long vsize = vma->vm_end - vma->vm_start;
397 struct inode *inode = vma->vm_file->f_dentry->d_inode;
398 struct dma_mapping *dma_map;
399 struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
400 cdev_genwqe);
401 struct pci_dev *pci_dev = cd->pci_dev;
402 dma_addr_t d_addr = 0;
403 struct genwqe_file *cfile = vma->vm_private_data;
404
405 dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
406 &d_addr, NULL);
407 if (dma_map == NULL) {
408 dev_err(&pci_dev->dev,
409 " [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
410 __func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
411 vsize);
412 return;
413 }
414 __genwqe_del_mapping(cfile, dma_map);
415 __genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
416 dma_map->dma_addr);
417 kfree(dma_map);
418}
419
420static struct vm_operations_struct genwqe_vma_ops = {
421 .open = genwqe_vma_open,
422 .close = genwqe_vma_close,
423};
424
425/**
426 * genwqe_mmap() - Provide contiguous buffers to userspace
427 *
428 * We use mmap() to allocate contiguous buffers used for DMA
429 * transfers. After the buffer is allocated we remap it to user-space
430 * and remember a reference to our dma_mapping data structure, where
431 * we store the associated DMA address and allocated size.
432 *
433 * When we receive a DDCB execution request with the ATS bits set to
434 * plain buffer, we look up our dma_mapping list to find the
435 * corresponding DMA address for the associated user-space address.
436 */
437static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
438{
439 int rc;
440 unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
441 struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
442 struct genwqe_dev *cd = cfile->cd;
443 struct dma_mapping *dma_map;
444
445 if (vsize == 0)
446 return -EINVAL;
447
448 if (get_order(vsize) > MAX_ORDER)
449 return -ENOMEM;
450
451 dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
452 if (dma_map == NULL)
453 return -ENOMEM;
454
455 genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
456 dma_map->u_vaddr = (void *)vma->vm_start;
457 dma_map->size = vsize;
458 dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
459 dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
460 &dma_map->dma_addr);
461 if (dma_map->k_vaddr == NULL) {
462 rc = -ENOMEM;
463 goto free_dma_map;
464 }
465
466 if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
467 *(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;
468
469 pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
470 rc = remap_pfn_range(vma,
471 vma->vm_start,
472 pfn,
473 vsize,
474 vma->vm_page_prot);
475 if (rc != 0) {
476 rc = -EFAULT;
477 goto free_dma_mem;
478 }
479
480 vma->vm_private_data = cfile;
481 vma->vm_ops = &genwqe_vma_ops;
482 __genwqe_add_mapping(cfile, dma_map);
483
484 return 0;
485
486 free_dma_mem:
487 __genwqe_free_consistent(cd, dma_map->size,
488 dma_map->k_vaddr,
489 dma_map->dma_addr);
490 free_dma_map:
491 kfree(dma_map);
492 return rc;
493}
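/*
 * Illustrative userspace sketch (assumption, not from this patch):
 * allocating such a DMA buffer is just an mmap() on the device node
 * created below as GENWQE_DEVNAME "%u_card":
 *
 *   int fd = open("/dev/genwqe0_card", O_RDWR);
 *   void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, fd, 0);
 *
 * A later munmap() triggers genwqe_vma_close() above, which returns
 * the consistent memory to the kernel.
 */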
494
495/**
496 * do_flash_update() - Execute flash update (write image or CVPD)
497 * @cd: genwqe device
498 * @load: details about image load
499 *
500 * Return: 0 if successful
501 */
502
503#define FLASH_BLOCK 0x40000 /* we use 256k blocks */
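/*
 * Worked example: a 1 MiB image is transferred in four FLASH_BLOCK
 * (256 KiB) chunks; load->size must be a multiple of 4 bytes and
 * load->data_addr must be page aligned, as checked at the top of
 * do_flash_update() below.
 */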
504
505static int do_flash_update(struct genwqe_file *cfile,
506 struct genwqe_bitstream *load)
507{
508 int rc = 0;
509 int blocks_to_flash;
510 dma_addr_t dma_addr;
511 u64 flash = 0;
512 size_t tocopy = 0;
513 u8 __user *buf;
514 u8 *xbuf;
515 u32 crc;
516 u8 cmdopts;
517 struct genwqe_dev *cd = cfile->cd;
518 struct pci_dev *pci_dev = cd->pci_dev;
519
520 if ((load->size & 0x3) != 0)
521 return -EINVAL;
522
523 if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
524 return -EINVAL;
525
526 /* FIXME Bits have changed for new service layer! */
527 switch ((char)load->partition) {
528 case '0':
529 cmdopts = 0x14;
530 break; /* download/erase_first/part_0 */
531 case '1':
532 cmdopts = 0x1C;
533 break; /* download/erase_first/part_1 */
534 case 'v': /* cmdopts = 0x0c (VPD) */
535 default:
536 return -EINVAL;
537 }
538
539 buf = (u8 __user *)load->data_addr;
540 xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
541 if (xbuf == NULL)
542 return -ENOMEM;
543
544 blocks_to_flash = load->size / FLASH_BLOCK;
545 while (load->size) {
546 struct genwqe_ddcb_cmd *req;
547
548 /*
549 * We must be 4-byte aligned. The buffer must be zero-padded
550 * to have defined values when calculating the CRC.
551 */
552 tocopy = min_t(size_t, load->size, FLASH_BLOCK);
553
554 rc = copy_from_user(xbuf, buf, tocopy);
555 if (rc) {
556 rc = -EFAULT;
557 goto free_buffer;
558 }
559 crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);
560
561 dev_dbg(&pci_dev->dev,
562 "[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
563 __func__, (unsigned long)dma_addr, crc, tocopy,
564 blocks_to_flash);
565
566 /* prepare DDCB for SLU process */
567 req = ddcb_requ_alloc();
568 if (req == NULL) {
569 rc = -ENOMEM;
570 goto free_buffer;
571 }
572
573 req->cmd = SLCMD_MOVE_FLASH;
574 req->cmdopts = cmdopts;
575
576 /* prepare invariant values */
577 if (genwqe_get_slu_id(cd) <= 0x2) {
578 *(__be64 *)&req->__asiv[0] = cpu_to_be64(dma_addr);
579 *(__be64 *)&req->__asiv[8] = cpu_to_be64(tocopy);
580 *(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
581 *(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
582 req->__asiv[24] = load->uid;
583 *(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);
584
585 /* for simulation only */
586 *(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
587 *(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
588 req->asiv_length = 32; /* bytes included in crc calc */
589 } else { /* setup DDCB for ATS architecture */
590 *(__be64 *)&req->asiv[0] = cpu_to_be64(dma_addr);
591 *(__be32 *)&req->asiv[8] = cpu_to_be32(tocopy);
592 *(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
593 *(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
594 *(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
595 *(__be32 *)&req->asiv[28] = cpu_to_be32(crc);
596
597 /* for simulation only */
598 *(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
599 *(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);
600
601 /* Rd only */
602 req->ats = 0x4ULL << 44;
603 req->asiv_length = 40; /* bytes included in crc calc */
604 }
605 req->asv_length = 8;
606
607 /* For Genwqe5 we get back the calculated CRC */
608 *(u64 *)&req->asv[0] = 0ULL; /* 0x80 */
609
610 rc = __genwqe_execute_raw_ddcb(cd, req);
611
612 load->retc = req->retc;
613 load->attn = req->attn;
614 load->progress = req->progress;
615
616 if (rc < 0) {
617 ddcb_requ_free(req);
618 goto free_buffer;
619 }
620
621 if (req->retc != DDCB_RETC_COMPLETE) {
622 rc = -EIO;
623 ddcb_requ_free(req);
624 goto free_buffer;
625 }
626
627 load->size -= tocopy;
628 flash += tocopy;
629 buf += tocopy;
630 blocks_to_flash--;
631 ddcb_requ_free(req);
632 }
633
634 free_buffer:
635 __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
636 return rc;
637}
638
639static int do_flash_read(struct genwqe_file *cfile,
640 struct genwqe_bitstream *load)
641{
642 int rc, blocks_to_flash;
643 dma_addr_t dma_addr;
644 u64 flash = 0;
645 size_t tocopy = 0;
646 u8 __user *buf;
647 u8 *xbuf;
648 u8 cmdopts;
649 struct genwqe_dev *cd = cfile->cd;
650 struct pci_dev *pci_dev = cd->pci_dev;
651 struct genwqe_ddcb_cmd *cmd;
652
653 if ((load->size & 0x3) != 0)
654 return -EINVAL;
655
656 if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
657 return -EINVAL;
658
659 /* FIXME Bits have changed for new service layer! */
660 switch ((char)load->partition) {
661 case '0':
662 cmdopts = 0x12;
663 break; /* upload/part_0 */
664 case '1':
665 cmdopts = 0x1A;
666 break; /* upload/part_1 */
667 case 'v':
668 default:
669 return -EINVAL;
670 }
671
672 buf = (u8 __user *)load->data_addr;
673 xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
674 if (xbuf == NULL)
675 return -ENOMEM;
676
677 blocks_to_flash = load->size / FLASH_BLOCK;
678 while (load->size) {
679 /*
680 * We must be 4-byte aligned. The buffer must be zero-padded
681 * to have defined values when calculating the CRC.
682 */
683 tocopy = min_t(size_t, load->size, FLASH_BLOCK);
684
685 dev_dbg(&pci_dev->dev,
686 "[%s] DMA: %lx SZ: %ld %d\n",
687 __func__, (unsigned long)dma_addr, tocopy,
688 blocks_to_flash);
689
690 /* prepare DDCB for SLU process */
691 cmd = ddcb_requ_alloc();
692 if (cmd == NULL) {
693 rc = -ENOMEM;
694 goto free_buffer;
695 }
696 cmd->cmd = SLCMD_MOVE_FLASH;
697 cmd->cmdopts = cmdopts;
698
699 /* prepare invariant values */
700 if (genwqe_get_slu_id(cd) <= 0x2) {
701 *(__be64 *)&cmd->__asiv[0] = cpu_to_be64(dma_addr);
702 *(__be64 *)&cmd->__asiv[8] = cpu_to_be64(tocopy);
703 *(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
704 *(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
705 cmd->__asiv[24] = load->uid;
706			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0); /* CRC */
707 cmd->asiv_length = 32; /* bytes included in crc calc */
708 } else { /* setup DDCB for ATS architecture */
709 *(__be64 *)&cmd->asiv[0] = cpu_to_be64(dma_addr);
710 *(__be32 *)&cmd->asiv[8] = cpu_to_be32(tocopy);
711 *(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
712 *(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
713 *(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
714 *(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */
715
716 /* rd/wr */
717 cmd->ats = 0x5ULL << 44;
718 cmd->asiv_length = 40; /* bytes included in crc calc */
719 }
720 cmd->asv_length = 8;
721
722 /* we only get back the calculated CRC */
723 *(u64 *)&cmd->asv[0] = 0ULL; /* 0x80 */
724
725 rc = __genwqe_execute_raw_ddcb(cd, cmd);
726
727 load->retc = cmd->retc;
728 load->attn = cmd->attn;
729 load->progress = cmd->progress;
730
731 if ((rc < 0) && (rc != -EBADMSG)) {
732 ddcb_requ_free(cmd);
733 goto free_buffer;
734 }
735
736 rc = copy_to_user(buf, xbuf, tocopy);
737 if (rc) {
738 rc = -EFAULT;
739 ddcb_requ_free(cmd);
740 goto free_buffer;
741 }
742
743 /* We know that we can get retc 0x104 with CRC err */
744 if (((cmd->retc == DDCB_RETC_FAULT) &&
745 (cmd->attn != 0x02)) || /* Normally ignore CRC error */
746 ((cmd->retc == DDCB_RETC_COMPLETE) &&
747 (cmd->attn != 0x00))) { /* Everything was fine */
748 rc = -EIO;
749 ddcb_requ_free(cmd);
750 goto free_buffer;
751 }
752
753 load->size -= tocopy;
754 flash += tocopy;
755 buf += tocopy;
756 blocks_to_flash--;
757 ddcb_requ_free(cmd);
758 }
759 rc = 0;
760
761 free_buffer:
762 __genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
763 return rc;
764}
765
766static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
767{
768 int rc;
769 struct genwqe_dev *cd = cfile->cd;
770 struct pci_dev *pci_dev = cfile->cd->pci_dev;
771 struct dma_mapping *dma_map;
772 unsigned long map_addr;
773 unsigned long map_size;
774
775 if ((m->addr == 0x0) || (m->size == 0))
776 return -EINVAL;
777
778 map_addr = (m->addr & PAGE_MASK);
779 map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
780
781 dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
782 if (dma_map == NULL)
783 return -ENOMEM;
784
785 genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
786 rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL);
787 if (rc != 0) {
788 dev_err(&pci_dev->dev,
789 "[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
			kfree(dma_map);
790			return rc;
791 }
792
793 genwqe_add_pin(cfile, dma_map);
794 return 0;
795}
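/*
 * Worked example for the rounding above (illustrative values, 4 KiB
 * pages): pinning m->addr = 0x12345678 with m->size = 0x100 yields
 * map_addr = 0x12345000 and map_size = 0x1000, i.e. exactly the one
 * page containing the buffer is pinned and DMA mapped.
 */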
796
797static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
798{
799 struct genwqe_dev *cd = cfile->cd;
800 struct dma_mapping *dma_map;
801 unsigned long map_addr;
802 unsigned long map_size;
803
804 if (m->addr == 0x0)
805 return -EINVAL;
806
807 map_addr = (m->addr & PAGE_MASK);
808 map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
809
810 dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
811 if (dma_map == NULL)
812 return -ENOENT;
813
814 genwqe_del_pin(cfile, dma_map);
815 genwqe_user_vunmap(cd, dma_map, NULL);
816 kfree(dma_map);
817 return 0;
818}
819
820/**
821 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
822 *
823 * Cleans up the fixups only if there are any. Pinnings are not removed.
824 */
825static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
826{
827 unsigned int i;
828 struct dma_mapping *dma_map;
829 struct genwqe_dev *cd = cfile->cd;
830
831 for (i = 0; i < DDCB_FIXUPS; i++) {
832 dma_map = &req->dma_mappings[i];
833
834 if (dma_mapping_used(dma_map)) {
835 __genwqe_del_mapping(cfile, dma_map);
836 genwqe_user_vunmap(cd, dma_map, req);
837 }
838 if (req->sgl[i] != NULL) {
839 genwqe_free_sgl(cd, req->sgl[i],
840 req->sgl_dma_addr[i],
841 req->sgl_size[i]);
842 req->sgl[i] = NULL;
843 req->sgl_dma_addr[i] = 0x0;
844 req->sgl_size[i] = 0;
845 }
846
847 }
848 return 0;
849}
850
851/**
852 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
853 *
854 * Before the DDCB gets executed we need to handle the fixups. We
855 * replace the user-space addresses with DMA addresses or do
856 * additional setup work e.g. generating a scatter-gather list which
857 * is used to describe the memory referred to in the fixup.
858 */
859static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
860{
861 int rc;
862 unsigned int asiv_offs, i;
863 struct genwqe_dev *cd = cfile->cd;
864 struct genwqe_ddcb_cmd *cmd = &req->cmd;
865 struct dma_mapping *m;
866 const char *type = "UNKNOWN";
867
868 for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
869 i++, asiv_offs += 0x08) {
870
871 u64 u_addr;
872 dma_addr_t d_addr;
873 u32 u_size = 0;
874 u64 ats_flags;
875
876 ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);
877
878 switch (ats_flags) {
879
880 case ATS_TYPE_DATA:
881 break; /* nothing to do here */
882
883 case ATS_TYPE_FLAT_RDWR:
884 case ATS_TYPE_FLAT_RD: {
885 u_addr = be64_to_cpu(*((__be64 *)&cmd->
886 asiv[asiv_offs]));
887 u_size = be32_to_cpu(*((__be32 *)&cmd->
888 asiv[asiv_offs + 0x08]));
889
890 /*
891 * No data available. Ignore u_addr in this
892 * case and set addr to 0. Hardware must not
893 * fetch the buffer.
894 */
895 if (u_size == 0x0) {
896 *((__be64 *)&cmd->asiv[asiv_offs]) =
897 cpu_to_be64(0x0);
898 break;
899 }
900
901 m = __genwqe_search_mapping(cfile, u_addr, u_size,
902 &d_addr, NULL);
903 if (m == NULL) {
904 rc = -EFAULT;
905 goto err_out;
906 }
907
908 *((__be64 *)&cmd->asiv[asiv_offs]) =
909 cpu_to_be64(d_addr);
910 break;
911 }
912
913 case ATS_TYPE_SGL_RDWR:
914 case ATS_TYPE_SGL_RD: {
915 int page_offs, nr_pages, offs;
916
917 u_addr = be64_to_cpu(*((__be64 *)
918 &cmd->asiv[asiv_offs]));
919 u_size = be32_to_cpu(*((__be32 *)
920 &cmd->asiv[asiv_offs + 0x08]));
921
922 /*
923 * No data available. Ignore u_addr in this
924 * case and set addr to 0. Hardware must not
925 * fetch the empty sgl.
926 */
927 if (u_size == 0x0) {
928 *((__be64 *)&cmd->asiv[asiv_offs]) =
929 cpu_to_be64(0x0);
930 break;
931 }
932
933 m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
934 if (m != NULL) {
935 type = "PINNING";
936 page_offs = (u_addr -
937 (u64)m->u_vaddr)/PAGE_SIZE;
938 } else {
939 type = "MAPPING";
940 m = &req->dma_mappings[i];
941
942 genwqe_mapping_init(m,
943 GENWQE_MAPPING_SGL_TEMP);
944 rc = genwqe_user_vmap(cd, m, (void *)u_addr,
945 u_size, req);
946 if (rc != 0)
947 goto err_out;
948
949 __genwqe_add_mapping(cfile, m);
950 page_offs = 0;
951 }
952
953 offs = offset_in_page(u_addr);
954 nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE);
955
956 /* create genwqe style scatter gather list */
957 req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages,
958 &req->sgl_dma_addr[i],
959 &req->sgl_size[i]);
960 if (req->sgl[i] == NULL) {
961 rc = -ENOMEM;
962 goto err_out;
963 }
964 genwqe_setup_sgl(cd, offs, u_size,
965 req->sgl[i],
966 req->sgl_dma_addr[i],
967 req->sgl_size[i],
968 m->dma_list,
969 page_offs,
970 nr_pages);
971
972 *((__be64 *)&cmd->asiv[asiv_offs]) =
973 cpu_to_be64(req->sgl_dma_addr[i]);
974
975 break;
976 }
977 default:
978 rc = -EINVAL;
979 goto err_out;
980 }
981 }
982 return 0;
983
984 err_out:
985 ddcb_cmd_cleanup(cfile, req);
986 return rc;
987}
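/*
 * Note on the ATS encoding (inferred from this file, not
 * authoritative): every 8-byte ASIV word has a 4-bit type code in
 * cmd->ats which ATS_GET_FLAGS() extracts per asiv_offs. Compare
 * do_flash_update(), which sets req->ats = 0x4ULL << 44 for its
 * read-only buffer reference, with do_flash_read(), which uses
 * 0x5ULL << 44 for read/write access.
 */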
988
989/**
990 * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
991 *
992 * The code will build up the translation tables or lookup the
993 * contignous memory allocation table to find the right translations
994 * and DMA addresses.
995 */
996static int genwqe_execute_ddcb(struct genwqe_file *cfile,
997 struct genwqe_ddcb_cmd *cmd)
998{
999 int rc;
1000 struct genwqe_dev *cd = cfile->cd;
1001 struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);
1002
1003 rc = ddcb_cmd_fixups(cfile, req);
1004 if (rc != 0)
1005 return rc;
1006
1007 rc = __genwqe_execute_raw_ddcb(cd, cmd);
1008 ddcb_cmd_cleanup(cfile, req);
1009 return rc;
1010}
1011
1012static int do_execute_ddcb(struct genwqe_file *cfile,
1013 unsigned long arg, int raw)
1014{
1015 int rc;
1016 struct genwqe_ddcb_cmd *cmd;
1017 struct ddcb_requ *req;
1018 struct genwqe_dev *cd = cfile->cd;
1019
1020 cmd = ddcb_requ_alloc();
1021 if (cmd == NULL)
1022 return -ENOMEM;
1023
1024 req = container_of(cmd, struct ddcb_requ, cmd);
1025
1026 if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
1027 ddcb_requ_free(cmd);
1028 return -EFAULT;
1029 }
1030
1031 if (!raw)
1032 rc = genwqe_execute_ddcb(cfile, cmd);
1033 else
1034 rc = __genwqe_execute_raw_ddcb(cd, cmd);
1035
1036	/* Copy back only the modified fields. Do not copy ASIV
1037 back since the copy got modified by the driver. */
1038 if (copy_to_user((void __user *)arg, cmd,
1039 sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
1040 ddcb_requ_free(cmd);
1041 return -EFAULT;
1042 }
1043
1044 ddcb_requ_free(cmd);
1045 return rc;
1046}
1047
1048/**
1049 * genwqe_ioctl() - IO control
1050 * @filp: file handle
1051 * @cmd: command identifier (passed from user)
1052 * @arg: argument (passed from user)
1053 *
1054 * Return: 0 success
1055 */
1056static long genwqe_ioctl(struct file *filp, unsigned int cmd,
1057 unsigned long arg)
1058{
1059 int rc = 0;
1060 struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
1061 struct genwqe_dev *cd = cfile->cd;
1062 struct genwqe_reg_io __user *io;
1063 u64 val;
1064 u32 reg_offs;
1065
1066 if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
1067 return -EINVAL;
1068
1069 switch (cmd) {
1070
1071 case GENWQE_GET_CARD_STATE:
1072 put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
1073 return 0;
1074
1075 /* Register access */
1076 case GENWQE_READ_REG64: {
1077 io = (struct genwqe_reg_io __user *)arg;
1078
1079 if (get_user(reg_offs, &io->num))
1080 return -EFAULT;
1081
1082 if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
1083 return -EINVAL;
1084
1085 val = __genwqe_readq(cd, reg_offs);
1086 put_user(val, &io->val64);
1087 return 0;
1088 }
1089
1090 case GENWQE_WRITE_REG64: {
1091 io = (struct genwqe_reg_io __user *)arg;
1092
1093 if (!capable(CAP_SYS_ADMIN))
1094 return -EPERM;
1095
1096 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
1097 return -EPERM;
1098
1099 if (get_user(reg_offs, &io->num))
1100 return -EFAULT;
1101
1102 if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
1103 return -EINVAL;
1104
1105 if (get_user(val, &io->val64))
1106 return -EFAULT;
1107
1108 __genwqe_writeq(cd, reg_offs, val);
1109 return 0;
1110 }
1111
1112 case GENWQE_READ_REG32: {
1113 io = (struct genwqe_reg_io __user *)arg;
1114
1115 if (get_user(reg_offs, &io->num))
1116 return -EFAULT;
1117
1118 if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
1119 return -EINVAL;
1120
1121 val = __genwqe_readl(cd, reg_offs);
1122 put_user(val, &io->val64);
1123 return 0;
1124 }
1125
1126 case GENWQE_WRITE_REG32: {
1127 io = (struct genwqe_reg_io __user *)arg;
1128
1129 if (!capable(CAP_SYS_ADMIN))
1130 return -EPERM;
1131
1132 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
1133 return -EPERM;
1134
1135 if (get_user(reg_offs, &io->num))
1136 return -EFAULT;
1137
1138 if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
1139 return -EINVAL;
1140
1141 if (get_user(val, &io->val64))
1142 return -EFAULT;
1143
1144 __genwqe_writel(cd, reg_offs, val);
1145 return 0;
1146 }
1147
1148 /* Flash update/reading */
1149 case GENWQE_SLU_UPDATE: {
1150 struct genwqe_bitstream load;
1151
1152 if (!genwqe_is_privileged(cd))
1153 return -EPERM;
1154
1155 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
1156 return -EPERM;
1157
1158 if (copy_from_user(&load, (void __user *)arg,
1159 sizeof(load)))
1160 return -EFAULT;
1161
1162 rc = do_flash_update(cfile, &load);
1163
1164 if (copy_to_user((void __user *)arg, &load, sizeof(load)))
1165 return -EFAULT;
1166
1167 return rc;
1168 }
1169
1170 case GENWQE_SLU_READ: {
1171 struct genwqe_bitstream load;
1172
1173 if (!genwqe_is_privileged(cd))
1174 return -EPERM;
1175
1176 if (genwqe_flash_readback_fails(cd))
1177 return -ENOSPC; /* known to fail for old versions */
1178
1179 if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
1180 return -EFAULT;
1181
1182 rc = do_flash_read(cfile, &load);
1183
1184 if (copy_to_user((void __user *)arg, &load, sizeof(load)))
1185 return -EFAULT;
1186
1187 return rc;
1188 }
1189
1190 /* memory pinning and unpinning */
1191 case GENWQE_PIN_MEM: {
1192 struct genwqe_mem m;
1193
1194 if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
1195 return -EFAULT;
1196
1197 return genwqe_pin_mem(cfile, &m);
1198 }
1199
1200 case GENWQE_UNPIN_MEM: {
1201 struct genwqe_mem m;
1202
1203 if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
1204 return -EFAULT;
1205
1206 return genwqe_unpin_mem(cfile, &m);
1207 }
1208
1209	/* launch a DDCB and wait for completion */
1210 case GENWQE_EXECUTE_DDCB:
1211 return do_execute_ddcb(cfile, arg, 0);
1212
1213 case GENWQE_EXECUTE_RAW_DDCB: {
1214
1215 if (!capable(CAP_SYS_ADMIN))
1216 return -EPERM;
1217
1218 return do_execute_ddcb(cfile, arg, 1);
1219 }
1220
1221 default:
1222 return -EINVAL;
1223 }
1224
1225 return rc;
1226}
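/*
 * Illustrative userspace sketch (assumption, not part of this
 * patch): reading a 64-bit card register through the interface
 * above:
 *
 *   struct genwqe_reg_io io = { .num = 0x0 };
 *
 *   if (ioctl(fd, GENWQE_READ_REG64, &io) == 0)
 *           printf("reg %llx = %016llx\n",
 *                  (unsigned long long)io.num,
 *                  (unsigned long long)io.val64);
 *
 * io.num is the byte offset within the MMIO BAR; it must be 8-byte
 * aligned and below cd->mmio_len, otherwise -EINVAL is returned.
 */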
1227
1228#if defined(CONFIG_COMPAT)
1229/**
1230 * genwqe_compat_ioctl() - Compatibility ioctl
1231 *
1232 * Called whenever a 32-bit process running under a 64-bit kernel
1233 * performs an ioctl on /dev/genwqe<n>_card.
1234 *
1235 * @filp: file pointer.
1236 * @cmd: command.
1237 * @arg: user argument.
1238 * Return: zero on success or negative number on failure.
1239 */
1240static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
1241 unsigned long arg)
1242{
1243 return genwqe_ioctl(filp, cmd, arg);
1244}
1245#endif /* defined(CONFIG_COMPAT) */
1246
1247static const struct file_operations genwqe_fops = {
1248 .owner = THIS_MODULE,
1249 .open = genwqe_open,
1250 .fasync = genwqe_fasync,
1251 .mmap = genwqe_mmap,
1252 .unlocked_ioctl = genwqe_ioctl,
1253#if defined(CONFIG_COMPAT)
1254 .compat_ioctl = genwqe_compat_ioctl,
1255#endif
1256 .release = genwqe_release,
1257};
1258
1259static int genwqe_device_initialized(struct genwqe_dev *cd)
1260{
1261 return cd->dev != NULL;
1262}
1263
1264/**
1265 * genwqe_device_create() - Create and configure genwqe char device
1266 * @cd: genwqe device descriptor
1267 *
1268 * This function must be called before we create any more genwqe
1269 * character devices, because it allocates the major and minor
1270 * numbers which are supposed to be used by the client drivers.
1271 */
1272int genwqe_device_create(struct genwqe_dev *cd)
1273{
1274 int rc;
1275 struct pci_dev *pci_dev = cd->pci_dev;
1276
1277 /*
1278 * Here starts the individual setup per client. It must
1279 * initialize its own cdev data structure with its own fops.
1280 * The appropriate devnum needs to be created. The ranges must
1281 * not overlap.
1282 */
1283 rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
1284 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
1285 if (rc < 0) {
1286 dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
1287 goto err_dev;
1288 }
1289
1290 cdev_init(&cd->cdev_genwqe, &genwqe_fops);
1291 cd->cdev_genwqe.owner = THIS_MODULE;
1292
1293 rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
1294 if (rc < 0) {
1295 dev_err(&pci_dev->dev, "err: cdev_add failed\n");
1296 goto err_add;
1297 }
1298
1299 /*
1300 * Finally the device in /dev/... must be created. The naming
1301 * rule used here is GENWQE_DEVNAME "%u_card", e.g. /dev/genwqe0_card.
1302 */
1303 cd->dev = device_create_with_groups(cd->class_genwqe,
1304 &cd->pci_dev->dev,
1305 cd->devnum_genwqe, cd,
1306 genwqe_attribute_groups,
1307 GENWQE_DEVNAME "%u_card",
1308 cd->card_idx);
1309 if (IS_ERR(cd->dev)) {
1310 rc = PTR_ERR(cd->dev);
1311 goto err_cdev;
1312 }
1313
1314 rc = genwqe_init_debugfs(cd);
1315 if (rc != 0)
1316 goto err_debugfs;
1317
1318 return 0;
1319
1320 err_debugfs:
1321 device_destroy(cd->class_genwqe, cd->devnum_genwqe);
1322 err_cdev:
1323 cdev_del(&cd->cdev_genwqe);
1324 err_add:
1325 unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
1326 err_dev:
1327 cd->dev = NULL;
1328 return rc;
1329}
1330
1331static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
1332{
1333 int rc;
1334 unsigned int i;
1335 struct pci_dev *pci_dev = cd->pci_dev;
1336
1337 if (!genwqe_open_files(cd))
1338 return 0;
1339
1340 dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);
1341
1342 rc = genwqe_kill_fasync(cd, SIGIO);
1343 if (rc > 0) {
1344 /* give kill_timeout seconds to close file descriptors ... */
1345 for (i = 0; (i < genwqe_kill_timeout) &&
1346 genwqe_open_files(cd); i++) {
1347 dev_info(&pci_dev->dev, " %d sec ...", i);
1348
1349 cond_resched();
1350 msleep(1000);
1351 }
1352
1353 /* if no open files we can safely continue, else ... */
1354 if (!genwqe_open_files(cd))
1355 return 0;
1356
1357 dev_warn(&pci_dev->dev,
1358 "[%s] send SIGKILL and wait ...\n", __func__);
1359
1360 rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
1361 if (rc) {
1362			/* Give kill_timeout more seconds to end processes */
1363 for (i = 0; (i < genwqe_kill_timeout) &&
1364 genwqe_open_files(cd); i++) {
1365 dev_warn(&pci_dev->dev, " %d sec ...", i);
1366
1367 cond_resched();
1368 msleep(1000);
1369 }
1370 }
1371 }
1372 return 0;
1373}
1374
1375/**
1376 * genwqe_device_remove() - Remove genwqe's char device
1377 *
1378 * This function must be called after the client devices are removed
1379 * because it will free the major/minor number range for the genwqe
1380 * drivers.
1381 *
1382 * This function must be robust enough to be called twice.
1383 */
1384int genwqe_device_remove(struct genwqe_dev *cd)
1385{
1386 int rc;
1387 struct pci_dev *pci_dev = cd->pci_dev;
1388
1389 if (!genwqe_device_initialized(cd))
1390 return 1;
1391
1392 genwqe_inform_and_stop_processes(cd);
1393
1394 /*
1395 * We currently wait until all file descriptors are
1396 * closed. This leads to a problem when we abort the
1397 * application, which then decreases this reference from
1398 * 1/unused to 0/illegal and not from 2/used to 1/empty.
1399 */
1400 rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
1401 if (rc != 1) {
1402 dev_err(&pci_dev->dev,
1403 "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
1404 panic("Fatal err: cannot free resources with pending references!");
1405 }
1406
1407 genqwe_exit_debugfs(cd);
1408 device_destroy(cd->class_genwqe, cd->devnum_genwqe);
1409 cdev_del(&cd->cdev_genwqe);
1410 unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
1411 cd->dev = NULL;
1412
1413 return 0;
1414}
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
new file mode 100644
index 000000000000..a72a99266c3c
--- /dev/null
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -0,0 +1,288 @@
1/**
2 * IBM Accelerator Family 'GenWQE'
3 *
4 * (C) Copyright IBM Corp. 2013
5 *
6 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
7 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
8 * Author: Michael Jung <mijung@de.ibm.com>
9 * Author: Michael Ruettger <michael@ibmra.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License (version 2 only)
13 * as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21/*
22 * Sysfs interfaces for the GenWQE card. There are attributes to query
23 * the version of the bitstream as well as some for the driver. For
24 * debugging, please also see the debugfs interfaces of this driver.
25 */
26
27#include <linux/version.h>
28#include <linux/kernel.h>
29#include <linux/types.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/string.h>
33#include <linux/fs.h>
34#include <linux/sysfs.h>
35#include <linux/ctype.h>
36#include <linux/device.h>
37
38#include "card_base.h"
39#include "card_ddcb.h"
40
41static const char * const genwqe_types[] = {
42 [GENWQE_TYPE_ALTERA_230] = "GenWQE4-230",
43 [GENWQE_TYPE_ALTERA_530] = "GenWQE4-530",
44 [GENWQE_TYPE_ALTERA_A4] = "GenWQE5-A4",
45 [GENWQE_TYPE_ALTERA_A7] = "GenWQE5-A7",
46};
47
48static ssize_t status_show(struct device *dev, struct device_attribute *attr,
49 char *buf)
50{
51 struct genwqe_dev *cd = dev_get_drvdata(dev);
52 const char *cs[GENWQE_CARD_STATE_MAX] = { "unused", "used", "error" };
53
54 return sprintf(buf, "%s\n", cs[cd->card_state]);
55}
56static DEVICE_ATTR_RO(status);
57
58static ssize_t appid_show(struct device *dev, struct device_attribute *attr,
59 char *buf)
60{
61 char app_name[5];
62 struct genwqe_dev *cd = dev_get_drvdata(dev);
63
64 genwqe_read_app_id(cd, app_name, sizeof(app_name));
65 return sprintf(buf, "%s\n", app_name);
66}
67static DEVICE_ATTR_RO(appid);
68
69static ssize_t version_show(struct device *dev, struct device_attribute *attr,
70 char *buf)
71{
72 u64 slu_id, app_id;
73 struct genwqe_dev *cd = dev_get_drvdata(dev);
74
75 slu_id = __genwqe_readq(cd, IO_SLU_UNITCFG);
76 app_id = __genwqe_readq(cd, IO_APP_UNITCFG);
77
78 return sprintf(buf, "%016llx.%016llx\n", slu_id, app_id);
79}
80static DEVICE_ATTR_RO(version);
81
82static ssize_t type_show(struct device *dev, struct device_attribute *attr,
83 char *buf)
84{
85 u8 card_type;
86 struct genwqe_dev *cd = dev_get_drvdata(dev);
87
88 card_type = genwqe_card_type(cd);
89 return sprintf(buf, "%s\n", (card_type >= ARRAY_SIZE(genwqe_types)) ?
90 "invalid" : genwqe_types[card_type]);
91}
92static DEVICE_ATTR_RO(type);
93
94static ssize_t driver_show(struct device *dev, struct device_attribute *attr,
95 char *buf)
96{
97 return sprintf(buf, "%s\n", DRV_VERS_STRING);
98}
99static DEVICE_ATTR_RO(driver);
100
101static ssize_t tempsens_show(struct device *dev, struct device_attribute *attr,
102 char *buf)
103{
104 u64 tempsens;
105 struct genwqe_dev *cd = dev_get_drvdata(dev);
106
107 tempsens = __genwqe_readq(cd, IO_SLU_TEMPERATURE_SENSOR);
108 return sprintf(buf, "%016llx\n", tempsens);
109}
110static DEVICE_ATTR_RO(tempsens);
111
112static ssize_t freerunning_timer_show(struct device *dev,
113 struct device_attribute *attr,
114 char *buf)
115{
116 u64 t;
117 struct genwqe_dev *cd = dev_get_drvdata(dev);
118
119 t = __genwqe_readq(cd, IO_SLC_FREE_RUNNING_TIMER);
120 return sprintf(buf, "%016llx\n", t);
121}
122static DEVICE_ATTR_RO(freerunning_timer);
123
124static ssize_t queue_working_time_show(struct device *dev,
125 struct device_attribute *attr,
126 char *buf)
127{
128 u64 t;
129 struct genwqe_dev *cd = dev_get_drvdata(dev);
130
131 t = __genwqe_readq(cd, IO_SLC_QUEUE_WTIME);
132 return sprintf(buf, "%016llx\n", t);
133}
134static DEVICE_ATTR_RO(queue_working_time);
135
136static ssize_t base_clock_show(struct device *dev,
137 struct device_attribute *attr,
138 char *buf)
139{
140 u64 base_clock;
141 struct genwqe_dev *cd = dev_get_drvdata(dev);
142
143 base_clock = genwqe_base_clock_frequency(cd);
144 return sprintf(buf, "%lld\n", base_clock);
145}
146static DEVICE_ATTR_RO(base_clock);
147
148/**
149 * curr_bitstream_show() - Show the current bitstream id
150 *
151 * There is a bug in some old versions of the CPLD which selects the
152 * bitstream, which causes the IO_SLU_BITSTREAM register to report
153 * unreliable data in very rare cases. This makes this sysfs
154 * entry unreliable until a new CPLD version is used.
155 *
156 * Unfortunately there is no automatic way yet to query the CPLD
157 * version, so you need to manually ensure via programming
158 * tools that you have a recent version of the CPLD software.
159 *
160 * The proposed workaround is to use a special recovery bitstream
161 * on the backup partition (0) to identify problems while loading the
162 * image.
163 */
164static ssize_t curr_bitstream_show(struct device *dev,
165 struct device_attribute *attr, char *buf)
166{
167 int curr_bitstream;
168 struct genwqe_dev *cd = dev_get_drvdata(dev);
169
170 curr_bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
171 return sprintf(buf, "%d\n", curr_bitstream);
172}
173static DEVICE_ATTR_RO(curr_bitstream);
174
175/**
176 * next_bitstream_show() - Show the next activated bitstream
177 *
178 * IO_SLC_CFGREG_SOFTRESET: This register can only be accessed by the PF.
179 */
180static ssize_t next_bitstream_show(struct device *dev,
181 struct device_attribute *attr, char *buf)
182{
183 int next_bitstream;
184 struct genwqe_dev *cd = dev_get_drvdata(dev);
185
186 switch ((cd->softreset & 0xc) >> 2) {
187 case 0x2:
188 next_bitstream = 0;
189 break;
190 case 0x3:
191 next_bitstream = 1;
192 break;
193 default:
194 next_bitstream = -1;
195 break; /* error */
196 }
197 return sprintf(buf, "%d\n", next_bitstream);
198}
199
200static ssize_t next_bitstream_store(struct device *dev,
201 struct device_attribute *attr,
202 const char *buf, size_t count)
203{
204 int partition;
205 struct genwqe_dev *cd = dev_get_drvdata(dev);
206
207 if (kstrtoint(buf, 0, &partition) < 0)
208 return -EINVAL;
209
210 switch (partition) {
211 case 0x0:
212 cd->softreset = 0x78;
213 break;
214 case 0x1:
215 cd->softreset = 0x7c;
216 break;
217 default:
218 return -EINVAL;
219 }
220
221 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, cd->softreset);
222 return count;
223}
224static DEVICE_ATTR_RW(next_bitstream);
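/*
 * Usage sketch (the sysfs path is an assumption based on the
 * class/device naming used in this driver):
 *
 *   echo 1 > /sys/class/genwqe/genwqe0_card/next_bitstream
 *   cat /sys/class/genwqe/genwqe0_card/next_bitstream
 */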
225
226/*
227 * Create device_attribute structures / params: name, mode, show, store
228 * additional flag if valid in VF
229 */
230static struct attribute *genwqe_attributes[] = {
231 &dev_attr_tempsens.attr,
232 &dev_attr_next_bitstream.attr,
233 &dev_attr_curr_bitstream.attr,
234 &dev_attr_base_clock.attr,
235 &dev_attr_driver.attr,
236 &dev_attr_type.attr,
237 &dev_attr_version.attr,
238 &dev_attr_appid.attr,
239 &dev_attr_status.attr,
240 &dev_attr_freerunning_timer.attr,
241 &dev_attr_queue_working_time.attr,
242 NULL,
243};
244
245static struct attribute *genwqe_normal_attributes[] = {
246 &dev_attr_driver.attr,
247 &dev_attr_type.attr,
248 &dev_attr_version.attr,
249 &dev_attr_appid.attr,
250 &dev_attr_status.attr,
251 &dev_attr_freerunning_timer.attr,
252 &dev_attr_queue_working_time.attr,
253 NULL,
254};
255
256/**
257 * genwqe_is_visible() - Determine if sysfs attribute should be visible or not
258 *
259 * VFs have restricted mmio capabilities, so not all sysfs entries
260 * are allowed in VFs.
261 */
262static umode_t genwqe_is_visible(struct kobject *kobj,
263 struct attribute *attr, int n)
264{
265 unsigned int j;
266 struct device *dev = container_of(kobj, struct device, kobj);
267 struct genwqe_dev *cd = dev_get_drvdata(dev);
268 umode_t mode = attr->mode;
269
270 if (genwqe_is_privileged(cd))
271 return mode;
272
273 for (j = 0; genwqe_normal_attributes[j] != NULL; j++)
274 if (genwqe_normal_attributes[j] == attr)
275 return mode;
276
277 return 0;
278}
279
280static struct attribute_group genwqe_attribute_group = {
281 .is_visible = genwqe_is_visible,
282 .attrs = genwqe_attributes,
283};
284
285const struct attribute_group *genwqe_attribute_groups[] = {
286 &genwqe_attribute_group,
287 NULL,
288};
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
new file mode 100644
index 000000000000..6b1a6ef9f1a8
--- /dev/null
+++ b/drivers/misc/genwqe/card_utils.c
@@ -0,0 +1,944 @@
1/**
2 * IBM Accelerator Family 'GenWQE'
3 *
4 * (C) Copyright IBM Corp. 2013
5 *
6 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
7 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
8 * Author: Michael Jung <mijung@de.ibm.com>
9 * Author: Michael Ruettger <michael@ibmra.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License (version 2 only)
13 * as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21/*
22 * Miscellaneous functionality used in the other GenWQE driver parts.
23 */
24
25#include <linux/kernel.h>
26#include <linux/dma-mapping.h>
27#include <linux/sched.h>
28#include <linux/vmalloc.h>
29#include <linux/page-flags.h>
30#include <linux/scatterlist.h>
31#include <linux/hugetlb.h>
32#include <linux/iommu.h>
33#include <linux/delay.h>
34#include <linux/pci.h>
36#include <linux/ctype.h>
37#include <linux/module.h>
38#include <linux/platform_device.h>
40#include <asm/pgtable.h>
41
42#include "genwqe_driver.h"
43#include "card_base.h"
44#include "card_ddcb.h"
45
46/**
47 * __genwqe_writeq() - Write 64-bit register
48 * @cd: genwqe device descriptor
49 * @byte_offs: byte offset within BAR
50 * @val: 64-bit value
51 *
52 * Return: 0 if success; < 0 if error
53 */
54int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
55{
56 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
57 return -EIO;
58
59 if (cd->mmio == NULL)
60 return -EIO;
61
62 __raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
63 return 0;
64}
65
66/**
67 * __genwqe_readq() - Read 64-bit register
68 * @cd: genwqe device descriptor
69 * @byte_offs: offset within BAR
70 *
71 * Return: value from register
72 */
73u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
74{
75 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
76 return 0xffffffffffffffffull;
77
78 if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
79 (byte_offs == IO_SLC_CFGREG_GFIR))
80 return 0x000000000000ffffull;
81
82 if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
83 (byte_offs == IO_SLC_CFGREG_GFIR))
84 return 0x00000000ffff0000ull;
85
86 if (cd->mmio == NULL)
87 return 0xffffffffffffffffull;
88
89 return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
90}
91
92/**
93 * __genwqe_writel() - Write 32-bit register
94 * @cd: genwqe device descriptor
95 * @byte_offs: byte offset within BAR
96 * @val: 32-bit value
97 *
98 * Return: 0 if success; < 0 if error
99 */
100int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
101{
102 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
103 return -EIO;
104
105 if (cd->mmio == NULL)
106 return -EIO;
107
108 __raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
109 return 0;
110}
111
112/**
113 * __genwqe_readl() - Read 32-bit register
114 * @cd: genwqe device descriptor
115 * @byte_offs: offset within BAR
116 *
117 * Return: Value from register
118 */
119u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
120{
121 if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
122 return 0xffffffff;
123
124 if (cd->mmio == NULL)
125 return 0xffffffff;
126
127 return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
128}
129
130/**
131 * genwqe_read_app_id() - Extract app_id
132 *
133 * app_unitcfg needs to be filled with valid data first
134 */
135int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
136{
137 int i, j;
138 u32 app_id = (u32)cd->app_unitcfg;
139
140 memset(app_name, 0, len);
141 for (i = 0, j = 0; j < min(len, 4); j++) {
142 char ch = (char)((app_id >> (24 - j*8)) & 0xff);
143 if (ch == ' ')
144 continue;
145 app_name[i++] = isprint(ch) ? ch : 'X';
146 }
147 return i;
148}
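/*
 * Worked example (illustrative value): app_id = 0x475a4950 decodes
 * from the most significant byte down as 'G' 'Z' 'I' 'P', so
 * app_name becomes "GZIP". Blanks are skipped and non-printable
 * bytes are replaced by 'X'.
 */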
149
150/**
151 * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
152 *
153 * Existing kernel functions seem to use a different polynomial,
154 * therefore we could not use them here.
155 *
156 * Genwqe's Polynomial = 0x20044009
157 */
158#define CRC32_POLYNOMIAL 0x20044009
159static u32 crc32_tab[256]; /* crc32 lookup table */
160
161void genwqe_init_crc32(void)
162{
163 int i, j;
164 u32 crc;
165
166 for (i = 0; i < 256; i++) {
167 crc = i << 24;
168 for (j = 0; j < 8; j++) {
169 if (crc & 0x80000000)
170 crc = (crc << 1) ^ CRC32_POLYNOMIAL;
171 else
172 crc = (crc << 1);
173 }
174 crc32_tab[i] = crc;
175 }
176}
177
178/**
179 * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
180 * @buff: pointer to data buffer
181 * @len: length of data for calculation
182 * @init: initial crc (0xffffffff at start)
183 *
184 * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
185 *
186 * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
187 * result in a crc32 of 0xf33cb7d3.
188 *
189 * The existing kernel crc functions did not cover this polynomial yet.
190 *
191 * Return: crc32 checksum.
192 */
193u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
194{
195 int i;
196 u32 crc;
197
198 crc = init;
199 while (len--) {
200 i = ((crc >> 24) ^ *buff++) & 0xFF;
201 crc = (crc << 8) ^ crc32_tab[i];
202 }
203 return crc;
204}
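/*
 * Sanity-check sketch for the documented test vector above:
 *
 *   u8 buf[4] = { 0x01, 0x02, 0x03, 0x04 };
 *
 *   genwqe_init_crc32();
 *   WARN_ON(genwqe_crc32(buf, sizeof(buf), 0xffffffff) != 0xf33cb7d3);
 */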
205
206void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
207 dma_addr_t *dma_handle)
208{
209 if (get_order(size) > MAX_ORDER)
210 return NULL;
211
212 return pci_alloc_consistent(cd->pci_dev, size, dma_handle);
213}
214
215void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
216 void *vaddr, dma_addr_t dma_handle)
217{
218 if (vaddr == NULL)
219 return;
220
221 pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle);
222}
223
224static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
225 int num_pages)
226{
227 int i;
228 struct pci_dev *pci_dev = cd->pci_dev;
229
230 for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
231 pci_unmap_page(pci_dev, dma_list[i],
232 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
233 dma_list[i] = 0x0;
234 }
235}
236
237static int genwqe_map_pages(struct genwqe_dev *cd,
238 struct page **page_list, int num_pages,
239 dma_addr_t *dma_list)
240{
241 int i;
242 struct pci_dev *pci_dev = cd->pci_dev;
243
244 /* establish DMA mapping for requested pages */
245 for (i = 0; i < num_pages; i++) {
246 dma_addr_t daddr;
247
248 dma_list[i] = 0x0;
249 daddr = pci_map_page(pci_dev, page_list[i],
250 0, /* map_offs */
251 PAGE_SIZE,
252 PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */
253
254 if (pci_dma_mapping_error(pci_dev, daddr)) {
255 dev_err(&pci_dev->dev,
256 "[%s] err: no dma addr daddr=%016llx!\n",
257 __func__, (long long)daddr);
258 goto err;
259 }
260
261 dma_list[i] = daddr;
262 }
263 return 0;
264
265 err:
266 genwqe_unmap_pages(cd, dma_list, num_pages);
267 return -EIO;
268}
269
270static int genwqe_sgl_size(int num_pages)
271{
272 int len, num_tlb = num_pages / 7;
273
274	len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
275 return roundup(len, PAGE_SIZE);
276}
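/*
 * Worked example (assuming sizeof(struct sg_entry) == 16, i.e. one
 * 64-bit address plus two 32-bit words): num_pages = 16 gives
 * num_tlb = 2 chaining entries, so len = 16 * (16 + 2 + 1) = 304
 * bytes, rounded up to one 4 KiB page. Each 128-byte block holds
 * 1 chaining entry plus 7 data entries, cf. genwqe_setup_sgl().
 */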
277
278struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
279 dma_addr_t *dma_addr, size_t *sgl_size)
280{
281 struct pci_dev *pci_dev = cd->pci_dev;
282 struct sg_entry *sgl;
283
284 *sgl_size = genwqe_sgl_size(num_pages);
285 if (get_order(*sgl_size) > MAX_ORDER) {
286 dev_err(&pci_dev->dev,
287 "[%s] err: too much memory requested!\n", __func__);
288 return NULL;
289 }
290
291 sgl = __genwqe_alloc_consistent(cd, *sgl_size, dma_addr);
292 if (sgl == NULL) {
293 dev_err(&pci_dev->dev,
294 "[%s] err: no memory available!\n", __func__);
295 return NULL;
296 }
297
298 return sgl;
299}
300
301int genwqe_setup_sgl(struct genwqe_dev *cd,
302 unsigned long offs,
303 unsigned long size,
304 struct sg_entry *sgl,
305 dma_addr_t dma_addr, size_t sgl_size,
306 dma_addr_t *dma_list, int page_offs, int num_pages)
307{
308 int i = 0, j = 0, p;
309 unsigned long dma_offs, map_offs;
310 struct pci_dev *pci_dev = cd->pci_dev;
311 dma_addr_t prev_daddr = 0;
312 struct sg_entry *s, *last_s = NULL;
313
314 /* sanity checks */
315 if (offs > PAGE_SIZE) {
316 dev_err(&pci_dev->dev,
317 "[%s] too large start offs %08lx\n", __func__, offs);
318 return -EFAULT;
319 }
320 if (sgl_size < genwqe_sgl_size(num_pages)) {
321 dev_err(&pci_dev->dev,
322 "[%s] sgl_size too small %08lx for %d pages\n",
323 __func__, sgl_size, num_pages);
324 return -EFAULT;
325 }
326
327 dma_offs = 128; /* next block if needed/dma_offset */
328 map_offs = offs; /* offset in first page */
329
330 s = &sgl[0]; /* first set of 8 entries */
331 p = 0; /* page */
332 while (p < num_pages) {
333 dma_addr_t daddr;
334 unsigned int size_to_map;
335
336 /* always write the chaining entry, cleanup is done later */
337 j = 0;
338 s[j].target_addr = cpu_to_be64(dma_addr + dma_offs);
339 s[j].len = cpu_to_be32(128);
340 s[j].flags = cpu_to_be32(SG_CHAINED);
341 j++;
342
343 while (j < 8) {
344 /* DMA mapping for requested page, offs, size */
345 size_to_map = min(size, PAGE_SIZE - map_offs);
346 daddr = dma_list[page_offs + p] + map_offs;
347 size -= size_to_map;
348 map_offs = 0;
349
350 if (prev_daddr == daddr) {
351 u32 prev_len = be32_to_cpu(last_s->len);
352
353 /* pr_info("daddr combining: "
354 "%016llx/%08x -> %016llx\n",
355 prev_daddr, prev_len, daddr); */
356
357 last_s->len = cpu_to_be32(prev_len +
358 size_to_map);
359
360 p++; /* process next page */
361 if (p == num_pages)
362 goto fixup; /* nothing to do */
363
364 prev_daddr = daddr + size_to_map;
365 continue;
366 }
367
368 /* start new entry */
369 s[j].target_addr = cpu_to_be64(daddr);
370 s[j].len = cpu_to_be32(size_to_map);
371 s[j].flags = cpu_to_be32(SG_DATA);
372 prev_daddr = daddr + size_to_map;
373 last_s = &s[j];
374 j++;
375
376 p++; /* process next page */
377 if (p == num_pages)
378 goto fixup; /* nothing to do */
379 }
380 dma_offs += 128;
381 s += 8; /* continue 8 elements further */
382 }
383 fixup:
384	if (j == 1) { /* combining happened on last entry! */
385 s -= 8; /* full shift needed on previous sgl block */
386 j = 7; /* shift all elements */
387 }
388
389 for (i = 0; i < j; i++) /* move elements 1 up */
390 s[i] = s[i + 1];
391
392 s[i].target_addr = cpu_to_be64(0);
393 s[i].len = cpu_to_be32(0);
394 s[i].flags = cpu_to_be32(SG_END_LIST);
395 return 0;
396}
397
398void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
399 dma_addr_t dma_addr, size_t size)
400{
401 __genwqe_free_consistent(cd, size, sg_list, dma_addr);
402}
403
404/**
405 * free_user_pages() - Give pinned pages back
406 *
407 * Documentation of get_user_pages is in mm/memory.c:
408 *
409 * If the page is written to, set_page_dirty (or set_page_dirty_lock,
410 * as appropriate) must be called after the page is finished with, and
411 * before put_page is called.
412 *
413 * FIXME Could be of use to others and might belong in the generic
414 * code, if others agree. E.g.
415 * ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c
416 * ceph_put_page_vector in net/ceph/pagevec.c
417 * maybe more?
418 */
419static int free_user_pages(struct page **page_list, unsigned int nr_pages,
420 int dirty)
421{
422 unsigned int i;
423
424 for (i = 0; i < nr_pages; i++) {
425 if (page_list[i] != NULL) {
426 if (dirty)
427 set_page_dirty_lock(page_list[i]);
428 put_page(page_list[i]);
429 }
430 }
431 return 0;
432}
433
434/**
435 * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
436 * @cd: pointer to genwqe device
437 * @m: mapping params
438 * @uaddr: user virtual address
439 * @size: size of memory to be mapped
440 *
441 * We need to think about how we could speed this up. Of course it is
442 * not a good idea to do this over and over again, like we are
443 * currently doing it. Nevertheless, I am curious where on the path
444 * the performance is spent. Most probably within the memory
445 * allocation functions, but maybe also in the DMA mapping code.
446 *
447 * Restrictions: The maximum size of the possible mapping currently depends
448 * on the amount of memory we can get using kzalloc() for the
449 * page_list and pci_alloc_consistent for the sg_list.
450 * The sg_list is currently itself not scattered, which could
451 * be fixed with some effort. The page_list must be split into
452 * PAGE_SIZE chunks too. All that would make the already
453 * complicated code even more complicated.
454 *
455 * Return: 0 if success
456 */
457int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
458 unsigned long size, struct ddcb_requ *req)
459{
460 int rc = -EINVAL;
461 unsigned long data, offs;
462 struct pci_dev *pci_dev = cd->pci_dev;
463
464 if ((uaddr == NULL) || (size == 0)) {
465 m->size = 0; /* mark unused and not added */
466 return -EINVAL;
467 }
468 m->u_vaddr = uaddr;
469 m->size = size;
470
471 /* determine space needed for page_list. */
472 data = (unsigned long)uaddr;
473 offs = offset_in_page(data);
474 m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
475
476 m->page_list = kcalloc(m->nr_pages,
477 sizeof(struct page *) + sizeof(dma_addr_t),
478 GFP_KERNEL);
479 if (!m->page_list) {
480 dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
481 m->nr_pages = 0;
482 m->u_vaddr = NULL;
483 m->size = 0; /* mark unused and not added */
484 return -ENOMEM;
485 }
486 m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
487
488 /* pin user pages in memory */
489 rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
490 m->nr_pages,
491 1, /* write by caller */
492 m->page_list); /* ptrs to pages */
493
494 /* assumption: get_user_pages can be killed by signals. */
495 if (rc < m->nr_pages) {
496 free_user_pages(m->page_list, rc, 0);
497 rc = -EFAULT;
498 goto fail_get_user_pages;
499 }
500
501 rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
502 if (rc != 0)
503 goto fail_free_user_pages;
504
505 return 0;
506
507 fail_free_user_pages:
508 free_user_pages(m->page_list, m->nr_pages, 0);
509
510 fail_get_user_pages:
511 kfree(m->page_list);
512 m->page_list = NULL;
513 m->dma_list = NULL;
514 m->nr_pages = 0;
515 m->u_vaddr = NULL;
516 m->size = 0; /* mark unused and not added */
517 return rc;
518}
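The nr_pages arithmetic above is easy to get wrong when the user address is not page aligned. A small userspace sketch of the same calculation; the uaddr/size values are made up and a 4 KiB PAGE_SIZE is assumed.

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long uaddr = 0x7f0000001234UL; /* unaligned user address */
	unsigned long size  = 8192;             /* two pages of payload */
	unsigned long offs  = uaddr & (PAGE_SIZE - 1);
	unsigned long nr_pages = (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;

	/* 0x234 + 8192 bytes span three pages, not two */
	printf("offset=%#lx nr_pages=%lu\n", offs, nr_pages);
	return 0;
}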
519
520/**
521 * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel
522 * memory
523 * @cd: pointer to genwqe device
524 * @m: mapping params
525 */
526int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
527 struct ddcb_requ *req)
528{
529 struct pci_dev *pci_dev = cd->pci_dev;
530
531 if (!dma_mapping_used(m)) {
532 dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
533 __func__, m);
534 return -EINVAL;
535 }
536
537 if (m->dma_list)
538 genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
539
540 if (m->page_list) {
541 free_user_pages(m->page_list, m->nr_pages, 1);
542
543 kfree(m->page_list);
544 m->page_list = NULL;
545 m->dma_list = NULL;
546 m->nr_pages = 0;
547 }
548
549 m->u_vaddr = NULL;
550 m->size = 0; /* mark as unused and not added */
551 return 0;
552}
553
554/**
555 * genwqe_card_type() - Get chip type SLU Configuration Register
556 * @cd: pointer to the genwqe device descriptor
557 * Return: 0: Altera Stratix-IV 230
558 * 1: Altera Stratix-IV 530
559 * 2: Altera Stratix-V A4
560 * 3: Altera Stratix-V A7
561 */
562u8 genwqe_card_type(struct genwqe_dev *cd)
563{
564 u64 card_type = cd->slu_unitcfg;
565 return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
566}
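The decode above extracts the type field starting at bit 20 of the SLU unit configuration register. A runnable userspace sketch of the same decode follows, assuming the field is exactly two bits wide (enough for the four documented codes); the real mask is defined elsewhere in the driver headers, so treat that width as an assumption.

#include <stdint.h>
#include <stdio.h>

static const char *card_type_str(uint64_t slu_unitcfg)
{
	static const char * const names[] = {
		"Altera Stratix-IV 230", "Altera Stratix-IV 530",
		"Altera Stratix-V A4",   "Altera Stratix-V A7",
	};

	return names[(slu_unitcfg >> 20) & 0x3];	/* assumed 2-bit field */
}

int main(void)
{
	printf("%s\n", card_type_str(0x0000000000200000ULL)); /* code 2 */
	return 0;
}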
567
568/**
569 * genwqe_card_reset() - Reset the card
570 * @cd: pointer to the genwqe device descriptor
571 */
572int genwqe_card_reset(struct genwqe_dev *cd)
573{
574 u64 softrst;
575 struct pci_dev *pci_dev = cd->pci_dev;
576
577 if (!genwqe_is_privileged(cd))
578 return -ENODEV;
579
580 /* new SL */
581 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
582 msleep(1000);
583 __genwqe_readq(cd, IO_HSU_FIR_CLR);
584 __genwqe_readq(cd, IO_APP_FIR_CLR);
585 __genwqe_readq(cd, IO_SLU_FIR_CLR);
586
587 /*
588 * Read-modify-write to preserve the stealth bits
589 *
590 * For SL >= 039, the Stealth WE bit allows removing
591 * the read-modify-write.
592 * The r-m-w may require a 0x3C mask to avoid hitting hard
593 * reset again when doing an error reset (should be 0, chicken bit).
594 */
595 softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
596 __genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);
597
598 /* give ERRORRESET some time to finish */
599 msleep(50);
600
601 if (genwqe_need_err_masking(cd)) {
602 dev_info(&pci_dev->dev,
603 "[%s] masking errors for old bitstreams\n", __func__);
604 __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
605 }
606 return 0;
607}
608
609int genwqe_read_softreset(struct genwqe_dev *cd)
610{
611 u64 bitstream;
612
613 if (!genwqe_is_privileged(cd))
614 return -ENODEV;
615
616 bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
617 cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
618 return 0;
619}
620
621/**
622 * genwqe_set_interrupt_capability() - Configure MSI capability structure
623 * @cd: pointer to the device
624 * Return: 0 if no error
625 */
626int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
627{
628 int rc;
629 struct pci_dev *pci_dev = cd->pci_dev;
630
631 rc = pci_enable_msi_block(pci_dev, count);
632 if (rc == 0)
633 cd->flags |= GENWQE_FLAG_MSI_ENABLED;
634 return rc;
635}
636
637/**
638 * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
639 * @cd: pointer to the device
640 */
641void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
642{
643 struct pci_dev *pci_dev = cd->pci_dev;
644
645 if (cd->flags & GENWQE_FLAG_MSI_ENABLED) {
646 pci_disable_msi(pci_dev);
647 cd->flags &= ~GENWQE_FLAG_MSI_ENABLED;
648 }
649}
650
651/**
652 * set_reg_idx() - Fill array with data. Ignore illegal offsets.
653 * @cd: card device
654 * @r: debug register array
655 * @i: index to desired entry
656 * @m: maximum possible entries
657 * @addr: address which is read
658 * @idx: index in debug array
659 * @val: read value
660 */
661static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
662 unsigned int *i, unsigned int m, u32 addr, u32 idx,
663 u64 val)
664{
665 if (WARN_ON_ONCE(*i >= m))
666 return -EFAULT;
667
668 r[*i].addr = addr;
669 r[*i].idx = idx;
670 r[*i].val = val;
671 ++*i;
672 return 0;
673}
674
675static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
676 unsigned int *i, unsigned int m, u32 addr, u64 val)
677{
678 return set_reg_idx(cd, r, i, m, addr, 0, val);
679}
680
681int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
682 unsigned int max_regs, int all)
683{
684 unsigned int i, j, idx = 0;
685 u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
686 u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;
687
688 /* Global FIR */
689 gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
690 set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);
691
692 /* UnitCfg for SLU */
693 sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
694 set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);
695
696 /* UnitCfg for APP */
697 appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
698 set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);
699
700 /* Check all chip Units */
701 for (i = 0; i < GENWQE_MAX_UNITS; i++) {
702
703 /* Unit FIR */
704 ufir_addr = (i << 24) | 0x008;
705 ufir = __genwqe_readq(cd, ufir_addr);
706 set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);
707
708 /* Unit FEC */
709 ufec_addr = (i << 24) | 0x018;
710 ufec = __genwqe_readq(cd, ufec_addr);
711 set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);
712
713 for (j = 0; j < 64; j++) {
714 /* wherever there is a primary 1, read the secondary */
715 if (!all && (!(ufir & (1ull << j))))
716 continue;
717
718 sfir_addr = (i << 24) | (0x100 + 8 * j);
719 sfir = __genwqe_readq(cd, sfir_addr);
720 set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);
721
722 sfec_addr = (i << 24) | (0x300 + 8 * j);
723 sfec = __genwqe_readq(cd, sfec_addr);
724 set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
725 }
726 }
727
728 /* fill with invalid data until end */
729 for (i = idx; i < max_regs; i++) {
730 regs[i].addr = 0xffffffff;
731 regs[i].val = 0xffffffffffffffffull;
732 }
733 return idx;
734}
735
736/**
737 * genwqe_ffdc_buff_size() - Calculates the number of dump registers
738 */
739int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
740{
741 int entries = 0, ring, traps, traces, trace_entries;
742 u32 eevptr_addr, l_addr, d_len, d_type;
743 u64 eevptr, val, addr;
744
745 eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
746 eevptr = __genwqe_readq(cd, eevptr_addr);
747
748 if ((eevptr != 0x0) && (eevptr != -1ull)) {
749 l_addr = GENWQE_UID_OFFS(uid) | eevptr;
750
751 while (1) {
752 val = __genwqe_readq(cd, l_addr);
753
754 if ((val == 0x0) || (val == -1ull))
755 break;
756
757 /* 38:24 */
758 d_len = (val & 0x0000007fff000000ull) >> 24;
759
760 /* 39 */
761 d_type = (val & 0x0000008000000000ull) >> 36;
762
763 if (d_type) { /* repeat */
764 entries += d_len;
765 } else { /* size in bytes! */
766 entries += d_len >> 3;
767 }
768
769 l_addr += 8;
770 }
771 }
772
773 for (ring = 0; ring < 8; ring++) {
774 addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
775 val = __genwqe_readq(cd, addr);
776
777 if ((val == 0x0ull) || (val == -1ull))
778 continue;
779
780 traps = (val >> 24) & 0xff;
781 traces = (val >> 16) & 0xff;
782 trace_entries = val & 0xffff;
783
784 entries += traps + (traces * trace_entries);
785 }
786 return entries;
787}
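Each extended-error list entry packs a length into bits 38:24 and a repeat-versus-byte-count flag into bit 39, exactly as the loop above decodes it. A runnable userspace sketch with a made-up entry value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t e = 0x0000008010000000ULL;	/* d_type=1, d_len=16 */
	uint32_t d_len  = (e & 0x0000007fff000000ULL) >> 24;
	int      d_type = (e & 0x0000008000000000ULL) != 0;
	unsigned int entries = d_type ? d_len : d_len >> 3;

	/* repeat entries count directly; otherwise d_len is in bytes */
	printf("d_len=%u d_type=%d -> %u dump entries\n",
	       d_len, d_type, entries);
	return 0;
}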
788
789/**
790 * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
791 */
792int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
793 struct genwqe_reg *regs, unsigned int max_regs)
794{
795 int i, traps, traces, trace, trace_entries, trace_entry, ring;
796 unsigned int idx = 0;
797 u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
798 u64 eevptr, e, val, addr;
799
800 eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
801 eevptr = __genwqe_readq(cd, eevptr_addr);
802
803 if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
804 l_addr = GENWQE_UID_OFFS(uid) | eevptr;
805 while (1) {
806 e = __genwqe_readq(cd, l_addr);
807 if ((e == 0x0) || (e == 0xffffffffffffffffull))
808 break;
809
810 d_addr = (e & 0x0000000000ffffffull); /* 23:0 */
811 d_len = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
812 d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
813 d_addr |= GENWQE_UID_OFFS(uid);
814
815 if (d_type) {
816 for (i = 0; i < (int)d_len; i++) {
817 val = __genwqe_readq(cd, d_addr);
818 set_reg_idx(cd, regs, &idx, max_regs,
819 d_addr, i, val);
820 }
821 } else {
822 d_len >>= 3; /* Size in bytes! */
823 for (i = 0; i < (int)d_len; i++, d_addr += 8) {
824 val = __genwqe_readq(cd, d_addr);
825 set_reg_idx(cd, regs, &idx, max_regs,
826 d_addr, 0, val);
827 }
828 }
829 l_addr += 8;
830 }
831 }
832
833 /*
834 * To save time, there are only 6 traces populated on Uid=2,
835 * Ring=1, each with iters=512.
836 */
837 for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
838 2...7 are ASI rings */
839 addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
840 val = __genwqe_readq(cd, addr);
841
842 if ((val == 0x0ull) || (val == -1ull))
843 continue;
844
845 traps = (val >> 24) & 0xff; /* Number of Traps */
846 traces = (val >> 16) & 0xff; /* Number of Traces */
847 trace_entries = val & 0xffff; /* Entries per trace */
848
849 /* Note: This is a combined loop that dumps both the traps */
850 /* (for the trace == 0 case) as well as the traces 1 to */
851 /* 'traces'. */
852 for (trace = 0; trace <= traces; trace++) {
853 u32 diag_sel =
854 GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);
855
856 addr = (GENWQE_UID_OFFS(uid) |
857 IO_EXTENDED_DIAG_SELECTOR);
858 __genwqe_writeq(cd, addr, diag_sel);
859
860 for (trace_entry = 0;
861 trace_entry < (trace ? trace_entries : traps);
862 trace_entry++) {
863 addr = (GENWQE_UID_OFFS(uid) |
864 IO_EXTENDED_DIAG_READ_MBX);
865 val = __genwqe_readq(cd, addr);
866 set_reg_idx(cd, regs, &idx, max_regs, addr,
867 (diag_sel<<16) | trace_entry, val);
868 }
869 }
870 }
871 return 0;
872}
873
874/**
875 * genwqe_write_vreg() - Write register in virtual window
876 *
877 * Note, these registers are only accessible to the PF through the
878 * VF-window. They are not intended to be accessed by the VF.
879 */
880int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
881{
882 __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
883 __genwqe_writeq(cd, reg, val);
884 return 0;
885}
886
887/**
888 * genwqe_read_vreg() - Read register in virtual window
889 *
890 * Note, these registers are only accessible to the PF through the
891 * VF-window. They are not intended to be accessed by the VF.
892 */
893u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
894{
895 __genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
896 return __genwqe_readq(cd, reg);
897}
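A hypothetical PF-side caller of the window pair above. Note that each accessor re-selects the window, so concurrent callers would need external serialization; toggle_vf_reg() and the bit it sets are made up for illustration.

/* Sketch: read a VF's shadowed register, then set its lowest bit. */
static u64 toggle_vf_reg(struct genwqe_dev *cd, u32 reg, int vf)
{
	u64 old = genwqe_read_vreg(cd, reg, vf);

	genwqe_write_vreg(cd, reg, old | 0x1ull, vf);
	return old;
}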
898
899/**
900 * genwqe_base_clock_frequency() - Determine base clock frequency of the card
901 *
902 * Note: From a design perspective it turned out to be a bad idea to
903 * use codes here to specify the frequency/speed values. An old
904 * driver cannot understand new codes, which is a perpetual
905 * problem. It is better to measure the value, or to put the
906 * speed/frequency directly into a register, which is always a valid
907 * value for old as well as for new software.
908 *
909 * Return: Card clock in MHz
910 */
911int genwqe_base_clock_frequency(struct genwqe_dev *cd)
912{
913 u16 speed; /* MHz MHz MHz MHz */
914 static const int speed_grade[] = { 250, 200, 166, 175 };
915
916 speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
917 if (speed >= ARRAY_SIZE(speed_grade))
918 return 0; /* illegal value */
919
920 return speed_grade[speed];
921}
922
923/**
924 * genwqe_stop_traps() - Stop traps
925 *
926 * Before reading out the analysis data, we need to stop the traps.
927 */
928void genwqe_stop_traps(struct genwqe_dev *cd)
929{
930 __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
931}
932
933/**
934 * genwqe_start_traps() - Start traps
935 *
936 * After the data has been read, the traps must be enabled again.
937 */
938void genwqe_start_traps(struct genwqe_dev *cd)
939{
940 __genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);
941
942 if (genwqe_need_err_masking(cd))
943 __genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
944}
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h
new file mode 100644
index 000000000000..46e916b36c70
--- /dev/null
+++ b/drivers/misc/genwqe/genwqe_driver.h
@@ -0,0 +1,77 @@
1#ifndef __GENWQE_DRIVER_H__
2#define __GENWQE_DRIVER_H__
3
4/**
5 * IBM Accelerator Family 'GenWQE'
6 *
7 * (C) Copyright IBM Corp. 2013
8 *
9 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
10 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
11 * Author: Michael Jung <mijung@de.ibm.com>
12 * Author: Michael Ruettger <michael@ibmra.de>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License (version 2 only)
16 * as published by the Free Software Foundation.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 */
23
24#include <linux/types.h>
25#include <linux/stddef.h>
26#include <linux/cdev.h>
27#include <linux/list.h>
28#include <linux/kthread.h>
29#include <linux/scatterlist.h>
30#include <linux/iommu.h>
31#include <linux/spinlock.h>
32#include <linux/mutex.h>
33#include <linux/platform_device.h>
34#include <linux/printk.h>
35
36#include <asm/byteorder.h>
37#include <linux/genwqe/genwqe_card.h>
38
39#define DRV_VERS_STRING "2.0.0"
40
41/*
42 * Static minor number assignment, until we decide/implement
43 * something dynamic.
44 */
45#define GENWQE_MAX_MINOR 128 /* up to 128 possible genwqe devices */
46
47/**
48 * ddcb_requ_alloc() - Allocate a new DDCB execution request
49 *
50 * This data structure contains the user visible fields of the DDCB
51 * to be executed.
52 *
53 * Return: ptr to genwqe_ddcb_cmd data structure
54 */
55struct genwqe_ddcb_cmd *ddcb_requ_alloc(void);
56
57/**
58 * ddcb_requ_free() - Free DDCB execution request.
59 * @req: ptr to genwqe_ddcb_cmd data structure.
60 */
61void ddcb_requ_free(struct genwqe_ddcb_cmd *req);
62
63u32 genwqe_crc32(u8 *buff, size_t len, u32 init);
64
65static inline void genwqe_hexdump(struct pci_dev *pci_dev,
66 const void *buff, unsigned int size)
67{
68 char prefix[32];
69
70 scnprintf(prefix, sizeof(prefix), "%s %s: ",
71 GENWQE_DEVNAME, pci_name(pci_dev));
72
73 print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, 16, 1, buff,
74 size, true);
75}
76
77#endif /* __GENWQE_DRIVER_H__ */
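A hypothetical call site for the genwqe_hexdump() helper declared above; the buffer contents are made up, and the output lands in the dynamic-debug facility, so it only appears when the corresponding debug site is enabled.

/* Sketch: dump a small buffer tagged with the device name. */
static void dump_sample(struct pci_dev *pci_dev)
{
	u8 buf[16] = { 0xde, 0xad, 0xbe, 0xef };

	genwqe_hexdump(pci_dev, buf, sizeof(buf));
}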
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index a2edb2ee0921..49c7a23f02fc 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -224,7 +224,7 @@ static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
224} 224}
225 225
226#ifdef CONFIG_IDE 226#ifdef CONFIG_IDE
227int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, 227static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
228 struct block_device *bdev, unsigned int cmd, 228 struct block_device *bdev, unsigned int cmd,
229 unsigned long arg) 229 unsigned long arg)
230{ 230{
@@ -334,9 +334,10 @@ static void execute_location(void *dst)
334 334
335static void execute_user_location(void *dst) 335static void execute_user_location(void *dst)
336{ 336{
337 /* Intentionally crossing kernel/user memory boundary. */
337 void (*func)(void) = dst; 338 void (*func)(void) = dst;
338 339
339 if (copy_to_user(dst, do_nothing, EXEC_SIZE)) 340 if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
340 return; 341 return;
341 func(); 342 func();
342} 343}
@@ -408,6 +409,8 @@ static void lkdtm_do_action(enum ctype which)
408 case CT_SPINLOCKUP: 409 case CT_SPINLOCKUP:
409 /* Must be called twice to trigger. */ 410 /* Must be called twice to trigger. */
410 spin_lock(&lock_me_up); 411 spin_lock(&lock_me_up);
412 /* Let sparse know we intended to exit holding the lock. */
413 __release(&lock_me_up);
411 break; 414 break;
412 case CT_HUNG_TASK: 415 case CT_HUNG_TASK:
413 set_current_state(TASK_UNINTERRUPTIBLE); 416 set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index d22c6864508b..2fad84432829 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -177,7 +177,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
177 unsigned long timeout; 177 unsigned long timeout;
178 int i; 178 int i;
179 179
180 /* Only Posible if we are in timeout */ 180 /* Only possible if we are in timeout */
181 if (!cl || cl != &dev->iamthif_cl) { 181 if (!cl || cl != &dev->iamthif_cl) {
182 dev_dbg(&dev->pdev->dev, "bad file ext.\n"); 182 dev_dbg(&dev->pdev->dev, "bad file ext.\n");
183 return -ETIMEDOUT; 183 return -ETIMEDOUT;
@@ -249,7 +249,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
249 cb->response_buffer.size); 249 cb->response_buffer.size);
250 dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); 250 dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
251 251
252 /* length is being turncated to PAGE_SIZE, however, 252 /* length is being truncated to PAGE_SIZE, however,
253 * the buf_idx may point beyond */ 253 * the buf_idx may point beyond */
254 length = min_t(size_t, length, (cb->buf_idx - *offset)); 254 length = min_t(size_t, length, (cb->buf_idx - *offset));
255 255
@@ -316,6 +316,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
316 mei_hdr.host_addr = dev->iamthif_cl.host_client_id; 316 mei_hdr.host_addr = dev->iamthif_cl.host_client_id;
317 mei_hdr.me_addr = dev->iamthif_cl.me_client_id; 317 mei_hdr.me_addr = dev->iamthif_cl.me_client_id;
318 mei_hdr.reserved = 0; 318 mei_hdr.reserved = 0;
319 mei_hdr.internal = 0;
319 dev->iamthif_msg_buf_index += mei_hdr.length; 320 dev->iamthif_msg_buf_index += mei_hdr.length;
320 ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf); 321 ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf);
321 if (ret) 322 if (ret)
@@ -477,6 +478,7 @@ int mei_amthif_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
477 mei_hdr.host_addr = cl->host_client_id; 478 mei_hdr.host_addr = cl->host_client_id;
478 mei_hdr.me_addr = cl->me_client_id; 479 mei_hdr.me_addr = cl->me_client_id;
479 mei_hdr.reserved = 0; 480 mei_hdr.reserved = 0;
481 mei_hdr.internal = 0;
480 482
481 if (*slots >= msg_slots) { 483 if (*slots >= msg_slots) {
482 mei_hdr.length = len; 484 mei_hdr.length = len;
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 87c96e4669e2..1ee2b9492a82 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -154,7 +154,7 @@ int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
154 return 0; 154 return 0;
155} 155}
156/** 156/**
157 * mei_io_cb_alloc_resp_buf - allocate respose buffer 157 * mei_io_cb_alloc_resp_buf - allocate response buffer
158 * 158 *
159 * @cb: io callback structure 159 * @cb: io callback structure
160 * @length: size of the buffer 160 * @length: size of the buffer
@@ -207,7 +207,7 @@ int mei_cl_flush_queues(struct mei_cl *cl)
207 207
208 208
209/** 209/**
210 * mei_cl_init - initializes intialize cl. 210 * mei_cl_init - initializes cl.
211 * 211 *
212 * @cl: host client to be initialized 212 * @cl: host client to be initialized
213 * @dev: mei device 213 * @dev: mei device
@@ -263,10 +263,10 @@ struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
263 return NULL; 263 return NULL;
264} 264}
265 265
266/** mei_cl_link: allocte host id in the host map 266/** mei_cl_link: allocate host id in the host map
267 * 267 *
268 * @cl - host client 268 * @cl - host client
269 * @id - fixed host id or -1 for genereting one 269 * @id - fixed host id or -1 for generic one
270 * 270 *
271 * returns 0 on success 271 * returns 0 on success
272 * -EINVAL on incorrect values 272 * -EINVAL on incorrect values
@@ -282,19 +282,19 @@ int mei_cl_link(struct mei_cl *cl, int id)
282 282
283 dev = cl->dev; 283 dev = cl->dev;
284 284
285 /* If Id is not asigned get one*/ 285 /* If Id is not assigned get one*/
286 if (id == MEI_HOST_CLIENT_ID_ANY) 286 if (id == MEI_HOST_CLIENT_ID_ANY)
287 id = find_first_zero_bit(dev->host_clients_map, 287 id = find_first_zero_bit(dev->host_clients_map,
288 MEI_CLIENTS_MAX); 288 MEI_CLIENTS_MAX);
289 289
290 if (id >= MEI_CLIENTS_MAX) { 290 if (id >= MEI_CLIENTS_MAX) {
291 dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ; 291 dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
292 return -EMFILE; 292 return -EMFILE;
293 } 293 }
294 294
295 open_handle_count = dev->open_handle_count + dev->iamthif_open_count; 295 open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
296 if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { 296 if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
297 dev_err(&dev->pdev->dev, "open_handle_count exceded %d", 297 dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
298 MEI_MAX_OPEN_HANDLE_COUNT); 298 MEI_MAX_OPEN_HANDLE_COUNT);
299 return -EMFILE; 299 return -EMFILE;
300 } 300 }
@@ -344,8 +344,6 @@ int mei_cl_unlink(struct mei_cl *cl)
344 344
345 cl->state = MEI_FILE_INITIALIZING; 345 cl->state = MEI_FILE_INITIALIZING;
346 346
347 list_del_init(&cl->link);
348
349 return 0; 347 return 0;
350} 348}
351 349
@@ -372,13 +370,14 @@ void mei_host_client_init(struct work_struct *work)
372 } 370 }
373 371
374 dev->dev_state = MEI_DEV_ENABLED; 372 dev->dev_state = MEI_DEV_ENABLED;
373 dev->reset_count = 0;
375 374
376 mutex_unlock(&dev->device_lock); 375 mutex_unlock(&dev->device_lock);
377} 376}
378 377
379 378
380/** 379/**
381 * mei_cl_disconnect - disconnect host clinet form the me one 380 * mei_cl_disconnect - disconnect host client from the me one
382 * 381 *
383 * @cl: host client 382 * @cl: host client
384 * 383 *
@@ -457,7 +456,7 @@ free:
457 * 456 *
458 * @cl: private data of the file object 457 * @cl: private data of the file object
459 * 458 *
460 * returns ture if other client is connected, 0 - otherwise. 459 * returns true if other client is connected, false - otherwise.
461 */ 460 */
462bool mei_cl_is_other_connecting(struct mei_cl *cl) 461bool mei_cl_is_other_connecting(struct mei_cl *cl)
463{ 462{
@@ -481,7 +480,7 @@ bool mei_cl_is_other_connecting(struct mei_cl *cl)
481} 480}
482 481
483/** 482/**
484 * mei_cl_connect - connect host clinet to the me one 483 * mei_cl_connect - connect host client to the me one
485 * 484 *
486 * @cl: host client 485 * @cl: host client
487 * 486 *
@@ -729,6 +728,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
729 mei_hdr.host_addr = cl->host_client_id; 728 mei_hdr.host_addr = cl->host_client_id;
730 mei_hdr.me_addr = cl->me_client_id; 729 mei_hdr.me_addr = cl->me_client_id;
731 mei_hdr.reserved = 0; 730 mei_hdr.reserved = 0;
731 mei_hdr.internal = cb->internal;
732 732
733 if (*slots >= msg_slots) { 733 if (*slots >= msg_slots) {
734 mei_hdr.length = len; 734 mei_hdr.length = len;
@@ -775,7 +775,7 @@ int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
775 * @cl: host client 775 * @cl: host client
776 * @cl: write callback with filled data 776 * @cl: write callback with filled data
777 * 777 *
778 * returns numbe of bytes sent on success, <0 on failure. 778 * returns number of bytes sent on success, <0 on failure.
779 */ 779 */
780int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) 780int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
781{ 781{
@@ -828,6 +828,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
828 mei_hdr.host_addr = cl->host_client_id; 828 mei_hdr.host_addr = cl->host_client_id;
829 mei_hdr.me_addr = cl->me_client_id; 829 mei_hdr.me_addr = cl->me_client_id;
830 mei_hdr.reserved = 0; 830 mei_hdr.reserved = 0;
831 mei_hdr.internal = cb->internal;
831 832
832 833
833 rets = mei_write_message(dev, &mei_hdr, buf->data); 834 rets = mei_write_message(dev, &mei_hdr, buf->data);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index e3870f22d238..a3ae154444b2 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -43,7 +43,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
43 43
44 mutex_lock(&dev->device_lock); 44 mutex_lock(&dev->device_lock);
45 45
46 /* if the driver is not enabled the list won't b consitent */ 46 /* if the driver is not enabled the list won't be consistent */
47 if (dev->dev_state != MEI_DEV_ENABLED) 47 if (dev->dev_state != MEI_DEV_ENABLED)
48 goto out; 48 goto out;
49 49
@@ -101,7 +101,7 @@ static const struct file_operations mei_dbgfs_fops_devstate = {
101 101
102/** 102/**
103 * mei_dbgfs_deregister - Remove the debugfs files and directories 103 * mei_dbgfs_deregister - Remove the debugfs files and directories
104 * @mei - pointer to mei device private dat 104 * @mei - pointer to mei device private data
105 */ 105 */
106void mei_dbgfs_deregister(struct mei_device *dev) 106void mei_dbgfs_deregister(struct mei_device *dev)
107{ 107{
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 9b3a0fb7f265..28cd74c073b9 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -28,9 +28,9 @@
28 * 28 *
29 * @dev: the device structure 29 * @dev: the device structure
30 * 30 *
31 * returns none. 31 * returns 0 on success -ENOMEM on allocation failure
32 */ 32 */
33static void mei_hbm_me_cl_allocate(struct mei_device *dev) 33static int mei_hbm_me_cl_allocate(struct mei_device *dev)
34{ 34{
35 struct mei_me_client *clients; 35 struct mei_me_client *clients;
36 int b; 36 int b;
@@ -44,7 +44,7 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
44 dev->me_clients_num++; 44 dev->me_clients_num++;
45 45
46 if (dev->me_clients_num == 0) 46 if (dev->me_clients_num == 0)
47 return; 47 return 0;
48 48
49 kfree(dev->me_clients); 49 kfree(dev->me_clients);
50 dev->me_clients = NULL; 50 dev->me_clients = NULL;
@@ -56,12 +56,10 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev)
56 sizeof(struct mei_me_client), GFP_KERNEL); 56 sizeof(struct mei_me_client), GFP_KERNEL);
57 if (!clients) { 57 if (!clients) {
58 dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); 58 dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
59 dev->dev_state = MEI_DEV_RESETTING; 59 return -ENOMEM;
60 mei_reset(dev, 1);
61 return;
62 } 60 }
63 dev->me_clients = clients; 61 dev->me_clients = clients;
64 return; 62 return 0;
65} 63}
66 64
67/** 65/**
@@ -85,12 +83,12 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
85} 83}
86 84
87/** 85/**
88 * same_disconn_addr - tells if they have the same address 86 * mei_hbm_cl_addr_equal - tells if they have the same address
89 * 87 *
90 * @file: private data of the file object. 88 * @cl: - client
91 * @disconn: disconnection request. 89 * @buf: buffer with cl header
92 * 90 *
93 * returns true if addres are same 91 * returns true if addresses are the same
94 */ 92 */
95static inline 93static inline
96bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) 94bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
@@ -128,6 +126,17 @@ static bool is_treat_specially_client(struct mei_cl *cl,
128 return false; 126 return false;
129} 127}
130 128
129/**
130 * mei_hbm_idle - set hbm to idle state
131 *
132 * @dev: the device structure
133 */
134void mei_hbm_idle(struct mei_device *dev)
135{
136 dev->init_clients_timer = 0;
137 dev->hbm_state = MEI_HBM_IDLE;
138}
139
131int mei_hbm_start_wait(struct mei_device *dev) 140int mei_hbm_start_wait(struct mei_device *dev)
132{ 141{
133 int ret; 142 int ret;
@@ -137,7 +146,7 @@ int mei_hbm_start_wait(struct mei_device *dev)
137 mutex_unlock(&dev->device_lock); 146 mutex_unlock(&dev->device_lock);
138 ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, 147 ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
139 dev->hbm_state == MEI_HBM_IDLE || 148 dev->hbm_state == MEI_HBM_IDLE ||
140 dev->hbm_state > MEI_HBM_START, 149 dev->hbm_state >= MEI_HBM_STARTED,
141 mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); 150 mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
142 mutex_lock(&dev->device_lock); 151 mutex_lock(&dev->device_lock);
143 152
@@ -153,12 +162,15 @@ int mei_hbm_start_wait(struct mei_device *dev)
153 * mei_hbm_start_req - sends start request message. 162 * mei_hbm_start_req - sends start request message.
154 * 163 *
155 * @dev: the device structure 164 * @dev: the device structure
165 *
166 * returns 0 on success and < 0 on failure
156 */ 167 */
157int mei_hbm_start_req(struct mei_device *dev) 168int mei_hbm_start_req(struct mei_device *dev)
158{ 169{
159 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; 170 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
160 struct hbm_host_version_request *start_req; 171 struct hbm_host_version_request *start_req;
161 const size_t len = sizeof(struct hbm_host_version_request); 172 const size_t len = sizeof(struct hbm_host_version_request);
173 int ret;
162 174
163 mei_hbm_hdr(mei_hdr, len); 175 mei_hbm_hdr(mei_hdr, len);
164 176
@@ -170,12 +182,13 @@ int mei_hbm_start_req(struct mei_device *dev)
170 start_req->host_version.minor_version = HBM_MINOR_VERSION; 182 start_req->host_version.minor_version = HBM_MINOR_VERSION;
171 183
172 dev->hbm_state = MEI_HBM_IDLE; 184 dev->hbm_state = MEI_HBM_IDLE;
173 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { 185 ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
174 dev_err(&dev->pdev->dev, "version message write failed\n"); 186 if (ret) {
175 dev->dev_state = MEI_DEV_RESETTING; 187 dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n",
176 mei_reset(dev, 1); 188 ret);
177 return -EIO; 189 return ret;
178 } 190 }
191
179 dev->hbm_state = MEI_HBM_START; 192 dev->hbm_state = MEI_HBM_START;
180 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; 193 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
181 return 0; 194 return 0;
@@ -186,13 +199,15 @@ int mei_hbm_start_req(struct mei_device *dev)
186 * 199 *
187 * @dev: the device structure 200 * @dev: the device structure
188 * 201 *
189 * returns none. 202 * returns 0 on success and < 0 on failure
190 */ 203 */
191static void mei_hbm_enum_clients_req(struct mei_device *dev) 204static int mei_hbm_enum_clients_req(struct mei_device *dev)
192{ 205{
193 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; 206 struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
194 struct hbm_host_enum_request *enum_req; 207 struct hbm_host_enum_request *enum_req;
195 const size_t len = sizeof(struct hbm_host_enum_request); 208 const size_t len = sizeof(struct hbm_host_enum_request);
209 int ret;
210
196 /* enumerate clients */ 211 /* enumerate clients */
197 mei_hbm_hdr(mei_hdr, len); 212 mei_hbm_hdr(mei_hdr, len);
198 213
@@ -200,14 +215,15 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev)
200 memset(enum_req, 0, len); 215 memset(enum_req, 0, len);
201 enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; 216 enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
202 217
203 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { 218 ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
204 dev->dev_state = MEI_DEV_RESETTING; 219 if (ret) {
205 dev_err(&dev->pdev->dev, "enumeration request write failed.\n"); 220 dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n",
206 mei_reset(dev, 1); 221 ret);
222 return ret;
207 } 223 }
208 dev->hbm_state = MEI_HBM_ENUM_CLIENTS; 224 dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
209 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; 225 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
210 return; 226 return 0;
211} 227}
212 228
213/** 229/**
@@ -215,7 +231,7 @@ static void mei_hbm_enum_clients_req(struct mei_device *dev)
215 * 231 *
216 * @dev: the device structure 232 * @dev: the device structure
217 * 233 *
218 * returns none. 234 * returns 0 on success and < 0 on failure
219 */ 235 */
220 236
221static int mei_hbm_prop_req(struct mei_device *dev) 237static int mei_hbm_prop_req(struct mei_device *dev)
@@ -226,7 +242,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
226 const size_t len = sizeof(struct hbm_props_request); 242 const size_t len = sizeof(struct hbm_props_request);
227 unsigned long next_client_index; 243 unsigned long next_client_index;
228 unsigned long client_num; 244 unsigned long client_num;
229 245 int ret;
230 246
231 client_num = dev->me_client_presentation_num; 247 client_num = dev->me_client_presentation_num;
232 248
@@ -253,12 +269,11 @@ static int mei_hbm_prop_req(struct mei_device *dev)
253 prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; 269 prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
254 prop_req->address = next_client_index; 270 prop_req->address = next_client_index;
255 271
256 if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { 272 ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
257 dev->dev_state = MEI_DEV_RESETTING; 273 if (ret) {
258 dev_err(&dev->pdev->dev, "properties request write failed\n"); 274 dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n",
259 mei_reset(dev, 1); 275 ret);
260 276 return ret;
261 return -EIO;
262 } 277 }
263 278
264 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; 279 dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
@@ -268,7 +283,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
268} 283}
269 284
270/** 285/**
271 * mei_hbm_stop_req_prepare - perpare stop request message 286 * mei_hbm_stop_req_prepare - prepare stop request message
272 * 287 *
273 * @dev - mei device 288 * @dev - mei device
274 * @mei_hdr - mei message header 289 * @mei_hdr - mei message header
@@ -289,7 +304,7 @@ static void mei_hbm_stop_req_prepare(struct mei_device *dev,
289} 304}
290 305
291/** 306/**
292 * mei_hbm_cl_flow_control_req - sends flow control requst. 307 * mei_hbm_cl_flow_control_req - sends flow control request.
293 * 308 *
294 * @dev: the device structure 309 * @dev: the device structure
295 * @cl: client info 310 * @cl: client info
@@ -451,7 +466,7 @@ int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
451} 466}
452 467
453/** 468/**
454 * mei_hbm_cl_connect_res - connect resposne from the ME 469 * mei_hbm_cl_connect_res - connect response from the ME
455 * 470 *
456 * @dev: the device structure 471 * @dev: the device structure
457 * @rs: connect response bus message 472 * @rs: connect response bus message
@@ -505,8 +520,8 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev,
505 520
506 521
507/** 522/**
508 * mei_hbm_fw_disconnect_req - disconnect request initiated by me 523 * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware
509 * host sends disoconnect response 524 * host sends disconnect response
510 * 525 *
511 * @dev: the device structure. 526 * @dev: the device structure.
512 * @disconnect_req: disconnect request bus message from the me 527 * @disconnect_req: disconnect request bus message from the me
@@ -559,8 +574,10 @@ bool mei_hbm_version_is_supported(struct mei_device *dev)
559 * 574 *
560 * @dev: the device structure 575 * @dev: the device structure
561 * @mei_hdr: header of bus message 576 * @mei_hdr: header of bus message
577 *
578 * returns 0 on success and < 0 on failure
562 */ 579 */
563void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) 580int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
564{ 581{
565 struct mei_bus_message *mei_msg; 582 struct mei_bus_message *mei_msg;
566 struct mei_me_client *me_client; 583 struct mei_me_client *me_client;
@@ -577,8 +594,20 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
577 mei_read_slots(dev, dev->rd_msg_buf, hdr->length); 594 mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
578 mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; 595 mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
579 596
597 /* ignore spurious message and prevent reset nesting
598 * hbm is put to idle during system reset
599 */
600 if (dev->hbm_state == MEI_HBM_IDLE) {
601 dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n");
602 return 0;
603 }
604
580 switch (mei_msg->hbm_cmd) { 605 switch (mei_msg->hbm_cmd) {
581 case HOST_START_RES_CMD: 606 case HOST_START_RES_CMD:
607 dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n");
608
609 dev->init_clients_timer = 0;
610
582 version_res = (struct hbm_host_version_response *)mei_msg; 611 version_res = (struct hbm_host_version_response *)mei_msg;
583 612
584 dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", 613 dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n",
@@ -597,73 +626,89 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
597 } 626 }
598 627
599 if (!mei_hbm_version_is_supported(dev)) { 628 if (!mei_hbm_version_is_supported(dev)) {
600 dev_warn(&dev->pdev->dev, "hbm version mismatch: stopping the driver.\n"); 629 dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n");
601 630
602 dev->hbm_state = MEI_HBM_STOP; 631 dev->hbm_state = MEI_HBM_STOPPED;
603 mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr, 632 mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
604 dev->wr_msg.data); 633 dev->wr_msg.data);
605 mei_write_message(dev, &dev->wr_msg.hdr, 634 if (mei_write_message(dev, &dev->wr_msg.hdr,
606 dev->wr_msg.data); 635 dev->wr_msg.data)) {
636 dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n");
637 return -EIO;
638 }
639 break;
640 }
607 641
608 return; 642 if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
643 dev->hbm_state != MEI_HBM_START) {
644 dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n",
645 dev->dev_state, dev->hbm_state);
646 return -EPROTO;
609 } 647 }
610 648
611 if (dev->dev_state == MEI_DEV_INIT_CLIENTS && 649 dev->hbm_state = MEI_HBM_STARTED;
612 dev->hbm_state == MEI_HBM_START) { 650
613 dev->init_clients_timer = 0; 651 if (mei_hbm_enum_clients_req(dev)) {
614 mei_hbm_enum_clients_req(dev); 652 dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n");
615 } else { 653 return -EIO;
616 dev_err(&dev->pdev->dev, "reset: wrong host start response\n");
617 mei_reset(dev, 1);
618 return;
619 } 654 }
620 655
621 wake_up_interruptible(&dev->wait_recvd_msg); 656 wake_up_interruptible(&dev->wait_recvd_msg);
622 dev_dbg(&dev->pdev->dev, "host start response message received.\n");
623 break; 657 break;
624 658
625 case CLIENT_CONNECT_RES_CMD: 659 case CLIENT_CONNECT_RES_CMD:
660 dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n");
661
626 connect_res = (struct hbm_client_connect_response *) mei_msg; 662 connect_res = (struct hbm_client_connect_response *) mei_msg;
627 mei_hbm_cl_connect_res(dev, connect_res); 663 mei_hbm_cl_connect_res(dev, connect_res);
628 dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
629 wake_up(&dev->wait_recvd_msg); 664 wake_up(&dev->wait_recvd_msg);
630 break; 665 break;
631 666
632 case CLIENT_DISCONNECT_RES_CMD: 667 case CLIENT_DISCONNECT_RES_CMD:
668 dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n");
669
633 disconnect_res = (struct hbm_client_connect_response *) mei_msg; 670 disconnect_res = (struct hbm_client_connect_response *) mei_msg;
634 mei_hbm_cl_disconnect_res(dev, disconnect_res); 671 mei_hbm_cl_disconnect_res(dev, disconnect_res);
635 dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
636 wake_up(&dev->wait_recvd_msg); 672 wake_up(&dev->wait_recvd_msg);
637 break; 673 break;
638 674
639 case MEI_FLOW_CONTROL_CMD: 675 case MEI_FLOW_CONTROL_CMD:
676 dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n");
677
640 flow_control = (struct hbm_flow_control *) mei_msg; 678 flow_control = (struct hbm_flow_control *) mei_msg;
641 mei_hbm_cl_flow_control_res(dev, flow_control); 679 mei_hbm_cl_flow_control_res(dev, flow_control);
642 dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
643 break; 680 break;
644 681
645 case HOST_CLIENT_PROPERTIES_RES_CMD: 682 case HOST_CLIENT_PROPERTIES_RES_CMD:
683 dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n");
684
685 dev->init_clients_timer = 0;
686
687 if (dev->me_clients == NULL) {
688 dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n");
689 return -EPROTO;
690 }
691
646 props_res = (struct hbm_props_response *)mei_msg; 692 props_res = (struct hbm_props_response *)mei_msg;
647 me_client = &dev->me_clients[dev->me_client_presentation_num]; 693 me_client = &dev->me_clients[dev->me_client_presentation_num];
648 694
649 if (props_res->status || !dev->me_clients) { 695 if (props_res->status) {
650 dev_err(&dev->pdev->dev, "reset: properties response hbm wrong status.\n"); 696 dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n",
651 mei_reset(dev, 1); 697 props_res->status);
652 return; 698 return -EPROTO;
653 } 699 }
654 700
655 if (me_client->client_id != props_res->address) { 701 if (me_client->client_id != props_res->address) {
656 dev_err(&dev->pdev->dev, "reset: host properties response address mismatch\n"); 702 dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n",
657 mei_reset(dev, 1); 703 me_client->client_id, props_res->address);
658 return; 704 return -EPROTO;
659 } 705 }
660 706
661 if (dev->dev_state != MEI_DEV_INIT_CLIENTS || 707 if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
662 dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { 708 dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
663 dev_err(&dev->pdev->dev, "reset: unexpected properties response\n"); 709 dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
664 mei_reset(dev, 1); 710 dev->dev_state, dev->hbm_state);
665 711 return -EPROTO;
666 return;
667 } 712 }
668 713
669 me_client->props = props_res->client_properties; 714 me_client->props = props_res->client_properties;
@@ -671,49 +716,70 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
671 dev->me_client_presentation_num++; 716 dev->me_client_presentation_num++;
672 717
673 /* request property for the next client */ 718 /* request property for the next client */
674 mei_hbm_prop_req(dev); 719 if (mei_hbm_prop_req(dev))
720 return -EIO;
675 721
676 break; 722 break;
677 723
678 case HOST_ENUM_RES_CMD: 724 case HOST_ENUM_RES_CMD:
725 dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n");
726
727 dev->init_clients_timer = 0;
728
679 enum_res = (struct hbm_host_enum_response *) mei_msg; 729 enum_res = (struct hbm_host_enum_response *) mei_msg;
680 BUILD_BUG_ON(sizeof(dev->me_clients_map) 730 BUILD_BUG_ON(sizeof(dev->me_clients_map)
681 < sizeof(enum_res->valid_addresses)); 731 < sizeof(enum_res->valid_addresses));
682 memcpy(dev->me_clients_map, enum_res->valid_addresses, 732 memcpy(dev->me_clients_map, enum_res->valid_addresses,
683 sizeof(enum_res->valid_addresses)); 733 sizeof(enum_res->valid_addresses));
684 if (dev->dev_state == MEI_DEV_INIT_CLIENTS && 734
685 dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { 735 if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
686 dev->init_clients_timer = 0; 736 dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
687 mei_hbm_me_cl_allocate(dev); 737 dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
688 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; 738 dev->dev_state, dev->hbm_state);
689 739 return -EPROTO;
690 /* first property reqeust */ 740 }
691 mei_hbm_prop_req(dev); 741
692 } else { 742 if (mei_hbm_me_cl_allocate(dev)) {
693 dev_err(&dev->pdev->dev, "reset: unexpected enumeration response hbm.\n"); 743 dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n");
694 mei_reset(dev, 1); 744 return -ENOMEM;
695 return;
696 } 745 }
746
747 dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
748
749 /* first property request */
750 if (mei_hbm_prop_req(dev))
751 return -EIO;
752
697 break; 753 break;
698 754
699 case HOST_STOP_RES_CMD: 755 case HOST_STOP_RES_CMD:
756 dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n");
757
758 dev->init_clients_timer = 0;
700 759
701 if (dev->hbm_state != MEI_HBM_STOP) 760 if (dev->hbm_state != MEI_HBM_STOPPED) {
702 dev_err(&dev->pdev->dev, "unexpected stop response hbm.\n"); 761 dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n",
703 dev->dev_state = MEI_DEV_DISABLED; 762 dev->dev_state, dev->hbm_state);
704 dev_info(&dev->pdev->dev, "reset: FW stop response.\n"); 763 return -EPROTO;
705 mei_reset(dev, 1); 764 }
765
766 dev->dev_state = MEI_DEV_POWER_DOWN;
767 dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n");
768 /* force the reset */
769 return -EPROTO;
706 break; 770 break;
707 771
708 case CLIENT_DISCONNECT_REQ_CMD: 772 case CLIENT_DISCONNECT_REQ_CMD:
709 /* search for client */ 773 dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n");
774
710 disconnect_req = (struct hbm_client_connect_request *)mei_msg; 775 disconnect_req = (struct hbm_client_connect_request *)mei_msg;
711 mei_hbm_fw_disconnect_req(dev, disconnect_req); 776 mei_hbm_fw_disconnect_req(dev, disconnect_req);
712 break; 777 break;
713 778
714 case ME_STOP_REQ_CMD: 779 case ME_STOP_REQ_CMD:
780 dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n");
715 781
716 dev->hbm_state = MEI_HBM_STOP; 782 dev->hbm_state = MEI_HBM_STOPPED;
717 mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr, 783 mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
718 dev->wr_ext_msg.data); 784 dev->wr_ext_msg.data);
719 break; 785 break;
@@ -722,5 +788,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
722 break; 788 break;
723 789
724 } 790 }
791 return 0;
725} 792}
726 793
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 4ae2e56e404f..5f92188a5cd7 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -32,13 +32,13 @@ struct mei_cl;
32enum mei_hbm_state { 32enum mei_hbm_state {
33 MEI_HBM_IDLE = 0, 33 MEI_HBM_IDLE = 0,
34 MEI_HBM_START, 34 MEI_HBM_START,
35 MEI_HBM_STARTED,
35 MEI_HBM_ENUM_CLIENTS, 36 MEI_HBM_ENUM_CLIENTS,
36 MEI_HBM_CLIENT_PROPERTIES, 37 MEI_HBM_CLIENT_PROPERTIES,
37 MEI_HBM_STARTED, 38 MEI_HBM_STOPPED,
38 MEI_HBM_STOP,
39}; 39};
40 40
41void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); 41int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
42 42
43static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) 43static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
44{ 44{
@@ -49,6 +49,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
49 hdr->reserved = 0; 49 hdr->reserved = 0;
50} 50}
51 51
52void mei_hbm_idle(struct mei_device *dev);
52int mei_hbm_start_req(struct mei_device *dev); 53int mei_hbm_start_req(struct mei_device *dev);
53int mei_hbm_start_wait(struct mei_device *dev); 54int mei_hbm_start_wait(struct mei_device *dev);
54int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); 55int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
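Moving MEI_HBM_STARTED ahead of the enumeration states is what makes the reworked "dev->hbm_state >= MEI_HBM_STARTED" test in mei_hbm_start_wait() meaningful: every post-start state now compares greater than or equal to it. A standalone sketch of that ordering property, with the state names copied from the new enum:

#include <stdio.h>

enum mei_hbm_state {
	MEI_HBM_IDLE = 0,
	MEI_HBM_START,
	MEI_HBM_STARTED,
	MEI_HBM_ENUM_CLIENTS,
	MEI_HBM_CLIENT_PROPERTIES,
	MEI_HBM_STOPPED,
};

int main(void)
{
	int s;

	/* states at or past STARTED satisfy the wait condition */
	for (s = MEI_HBM_IDLE; s <= MEI_HBM_STOPPED; s++)
		printf("state %d past start? %s\n", s,
		       s >= MEI_HBM_STARTED ? "yes" : "no");
	return 0;
}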
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 3412adcdaeb0..6f656c053b14 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -185,7 +185,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
185 185
186 mei_me_reg_write(hw, H_CSR, hcsr); 186 mei_me_reg_write(hw, H_CSR, hcsr);
187 187
188 if (dev->dev_state == MEI_DEV_POWER_DOWN) 188 if (intr_enable == false)
189 mei_me_hw_reset_release(dev); 189 mei_me_hw_reset_release(dev);
190 190
191 return 0; 191 return 0;
@@ -469,7 +469,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
469 struct mei_device *dev = (struct mei_device *) dev_id; 469 struct mei_device *dev = (struct mei_device *) dev_id;
470 struct mei_cl_cb complete_list; 470 struct mei_cl_cb complete_list;
471 s32 slots; 471 s32 slots;
472 int rets; 472 int rets = 0;
473 473
474 dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n"); 474 dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
475 /* initialize our complete list */ 475 /* initialize our complete list */
@@ -482,15 +482,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
482 mei_clear_interrupts(dev); 482 mei_clear_interrupts(dev);
483 483
484 /* check if ME wants a reset */ 484 /* check if ME wants a reset */
485 if (!mei_hw_is_ready(dev) && 485 if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
486 dev->dev_state != MEI_DEV_RESETTING && 486 dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
487 dev->dev_state != MEI_DEV_INITIALIZING && 487 schedule_work(&dev->reset_work);
488 dev->dev_state != MEI_DEV_POWER_DOWN && 488 goto end;
489 dev->dev_state != MEI_DEV_POWER_UP) {
490 dev_dbg(&dev->pdev->dev, "FW not ready.\n");
491 mei_reset(dev, 1);
492 mutex_unlock(&dev->device_lock);
493 return IRQ_HANDLED;
494 } 489 }
495 490
496 /* check if we need to start the dev */ 491 /* check if we need to start the dev */
@@ -500,15 +495,12 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
500 495
501 dev->recvd_hw_ready = true; 496 dev->recvd_hw_ready = true;
502 wake_up_interruptible(&dev->wait_hw_ready); 497 wake_up_interruptible(&dev->wait_hw_ready);
503
504 mutex_unlock(&dev->device_lock);
505 return IRQ_HANDLED;
506 } else { 498 } else {
499
507 dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); 500 dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
508 mei_me_hw_reset_release(dev); 501 mei_me_hw_reset_release(dev);
509 mutex_unlock(&dev->device_lock);
510 return IRQ_HANDLED;
511 } 502 }
503 goto end;
512 } 504 }
513 /* check slots available for reading */ 505 /* check slots available for reading */
514 slots = mei_count_full_read_slots(dev); 506 slots = mei_count_full_read_slots(dev);
@@ -516,21 +508,23 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
516 /* we have urgent data to send so break the read */ 508 /* we have urgent data to send so break the read */
517 if (dev->wr_ext_msg.hdr.length) 509 if (dev->wr_ext_msg.hdr.length)
518 break; 510 break;
519 dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots); 511 dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots);
520 dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
521 rets = mei_irq_read_handler(dev, &complete_list, &slots); 512 rets = mei_irq_read_handler(dev, &complete_list, &slots);
522 if (rets) 513 if (rets && dev->dev_state != MEI_DEV_RESETTING) {
514 schedule_work(&dev->reset_work);
523 goto end; 515 goto end;
516 }
524 } 517 }
518
525 rets = mei_irq_write_handler(dev, &complete_list); 519 rets = mei_irq_write_handler(dev, &complete_list);
526end:
527 dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
528 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
529 520
530 mutex_unlock(&dev->device_lock); 521 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
531 522
532 mei_irq_compl_handler(dev, &complete_list); 523 mei_irq_compl_handler(dev, &complete_list);
533 524
525end:
526 dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);
527 mutex_unlock(&dev->device_lock);
534 return IRQ_HANDLED; 528 return IRQ_HANDLED;
535} 529}
536static const struct mei_hw_ops mei_me_hw_ops = { 530static const struct mei_hw_ops mei_me_hw_ops = {
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index cb2f556b4252..dd44e33ad2b6 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -111,7 +111,8 @@ struct mei_msg_hdr {
111 u32 me_addr:8; 111 u32 me_addr:8;
112 u32 host_addr:8; 112 u32 host_addr:8;
113 u32 length:9; 113 u32 length:9;
114 u32 reserved:6; 114 u32 reserved:5;
115 u32 internal:1;
115 u32 msg_complete:1; 116 u32 msg_complete:1;
116} __packed; 117} __packed;
117 118
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index f7f3abbe12b6..cdd31c2a2a2b 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -43,41 +43,119 @@ const char *mei_dev_state_str(int state)
43#undef MEI_DEV_STATE 43#undef MEI_DEV_STATE
44} 44}
45 45
46void mei_device_init(struct mei_device *dev)
47{
48 /* setup our list array */
49 INIT_LIST_HEAD(&dev->file_list);
50 INIT_LIST_HEAD(&dev->device_list);
51 mutex_init(&dev->device_lock);
52 init_waitqueue_head(&dev->wait_hw_ready);
53 init_waitqueue_head(&dev->wait_recvd_msg);
54 init_waitqueue_head(&dev->wait_stop_wd);
55 dev->dev_state = MEI_DEV_INITIALIZING;
56 46
57 mei_io_list_init(&dev->read_list); 47/**
58 mei_io_list_init(&dev->write_list); 48 * mei_cancel_work. Cancel mei background jobs
59 mei_io_list_init(&dev->write_waiting_list); 49 *
60 mei_io_list_init(&dev->ctrl_wr_list); 50 * @dev: the device structure
61 mei_io_list_init(&dev->ctrl_rd_list); 51 *
52 * returns 0 on success or < 0 if the reset hasn't succeeded
53 */
54void mei_cancel_work(struct mei_device *dev)
55{
56 cancel_work_sync(&dev->init_work);
57 cancel_work_sync(&dev->reset_work);
62 58
63 INIT_DELAYED_WORK(&dev->timer_work, mei_timer); 59 cancel_delayed_work(&dev->timer_work);
64 INIT_WORK(&dev->init_work, mei_host_client_init); 60}
61EXPORT_SYMBOL_GPL(mei_cancel_work);
65 62
66 INIT_LIST_HEAD(&dev->wd_cl.link); 63/**
67 INIT_LIST_HEAD(&dev->iamthif_cl.link); 64 * mei_reset - resets host and fw.
68 mei_io_list_init(&dev->amthif_cmd_list); 65 *
69 mei_io_list_init(&dev->amthif_rd_complete_list); 66 * @dev: the device structure
67 */
68int mei_reset(struct mei_device *dev)
69{
70 enum mei_dev_state state = dev->dev_state;
71 bool interrupts_enabled;
72 int ret;
70 73
71 bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); 74 if (state != MEI_DEV_INITIALIZING &&
72 dev->open_handle_count = 0; 75 state != MEI_DEV_DISABLED &&
76 state != MEI_DEV_POWER_DOWN &&
77 state != MEI_DEV_POWER_UP)
78 dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
79 mei_dev_state_str(state));
73 80
74 /* 81 /* we're already in reset, cancel the init timer
75 * Reserving the first client ID 82 * if the reset was called due the hbm protocol error
76 * 0: Reserved for MEI Bus Message communications 83 * we need to call it before hw start
84 * so the hbm watchdog won't kick in
77 */ 85 */
78 bitmap_set(dev->host_clients_map, 0, 1); 86 mei_hbm_idle(dev);
87
88 /* enter reset flow */
89 interrupts_enabled = state != MEI_DEV_POWER_DOWN;
90 dev->dev_state = MEI_DEV_RESETTING;
91
92 dev->reset_count++;
93 if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
94 dev_err(&dev->pdev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
95 dev->dev_state = MEI_DEV_DISABLED;
96 return -ENODEV;
97 }
98
99 ret = mei_hw_reset(dev, interrupts_enabled);
100 /* fall through and remove the sw state even if hw reset has failed */
101
102 /* no need to clean up software state in case of power up */
103 if (state != MEI_DEV_INITIALIZING &&
104 state != MEI_DEV_POWER_UP) {
105
106 /* remove all waiting requests */
107 mei_cl_all_write_clear(dev);
108
109 mei_cl_all_disconnect(dev);
110
111 /* wake up all readers and writers so they can be interrupted */
112 mei_cl_all_wakeup(dev);
113
114 /* remove entry if already in list */
115 dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
116 mei_cl_unlink(&dev->wd_cl);
117 mei_cl_unlink(&dev->iamthif_cl);
118 mei_amthif_reset_params(dev);
119 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
120 }
121
122
123 dev->me_clients_num = 0;
124 dev->rd_msg_hdr = 0;
125 dev->wd_pending = false;
126
127 if (ret) {
128 dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret);
129 dev->dev_state = MEI_DEV_DISABLED;
130 return ret;
131 }
132
133 if (state == MEI_DEV_POWER_DOWN) {
134 dev_dbg(&dev->pdev->dev, "powering down: end of reset\n");
135 dev->dev_state = MEI_DEV_DISABLED;
136 return 0;
137 }
138
139 ret = mei_hw_start(dev);
140 if (ret) {
141 dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret);
142 dev->dev_state = MEI_DEV_DISABLED;
143 return ret;
144 }
145
146 dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
147
148 dev->dev_state = MEI_DEV_INIT_CLIENTS;
149 ret = mei_hbm_start_req(dev);
150 if (ret) {
151 dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret);
152 dev->dev_state = MEI_DEV_DISABLED;
153 return ret;
154 }
155
156 return 0;
79} 157}
80EXPORT_SYMBOL_GPL(mei_device_init); 158EXPORT_SYMBOL_GPL(mei_reset);
81 159
82/** 160/**
83 * mei_start - initializes host and fw to start work. 161 * mei_start - initializes host and fw to start work.
@@ -90,14 +168,21 @@ int mei_start(struct mei_device *dev)
90{ 168{
91 mutex_lock(&dev->device_lock); 169 mutex_lock(&dev->device_lock);
92 170
93 /* acknowledge interrupt and stop interupts */ 171 /* acknowledge interrupt and stop interrupts */
94 mei_clear_interrupts(dev); 172 mei_clear_interrupts(dev);
95 173
96 mei_hw_config(dev); 174 mei_hw_config(dev);
97 175
98 dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); 176 dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
99 177
100 mei_reset(dev, 1); 178 dev->dev_state = MEI_DEV_INITIALIZING;
179 dev->reset_count = 0;
180 mei_reset(dev);
181
182 if (dev->dev_state == MEI_DEV_DISABLED) {
183 dev_err(&dev->pdev->dev, "reset failed");
184 goto err;
185 }
101 186
102 if (mei_hbm_start_wait(dev)) { 187 if (mei_hbm_start_wait(dev)) {
103 dev_err(&dev->pdev->dev, "HBM haven't started"); 188 dev_err(&dev->pdev->dev, "HBM haven't started");
@@ -132,101 +217,64 @@ err:
132EXPORT_SYMBOL_GPL(mei_start); 217EXPORT_SYMBOL_GPL(mei_start);
133 218
134/** 219/**
135 * mei_reset - resets host and fw. 220 * mei_restart - restart device after suspend
136 * 221 *
137 * @dev: the device structure 222 * @dev: the device structure
138 * @interrupts_enabled: if interrupt should be enabled after reset. 223 *
224 * returns 0 on success or -ENODEV if the restart hasn't succeeded
139 */ 225 */
140void mei_reset(struct mei_device *dev, int interrupts_enabled) 226int mei_restart(struct mei_device *dev)
141{ 227{
142 bool unexpected; 228 int err;
143 int ret;
144
145 unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
146 dev->dev_state != MEI_DEV_DISABLED &&
147 dev->dev_state != MEI_DEV_POWER_DOWN &&
148 dev->dev_state != MEI_DEV_POWER_UP);
149 229
150 if (unexpected) 230 mutex_lock(&dev->device_lock);
151 dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
152 mei_dev_state_str(dev->dev_state));
153
154 ret = mei_hw_reset(dev, interrupts_enabled);
155 if (ret) {
156 dev_err(&dev->pdev->dev, "hw reset failed disabling the device\n");
157 interrupts_enabled = false;
158 dev->dev_state = MEI_DEV_DISABLED;
159 }
160
161 dev->hbm_state = MEI_HBM_IDLE;
162 231
163 if (dev->dev_state != MEI_DEV_INITIALIZING && 232 mei_clear_interrupts(dev);
164 dev->dev_state != MEI_DEV_POWER_UP) {
165 if (dev->dev_state != MEI_DEV_DISABLED &&
166 dev->dev_state != MEI_DEV_POWER_DOWN)
167 dev->dev_state = MEI_DEV_RESETTING;
168 233
169 /* remove all waiting requests */ 234 dev->dev_state = MEI_DEV_POWER_UP;
170 mei_cl_all_write_clear(dev); 235 dev->reset_count = 0;
171 236
172 mei_cl_all_disconnect(dev); 237 err = mei_reset(dev);
173 238
174 /* wake up all readings so they can be interrupted */ 239 mutex_unlock(&dev->device_lock);
175 mei_cl_all_wakeup(dev);
176
177 /* remove entry if already in list */
178 dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
179 mei_cl_unlink(&dev->wd_cl);
180 mei_cl_unlink(&dev->iamthif_cl);
181 mei_amthif_reset_params(dev);
182 memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
183 }
184 240
185 /* we're already in reset, cancel the init timer */ 241 if (err || dev->dev_state == MEI_DEV_DISABLED)
186 dev->init_clients_timer = 0; 242 return -ENODEV;
187 243
188 dev->me_clients_num = 0; 244 return 0;
189 dev->rd_msg_hdr = 0; 245}
190 dev->wd_pending = false; 246EXPORT_SYMBOL_GPL(mei_restart);
191 247
192 if (!interrupts_enabled) {
193 dev_dbg(&dev->pdev->dev, "intr not enabled end of reset\n");
194 return;
195 }
196 248
197 ret = mei_hw_start(dev); 249static void mei_reset_work(struct work_struct *work)
198 if (ret) { 250{
199 dev_err(&dev->pdev->dev, "hw_start failed disabling the device\n"); 251 struct mei_device *dev =
200 dev->dev_state = MEI_DEV_DISABLED; 252 container_of(work, struct mei_device, reset_work);
201 return;
202 }
203 253
204 dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); 254 mutex_lock(&dev->device_lock);
205 /* link is established * start sending messages. */
206 255
207 dev->dev_state = MEI_DEV_INIT_CLIENTS; 256 mei_reset(dev);
208 257
209 mei_hbm_start_req(dev); 258 mutex_unlock(&dev->device_lock);
210 259
260 if (dev->dev_state == MEI_DEV_DISABLED)
261 dev_err(&dev->pdev->dev, "reset failed");
211} 262}
212EXPORT_SYMBOL_GPL(mei_reset);
213 263
214void mei_stop(struct mei_device *dev) 264void mei_stop(struct mei_device *dev)
215{ 265{
216 dev_dbg(&dev->pdev->dev, "stopping the device.\n"); 266 dev_dbg(&dev->pdev->dev, "stopping the device.\n");
217 267
218 flush_scheduled_work(); 268 mei_cancel_work(dev);
219 269
220 mutex_lock(&dev->device_lock); 270 mei_nfc_host_exit(dev);
221 271
222 cancel_delayed_work(&dev->timer_work); 272 mutex_lock(&dev->device_lock);
223 273
224 mei_wd_stop(dev); 274 mei_wd_stop(dev);
225 275
226 mei_nfc_host_exit();
227
228 dev->dev_state = MEI_DEV_POWER_DOWN; 276 dev->dev_state = MEI_DEV_POWER_DOWN;
229 mei_reset(dev, 0); 277 mei_reset(dev);
230 278
231 mutex_unlock(&dev->device_lock); 279 mutex_unlock(&dev->device_lock);
232 280
@@ -236,3 +284,41 @@ EXPORT_SYMBOL_GPL(mei_stop);
236 284
237 285
238 286
287void mei_device_init(struct mei_device *dev)
288{
289 /* setup our list array */
290 INIT_LIST_HEAD(&dev->file_list);
291 INIT_LIST_HEAD(&dev->device_list);
292 mutex_init(&dev->device_lock);
293 init_waitqueue_head(&dev->wait_hw_ready);
294 init_waitqueue_head(&dev->wait_recvd_msg);
295 init_waitqueue_head(&dev->wait_stop_wd);
296 dev->dev_state = MEI_DEV_INITIALIZING;
297 dev->reset_count = 0;
298
299 mei_io_list_init(&dev->read_list);
300 mei_io_list_init(&dev->write_list);
301 mei_io_list_init(&dev->write_waiting_list);
302 mei_io_list_init(&dev->ctrl_wr_list);
303 mei_io_list_init(&dev->ctrl_rd_list);
304
305 INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
306 INIT_WORK(&dev->init_work, mei_host_client_init);
307 INIT_WORK(&dev->reset_work, mei_reset_work);
308
309 INIT_LIST_HEAD(&dev->wd_cl.link);
310 INIT_LIST_HEAD(&dev->iamthif_cl.link);
311 mei_io_list_init(&dev->amthif_cmd_list);
312 mei_io_list_init(&dev->amthif_rd_complete_list);
313
314 bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
315 dev->open_handle_count = 0;
316
317 /*
318 * Reserving the first client ID
319 * 0: Reserved for MEI Bus Message communications
320 */
321 bitmap_set(dev->host_clients_map, 0, 1);
322}
323EXPORT_SYMBOL_GPL(mei_device_init);
324
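
The revamped flow above turns mei_reset() into a single int-returning entry point and bounds retries: every attempt bumps dev->reset_count, MEI_MAX_CONSEC_RESET caps it, and a successful start (mei_start()/mei_restart()) clears it back to zero. A minimal standalone sketch of that policy, assuming a fake hardware reset that fails twice; fake_hw_reset() and the printed messages are illustrative, not driver code:

#include <stdio.h>
#include <errno.h>

#define MAX_CONSEC_RESET 3

static int reset_count;

static int fake_hw_reset(void)
{
        static int calls;

        return (++calls < 3) ? -EIO : 0;        /* fail twice, then succeed */
}

static int bounded_reset(void)
{
        if (++reset_count > MAX_CONSEC_RESET) {
                fprintf(stderr, "disabling device: too many consecutive resets\n");
                return -ENODEV;
        }
        return fake_hw_reset();
}

int main(void)
{
        int ret;

        while ((ret = bounded_reset()) != 0 && ret != -ENODEV)
                fprintf(stderr, "reset failed, retrying (count=%d)\n", reset_count);

        if (ret == 0)
                reset_count = 0;        /* a successful start clears the counter */

        printf("final ret=%d, reset_count=%d\n", ret, reset_count);
        return 0;
}
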
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 7a95c07e59a6..f0fbb5179f80 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -31,7 +31,7 @@
31 31
32 32
33/** 33/**
34 * mei_irq_compl_handler - dispatch complete handelers 34 * mei_irq_compl_handler - dispatch complete handlers
35 * for the completed callbacks 35 * for the completed callbacks
36 * 36 *
37 * @dev - mei device 37 * @dev - mei device
@@ -301,13 +301,11 @@ int mei_irq_read_handler(struct mei_device *dev,
301 struct mei_cl_cb *cmpl_list, s32 *slots) 301 struct mei_cl_cb *cmpl_list, s32 *slots)
302{ 302{
303 struct mei_msg_hdr *mei_hdr; 303 struct mei_msg_hdr *mei_hdr;
304 struct mei_cl *cl_pos = NULL; 304 struct mei_cl *cl;
305 struct mei_cl *cl_next = NULL; 305 int ret;
306 int ret = 0;
307 306
308 if (!dev->rd_msg_hdr) { 307 if (!dev->rd_msg_hdr) {
309 dev->rd_msg_hdr = mei_read_hdr(dev); 308 dev->rd_msg_hdr = mei_read_hdr(dev);
310 dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
311 (*slots)--; 309 (*slots)--;
312 dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); 310 dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
313 } 311 }
@@ -315,61 +313,67 @@ int mei_irq_read_handler(struct mei_device *dev,
315 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); 313 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
316 314
317 if (mei_hdr->reserved || !dev->rd_msg_hdr) { 315 if (mei_hdr->reserved || !dev->rd_msg_hdr) {
318 dev_dbg(&dev->pdev->dev, "corrupted message header.\n"); 316 dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n",
317 dev->rd_msg_hdr);
319 ret = -EBADMSG; 318 ret = -EBADMSG;
320 goto end; 319 goto end;
321 } 320 }
322 321
323 if (mei_hdr->host_addr || mei_hdr->me_addr) { 322 if (mei_slots2data(*slots) < mei_hdr->length) {
324 list_for_each_entry_safe(cl_pos, cl_next, 323 dev_err(&dev->pdev->dev, "less data available than length=%08x.\n",
325 &dev->file_list, link) {
326 dev_dbg(&dev->pdev->dev,
327 "list_for_each_entry_safe read host"
328 " client = %d, ME client = %d\n",
329 cl_pos->host_client_id,
330 cl_pos->me_client_id);
331 if (mei_cl_hbm_equal(cl_pos, mei_hdr))
332 break;
333 }
334
335 if (&cl_pos->link == &dev->file_list) {
336 dev_dbg(&dev->pdev->dev, "corrupted message header\n");
337 ret = -EBADMSG;
338 goto end;
339 }
340 }
341 if (((*slots) * sizeof(u32)) < mei_hdr->length) {
342 dev_err(&dev->pdev->dev,
343 "we can't read the message slots =%08x.\n",
344 *slots); 324 *slots);
345 /* we can't read the message */ 325 /* we can't read the message */
346 ret = -ERANGE; 326 ret = -ERANGE;
347 goto end; 327 goto end;
348 } 328 }
349 329
350 /* decide where to read the message too */ 330 /* HBM message */
351 if (!mei_hdr->host_addr) { 331 if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
352 dev_dbg(&dev->pdev->dev, "call mei_hbm_dispatch.\n"); 332 ret = mei_hbm_dispatch(dev, mei_hdr);
353 mei_hbm_dispatch(dev, mei_hdr); 333 if (ret) {
354 dev_dbg(&dev->pdev->dev, "end mei_hbm_dispatch.\n"); 334 dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n",
355 } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && 335 ret);
356 (MEI_FILE_CONNECTED == dev->iamthif_cl.state) && 336 goto end;
357 (dev->iamthif_state == MEI_IAMTHIF_READING)) { 337 }
338 goto reset_slots;
339 }
340
341 /* find recipient cl */
342 list_for_each_entry(cl, &dev->file_list, link) {
343 if (mei_cl_hbm_equal(cl, mei_hdr)) {
344 cl_dbg(dev, cl, "got a message\n");
345 break;
346 }
347 }
348
349 /* if no recipient cl was found we assume corrupted header */
350 if (&cl->link == &dev->file_list) {
351 dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n",
352 dev->rd_msg_hdr);
353 ret = -EBADMSG;
354 goto end;
355 }
358 356
359 dev_dbg(&dev->pdev->dev, "call mei_amthif_irq_read_msg.\n"); 357 if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
360 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); 358 MEI_FILE_CONNECTED == dev->iamthif_cl.state &&
359 dev->iamthif_state == MEI_IAMTHIF_READING) {
361 360
362 ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); 361 ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
363 if (ret) 362 if (ret) {
363 dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n",
364 ret);
364 goto end; 365 goto end;
366 }
365 } else { 367 } else {
366 dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n");
367 dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
368 ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); 368 ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
369 if (ret) 369 if (ret) {
370 dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n",
371 ret);
370 goto end; 372 goto end;
373 }
371 } 374 }
372 375
376reset_slots:
373 /* reset the number of slots and header */ 377 /* reset the number of slots and header */
374 *slots = mei_count_full_read_slots(dev); 378 *slots = mei_count_full_read_slots(dev);
375 dev->rd_msg_hdr = 0; 379 dev->rd_msg_hdr = 0;
@@ -533,7 +537,6 @@ EXPORT_SYMBOL_GPL(mei_irq_write_handler);
533 * 537 *
534 * @work: pointer to the work_struct structure 538 * @work: pointer to the work_struct structure
535 * 539 *
536 * NOTE: This function is called by timer interrupt work
537 */ 540 */
538void mei_timer(struct work_struct *work) 541void mei_timer(struct work_struct *work)
539{ 542{
@@ -548,24 +551,30 @@ void mei_timer(struct work_struct *work)
548 551
549 552
550 mutex_lock(&dev->device_lock); 553 mutex_lock(&dev->device_lock);
551 if (dev->dev_state != MEI_DEV_ENABLED) { 554
552 if (dev->dev_state == MEI_DEV_INIT_CLIENTS) { 555 /* Catch interrupt stalls during HBM init handshake */
553 if (dev->init_clients_timer) { 556 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
554 if (--dev->init_clients_timer == 0) { 557 dev->hbm_state != MEI_HBM_IDLE) {
555 dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n", 558
556 dev->hbm_state); 559 if (dev->init_clients_timer) {
557 mei_reset(dev, 1); 560 if (--dev->init_clients_timer == 0) {
558 } 561 dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n",
562 dev->hbm_state);
563 mei_reset(dev);
564 goto out;
559 } 565 }
560 } 566 }
561 goto out;
562 } 567 }
568
569 if (dev->dev_state != MEI_DEV_ENABLED)
570 goto out;
571
563 /*** connect/disconnect timeouts ***/ 572 /*** connect/disconnect timeouts ***/
564 list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { 573 list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
565 if (cl_pos->timer_count) { 574 if (cl_pos->timer_count) {
566 if (--cl_pos->timer_count == 0) { 575 if (--cl_pos->timer_count == 0) {
567 dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n"); 576 dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n");
568 mei_reset(dev, 1); 577 mei_reset(dev);
569 goto out; 578 goto out;
570 } 579 }
571 } 580 }
@@ -573,8 +582,8 @@ void mei_timer(struct work_struct *work)
573 582
574 if (dev->iamthif_stall_timer) { 583 if (dev->iamthif_stall_timer) {
575 if (--dev->iamthif_stall_timer == 0) { 584 if (--dev->iamthif_stall_timer == 0) {
576 dev_err(&dev->pdev->dev, "reset: amthif hanged.\n"); 585 dev_err(&dev->pdev->dev, "timer: amthif hanged.\n");
577 mei_reset(dev, 1); 586 mei_reset(dev);
578 dev->iamthif_msg_buf_size = 0; 587 dev->iamthif_msg_buf_size = 0;
579 dev->iamthif_msg_buf_index = 0; 588 dev->iamthif_msg_buf_index = 0;
580 dev->iamthif_canceled = false; 589 dev->iamthif_canceled = false;
@@ -627,7 +636,8 @@ void mei_timer(struct work_struct *work)
627 } 636 }
628 } 637 }
629out: 638out:
630 schedule_delayed_work(&dev->timer_work, 2 * HZ); 639 if (dev->dev_state != MEI_DEV_DISABLED)
640 schedule_delayed_work(&dev->timer_work, 2 * HZ);
631 mutex_unlock(&dev->device_lock); 641 mutex_unlock(&dev->device_lock);
632} 642}
633 643
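
The timer rework above keeps the driver's decrement-and-test idiom: a nonzero countdown armed elsewhere is decremented once per 2-second tick, and reaching zero fires the timeout path (here, a reset). The idiom in isolation, as a standalone sketch with a made-up initial value:

#include <stdio.h>

int main(void)
{
        unsigned int init_clients_timer = 3;    /* armed elsewhere; value made up */
        int tick;

        for (tick = 0; tick < 5; tick++) {
                if (init_clients_timer && --init_clients_timer == 0) {
                        printf("tick %d: timeout, trigger reset\n", tick);
                        break;
                }
                printf("tick %d: still waiting (%u ticks left)\n",
                       tick, init_clients_timer);
        }
        return 0;
}
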
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 9661a812f550..5424f8ff3f7f 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -48,7 +48,7 @@
48 * 48 *
49 * @inode: pointer to inode structure 49 * @inode: pointer to inode structure
50 * @file: pointer to file structure 50 * @file: pointer to file structure
51 e 51 *
52 * returns 0 on success, <0 on error 52 * returns 0 on success, <0 on error
53 */ 53 */
54static int mei_open(struct inode *inode, struct file *file) 54static int mei_open(struct inode *inode, struct file *file)
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 406f68e05b4e..f7de95b4cdd9 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -61,11 +61,16 @@ extern const uuid_le mei_wd_guid;
61#define MEI_CLIENTS_MAX 256 61#define MEI_CLIENTS_MAX 256
62 62
63/* 63/*
64 * maximum number of consecutive resets
65 */
66#define MEI_MAX_CONSEC_RESET 3
67
68/*
64 * Number of File descriptors/handles 69 * Number of File descriptors/handles
65 * that can be opened to the driver. 70 * that can be opened to the driver.
66 * 71 *
67 * Limit to 255: 256 Total Clients 72 * Limit to 255: 256 Total Clients
68 * minus internal client for MEI Bus Messags 73 * minus internal client for MEI Bus Messages
69 */ 74 */
70#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) 75#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
71 76
@@ -178,9 +183,10 @@ struct mei_cl_cb {
178 unsigned long buf_idx; 183 unsigned long buf_idx;
179 unsigned long read_time; 184 unsigned long read_time;
180 struct file *file_object; 185 struct file *file_object;
186 u32 internal:1;
181}; 187};
182 188
183/* MEI client instance carried as file->pirvate_data*/	189/* MEI client instance carried as file->private_data */
184struct mei_cl { 190struct mei_cl {
185 struct list_head link; 191 struct list_head link;
186 struct mei_device *dev; 192 struct mei_device *dev;
@@ -326,6 +332,7 @@ struct mei_cl_device {
326/** 332/**
327 * struct mei_device - MEI private device struct 333 * struct mei_device - MEI private device struct
328 334
335 * @reset_count - limits the number of consecutive resets
329 * @hbm_state - state of host bus message protocol 336 * @hbm_state - state of host bus message protocol
330 * @mem_addr - mem mapped base register address 337 * @mem_addr - mem mapped base register address
331 338
@@ -369,6 +376,7 @@ struct mei_device {
369 /* 376 /*
370 * mei device states 377 * mei device states
371 */ 378 */
379 unsigned long reset_count;
372 enum mei_dev_state dev_state; 380 enum mei_dev_state dev_state;
373 enum mei_hbm_state hbm_state; 381 enum mei_hbm_state hbm_state;
374 u16 init_clients_timer; 382 u16 init_clients_timer;
@@ -427,6 +435,7 @@ struct mei_device {
427 bool iamthif_canceled; 435 bool iamthif_canceled;
428 436
429 struct work_struct init_work; 437 struct work_struct init_work;
438 struct work_struct reset_work;
430 439
431 /* List of bus devices */ 440 /* List of bus devices */
432 struct list_head device_list; 441 struct list_head device_list;
@@ -456,13 +465,25 @@ static inline u32 mei_data2slots(size_t length)
456 return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); 465 return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
457} 466}
458 467
468/**
469 * mei_slots2data - convert slots to bytes
470 * @slots - number of available slots
471 * returns - number of bytes in slots
472 */
473static inline u32 mei_slots2data(int slots)
474{
475 return slots * 4;
476}
477
459/* 478/*
460 * mei init function prototypes 479 * mei init function prototypes
461 */ 480 */
462void mei_device_init(struct mei_device *dev); 481void mei_device_init(struct mei_device *dev);
463void mei_reset(struct mei_device *dev, int interrupts); 482int mei_reset(struct mei_device *dev);
464int mei_start(struct mei_device *dev); 483int mei_start(struct mei_device *dev);
484int mei_restart(struct mei_device *dev);
465void mei_stop(struct mei_device *dev); 485void mei_stop(struct mei_device *dev);
486void mei_cancel_work(struct mei_device *dev);
466 487
467/* 488/*
468 * MEI interrupt functions prototype 489 * MEI interrupt functions prototype
@@ -510,7 +531,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
510 * NFC functions 531 * NFC functions
511 */ 532 */
512int mei_nfc_host_init(struct mei_device *dev); 533int mei_nfc_host_init(struct mei_device *dev);
513void mei_nfc_host_exit(void); 534void mei_nfc_host_exit(struct mei_device *dev);
514 535
515/* 536/*
516 * NFC Client UUID 537 * NFC Client UUID
@@ -626,9 +647,9 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
626int mei_register(struct mei_device *dev); 647int mei_register(struct mei_device *dev);
627void mei_deregister(struct mei_device *dev); 648void mei_deregister(struct mei_device *dev);
628 649
629#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d" 650#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d"
630#define MEI_HDR_PRM(hdr) \ 651#define MEI_HDR_PRM(hdr) \
631 (hdr)->host_addr, (hdr)->me_addr, \ 652 (hdr)->host_addr, (hdr)->me_addr, \
632 (hdr)->length, (hdr)->msg_complete 653 (hdr)->length, (hdr)->internal, (hdr)->msg_complete
633 654
634#endif 655#endif
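
mei_slots2data() added above is the inverse of the existing mei_data2slots(): a slot is one 32-bit word, so converting in either direction is a multiply or a rounded-up divide that also accounts for the message header. A standalone sketch of the pair; struct msg_hdr is a 4-byte stand-in for struct mei_msg_hdr:

#include <stdio.h>

struct msg_hdr { unsigned int dw; };    /* 4-byte stand-in */

/* mirrors mei_data2slots(): header + payload, rounded up to 4-byte slots */
static unsigned int data2slots(size_t length)
{
        return (sizeof(struct msg_hdr) + length + 3) / 4;
}

/* mirrors mei_slots2data(): each slot holds one 32-bit word */
static unsigned int slots2data(int slots)
{
        return slots * 4;
}

int main(void)
{
        size_t len = 10;
        unsigned int slots = data2slots(len);

        printf("%zu payload bytes -> %u slots -> %u bytes of capacity\n",
               len, slots, slots2data(slots));
        return 0;
}
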
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 994ca4aff1a3..a58320c0c049 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -92,7 +92,7 @@ struct mei_nfc_hci_hdr {
92 * @cl: NFC host client 92 * @cl: NFC host client
93 * @cl_info: NFC info host client 93 * @cl_info: NFC info host client
94 * @init_work: perform connection to the info client 94 * @init_work: perform connection to the info client
95 * @fw_ivn: NFC Intervace Version Number 95 * @fw_ivn: NFC Interface Version Number
96 * @vendor_id: NFC manufacturer ID 96 * @vendor_id: NFC manufacturer ID
97 * @radio_type: NFC radio type 97 * @radio_type: NFC radio type
98 */ 98 */
@@ -163,7 +163,7 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
163 return 0; 163 return 0;
164 164
165 default: 165 default:
166 dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", 166 dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n",
167 ndev->radio_type); 167 ndev->radio_type);
168 168
169 return -EINVAL; 169 return -EINVAL;
@@ -175,14 +175,14 @@ static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
175 ndev->bus_name = "pn544"; 175 ndev->bus_name = "pn544";
176 return 0; 176 return 0;
177 default: 177 default:
178 dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", 178 dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n",
179 ndev->radio_type); 179 ndev->radio_type);
180 180
181 return -EINVAL; 181 return -EINVAL;
182 } 182 }
183 183
184 default: 184 default:
185 dev_err(&dev->pdev->dev, "Unknow vendor ID 0x%x\n", 185 dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n",
186 ndev->vendor_id); 186 ndev->vendor_id);
187 187
188 return -EINVAL; 188 return -EINVAL;
@@ -428,7 +428,7 @@ static void mei_nfc_init(struct work_struct *work)
428 mutex_unlock(&dev->device_lock); 428 mutex_unlock(&dev->device_lock);
429 429
430 if (mei_nfc_if_version(ndev) < 0) { 430 if (mei_nfc_if_version(ndev) < 0) {
431 dev_err(&dev->pdev->dev, "Could not get the NFC interfave version"); 431 dev_err(&dev->pdev->dev, "Could not get the NFC interface version");
432 432
433 goto err; 433 goto err;
434 } 434 }
@@ -469,7 +469,9 @@ static void mei_nfc_init(struct work_struct *work)
469 return; 469 return;
470 470
471err: 471err:
472 mutex_lock(&dev->device_lock);
472 mei_nfc_free(ndev); 473 mei_nfc_free(ndev);
474 mutex_unlock(&dev->device_lock);
473 475
474 return; 476 return;
475} 477}
@@ -481,7 +483,7 @@ int mei_nfc_host_init(struct mei_device *dev)
481 struct mei_cl *cl_info, *cl = NULL; 483 struct mei_cl *cl_info, *cl = NULL;
482 int i, ret; 484 int i, ret;
483 485
484 /* already initialzed */ 486 /* already initialized */
485 if (ndev->cl_info) 487 if (ndev->cl_info)
486 return 0; 488 return 0;
487 489
@@ -547,12 +549,16 @@ err:
547 return ret; 549 return ret;
548} 550}
549 551
550void mei_nfc_host_exit(void) 552void mei_nfc_host_exit(struct mei_device *dev)
551{ 553{
552 struct mei_nfc_dev *ndev = &nfc_dev; 554 struct mei_nfc_dev *ndev = &nfc_dev;
553 555
556 cancel_work_sync(&ndev->init_work);
557
558 mutex_lock(&dev->device_lock);
554 if (ndev->cl && ndev->cl->device) 559 if (ndev->cl && ndev->cl->device)
555 mei_cl_remove_device(ndev->cl->device); 560 mei_cl_remove_device(ndev->cl->device);
556 561
557 mei_nfc_free(ndev); 562 mei_nfc_free(ndev);
563 mutex_unlock(&dev->device_lock);
558} 564}
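
The reordered exit path above cancels the NFC init work before taking device_lock, because the work function itself acquires that same lock; waiting on the work while holding the lock could deadlock. The same ordering rule, sketched standalone with pthreads (compile with -pthread; all names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

static void *init_work(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&device_lock);       /* the work takes device_lock */
        puts("init_work: running under device_lock");
        pthread_mutex_unlock(&device_lock);
        return NULL;
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, init_work, NULL);

        /*
         * Safe order: wait for the worker *before* taking the lock.
         * Waiting while holding device_lock could deadlock against
         * init_work(), which is why the exit path cancels first.
         */
        pthread_join(worker, NULL);

        pthread_mutex_lock(&device_lock);
        puts("exit path: tearing down under device_lock");
        pthread_mutex_unlock(&device_lock);
        return 0;
}
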
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 2cab3c0a6805..ddadd08956f4 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -144,6 +144,21 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
144 dev_err(&pdev->dev, "failed to get pci regions.\n"); 144 dev_err(&pdev->dev, "failed to get pci regions.\n");
145 goto disable_device; 145 goto disable_device;
146 } 146 }
147
148 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
149 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
150
151 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
152 if (err)
153 err = dma_set_coherent_mask(&pdev->dev,
154 DMA_BIT_MASK(32));
155 }
156 if (err) {
157 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
158 goto release_regions;
159 }
160
161
147 /* allocates and initializes the mei dev structure */ 162 /* allocates and initializes the mei dev structure */
148 dev = mei_me_dev_init(pdev); 163 dev = mei_me_dev_init(pdev);
149 if (!dev) { 164 if (!dev) {
@@ -197,8 +212,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
197 return 0; 212 return 0;
198 213
199release_irq: 214release_irq:
215 mei_cancel_work(dev);
200 mei_disable_interrupts(dev); 216 mei_disable_interrupts(dev);
201 flush_scheduled_work();
202 free_irq(pdev->irq, dev); 217 free_irq(pdev->irq, dev);
203disable_msi: 218disable_msi:
204 pci_disable_msi(pdev); 219 pci_disable_msi(pdev);
@@ -306,16 +321,14 @@ static int mei_me_pci_resume(struct device *device)
306 return err; 321 return err;
307 } 322 }
308 323
309 mutex_lock(&dev->device_lock); 324 err = mei_restart(dev);
310 dev->dev_state = MEI_DEV_POWER_UP; 325 if (err)
311 mei_clear_interrupts(dev); 326 return err;
312 mei_reset(dev, 1);
313 mutex_unlock(&dev->device_lock);
314 327
315 /* Start timer if stopped in suspend */ 328 /* Start timer if stopped in suspend */
316 schedule_delayed_work(&dev->timer_work, HZ); 329 schedule_delayed_work(&dev->timer_work, HZ);
317 330
318 return err; 331 return 0;
319} 332}
320static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume); 333static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume);
321#define MEI_ME_PM_OPS (&mei_me_pm_ops) 334#define MEI_ME_PM_OPS (&mei_me_pm_ops)
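
The probe hunk above tries a 64-bit DMA mask and falls back to 32 bits, setting both the streaming and coherent masks. As a hedged aside, the combined helper dma_set_mask_and_coherent(), available in kernels of this generation, expresses the same fallback more compactly; a sketch, not the committed code:

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err)
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                goto release_regions;
        }
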
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 9e354216c163..f70945ed96f6 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -115,6 +115,7 @@ int mei_wd_send(struct mei_device *dev)
115 hdr.me_addr = dev->wd_cl.me_client_id; 115 hdr.me_addr = dev->wd_cl.me_client_id;
116 hdr.msg_complete = 1; 116 hdr.msg_complete = 1;
117 hdr.reserved = 0; 117 hdr.reserved = 0;
118 hdr.internal = 0;
118 119
119 if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) 120 if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
120 hdr.length = MEI_WD_START_MSG_SIZE; 121 hdr.length = MEI_WD_START_MSG_SIZE;
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 3574cc375bb9..b2da289320c9 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -134,6 +134,8 @@ struct mic_device {
134 * @send_intr: Send an interrupt for a particular doorbell on the card. 134 * @send_intr: Send an interrupt for a particular doorbell on the card.
135 * @ack_interrupt: Hardware specific operations to ack the h/w on 135 * @ack_interrupt: Hardware specific operations to ack the h/w on
136 * receipt of an interrupt. 136 * receipt of an interrupt.
137 * @intr_workarounds: Hardware specific workarounds needed after
138 * handling an interrupt.
137 * @reset: Reset the remote processor. 139 * @reset: Reset the remote processor.
138 * @reset_fw_ready: Reset firmware ready field. 140 * @reset_fw_ready: Reset firmware ready field.
139 * @is_fw_ready: Check if firmware is ready for OS download. 141 * @is_fw_ready: Check if firmware is ready for OS download.
@@ -149,6 +151,7 @@ struct mic_hw_ops {
149 void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val); 151 void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val);
150 void (*send_intr)(struct mic_device *mdev, int doorbell); 152 void (*send_intr)(struct mic_device *mdev, int doorbell);
151 u32 (*ack_interrupt)(struct mic_device *mdev); 153 u32 (*ack_interrupt)(struct mic_device *mdev);
154 void (*intr_workarounds)(struct mic_device *mdev);
152 void (*reset)(struct mic_device *mdev); 155 void (*reset)(struct mic_device *mdev);
153 void (*reset_fw_ready)(struct mic_device *mdev); 156 void (*reset_fw_ready)(struct mic_device *mdev);
154 bool (*is_fw_ready)(struct mic_device *mdev); 157 bool (*is_fw_ready)(struct mic_device *mdev);
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index ad838c7651c4..c04a021e20c7 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -115,7 +115,7 @@ static irqreturn_t mic_shutdown_db(int irq, void *data)
115 struct mic_device *mdev = data; 115 struct mic_device *mdev = data;
116 struct mic_bootparam *bootparam = mdev->dp; 116 struct mic_bootparam *bootparam = mdev->dp;
117 117
118 mdev->ops->ack_interrupt(mdev); 118 mdev->ops->intr_workarounds(mdev);
119 119
120 switch (bootparam->shutdown_status) { 120 switch (bootparam->shutdown_status) {
121 case MIC_HALTED: 121 case MIC_HALTED:
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index e04bb4fe6823..752ff873f891 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -369,7 +369,7 @@ static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
369 struct mic_vdev *mvdev = data; 369 struct mic_vdev *mvdev = data;
370 struct mic_device *mdev = mvdev->mdev; 370 struct mic_device *mdev = mvdev->mdev;
371 371
372 mdev->ops->ack_interrupt(mdev); 372 mdev->ops->intr_workarounds(mdev);
373 schedule_work(&mvdev->virtio_bh_work); 373 schedule_work(&mvdev->virtio_bh_work);
374 return IRQ_HANDLED; 374 return IRQ_HANDLED;
375} 375}
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 0dfa8a81436e..5562fdd3ef4e 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -174,35 +174,38 @@ static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
174} 174}
175 175
176/** 176/**
177 * mic_ack_interrupt - Device specific interrupt handling. 177 * mic_x100_ack_interrupt - Read the interrupt sources register and
178 * @mdev: pointer to mic_device instance 178 * clear it. This function will be called in the MSI/INTx case.
179 * @mdev: Pointer to mic_device instance.
179 * 180 *
180 * Returns: bitmask of doorbell events triggered. 181 * Returns: bitmask of interrupt sources triggered.
181 */ 182 */
182static u32 mic_x100_ack_interrupt(struct mic_device *mdev) 183static u32 mic_x100_ack_interrupt(struct mic_device *mdev)
183{ 184{
184 u32 reg = 0;
185 struct mic_mw *mw = &mdev->mmio;
186 u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0; 185 u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0;
186 u32 reg = mic_mmio_read(&mdev->mmio, sicr0);
187 mic_mmio_write(&mdev->mmio, reg, sicr0);
188 return reg;
189}
190
191/**
192 * mic_x100_intr_workarounds - These hardware specific workarounds are
193 * to be invoked everytime an interrupt is handled.
194 * @mdev: Pointer to mic_device instance.
195 *
196 * Returns: none
197 */
198static void mic_x100_intr_workarounds(struct mic_device *mdev)
199{
200 struct mic_mw *mw = &mdev->mmio;
187 201
188 /* Clear pending bit array. */ 202 /* Clear pending bit array. */
189 if (MIC_A0_STEP == mdev->stepping) 203 if (MIC_A0_STEP == mdev->stepping)
190 mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS + 204 mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS +
191 MIC_X100_SBOX_MSIXPBACR); 205 MIC_X100_SBOX_MSIXPBACR);
192 206
193 if (mdev->irq_info.num_vectors <= 1) {
194 reg = mic_mmio_read(mw, sicr0);
195
196 if (unlikely(!reg))
197 goto done;
198
199 mic_mmio_write(mw, reg, sicr0);
200 }
201
202 if (mdev->stepping >= MIC_B0_STEP) 207 if (mdev->stepping >= MIC_B0_STEP)
203 mdev->intr_ops->enable_interrupts(mdev); 208 mdev->intr_ops->enable_interrupts(mdev);
204done:
205 return reg;
206} 209}
207 210
208/** 211/**
@@ -553,6 +556,7 @@ struct mic_hw_ops mic_x100_ops = {
553 .write_spad = mic_x100_write_spad, 556 .write_spad = mic_x100_write_spad,
554 .send_intr = mic_x100_send_intr, 557 .send_intr = mic_x100_send_intr,
555 .ack_interrupt = mic_x100_ack_interrupt, 558 .ack_interrupt = mic_x100_ack_interrupt,
559 .intr_workarounds = mic_x100_intr_workarounds,
556 .reset = mic_x100_hw_reset, 560 .reset = mic_x100_hw_reset,
557 .reset_fw_ready = mic_x100_reset_fw_ready, 561 .reset_fw_ready = mic_x100_reset_fw_ready,
558 .is_fw_ready = mic_x100_is_fw_ready, 562 .is_fw_ready = mic_x100_is_fw_ready,
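
After the split above, mic_x100_ack_interrupt() only reads SICR0 and writes the value back, which clears the latched sources (write-one-to-clear semantics per the comment), while stepping-specific cleanups live in mic_x100_intr_workarounds(). A standalone model of write-one-to-clear acking, with the register faked as a variable:

#include <stdio.h>

static unsigned int sicr0 = 0x5;        /* two latched interrupt sources */

static unsigned int ack_interrupt(void)
{
        unsigned int reg = sicr0;       /* read the latched sources */

        sicr0 &= ~reg;                  /* writing them back clears them (W1C) */
        return reg;
}

int main(void)
{
        unsigned int acked = ack_interrupt();

        printf("acked sources 0x%x, now pending 0x%x\n", acked, sicr0);
        return 0;
}
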
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 652593fc486d..128d5615c804 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -828,6 +828,7 @@ enum xp_retval
828xpc_allocate_msg_wait(struct xpc_channel *ch) 828xpc_allocate_msg_wait(struct xpc_channel *ch)
829{ 829{
830 enum xp_retval ret; 830 enum xp_retval ret;
831 DEFINE_WAIT(wait);
831 832
832 if (ch->flags & XPC_C_DISCONNECTING) { 833 if (ch->flags & XPC_C_DISCONNECTING) {
833 DBUG_ON(ch->reason == xpInterrupted); 834 DBUG_ON(ch->reason == xpInterrupted);
@@ -835,7 +836,9 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
835 } 836 }
836 837
837 atomic_inc(&ch->n_on_msg_allocate_wq); 838 atomic_inc(&ch->n_on_msg_allocate_wq);
838 ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); 839 prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
840 ret = schedule_timeout(1);
841 finish_wait(&ch->msg_allocate_wq, &wait);
839 atomic_dec(&ch->n_on_msg_allocate_wq); 842 atomic_dec(&ch->n_on_msg_allocate_wq);
840 843
841 if (ch->flags & XPC_C_DISCONNECTING) { 844 if (ch->flags & XPC_C_DISCONNECTING) {
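
interruptible_sleep_on_timeout(), replaced above, is racy: a wakeup arriving between the caller's condition check and the sleep is lost. The open-coded prepare_to_wait()/schedule_timeout()/finish_wait() sequence closes that window because the task is queued on the waitqueue before the condition is re-checked. The general shape (kernel-style sketch; wq, condition, and the one-jiffy timeout are illustrative):

        DEFINE_WAIT(wait);
        long timeout = 1;       /* jiffies */

        prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
        if (!condition)         /* re-check after queueing: no lost wakeup */
                timeout = schedule_timeout(timeout);
        finish_wait(&wq, &wait);
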
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 8d64b681dd93..3aed525e55b4 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -812,7 +812,7 @@ static void st_tty_flush_buffer(struct tty_struct *tty)
812 kfree_skb(st_gdata->tx_skb); 812 kfree_skb(st_gdata->tx_skb);
813 st_gdata->tx_skb = NULL; 813 st_gdata->tx_skb = NULL;
814 814
815 tty->ops->flush_buffer(tty); 815 tty_driver_flush_buffer(tty);
816 return; 816 return;
817} 817}
818 818
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 96853a09788a..9d3dbb28734b 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -531,7 +531,6 @@ long st_kim_stop(void *kim_data)
531 /* Flush any pending characters in the driver and discipline. */ 531 /* Flush any pending characters in the driver and discipline. */
532 tty_ldisc_flush(tty); 532 tty_ldisc_flush(tty);
533 tty_driver_flush_buffer(tty); 533 tty_driver_flush_buffer(tty);
534 tty->ops->flush_buffer(tty);
535 } 534 }
536 535
537 /* send uninstall notification to UIM */ 536 /* send uninstall notification to UIM */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index c98b03b99353..d35cda06b5e8 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -165,7 +165,7 @@ static void vmci_guest_cid_update(u32 sub_id,
165 * true if required hypercalls (or fallback hypercalls) are 165 * true if required hypercalls (or fallback hypercalls) are
166 * supported by the host, false otherwise. 166 * supported by the host, false otherwise.
167 */ 167 */
168static bool vmci_check_host_caps(struct pci_dev *pdev) 168static int vmci_check_host_caps(struct pci_dev *pdev)
169{ 169{
170 bool result; 170 bool result;
171 struct vmci_resource_query_msg *msg; 171 struct vmci_resource_query_msg *msg;
@@ -176,7 +176,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev)
176 check_msg = kmalloc(msg_size, GFP_KERNEL); 176 check_msg = kmalloc(msg_size, GFP_KERNEL);
177 if (!check_msg) { 177 if (!check_msg) {
178 dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); 178 dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
179 return false; 179 return -ENOMEM;
180 } 180 }
181 181
182 check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 182 check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
@@ -196,7 +196,7 @@ static bool vmci_check_host_caps(struct pci_dev *pdev)
196 __func__, result ? "PASSED" : "FAILED"); 196 __func__, result ? "PASSED" : "FAILED");
197 197
198 /* We need the vector. There are no fallbacks. */ 198 /* We need the vector. There are no fallbacks. */
199 return result; 199 return result ? 0 : -ENXIO;
200} 200}
201 201
202/* 202/*
@@ -564,12 +564,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
564 dev_warn(&pdev->dev, 564 dev_warn(&pdev->dev,
565 "VMCI device unable to register notification bitmap with PPN 0x%x\n", 565 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
566 (u32) bitmap_ppn); 566 (u32) bitmap_ppn);
567 error = -ENXIO;
567 goto err_remove_vmci_dev_g; 568 goto err_remove_vmci_dev_g;
568 } 569 }
569 } 570 }
570 571
571 /* Check host capabilities. */ 572 /* Check host capabilities. */
572 if (!vmci_check_host_caps(pdev)) 573 error = vmci_check_host_caps(pdev);
574 if (error)
573 goto err_remove_bitmap; 575 goto err_remove_bitmap;
574 576
575 /* Enable device. */ 577 /* Enable device. */
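
Switching vmci_check_host_caps() from bool to int lets the probe path report why capability checking failed instead of a bare false. The kernel convention it adopts, zero on success and a negative errno on failure, in a standalone miniature (the names and the two failure causes are illustrative):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* 0 on success, negative errno on failure -- the kernel convention */
static int check_caps(int have_memory, int have_vector)
{
        if (!have_memory)
                return -ENOMEM;
        if (!have_vector)
                return -ENXIO;
        return 0;
}

int main(void)
{
        int err = check_caps(1, 0);

        if (err)
                printf("probe failed: %s\n", strerror(-err));
        return 0;
}
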
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 963761526229..76ee7750bc5e 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2600,8 +2600,6 @@ enum parport_pc_pci_cards {
2600 syba_2p_epp, 2600 syba_2p_epp,
2601 syba_1p_ecp, 2601 syba_1p_ecp,
2602 titan_010l, 2602 titan_010l,
2603 titan_1284p1,
2604 titan_1284p2,
2605 avlab_1p, 2603 avlab_1p,
2606 avlab_2p, 2604 avlab_2p,
2607 oxsemi_952, 2605 oxsemi_952,
@@ -2660,8 +2658,6 @@ static struct parport_pc_pci {
2660 /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } }, 2658 /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
2661 /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } }, 2659 /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
2662 /* titan_010l */ { 1, { { 3, -1 }, } }, 2660 /* titan_010l */ { 1, { { 3, -1 }, } },
2663 /* titan_1284p1 */ { 1, { { 0, 1 }, } },
2664 /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } },
2665 /* avlab_1p */ { 1, { { 0, 1}, } }, 2661 /* avlab_1p */ { 1, { { 0, 1}, } },
2666 /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} }, 2662 /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
2667 /* The Oxford Semi cards are unusual: 954 doesn't support ECP, 2663 /* The Oxford Semi cards are unusual: 954 doesn't support ECP,
@@ -2677,8 +2673,8 @@ static struct parport_pc_pci {
2677 /* netmos_9705 */ { 1, { { 0, -1 }, } }, 2673 /* netmos_9705 */ { 1, { { 0, -1 }, } },
2678 /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} }, 2674 /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
2679 /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, 2675 /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
2680 /* netmos_9805 */ { 1, { { 0, -1 }, } }, 2676 /* netmos_9805 */ { 1, { { 0, 1 }, } },
2681 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, 2677 /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } },
2682 /* netmos_9901 */ { 1, { { 0, -1 }, } }, 2678 /* netmos_9901 */ { 1, { { 0, -1 }, } },
2683 /* netmos_9865 */ { 1, { { 0, -1 }, } }, 2679 /* netmos_9865 */ { 1, { { 0, -1 }, } },
2684 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, 2680 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
@@ -2722,8 +2718,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
2722 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp }, 2718 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
2723 { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L, 2719 { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
2724 PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l }, 2720 PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
2725 { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
2726 { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
2727 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/ 2721 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
2728 /* AFAVLAB_TK9902 */ 2722 /* AFAVLAB_TK9902 */
2729 { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, 2723 { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
@@ -2827,16 +2821,12 @@ static int parport_pc_pci_probe(struct pci_dev *dev,
2827 if (irq == IRQ_NONE) { 2821 if (irq == IRQ_NONE) {
2828 printk(KERN_DEBUG 2822 printk(KERN_DEBUG
2829 "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n", 2823 "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
2830 parport_pc_pci_tbl[i + last_sio].vendor, 2824 id->vendor, id->device, io_lo, io_hi);
2831 parport_pc_pci_tbl[i + last_sio].device,
2832 io_lo, io_hi);
2833 irq = PARPORT_IRQ_NONE; 2825 irq = PARPORT_IRQ_NONE;
2834 } else { 2826 } else {
2835 printk(KERN_DEBUG 2827 printk(KERN_DEBUG
2836 "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n", 2828 "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
2837 parport_pc_pci_tbl[i + last_sio].vendor, 2829 id->vendor, id->device, io_lo, io_hi, irq);
2838 parport_pc_pci_tbl[i + last_sio].device,
2839 io_lo, io_hi, irq);
2840 } 2830 }
2841 data->ports[count] = 2831 data->ports[count] =
2842 parport_pc_probe_port(io_lo, io_hi, irq, 2832 parport_pc_probe_port(io_lo, io_hi, irq,
@@ -2866,8 +2856,6 @@ static void parport_pc_pci_remove(struct pci_dev *dev)
2866 struct pci_parport_data *data = pci_get_drvdata(dev); 2856 struct pci_parport_data *data = pci_get_drvdata(dev);
2867 int i; 2857 int i;
2868 2858
2869 pci_set_drvdata(dev, NULL);
2870
2871 if (data) { 2859 if (data) {
2872 for (i = data->num - 1; i >= 0; i--) 2860 for (i = data->num - 1; i >= 0; i--)
2873 parport_pc_unregister_port(data->ports[i]); 2861 parport_pc_unregister_port(data->ports[i]);
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index ed3b522601b3..971991bab975 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -303,7 +303,7 @@ static int bfin_cf_remove(struct platform_device *pdev)
303 303
304static struct platform_driver bfin_cf_driver = { 304static struct platform_driver bfin_cf_driver = {
305 .driver = { 305 .driver = {
306 .name = (char *)driver_name, 306 .name = driver_name,
307 .owner = THIS_MODULE, 307 .owner = THIS_MODULE,
308 }, 308 },
309 .probe = bfin_cf_probe, 309 .probe = bfin_cf_probe,
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 1b206eac5f93..5ea64d0f61ab 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -359,7 +359,7 @@ MODULE_DEVICE_TABLE(of, electra_cf_match);
359 359
360static struct platform_driver electra_cf_driver = { 360static struct platform_driver electra_cf_driver = {
361 .driver = { 361 .driver = {
362 .name = (char *)driver_name, 362 .name = driver_name,
363 .owner = THIS_MODULE, 363 .owner = THIS_MODULE,
364 .of_match_table = electra_cf_match, 364 .of_match_table = electra_cf_match,
365 }, 365 },
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 330ef2d06567..d0611b84d7f7 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -21,6 +21,12 @@ config PHY_EXYNOS_MIPI_VIDEO
21 Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P 21 Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
22 and EXYNOS SoCs. 22 and EXYNOS SoCs.
23 23
24config PHY_MVEBU_SATA
25 def_bool y
26 depends on ARCH_KIRKWOOD || ARCH_DOVE
27 depends on OF
28 select GENERIC_PHY
29
24config OMAP_USB2 30config OMAP_USB2
25 tristate "OMAP USB2 PHY Driver" 31 tristate "OMAP USB2 PHY Driver"
26 depends on ARCH_OMAP2PLUS 32 depends on ARCH_OMAP2PLUS
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index d0caae9cfb83..4e4adc96f753 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -5,5 +5,6 @@
5obj-$(CONFIG_GENERIC_PHY) += phy-core.o 5obj-$(CONFIG_GENERIC_PHY) += phy-core.o
6obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o 6obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
7obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o 7obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
8obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
8obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o 9obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o
9obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o 10obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 58e0e9739028..645c867c1257 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -94,19 +94,31 @@ static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
94 94
95int phy_pm_runtime_get(struct phy *phy) 95int phy_pm_runtime_get(struct phy *phy)
96{ 96{
97 int ret;
98
97 if (!pm_runtime_enabled(&phy->dev)) 99 if (!pm_runtime_enabled(&phy->dev))
98 return -ENOTSUPP; 100 return -ENOTSUPP;
99 101
100 return pm_runtime_get(&phy->dev); 102 ret = pm_runtime_get(&phy->dev);
103 if (ret < 0 && ret != -EINPROGRESS)
104 pm_runtime_put_noidle(&phy->dev);
105
106 return ret;
101} 107}
102EXPORT_SYMBOL_GPL(phy_pm_runtime_get); 108EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
103 109
104int phy_pm_runtime_get_sync(struct phy *phy) 110int phy_pm_runtime_get_sync(struct phy *phy)
105{ 111{
112 int ret;
113
106 if (!pm_runtime_enabled(&phy->dev)) 114 if (!pm_runtime_enabled(&phy->dev))
107 return -ENOTSUPP; 115 return -ENOTSUPP;
108 116
109 return pm_runtime_get_sync(&phy->dev); 117 ret = pm_runtime_get_sync(&phy->dev);
118 if (ret < 0)
119 pm_runtime_put_sync(&phy->dev);
120
121 return ret;
110} 122}
111EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync); 123EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
112 124
@@ -155,13 +167,14 @@ int phy_init(struct phy *phy)
155 return ret; 167 return ret;
156 168
157 mutex_lock(&phy->mutex); 169 mutex_lock(&phy->mutex);
158 if (phy->init_count++ == 0 && phy->ops->init) { 170 if (phy->init_count == 0 && phy->ops->init) {
159 ret = phy->ops->init(phy); 171 ret = phy->ops->init(phy);
160 if (ret < 0) { 172 if (ret < 0) {
161 dev_err(&phy->dev, "phy init failed --> %d\n", ret); 173 dev_err(&phy->dev, "phy init failed --> %d\n", ret);
162 goto out; 174 goto out;
163 } 175 }
164 } 176 }
177 ++phy->init_count;
165 178
166out: 179out:
167 mutex_unlock(&phy->mutex); 180 mutex_unlock(&phy->mutex);
@@ -179,13 +192,14 @@ int phy_exit(struct phy *phy)
179 return ret; 192 return ret;
180 193
181 mutex_lock(&phy->mutex); 194 mutex_lock(&phy->mutex);
182 if (--phy->init_count == 0 && phy->ops->exit) { 195 if (phy->init_count == 1 && phy->ops->exit) {
183 ret = phy->ops->exit(phy); 196 ret = phy->ops->exit(phy);
184 if (ret < 0) { 197 if (ret < 0) {
185 dev_err(&phy->dev, "phy exit failed --> %d\n", ret); 198 dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
186 goto out; 199 goto out;
187 } 200 }
188 } 201 }
202 --phy->init_count;
189 203
190out: 204out:
191 mutex_unlock(&phy->mutex); 205 mutex_unlock(&phy->mutex);
@@ -196,23 +210,27 @@ EXPORT_SYMBOL_GPL(phy_exit);
196 210
197int phy_power_on(struct phy *phy) 211int phy_power_on(struct phy *phy)
198{ 212{
199 int ret = -ENOTSUPP; 213 int ret;
200 214
201 ret = phy_pm_runtime_get_sync(phy); 215 ret = phy_pm_runtime_get_sync(phy);
202 if (ret < 0 && ret != -ENOTSUPP) 216 if (ret < 0 && ret != -ENOTSUPP)
203 return ret; 217 return ret;
204 218
205 mutex_lock(&phy->mutex); 219 mutex_lock(&phy->mutex);
206 if (phy->power_count++ == 0 && phy->ops->power_on) { 220 if (phy->power_count == 0 && phy->ops->power_on) {
207 ret = phy->ops->power_on(phy); 221 ret = phy->ops->power_on(phy);
208 if (ret < 0) { 222 if (ret < 0) {
209 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 223 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
210 goto out; 224 goto out;
211 } 225 }
212 } 226 }
227 ++phy->power_count;
228 mutex_unlock(&phy->mutex);
229 return 0;
213 230
214out: 231out:
215 mutex_unlock(&phy->mutex); 232 mutex_unlock(&phy->mutex);
233 phy_pm_runtime_put_sync(phy);
216 234
217 return ret; 235 return ret;
218} 236}
@@ -220,22 +238,22 @@ EXPORT_SYMBOL_GPL(phy_power_on);
220 238
221int phy_power_off(struct phy *phy) 239int phy_power_off(struct phy *phy)
222{ 240{
223 int ret = -ENOTSUPP; 241 int ret;
224 242
225 mutex_lock(&phy->mutex); 243 mutex_lock(&phy->mutex);
226 if (--phy->power_count == 0 && phy->ops->power_off) { 244 if (phy->power_count == 1 && phy->ops->power_off) {
227 ret = phy->ops->power_off(phy); 245 ret = phy->ops->power_off(phy);
228 if (ret < 0) { 246 if (ret < 0) {
229 dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret); 247 dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
230 goto out; 248 mutex_unlock(&phy->mutex);
249 return ret;
231 } 250 }
232 } 251 }
233 252 --phy->power_count;
234out:
235 mutex_unlock(&phy->mutex); 253 mutex_unlock(&phy->mutex);
236 phy_pm_runtime_put(phy); 254 phy_pm_runtime_put(phy);
237 255
238 return ret; 256 return 0;
239} 257}
240EXPORT_SYMBOL_GPL(phy_power_off); 258EXPORT_SYMBOL_GPL(phy_power_off);
241 259
@@ -360,7 +378,7 @@ EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
360struct phy *phy_get(struct device *dev, const char *string) 378struct phy *phy_get(struct device *dev, const char *string)
361{ 379{
362 int index = 0; 380 int index = 0;
363 struct phy *phy = NULL; 381 struct phy *phy;
364 382
365 if (string == NULL) { 383 if (string == NULL) {
366 dev_WARN(dev, "missing string\n"); 384 dev_WARN(dev, "missing string\n");
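
The phy-core hunks above move the refcount updates past the callbacks: init_count and power_count now change only after ops->init()/ops->power_on() succeed, so a failed callback no longer leaves the count skewed. The pattern in isolation, as a standalone sketch with a deliberately failing init:

#include <stdio.h>
#include <errno.h>

static int init_count;

static int failing_init(void)
{
        return -EIO;            /* stand-in for a phy->ops->init() that fails */
}

static int phy_init_fixed(void)
{
        int ret = 0;

        if (init_count == 0)
                ret = failing_init();
        if (ret == 0)
                ++init_count;   /* count only inits that actually succeeded */
        return ret;
}

int main(void)
{
        int ret = phy_init_fixed();

        printf("ret=%d, init_count=%d (stays 0 on failure)\n", ret, init_count);
        return 0;
}
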
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
new file mode 100644
index 000000000000..d43786f62437
--- /dev/null
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -0,0 +1,137 @@
1/*
2 * phy-mvebu-sata.c: SATA Phy driver for the Marvell mvebu SoCs.
3 *
4 * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/clk.h>
15#include <linux/phy/phy.h>
16#include <linux/io.h>
17#include <linux/platform_device.h>
18
19struct priv {
20 struct clk *clk;
21 void __iomem *base;
22};
23
24#define SATA_PHY_MODE_2 0x0330
25#define MODE_2_FORCE_PU_TX BIT(0)
26#define MODE_2_FORCE_PU_RX BIT(1)
27#define MODE_2_PU_PLL BIT(2)
28#define MODE_2_PU_IVREF BIT(3)
29#define SATA_IF_CTRL 0x0050
30#define CTRL_PHY_SHUTDOWN BIT(9)
31
32static int phy_mvebu_sata_power_on(struct phy *phy)
33{
34 struct priv *priv = phy_get_drvdata(phy);
35 u32 reg;
36
37 clk_prepare_enable(priv->clk);
38
39 /* Enable PLL and IVREF */
40 reg = readl(priv->base + SATA_PHY_MODE_2);
41 reg |= (MODE_2_FORCE_PU_TX | MODE_2_FORCE_PU_RX |
42 MODE_2_PU_PLL | MODE_2_PU_IVREF);
43	writel(reg, priv->base + SATA_PHY_MODE_2);
44
45 /* Enable PHY */
46 reg = readl(priv->base + SATA_IF_CTRL);
47 reg &= ~CTRL_PHY_SHUTDOWN;
48 writel(reg, priv->base + SATA_IF_CTRL);
49
50 clk_disable_unprepare(priv->clk);
51
52 return 0;
53}
54
55static int phy_mvebu_sata_power_off(struct phy *phy)
56{
57 struct priv *priv = phy_get_drvdata(phy);
58 u32 reg;
59
60 clk_prepare_enable(priv->clk);
61
62 /* Disable PLL and IVREF */
63 reg = readl(priv->base + SATA_PHY_MODE_2);
64 reg &= ~(MODE_2_FORCE_PU_TX | MODE_2_FORCE_PU_RX |
65 MODE_2_PU_PLL | MODE_2_PU_IVREF);
66 writel(reg, priv->base + SATA_PHY_MODE_2);
67
68 /* Disable PHY */
69 reg = readl(priv->base + SATA_IF_CTRL);
70 reg |= CTRL_PHY_SHUTDOWN;
71 writel(reg, priv->base + SATA_IF_CTRL);
72
73 clk_disable_unprepare(priv->clk);
74
75 return 0;
76}
77
78static struct phy_ops phy_mvebu_sata_ops = {
79 .power_on = phy_mvebu_sata_power_on,
80 .power_off = phy_mvebu_sata_power_off,
81 .owner = THIS_MODULE,
82};
83
84static int phy_mvebu_sata_probe(struct platform_device *pdev)
85{
86 struct phy_provider *phy_provider;
87 struct resource *res;
88 struct priv *priv;
89 struct phy *phy;
90
91 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
92
93 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
94 priv->base = devm_ioremap_resource(&pdev->dev, res);
95 if (IS_ERR(priv->base))
96 return PTR_ERR(priv->base);
97
98 priv->clk = devm_clk_get(&pdev->dev, "sata");
99 if (IS_ERR(priv->clk))
100 return PTR_ERR(priv->clk);
101
102 phy_provider = devm_of_phy_provider_register(&pdev->dev,
103 of_phy_simple_xlate);
104 if (IS_ERR(phy_provider))
105 return PTR_ERR(phy_provider);
106
107 phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL);
108 if (IS_ERR(phy))
109 return PTR_ERR(phy);
110
111 phy_set_drvdata(phy, priv);
112
113	/* The boot loader may have left it on. Turn it off. */
114 phy_mvebu_sata_power_off(phy);
115
116 return 0;
117}
118
119static const struct of_device_id phy_mvebu_sata_of_match[] = {
120 { .compatible = "marvell,mvebu-sata-phy" },
121 { },
122};
123MODULE_DEVICE_TABLE(of, phy_mvebu_sata_of_match);
124
125static struct platform_driver phy_mvebu_sata_driver = {
126 .probe = phy_mvebu_sata_probe,
127 .driver = {
128 .name = "phy-mvebu-sata",
129 .owner = THIS_MODULE,
130 .of_match_table = phy_mvebu_sata_of_match,
131 }
132};
133module_platform_driver(phy_mvebu_sata_driver);
134
135MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
136MODULE_DESCRIPTION("Marvell MVEBU SATA PHY driver");
137MODULE_LICENSE("GPL v2");
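
One hedged note on the new probe above: the devm_kzalloc() result is dereferenced without a check, whereas the usual kernel pattern guards the allocation before first use:

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
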
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index f7beb6eb40c7..a673e5b6a2e0 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -847,7 +847,7 @@ int __uio_register_device(struct module *owner,
847 info->uio_dev = idev; 847 info->uio_dev = idev;
848 848
849 if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { 849 if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
850 ret = devm_request_irq(parent, info->irq, uio_interrupt, 850 ret = devm_request_irq(idev->dev, info->irq, uio_interrupt,
851 info->irq_flags, info->name, idev); 851 info->irq_flags, info->name, idev);
852 if (ret) 852 if (ret)
853 goto err_request_irq; 853 goto err_request_irq;
diff --git a/drivers/uio/uio_mf624.c b/drivers/uio/uio_mf624.c
index f764adbfe036..d1f95a1567bb 100644
--- a/drivers/uio/uio_mf624.c
+++ b/drivers/uio/uio_mf624.c
@@ -228,7 +228,7 @@ static void mf624_pci_remove(struct pci_dev *dev)
228 kfree(info); 228 kfree(info);
229} 229}
230 230
231static DEFINE_PCI_DEVICE_TABLE(mf624_pci_id) = { 231static const struct pci_device_id mf624_pci_id[] = {
232 { PCI_DEVICE(PCI_VENDOR_ID_HUMUSOFT, PCI_DEVICE_ID_MF624) }, 232 { PCI_DEVICE(PCI_VENDOR_ID_HUMUSOFT, PCI_DEVICE_ID_MF624) },
233 { 0, } 233 { 0, }
234}; 234};
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 15c7251b0556..1e5d94c5afc9 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -46,7 +46,6 @@
46 46
47struct mxc_w1_device { 47struct mxc_w1_device {
48 void __iomem *regs; 48 void __iomem *regs;
49 unsigned int clkdiv;
50 struct clk *clk; 49 struct clk *clk;
51 struct w1_bus_master bus_master; 50 struct w1_bus_master bus_master;
52}; 51};
@@ -106,8 +105,10 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
106static int mxc_w1_probe(struct platform_device *pdev) 105static int mxc_w1_probe(struct platform_device *pdev)
107{ 106{
108 struct mxc_w1_device *mdev; 107 struct mxc_w1_device *mdev;
108 unsigned long clkrate;
109 struct resource *res; 109 struct resource *res;
110 int err = 0; 110 unsigned int clkdiv;
111 int err;
111 112
112 mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device), 113 mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
113 GFP_KERNEL); 114 GFP_KERNEL);
@@ -118,27 +119,39 @@ static int mxc_w1_probe(struct platform_device *pdev)
118 if (IS_ERR(mdev->clk)) 119 if (IS_ERR(mdev->clk))
119 return PTR_ERR(mdev->clk); 120 return PTR_ERR(mdev->clk);
120 121
121 mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1; 122 clkrate = clk_get_rate(mdev->clk);
123 if (clkrate < 10000000)
124 dev_warn(&pdev->dev,
125 "Low clock frequency causes improper function\n");
126
127 clkdiv = DIV_ROUND_CLOSEST(clkrate, 1000000);
128 clkrate /= clkdiv;
129 if ((clkrate < 980000) || (clkrate > 1020000))
130 dev_warn(&pdev->dev,
131 "Incorrect time base frequency %lu Hz\n", clkrate);
122 132
123 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 133 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
124 mdev->regs = devm_ioremap_resource(&pdev->dev, res); 134 mdev->regs = devm_ioremap_resource(&pdev->dev, res);
125 if (IS_ERR(mdev->regs)) 135 if (IS_ERR(mdev->regs))
126 return PTR_ERR(mdev->regs); 136 return PTR_ERR(mdev->regs);
127 137
128 clk_prepare_enable(mdev->clk); 138 err = clk_prepare_enable(mdev->clk);
129 __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER); 139 if (err)
140 return err;
141
142 __raw_writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
130 143
131 mdev->bus_master.data = mdev; 144 mdev->bus_master.data = mdev;
132 mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus; 145 mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
133 mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit; 146 mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit;
134 147
135 err = w1_add_master_device(&mdev->bus_master); 148 platform_set_drvdata(pdev, mdev);
136 149
150 err = w1_add_master_device(&mdev->bus_master);
137 if (err) 151 if (err)
138 return err; 152 clk_disable_unprepare(mdev->clk);
139 153
140 platform_set_drvdata(pdev, mdev); 154 return err;
141 return 0;
142} 155}
143 156
144/* 157/*
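
The rewritten probe above derives a 1 MHz time base with DIV_ROUND_CLOSEST() and warns when the divided rate drifts outside roughly ±2% (980 kHz to 1.02 MHz). The arithmetic, standalone, with a made-up input clock:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        unsigned long clkrate = 66500000;       /* example input clock, Hz */
        unsigned int clkdiv = DIV_ROUND_CLOSEST(clkrate, 1000000);
        unsigned long base = clkrate / clkdiv;

        printf("div=%u -> time base %lu Hz%s\n", clkdiv, base,
               (base < 980000 || base > 1020000) ? " (out of tolerance)" : "");
        return 0;
}
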
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
index 4195810f87fe..8900fdf511c6 100644
--- a/include/linux/extcon/extcon-gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
@@ -51,6 +51,7 @@ struct gpio_extcon_platform_data {
51 /* if NULL, "0" or "1" will be printed */ 51 /* if NULL, "0" or "1" will be printed */
52 const char *state_on; 52 const char *state_on;
53 const char *state_off; 53 const char *state_off;
54 bool check_on_resume;
54}; 55};
55 56
56#endif /* __EXTCON_GPIO_H__ */ 57#endif /* __EXTCON_GPIO_H__ */
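
The new check_on_resume flag lets a board opt into re-reading the cable GPIO when the system resumes. A minimal sketch of platform data setting it; the board file and field values are hypothetical, and the remaining gpio_extcon_platform_data fields are elided:

#include <linux/extcon/extcon-gpio.h>

/* Hypothetical board file; only the fields visible in the hunk above
 * are shown, the rest of the platform data is left out. */
static struct gpio_extcon_platform_data example_cable_pdata = {
	.state_on        = "attached",
	.state_off       = "detached",
	.check_on_resume = true,	/* re-check the GPIO state on resume */
};
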
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index cb49417f8ba9..b31976595eba 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -2196,6 +2196,15 @@
2196/* 2196/*
2197 * R677 (0x2A5) - Mic Detect 3 2197 * R677 (0x2A5) - Mic Detect 3
2198 */ 2198 */
2199#define ARIZONA_MICD_LVL_0 0x0004 /* MICD_LVL - [2] */
2200#define ARIZONA_MICD_LVL_1 0x0008 /* MICD_LVL - [3] */
2201#define ARIZONA_MICD_LVL_2 0x0010 /* MICD_LVL - [4] */
2202#define ARIZONA_MICD_LVL_3 0x0020 /* MICD_LVL - [5] */
2203#define ARIZONA_MICD_LVL_4 0x0040 /* MICD_LVL - [6] */
2204#define ARIZONA_MICD_LVL_5 0x0080 /* MICD_LVL - [7] */
2205#define ARIZONA_MICD_LVL_6 0x0100 /* MICD_LVL - [8] */
2206#define ARIZONA_MICD_LVL_7 0x0200 /* MICD_LVL - [9] */
2207#define ARIZONA_MICD_LVL_8 0x0400 /* MICD_LVL - [10] */
2199#define ARIZONA_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */ 2208#define ARIZONA_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
2200#define ARIZONA_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */ 2209#define ARIZONA_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
2201#define ARIZONA_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */ 2210#define ARIZONA_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
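
The new per-threshold MICD_LVL bits make it possible to name individual detection levels instead of open-coding shifts against the 9-bit field. A small standalone sketch decoding a made-up Mic Detect 3 reading with the existing mask and shift:

#include <stdio.h>

#define ARIZONA_MICD_LVL_MASK  0x07FC	/* MICD_LVL - [10:2] */
#define ARIZONA_MICD_LVL_SHIFT 2

int main(void)
{
	unsigned int reg = 0x0014;	/* hypothetical reading: LVL_0 and LVL_2 set */
	unsigned int lvl = (reg & ARIZONA_MICD_LVL_MASK) >> ARIZONA_MICD_LVL_SHIFT;
	int i;

	for (i = 0; i < 9; i++)
		if (lvl & (1u << i))
			printf("MICD level %d reached\n", i);
	return 0;
}
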
diff --git a/include/uapi/linux/genwqe/genwqe_card.h b/include/uapi/linux/genwqe/genwqe_card.h
new file mode 100644
index 000000000000..795e957bb840
--- /dev/null
+++ b/include/uapi/linux/genwqe/genwqe_card.h
@@ -0,0 +1,500 @@
1#ifndef __GENWQE_CARD_H__
2#define __GENWQE_CARD_H__
3
4/**
5 * IBM Accelerator Family 'GenWQE'
6 *
7 * (C) Copyright IBM Corp. 2013
8 *
9 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
10 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
11 * Author: Michael Jung <mijung@de.ibm.com>
12 * Author: Michael Ruettger <michael@ibmra.de>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License (version 2 only)
16 * as published by the Free Software Foundation.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 */
23
24/*
25 * User-space API for the GenWQE card. For debugging and test purposes
26 * the register addresses are included here too.
27 */
28
29#include <linux/types.h>
30#include <linux/ioctl.h>
31
32/* Basename of sysfs, debugfs and /dev interfaces */
33#define GENWQE_DEVNAME "genwqe"
34
35#define GENWQE_TYPE_ALTERA_230 0x00 /* GenWQE4 Stratix-IV-230 */
36#define GENWQE_TYPE_ALTERA_530 0x01 /* GenWQE4 Stratix-IV-530 */
37#define GENWQE_TYPE_ALTERA_A4 0x02 /* GenWQE5 A4 Stratix-V-A4 */
38#define GENWQE_TYPE_ALTERA_A7 0x03 /* GenWQE5 A7 Stratix-V-A7 */
39
40/* MMIO Unit offsets: Each UnitID occupies a defined address range */
41#define GENWQE_UID_OFFS(uid) ((uid) << 24)
42#define GENWQE_SLU_OFFS GENWQE_UID_OFFS(0)
43#define GENWQE_HSU_OFFS GENWQE_UID_OFFS(1)
44#define GENWQE_APP_OFFS GENWQE_UID_OFFS(2)
45#define GENWQE_MAX_UNITS 3
46
47/* Common offsets per UnitID */
48#define IO_EXTENDED_ERROR_POINTER 0x00000048
49#define IO_ERROR_INJECT_SELECTOR 0x00000060
50#define IO_EXTENDED_DIAG_SELECTOR 0x00000070
51#define IO_EXTENDED_DIAG_READ_MBX 0x00000078
52#define IO_EXTENDED_DIAG_MAP(ring) (0x00000500 | ((ring) << 3))
53
54#define GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace) (((ring) << 8) | (trace))
55
56/* UnitID 0: Service Layer Unit (SLU) */
57
58/* SLU: Unit Configuration Register */
59#define IO_SLU_UNITCFG 0x00000000
60#define IO_SLU_UNITCFG_TYPE_MASK 0x000000000ff00000 /* 27:20 */
61
62/* SLU: Fault Isolation Register (FIR) (ac_slu_fir) */
63#define IO_SLU_FIR 0x00000008 /* read only, wr direct */
64#define IO_SLU_FIR_CLR 0x00000010 /* read and clear */
65
66/* SLU: First Error Capture Register (FEC/WOF) */
67#define IO_SLU_FEC 0x00000018
68
69#define IO_SLU_ERR_ACT_MASK 0x00000020
70#define IO_SLU_ERR_ATTN_MASK 0x00000028
71#define IO_SLU_FIRX1_ACT_MASK 0x00000030
72#define IO_SLU_FIRX0_ACT_MASK 0x00000038
73#define IO_SLU_SEC_LEM_DEBUG_OVR 0x00000040
74#define IO_SLU_EXTENDED_ERR_PTR 0x00000048
75#define IO_SLU_COMMON_CONFIG 0x00000060
76
77#define IO_SLU_FLASH_FIR 0x00000108
78#define IO_SLU_SLC_FIR 0x00000110
79#define IO_SLU_RIU_TRAP 0x00000280
80#define IO_SLU_FLASH_FEC 0x00000308
81#define IO_SLU_SLC_FEC 0x00000310
82
83/*
84 * The Virtual Function's Access is from offset 0x00010000
85 * The Physical Function's Access is from offset 0x00050000
 86 * Single Shared Registers exist only at offset 0x00060000
87 *
 88 * SLC: Queue Virtual Window for accessing a specific VF
89 * queue. When accessing the 0x10000 space using the 0x50000 address
90 * segment, the value indicated here is used to specify which VF
91 * register is decoded. This register, and the 0x50000 register space
 92 * can only be accessed by the PF. For example, if this register is set to
93 * 0x2, then a read from 0x50000 is the same as a read from 0x10000
94 * from VF=2.
95 */
96
97/* SLC: Queue Segment */
98#define IO_SLC_QUEUE_SEGMENT 0x00010000
99#define IO_SLC_VF_QUEUE_SEGMENT 0x00050000
100
101/* SLC: Queue Offset */
102#define IO_SLC_QUEUE_OFFSET 0x00010008
103#define IO_SLC_VF_QUEUE_OFFSET 0x00050008
104
105/* SLC: Queue Configuration */
106#define IO_SLC_QUEUE_CONFIG 0x00010010
107#define IO_SLC_VF_QUEUE_CONFIG 0x00050010
108
 109/* SLC: Job Timeout/Only accessible for the PF */
110#define IO_SLC_APPJOB_TIMEOUT 0x00010018
111#define IO_SLC_VF_APPJOB_TIMEOUT 0x00050018
112#define TIMEOUT_250MS 0x0000000f
113#define HEARTBEAT_DISABLE 0x0000ff00
114
115/* SLC: Queue InitSequence Register */
116#define IO_SLC_QUEUE_INITSQN 0x00010020
117#define IO_SLC_VF_QUEUE_INITSQN 0x00050020
118
119/* SLC: Queue Wrap */
120#define IO_SLC_QUEUE_WRAP 0x00010028
121#define IO_SLC_VF_QUEUE_WRAP 0x00050028
122
123/* SLC: Queue Status */
124#define IO_SLC_QUEUE_STATUS 0x00010100
125#define IO_SLC_VF_QUEUE_STATUS 0x00050100
126
127/* SLC: Queue Working Time */
128#define IO_SLC_QUEUE_WTIME 0x00010030
129#define IO_SLC_VF_QUEUE_WTIME 0x00050030
130
131/* SLC: Queue Error Counts */
132#define IO_SLC_QUEUE_ERRCNTS 0x00010038
133#define IO_SLC_VF_QUEUE_ERRCNTS 0x00050038
134
 135/* SLC: Queue Last Response Word */
136#define IO_SLC_QUEUE_LRW 0x00010040
137#define IO_SLC_VF_QUEUE_LRW 0x00050040
138
139/* SLC: Freerunning Timer */
140#define IO_SLC_FREE_RUNNING_TIMER 0x00010108
141#define IO_SLC_VF_FREE_RUNNING_TIMER 0x00050108
142
143/* SLC: Queue Virtual Access Region */
144#define IO_PF_SLC_VIRTUAL_REGION 0x00050000
145
146/* SLC: Queue Virtual Window */
147#define IO_PF_SLC_VIRTUAL_WINDOW 0x00060000
148
149/* SLC: DDCB Application Job Pending [n] (n=0:63) */
150#define IO_PF_SLC_JOBPEND(n) (0x00061000 + 8*(n))
151#define IO_SLC_JOBPEND(n) IO_PF_SLC_JOBPEND(n)
152
153/* SLC: Parser Trap RAM [n] (n=0:31) */
154#define IO_SLU_SLC_PARSE_TRAP(n) (0x00011000 + 8*(n))
155
156/* SLC: Dispatcher Trap RAM [n] (n=0:31) */
157#define IO_SLU_SLC_DISP_TRAP(n) (0x00011200 + 8*(n))
158
159/* Global Fault Isolation Register (GFIR) */
160#define IO_SLC_CFGREG_GFIR 0x00020000
161#define GFIR_ERR_TRIGGER 0x0000ffff
162
163/* SLU: Soft Reset Register */
164#define IO_SLC_CFGREG_SOFTRESET 0x00020018
165
166/* SLU: Misc Debug Register */
167#define IO_SLC_MISC_DEBUG 0x00020060
168#define IO_SLC_MISC_DEBUG_CLR 0x00020068
169#define IO_SLC_MISC_DEBUG_SET 0x00020070
170
171/* Temperature Sensor Reading */
172#define IO_SLU_TEMPERATURE_SENSOR 0x00030000
173#define IO_SLU_TEMPERATURE_CONFIG 0x00030008
174
175/* Voltage Margining Control */
176#define IO_SLU_VOLTAGE_CONTROL 0x00030080
177#define IO_SLU_VOLTAGE_NOMINAL 0x00000000
178#define IO_SLU_VOLTAGE_DOWN5 0x00000006
179#define IO_SLU_VOLTAGE_UP5 0x00000007
180
181/* Direct LED Control Register */
182#define IO_SLU_LEDCONTROL 0x00030100
183
184/* SLU: Flashbus Direct Access -A5 */
185#define IO_SLU_FLASH_DIRECTACCESS 0x00040010
186
187/* SLU: Flashbus Direct Access2 -A5 */
188#define IO_SLU_FLASH_DIRECTACCESS2 0x00040020
189
190/* SLU: Flashbus Command Interface -A5 */
191#define IO_SLU_FLASH_CMDINTF 0x00040030
192
193/* SLU: BitStream Loaded */
194#define IO_SLU_BITSTREAM 0x00040040
195
196/* This Register has a switch which will change the CAs to UR */
197#define IO_HSU_ERR_BEHAVIOR 0x01001010
198
199#define IO_SLC2_SQB_TRAP 0x00062000
200#define IO_SLC2_QUEUE_MANAGER_TRAP 0x00062008
201#define IO_SLC2_FLS_MASTER_TRAP 0x00062010
202
203/* UnitID 1: HSU Registers */
204#define IO_HSU_UNITCFG 0x01000000
205#define IO_HSU_FIR 0x01000008
206#define IO_HSU_FIR_CLR 0x01000010
207#define IO_HSU_FEC 0x01000018
208#define IO_HSU_ERR_ACT_MASK 0x01000020
209#define IO_HSU_ERR_ATTN_MASK 0x01000028
210#define IO_HSU_FIRX1_ACT_MASK 0x01000030
211#define IO_HSU_FIRX0_ACT_MASK 0x01000038
212#define IO_HSU_SEC_LEM_DEBUG_OVR 0x01000040
213#define IO_HSU_EXTENDED_ERR_PTR 0x01000048
214#define IO_HSU_COMMON_CONFIG 0x01000060
215
216/* UnitID 2: Application Unit (APP) */
217#define IO_APP_UNITCFG 0x02000000
218#define IO_APP_FIR 0x02000008
219#define IO_APP_FIR_CLR 0x02000010
220#define IO_APP_FEC 0x02000018
221#define IO_APP_ERR_ACT_MASK 0x02000020
222#define IO_APP_ERR_ATTN_MASK 0x02000028
223#define IO_APP_FIRX1_ACT_MASK 0x02000030
224#define IO_APP_FIRX0_ACT_MASK 0x02000038
225#define IO_APP_SEC_LEM_DEBUG_OVR 0x02000040
226#define IO_APP_EXTENDED_ERR_PTR 0x02000048
227#define IO_APP_COMMON_CONFIG 0x02000060
228
229#define IO_APP_DEBUG_REG_01 0x02010000
230#define IO_APP_DEBUG_REG_02 0x02010008
231#define IO_APP_DEBUG_REG_03 0x02010010
232#define IO_APP_DEBUG_REG_04 0x02010018
233#define IO_APP_DEBUG_REG_05 0x02010020
234#define IO_APP_DEBUG_REG_06 0x02010028
235#define IO_APP_DEBUG_REG_07 0x02010030
236#define IO_APP_DEBUG_REG_08 0x02010038
237#define IO_APP_DEBUG_REG_09 0x02010040
238#define IO_APP_DEBUG_REG_10 0x02010048
239#define IO_APP_DEBUG_REG_11 0x02010050
240#define IO_APP_DEBUG_REG_12 0x02010058
241#define IO_APP_DEBUG_REG_13 0x02010060
242#define IO_APP_DEBUG_REG_14 0x02010068
243#define IO_APP_DEBUG_REG_15 0x02010070
244#define IO_APP_DEBUG_REG_16 0x02010078
245#define IO_APP_DEBUG_REG_17 0x02010080
246#define IO_APP_DEBUG_REG_18 0x02010088
247
248/* Read/write from/to registers */
249struct genwqe_reg_io {
250 __u64 num; /* register offset/address */
251 __u64 val64;
252};
253
254/*
 255 * All registers of our card will return values not equal to this value.
 256 * If we see IO_ILLEGAL_VALUE on any of our MMIO register reads, the
 257 * card can be considered unusable. It will need recovery.
258 */
259#define IO_ILLEGAL_VALUE 0xffffffffffffffffull
260
261/*
262 * Generic DDCB execution interface.
263 *
264 * This interface is a first prototype resulting from discussions we
 265 * had with other teams which wanted to use the Genwqe card. It allows
 266 * issuing a DDCB request in a generic way. The request will block
 267 * until it finishes or times out with an error.
268 *
269 * Some DDCBs require DMA addresses to be specified in the ASIV
 270 * block. The interface provides the capability to let the kernel
271 * driver know where those addresses are by specifying the ATS field,
272 * such that it can replace the user-space addresses with appropriate
273 * DMA addresses or DMA addresses of a scatter gather list which is
274 * dynamically created.
275 *
276 * Our hardware will refuse DDCB execution if the ATS field is not as
277 * expected. That means the DDCB execution engine in the chip knows
278 * where it expects DMA addresses within the ASIV part of the DDCB and
279 * will check that against the ATS field definition. Any invalid or
280 * unknown ATS content will lead to DDCB refusal.
281 */
282
283/* Genwqe chip Units */
284#define DDCB_ACFUNC_SLU 0x00 /* chip service layer unit */
285#define DDCB_ACFUNC_APP 0x01 /* chip application */
286
287/* DDCB return codes (RETC) */
288#define DDCB_RETC_IDLE 0x0000 /* Unexecuted/DDCB created */
289#define DDCB_RETC_PENDING 0x0101 /* Pending Execution */
290#define DDCB_RETC_COMPLETE 0x0102 /* Cmd complete. No error */
291#define DDCB_RETC_FAULT 0x0104 /* App Err, recoverable */
292#define DDCB_RETC_ERROR 0x0108 /* App Err, non-recoverable */
293#define DDCB_RETC_FORCED_ERROR 0x01ff /* overwritten by driver */
294
295#define DDCB_RETC_UNEXEC 0x0110 /* Unexe/Removed from queue */
296#define DDCB_RETC_TERM 0x0120 /* Terminated */
297#define DDCB_RETC_RES0 0x0140 /* Reserved */
298#define DDCB_RETC_RES1 0x0180 /* Reserved */
299
300/* DDCB Command Options (CMDOPT) */
301#define DDCB_OPT_ECHO_FORCE_NO 0x0000 /* ECHO DDCB */
302#define DDCB_OPT_ECHO_FORCE_102 0x0001 /* force return code */
303#define DDCB_OPT_ECHO_FORCE_104 0x0002
304#define DDCB_OPT_ECHO_FORCE_108 0x0003
305
306#define DDCB_OPT_ECHO_FORCE_110 0x0004 /* only on PF ! */
307#define DDCB_OPT_ECHO_FORCE_120 0x0005
308#define DDCB_OPT_ECHO_FORCE_140 0x0006
309#define DDCB_OPT_ECHO_FORCE_180 0x0007
310
311#define DDCB_OPT_ECHO_COPY_NONE (0 << 5)
312#define DDCB_OPT_ECHO_COPY_ALL (1 << 5)
313
314/* Definitions of Service Layer Commands */
315#define SLCMD_ECHO_SYNC 0x00 /* PF/VF */
316#define SLCMD_MOVE_FLASH 0x06 /* PF only */
317#define SLCMD_MOVE_FLASH_FLAGS_MODE 0x03 /* bit 0 and 1 used for mode */
318#define SLCMD_MOVE_FLASH_FLAGS_DLOAD 0 /* mode: download */
319#define SLCMD_MOVE_FLASH_FLAGS_EMUL 1 /* mode: emulation */
320#define SLCMD_MOVE_FLASH_FLAGS_UPLOAD 2 /* mode: upload */
321#define SLCMD_MOVE_FLASH_FLAGS_VERIFY 3 /* mode: verify */
322#define SLCMD_MOVE_FLASH_FLAG_NOTAP (1 << 2)/* just dump DDCB and exit */
323#define SLCMD_MOVE_FLASH_FLAG_POLL (1 << 3)/* wait for RETC >= 0102 */
324#define SLCMD_MOVE_FLASH_FLAG_PARTITION (1 << 4)
325#define SLCMD_MOVE_FLASH_FLAG_ERASE (1 << 5)
326
327enum genwqe_card_state {
328 GENWQE_CARD_UNUSED = 0,
329 GENWQE_CARD_USED = 1,
330 GENWQE_CARD_FATAL_ERROR = 2,
331 GENWQE_CARD_STATE_MAX,
332};
333
334/* common struct for chip image exchange */
335struct genwqe_bitstream {
336 __u64 data_addr; /* pointer to image data */
337 __u32 size; /* size of image file */
338 __u32 crc; /* crc of this image */
339 __u64 target_addr; /* starting address in Flash */
340 __u32 partition; /* '0', '1', or 'v' */
341 __u32 uid; /* 1=host/x=dram */
342
343 __u64 slu_id; /* informational/sim: SluID */
344 __u64 app_id; /* informational/sim: AppID */
345
346 __u16 retc; /* returned from processing */
347 __u16 attn; /* attention code from processing */
348 __u32 progress; /* progress code from processing */
349};
350
351/* Issuing a specific DDCB command */
352#define DDCB_LENGTH 256 /* for debug data */
353#define DDCB_ASIV_LENGTH 104 /* len of the DDCB ASIV array */
354#define DDCB_ASIV_LENGTH_ATS 96 /* ASIV in ATS architecture */
355#define DDCB_ASV_LENGTH 64 /* len of the DDCB ASV array */
356#define DDCB_FIXUPS 12 /* maximum number of fixups */
357
358struct genwqe_debug_data {
359 char driver_version[64];
360 __u64 slu_unitcfg;
361 __u64 app_unitcfg;
362
363 __u8 ddcb_before[DDCB_LENGTH];
364 __u8 ddcb_prev[DDCB_LENGTH];
365 __u8 ddcb_finished[DDCB_LENGTH];
366};
367
368/*
369 * Address Translation Specification (ATS) definitions
370 *
 371 * Each 4-bit nibble within the ATS 64-bit word specifies the required address
372 * translation at the defined offset.
373 *
374 * 63 LSB
375 * 6666.5555.5555.5544.4444.4443.3333.3333 ... 11
376 * 3210.9876.5432.1098.7654.3210.9876.5432 ... 1098.7654.3210
377 *
378 * offset: 0x00 0x08 0x10 0x18 0x20 0x28 0x30 0x38 ... 0x68 0x70 0x78
379 * res res res res ASIV ...
 380 * The first 4 entries in the ATS word are reserved. Each following nibble
 381 * describes the format of the data required at the corresponding 8-byte offset.
382 */
383#define ATS_TYPE_DATA 0x0ull /* data */
384#define ATS_TYPE_FLAT_RD 0x4ull /* flat buffer read only */
385#define ATS_TYPE_FLAT_RDWR 0x5ull /* flat buffer read/write */
386#define ATS_TYPE_SGL_RD 0x6ull /* sgl read only */
387#define ATS_TYPE_SGL_RDWR 0x7ull /* sgl read/write */
388
389#define ATS_SET_FLAGS(_struct, _field, _flags) \
390 (((_flags) & 0xf) << (44 - (4 * (offsetof(_struct, _field) / 8))))
391
392#define ATS_GET_FLAGS(_ats, _byte_offs) \
393 (((_ats) >> (44 - (4 * ((_byte_offs) / 8)))) & 0xf)
394
395/**
396 * struct genwqe_ddcb_cmd - User parameter for generic DDCB commands
397 *
398 * On the way into the kernel the driver will read the whole data
399 * structure. On the way out the driver will not copy the ASIV data
400 * back to user-space.
401 */
402struct genwqe_ddcb_cmd {
403 /* START of data copied to/from driver */
404 __u64 next_addr; /* chaining genwqe_ddcb_cmd */
405 __u64 flags; /* reserved */
406
 407 __u8 acfunc; /* accelerator's functional unit */
408 __u8 cmd; /* command to execute */
409 __u8 asiv_length; /* used parameter length */
410 __u8 asv_length; /* length of valid return values */
411 __u16 cmdopts; /* command options */
412 __u16 retc; /* return code from processing */
413
414 __u16 attn; /* attention code from processing */
415 __u16 vcrc; /* variant crc16 */
416 __u32 progress; /* progress code from processing */
417
418 __u64 deque_ts; /* dequeue time stamp */
419 __u64 cmplt_ts; /* completion time stamp */
420 __u64 disp_ts; /* SW processing start */
421
422 /* move to end and avoid copy-back */
423 __u64 ddata_addr; /* collect debug data */
424
425 /* command specific values */
426 __u8 asv[DDCB_ASV_LENGTH];
427
428 /* END of data copied from driver */
429 union {
430 struct {
431 __u64 ats;
432 __u8 asiv[DDCB_ASIV_LENGTH_ATS];
433 };
434 /* used for flash update to keep it backward compatible */
435 __u8 __asiv[DDCB_ASIV_LENGTH];
436 };
437 /* END of data copied to driver */
438};
439
440#define GENWQE_IOC_CODE 0xa5
441
442/* Access functions */
443#define GENWQE_READ_REG64 _IOR(GENWQE_IOC_CODE, 30, struct genwqe_reg_io)
444#define GENWQE_WRITE_REG64 _IOW(GENWQE_IOC_CODE, 31, struct genwqe_reg_io)
445#define GENWQE_READ_REG32 _IOR(GENWQE_IOC_CODE, 32, struct genwqe_reg_io)
446#define GENWQE_WRITE_REG32 _IOW(GENWQE_IOC_CODE, 33, struct genwqe_reg_io)
447#define GENWQE_READ_REG16 _IOR(GENWQE_IOC_CODE, 34, struct genwqe_reg_io)
448#define GENWQE_WRITE_REG16 _IOW(GENWQE_IOC_CODE, 35, struct genwqe_reg_io)
449
450#define GENWQE_GET_CARD_STATE _IOR(GENWQE_IOC_CODE, 36, enum genwqe_card_state)
451
452/**
453 * struct genwqe_mem - Memory pinning/unpinning information
454 * @addr: virtual user space address
 455 * @size: size of the area to pin/dma-map/unmap
 456 * @direction: 0: read / 1: read and write
457 *
458 * Avoid pinning and unpinning of memory pages dynamically. Instead
459 * the idea is to pin the whole buffer space required for DDCB
 460 * operations in advance. The driver will reuse this pinning and the
 461 * memory associated with it to set up the sglists for the DDCB
462 * requests without the need to allocate and free memory or map and
463 * unmap to get the DMA addresses.
464 *
 465 * The inverse operation needs to be called once the pinning is no
 466 * longer needed; otherwise the pinnings will be removed when the
 467 * device is closed.
 468 * Note that pinnings require memory.
469 */
470struct genwqe_mem {
471 __u64 addr;
472 __u64 size;
473 __u64 direction;
474 __u64 flags;
475};
476
477#define GENWQE_PIN_MEM _IOWR(GENWQE_IOC_CODE, 40, struct genwqe_mem)
478#define GENWQE_UNPIN_MEM _IOWR(GENWQE_IOC_CODE, 41, struct genwqe_mem)
479
480/*
481 * Generic synchronous DDCB execution interface.
482 * Synchronously execute a DDCB.
483 *
484 * Return: 0 on success or negative error code.
 485 * -EINVAL: Invalid parameters (ASIV_LEN, ASV_LEN, illegal fixups,
 486 * no mappings found/could not create mappings)
 487 * -EFAULT: illegal addresses in fixups, purging failed
 488 * -EBADMSG: enqueuing failed, retc != DDCB_RETC_COMPLETE
489 */
490#define GENWQE_EXECUTE_DDCB \
491 _IOWR(GENWQE_IOC_CODE, 50, struct genwqe_ddcb_cmd)
492
493#define GENWQE_EXECUTE_RAW_DDCB \
494 _IOWR(GENWQE_IOC_CODE, 51, struct genwqe_ddcb_cmd)
495
496/* Service Layer functions (PF only) */
497#define GENWQE_SLU_UPDATE _IOWR(GENWQE_IOC_CODE, 80, struct genwqe_bitstream)
498#define GENWQE_SLU_READ _IOWR(GENWQE_IOC_CODE, 81, struct genwqe_bitstream)
499
500#endif /* __GENWQE_CARD_H__ */
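
The header above defines everything a user-space client needs to talk to the card's character device: register access, buffer pinning, and synchronous DDCB execution. A hedged sketch of that ioctl flow follows; the device node name is an assumption based on GENWQE_DEVNAME, and whether an ECHO DDCB needs any ASIV payload is likewise assumed, not taken from the driver:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/genwqe/genwqe_card.h>

int main(void)
{
	struct genwqe_reg_io io = { .num = IO_SLU_UNITCFG };
	struct genwqe_mem pin;
	struct genwqe_ddcb_cmd cmd;
	void *buf;
	int fd;

	/* Device node name is an assumption derived from GENWQE_DEVNAME */
	fd = open("/dev/genwqe0_card", O_RDWR);
	if (fd < 0)
		return 1;

	/* Read the SLU unit configuration register */
	if (ioctl(fd, GENWQE_READ_REG64, &io) == 0)
		printf("SLU_UNITCFG = 0x%llx\n", (unsigned long long)io.val64);

	/* Pin a buffer up front so later DDCBs can reuse the mapping */
	buf = aligned_alloc(4096, 4096);
	if (!buf) {
		close(fd);
		return 1;
	}
	pin.addr = (unsigned long)buf;
	pin.size = 4096;
	pin.direction = 1;		/* read and write */
	pin.flags = 0;
	ioctl(fd, GENWQE_PIN_MEM, &pin);

	/* Synchronous ECHO DDCB against the service layer unit; no DMA
	 * addresses in the ASIV, so the ATS word stays zero */
	memset(&cmd, 0, sizeof(cmd));
	cmd.acfunc = DDCB_ACFUNC_SLU;
	cmd.cmd = SLCMD_ECHO_SYNC;
	cmd.asv_length = DDCB_ASV_LENGTH;
	if (ioctl(fd, GENWQE_EXECUTE_DDCB, &cmd) == 0 &&
	    cmd.retc == DDCB_RETC_COMPLETE)
		printf("echo done, attn=0x%x progress=%u\n",
		       cmd.attn, cmd.progress);

	ioctl(fd, GENWQE_UNPIN_MEM, &pin);
	close(fd);
	free(buf);
	return 0;
}
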
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index b8d6d541d854..4088b816a3ee 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -26,7 +26,6 @@
26#include <sys/socket.h> 26#include <sys/socket.h>
27#include <sys/poll.h> 27#include <sys/poll.h>
28#include <sys/utsname.h> 28#include <sys/utsname.h>
29#include <linux/types.h>
30#include <stdio.h> 29#include <stdio.h>
31#include <stdlib.h> 30#include <stdlib.h>
32#include <unistd.h> 31#include <unistd.h>
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index 8bcb04096eb2..520de3304571 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -22,7 +22,6 @@
22#include <sys/socket.h> 22#include <sys/socket.h>
23#include <sys/poll.h> 23#include <sys/poll.h>
24#include <sys/ioctl.h> 24#include <sys/ioctl.h>
25#include <linux/types.h>
26#include <fcntl.h> 25#include <fcntl.h>
27#include <stdio.h> 26#include <stdio.h>
28#include <mntent.h> 27#include <mntent.h>