diff options
183 files changed, 9949 insertions, 5710 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus index 636e938d5e33..5d0125f7bcaf 100644 --- a/Documentation/ABI/stable/sysfs-bus-vmbus +++ b/Documentation/ABI/stable/sysfs-bus-vmbus | |||
@@ -27,3 +27,17 @@ Description: The mapping of which primary/sub channels are bound to which | |||
27 | Virtual Processors. | 27 | Virtual Processors. |
28 | Format: <channel's child_relid:the bound cpu's number> | 28 | Format: <channel's child_relid:the bound cpu's number> |
29 | Users: tools/hv/lsvmbus | 29 | Users: tools/hv/lsvmbus |
30 | |||
31 | What: /sys/bus/vmbus/devices/vmbus_*/device | ||
32 | Date: Dec. 2015 | ||
33 | KernelVersion: 4.5 | ||
34 | Contact: K. Y. Srinivasan <kys@microsoft.com> | ||
35 | Description: The 16 bit device ID of the device | ||
36 | Users: tools/hv/lsvmbus and user level RDMA libraries | ||
37 | |||
38 | What: /sys/bus/vmbus/devices/vmbus_*/vendor | ||
39 | Date: Dec. 2015 | ||
40 | KernelVersion: 4.5 | ||
41 | Contact: K. Y. Srinivasan <kys@microsoft.com> | ||
42 | Description: The 16 bit vendor ID of the device | ||
43 | Users: tools/hv/lsvmbus and user level RDMA libraries | ||
diff --git a/Documentation/devicetree/bindings/goldfish/pipe.txt b/Documentation/devicetree/bindings/goldfish/pipe.txt new file mode 100644 index 000000000000..e417a31a1ee3 --- /dev/null +++ b/Documentation/devicetree/bindings/goldfish/pipe.txt | |||
@@ -0,0 +1,17 @@ | |||
1 | Android Goldfish QEMU Pipe | ||
2 | |||
3 | Android pipe virtual device generated by android emulator. | ||
4 | |||
5 | Required properties: | ||
6 | |||
7 | - compatible : should contain "google,android-pipe" to match emulator | ||
8 | - reg : <registers mapping> | ||
9 | - interrupts : <interrupt mapping> | ||
10 | |||
11 | Example: | ||
12 | |||
13 | android_pipe@a010000 { | ||
14 | compatible = "google,android-pipe"; | ||
15 | reg = <0xff018000 0x2000>; | ||
16 | interrupts = <0x12>; | ||
17 | }; | ||
diff --git a/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt b/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt new file mode 100644 index 000000000000..a8ebb4621f79 --- /dev/null +++ b/Documentation/devicetree/bindings/misc/eeprom-93xx46.txt | |||
@@ -0,0 +1,25 @@ | |||
1 | EEPROMs (SPI) compatible with Microchip Technology 93xx46 family. | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : shall be one of: | ||
5 | "atmel,at93c46d" | ||
6 | "eeprom-93xx46" | ||
7 | - data-size : number of data bits per word (either 8 or 16) | ||
8 | |||
9 | Optional properties: | ||
10 | - read-only : parameter-less property which disables writes to the EEPROM | ||
11 | - select-gpios : if present, specifies the GPIO that will be asserted prior to | ||
12 | each access to the EEPROM (e.g. for SPI bus multiplexing) | ||
13 | |||
14 | Property rules described in Documentation/devicetree/bindings/spi/spi-bus.txt | ||
15 | apply. In particular, "reg" and "spi-max-frequency" properties must be given. | ||
16 | |||
17 | Example: | ||
18 | eeprom@0 { | ||
19 | compatible = "eeprom-93xx46"; | ||
20 | reg = <0>; | ||
21 | spi-max-frequency = <1000000>; | ||
22 | spi-cs-high; | ||
23 | data-size = <8>; | ||
24 | select-gpios = <&gpio4 4 GPIO_ACTIVE_HIGH>; | ||
25 | }; | ||
diff --git a/Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt b/Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt new file mode 100644 index 000000000000..809df68f6e14 --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt | |||
@@ -0,0 +1,28 @@ | |||
1 | * NXP LPC18xx EEPROM memory NVMEM driver | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: Should be "nxp,lpc1857-eeprom" | ||
5 | - reg: Must contain an entry with the physical base address and length | ||
6 | for each entry in reg-names. | ||
7 | - reg-names: Must include the following entries. | ||
8 | - reg: EEPROM registers. | ||
9 | - mem: EEPROM address space. | ||
10 | - clocks: Must contain an entry for each entry in clock-names. | ||
11 | - clock-names: Must include the following entries. | ||
12 | - eeprom: EEPROM operating clock. | ||
13 | - resets: Should contain a reference to the reset controller asserting | ||
14 | the EEPROM in reset. | ||
15 | - interrupts: Should contain EEPROM interrupt. | ||
16 | |||
17 | Example: | ||
18 | |||
19 | eeprom: eeprom@4000e000 { | ||
20 | compatible = "nxp,lpc1857-eeprom"; | ||
21 | reg = <0x4000e000 0x1000>, | ||
22 | <0x20040000 0x4000>; | ||
23 | reg-names = "reg", "mem"; | ||
24 | clocks = <&ccu1 CLK_CPU_EEPROM>; | ||
25 | clock-names = "eeprom"; | ||
26 | resets = <&rgu 27>; | ||
27 | interrupts = <4>; | ||
28 | }; | ||
diff --git a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt new file mode 100644 index 000000000000..74cf52908a6c --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt | |||
@@ -0,0 +1,36 @@ | |||
1 | = Mediatek MTK-EFUSE device tree bindings = | ||
2 | |||
3 | This binding is intended to represent MTK-EFUSE which is found in most Mediatek SOCs. | ||
4 | |||
5 | Required properties: | ||
6 | - compatible: should be "mediatek,mt8173-efuse" or "mediatek,efuse" | ||
7 | - reg: Should contain registers location and length | ||
8 | |||
9 | = Data cells = | ||
10 | Are child nodes of MTK-EFUSE, bindings of which as described in | ||
11 | bindings/nvmem/nvmem.txt | ||
12 | |||
13 | Example: | ||
14 | |||
15 | efuse: efuse@10206000 { | ||
16 | compatible = "mediatek,mt8173-efuse"; | ||
17 | reg = <0 0x10206000 0 0x1000>; | ||
18 | #address-cells = <1>; | ||
19 | #size-cells = <1>; | ||
20 | |||
21 | /* Data cells */ | ||
22 | thermal_calibration: calib@528 { | ||
23 | reg = <0x528 0xc>; | ||
24 | }; | ||
25 | }; | ||
26 | |||
27 | = Data consumers = | ||
28 | Are device nodes which consume nvmem data cells. | ||
29 | |||
30 | For example: | ||
31 | |||
32 | thermal { | ||
33 | ... | ||
34 | nvmem-cells = <&thermal_calibration>; | ||
35 | nvmem-cell-names = "calibration"; | ||
36 | }; | ||
diff --git a/Documentation/mic/mic_overview.txt b/Documentation/mic/mic_overview.txt index 73f44fc3e715..074adbdf83a4 100644 --- a/Documentation/mic/mic_overview.txt +++ b/Documentation/mic/mic_overview.txt | |||
@@ -12,10 +12,19 @@ for the X100 devices. | |||
12 | 12 | ||
13 | Since it is a PCIe card, it does not have the ability to host hardware | 13 | Since it is a PCIe card, it does not have the ability to host hardware |
14 | devices for networking, storage and console. We provide these devices | 14 | devices for networking, storage and console. We provide these devices |
15 | on X100 coprocessors thus enabling a self-bootable equivalent environment | 15 | on X100 coprocessors thus enabling a self-bootable equivalent |
16 | for applications. A key benefit of our solution is that it leverages | 16 | environment for applications. A key benefit of our solution is that it |
17 | the standard virtio framework for network, disk and console devices, | 17 | leverages the standard virtio framework for network, disk and console |
18 | though in our case the virtio framework is used across a PCIe bus. | 18 | devices, though in our case the virtio framework is used across a PCIe |
19 | bus. A Virtio Over PCIe (VOP) driver allows creating user space | ||
20 | backends or devices on the host which are used to probe virtio drivers | ||
21 | for these devices on the MIC card. The existing VRINGH infrastructure | ||
22 | in the kernel is used to access virtio rings from the host. The card | ||
23 | VOP driver allows card virtio drivers to communicate with their user | ||
24 | space backends on the host via a device page. Ring 3 apps on the host | ||
25 | can add, remove and configure virtio devices. A thin MIC specific | ||
26 | virtio_config_ops is implemented which is borrowed heavily from | ||
27 | previous similar implementations in lguest and s390. | ||
19 | 28 | ||
20 | MIC PCIe card has a dma controller with 8 channels. These channels are | 29 | MIC PCIe card has a dma controller with 8 channels. These channels are |
21 | shared between the host s/w and the card s/w. 0 to 3 are used by host | 30 | shared between the host s/w and the card s/w. 0 to 3 are used by host |
@@ -38,7 +47,6 @@ single threaded performance for the host compared to MIC, the ability of | |||
38 | the host to initiate DMA's to/from the card using the MIC DMA engine and | 47 | the host to initiate DMA's to/from the card using the MIC DMA engine and |
39 | the fact that the virtio block storage backend can only be on the host. | 48 | the fact that the virtio block storage backend can only be on the host. |
40 | 49 | ||
41 | | | ||
42 | +----------+ | +----------+ | 50 | +----------+ | +----------+ |
43 | | Card OS | | | Host OS | | 51 | | Card OS | | | Host OS | |
44 | +----------+ | +----------+ | 52 | +----------+ | +----------+ |
@@ -47,27 +55,25 @@ the fact that the virtio block storage backend can only be on the host. | |||
47 | | Virtio| |Virtio | |Virtio| | |Virtio | |Virtio | |Virtio | | 55 | | Virtio| |Virtio | |Virtio| | |Virtio | |Virtio | |Virtio | |
48 | | Net | |Console | |Block | | |Net | |Console | |Block | | 56 | | Net | |Console | |Block | | |Net | |Console | |Block | |
49 | | Driver| |Driver | |Driver| | |backend | |backend | |backend | | 57 | | Driver| |Driver | |Driver| | |backend | |backend | |backend | |
50 | +-------+ +--------+ +------+ | +---------+ +--------+ +--------+ | 58 | +---+---+ +---+----+ +--+---+ | +---------+ +----+---+ +--------+ |
51 | | | | | | | | | 59 | | | | | | | | |
52 | | | | |User | | | | 60 | | | | |User | | | |
53 | | | | |------|------------|---------|------- | 61 | | | | |------|------------|--+------|------- |
54 | +-------------------+ |Kernel +--------------------------+ | 62 | +---------+---------+ |Kernel | |
55 | | | | Virtio over PCIe IOCTLs | | 63 | | | | |
56 | | | +--------------------------+ | 64 | +---------+ +---+----+ +------+ | +------+ +------+ +--+---+ +-------+ |
57 | +-----------+ | | | +-----------+ | 65 | |MIC DMA | | VOP | | SCIF | | | SCIF | | COSM | | VOP | |MIC DMA| |
58 | | MIC DMA | | +------+ | +------+ +------+ | | MIC DMA | | 66 | +---+-----+ +---+----+ +--+---+ | +--+---+ +--+---+ +------+ +----+--+ |
59 | | Driver | | | SCIF | | | SCIF | | COSM | | | Driver | | 67 | | | | | | | | |
60 | +-----------+ | +------+ | +------+ +--+---+ | +-----------+ | 68 | +---+-----+ +---+----+ +--+---+ | +--+---+ +--+---+ +------+ +----+--+ |
61 | | | | | | | | | | 69 | |MIC | | VOP | |SCIF | | |SCIF | | COSM | | VOP | | MIC | |
62 | +---------------+ | +------+ | +--+---+ +--+---+ | +----------------+ | 70 | |HW Bus | | HW Bus| |HW Bus| | |HW Bus| | Bus | |HW Bus| |HW Bus | |
63 | |MIC virtual Bus| | |SCIF | | |SCIF | | COSM | | |MIC virtual Bus | | 71 | +---------+ +--------+ +--+---+ | +--+---+ +------+ +------+ +-------+ |
64 | +---------------+ | |HW Bus| | |HW Bus| | Bus | | +----------------+ | 72 | | | | | | | | |
65 | | | +------+ | +--+---+ +------+ | | | 73 | | +-----------+--+ | | | +---------------+ | |
66 | | | | | | | | | | 74 | | |Intel MIC | | | | |Intel MIC | | |
67 | | +-----------+---+ | | | +---------------+ | | 75 | | |Card Driver | | | | |Host Driver | | |
68 | | |Intel MIC | | | | |Intel MIC | | | 76 | +---+--------------+------+ | +----+---------------+-----+ |
69 | +---|Card Driver | | | | |Host Driver | | | ||
70 | +------------+--------+ | +----+---------------+-----+ | ||
71 | | | | | 77 | | | | |
72 | +-------------------------------------------------------------+ | 78 | +-------------------------------------------------------------+ |
73 | | | | 79 | | | |
diff --git a/Documentation/mic/mpssd/mpss b/Documentation/mic/mpssd/mpss index 09ea90931649..5fcf9fa4b082 100755 --- a/Documentation/mic/mpssd/mpss +++ b/Documentation/mic/mpssd/mpss | |||
@@ -35,7 +35,7 @@ | |||
35 | 35 | ||
36 | exec=/usr/sbin/mpssd | 36 | exec=/usr/sbin/mpssd |
37 | sysfs="/sys/class/mic" | 37 | sysfs="/sys/class/mic" |
38 | mic_modules="mic_host mic_x100_dma scif" | 38 | mic_modules="mic_host mic_x100_dma scif vop" |
39 | 39 | ||
40 | start() | 40 | start() |
41 | { | 41 | { |
diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c index 7ce1e53568df..30fb842a976d 100644 --- a/Documentation/mic/mpssd/mpssd.c +++ b/Documentation/mic/mpssd/mpssd.c | |||
@@ -926,7 +926,7 @@ add_virtio_device(struct mic_info *mic, struct mic_device_desc *dd) | |||
926 | char path[PATH_MAX]; | 926 | char path[PATH_MAX]; |
927 | int fd, err; | 927 | int fd, err; |
928 | 928 | ||
929 | snprintf(path, PATH_MAX, "/dev/mic%d", mic->id); | 929 | snprintf(path, PATH_MAX, "/dev/vop_virtio%d", mic->id); |
930 | fd = open(path, O_RDWR); | 930 | fd = open(path, O_RDWR); |
931 | if (fd < 0) { | 931 | if (fd < 0) { |
932 | mpsslog("Could not open %s %s\n", path, strerror(errno)); | 932 | mpsslog("Could not open %s %s\n", path, strerror(errno)); |
diff --git a/Documentation/misc-devices/mei/mei.txt b/Documentation/misc-devices/mei/mei.txt index 91c1fa34f48b..2b80a0cd621f 100644 --- a/Documentation/misc-devices/mei/mei.txt +++ b/Documentation/misc-devices/mei/mei.txt | |||
@@ -231,15 +231,15 @@ IT knows when a platform crashes even when there is a hard failure on the host. | |||
231 | The Intel AMT Watchdog is composed of two parts: | 231 | The Intel AMT Watchdog is composed of two parts: |
232 | 1) Firmware feature - receives the heartbeats | 232 | 1) Firmware feature - receives the heartbeats |
233 | and sends an event when the heartbeats stop. | 233 | and sends an event when the heartbeats stop. |
234 | 2) Intel MEI driver - connects to the watchdog feature, configures the | 234 | 2) Intel MEI iAMT watchdog driver - connects to the watchdog feature, |
235 | watchdog and sends the heartbeats. | 235 | configures the watchdog and sends the heartbeats. |
236 | 236 | ||
237 | The Intel MEI driver uses the kernel watchdog API to configure the Intel AMT | 237 | The Intel iAMT watchdog MEI driver uses the kernel watchdog API to configure |
238 | Watchdog and to send heartbeats to it. The default timeout of the | 238 | the Intel AMT Watchdog and to send heartbeats to it. The default timeout of the |
239 | watchdog is 120 seconds. | 239 | watchdog is 120 seconds. |
240 | 240 | ||
241 | If the Intel AMT Watchdog feature does not exist (i.e. the connection failed), | 241 | If the Intel AMT is not enabled in the firmware then the watchdog client won't enumerate |
242 | the Intel MEI driver will disable the sending of heartbeats. | 242 | on the me client bus and watchdog devices won't be exposed. |
243 | 243 | ||
244 | 244 | ||
245 | Supported Chipsets | 245 | Supported Chipsets |
diff --git a/MAINTAINERS b/MAINTAINERS index f3c688f87abb..543dd219de80 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -5765,6 +5765,7 @@ S: Supported | |||
5765 | F: include/uapi/linux/mei.h | 5765 | F: include/uapi/linux/mei.h |
5766 | F: include/linux/mei_cl_bus.h | 5766 | F: include/linux/mei_cl_bus.h |
5767 | F: drivers/misc/mei/* | 5767 | F: drivers/misc/mei/* |
5768 | F: drivers/watchdog/mei_wdt.c | ||
5768 | F: Documentation/misc-devices/mei/* | 5769 | F: Documentation/misc-devices/mei/* |
5769 | 5770 | ||
5770 | INTEL MIC DRIVERS (mic) | 5771 | INTEL MIC DRIVERS (mic) |
@@ -6598,6 +6599,11 @@ F: samples/livepatch/ | |||
6598 | L: live-patching@vger.kernel.org | 6599 | L: live-patching@vger.kernel.org |
6599 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git | 6600 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git |
6600 | 6601 | ||
6602 | LINUX KERNEL DUMP TEST MODULE (LKDTM) | ||
6603 | M: Kees Cook <keescook@chromium.org> | ||
6604 | S: Maintained | ||
6605 | F: drivers/misc/lkdtm.c | ||
6606 | |||
6601 | LLC (802.2) | 6607 | LLC (802.2) |
6602 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 6608 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> |
6603 | S: Maintained | 6609 | S: Maintained |
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index a0986c65be0c..592e65c3a4e0 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts | |||
@@ -562,8 +562,7 @@ | |||
562 | extcon_usb2: tps659038_usb { | 562 | extcon_usb2: tps659038_usb { |
563 | compatible = "ti,palmas-usb-vid"; | 563 | compatible = "ti,palmas-usb-vid"; |
564 | ti,enable-vbus-detection; | 564 | ti,enable-vbus-detection; |
565 | ti,enable-id-detection; | 565 | vbus-gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>; |
566 | id-gpios = <&gpio7 24 GPIO_ACTIVE_HIGH>; | ||
567 | }; | 566 | }; |
568 | 567 | ||
569 | }; | 568 | }; |
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index de1316bf643a..62ebac51bab9 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c | |||
@@ -115,13 +115,14 @@ static void mityomapl138_cpufreq_init(const char *partnum) | |||
115 | static void mityomapl138_cpufreq_init(const char *partnum) { } | 115 | static void mityomapl138_cpufreq_init(const char *partnum) { } |
116 | #endif | 116 | #endif |
117 | 117 | ||
118 | static void read_factory_config(struct memory_accessor *a, void *context) | 118 | static void read_factory_config(struct nvmem_device *nvmem, void *context) |
119 | { | 119 | { |
120 | int ret; | 120 | int ret; |
121 | const char *partnum = NULL; | 121 | const char *partnum = NULL; |
122 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 122 | struct davinci_soc_info *soc_info = &davinci_soc_info; |
123 | 123 | ||
124 | ret = a->read(a, (char *)&factory_config, 0, sizeof(factory_config)); | 124 | ret = nvmem_device_read(nvmem, 0, sizeof(factory_config), |
125 | &factory_config); | ||
125 | if (ret != sizeof(struct factory_config)) { | 126 | if (ret != sizeof(struct factory_config)) { |
126 | pr_warn("Read Factory Config Failed: %d\n", ret); | 127 | pr_warn("Read Factory Config Failed: %d\n", ret); |
127 | goto bad_config; | 128 | goto bad_config; |
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c index a794f6d9d444..f55ef2ef2f92 100644 --- a/arch/arm/mach-davinci/common.c +++ b/arch/arm/mach-davinci/common.c | |||
@@ -28,13 +28,13 @@ EXPORT_SYMBOL(davinci_soc_info); | |||
28 | void __iomem *davinci_intc_base; | 28 | void __iomem *davinci_intc_base; |
29 | int davinci_intc_type; | 29 | int davinci_intc_type; |
30 | 30 | ||
31 | void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context) | 31 | void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context) |
32 | { | 32 | { |
33 | char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; | 33 | char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; |
34 | off_t offset = (off_t)context; | 34 | off_t offset = (off_t)context; |
35 | 35 | ||
36 | /* Read MAC addr from EEPROM */ | 36 | /* Read MAC addr from EEPROM */ |
37 | if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN) | 37 | if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) |
38 | pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); | 38 | pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); |
39 | } | 39 | } |
40 | 40 | ||
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 7d00b7a015ea..57f52a2afa35 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -1321,6 +1321,7 @@ static void binder_transaction(struct binder_proc *proc, | |||
1321 | struct binder_transaction *t; | 1321 | struct binder_transaction *t; |
1322 | struct binder_work *tcomplete; | 1322 | struct binder_work *tcomplete; |
1323 | binder_size_t *offp, *off_end; | 1323 | binder_size_t *offp, *off_end; |
1324 | binder_size_t off_min; | ||
1324 | struct binder_proc *target_proc; | 1325 | struct binder_proc *target_proc; |
1325 | struct binder_thread *target_thread = NULL; | 1326 | struct binder_thread *target_thread = NULL; |
1326 | struct binder_node *target_node = NULL; | 1327 | struct binder_node *target_node = NULL; |
@@ -1522,18 +1523,24 @@ static void binder_transaction(struct binder_proc *proc, | |||
1522 | goto err_bad_offset; | 1523 | goto err_bad_offset; |
1523 | } | 1524 | } |
1524 | off_end = (void *)offp + tr->offsets_size; | 1525 | off_end = (void *)offp + tr->offsets_size; |
1526 | off_min = 0; | ||
1525 | for (; offp < off_end; offp++) { | 1527 | for (; offp < off_end; offp++) { |
1526 | struct flat_binder_object *fp; | 1528 | struct flat_binder_object *fp; |
1527 | 1529 | ||
1528 | if (*offp > t->buffer->data_size - sizeof(*fp) || | 1530 | if (*offp > t->buffer->data_size - sizeof(*fp) || |
1531 | *offp < off_min || | ||
1529 | t->buffer->data_size < sizeof(*fp) || | 1532 | t->buffer->data_size < sizeof(*fp) || |
1530 | !IS_ALIGNED(*offp, sizeof(u32))) { | 1533 | !IS_ALIGNED(*offp, sizeof(u32))) { |
1531 | binder_user_error("%d:%d got transaction with invalid offset, %lld\n", | 1534 | binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n", |
1532 | proc->pid, thread->pid, (u64)*offp); | 1535 | proc->pid, thread->pid, (u64)*offp, |
1536 | (u64)off_min, | ||
1537 | (u64)(t->buffer->data_size - | ||
1538 | sizeof(*fp))); | ||
1533 | return_error = BR_FAILED_REPLY; | 1539 | return_error = BR_FAILED_REPLY; |
1534 | goto err_bad_offset; | 1540 | goto err_bad_offset; |
1535 | } | 1541 | } |
1536 | fp = (struct flat_binder_object *)(t->buffer->data + *offp); | 1542 | fp = (struct flat_binder_object *)(t->buffer->data + *offp); |
1543 | off_min = *offp + sizeof(struct flat_binder_object); | ||
1537 | switch (fp->type) { | 1544 | switch (fp->type) { |
1538 | case BINDER_TYPE_BINDER: | 1545 | case BINDER_TYPE_BINDER: |
1539 | case BINDER_TYPE_WEAK_BINDER: { | 1546 | case BINDER_TYPE_WEAK_BINDER: { |
@@ -3593,13 +3600,24 @@ static int binder_transactions_show(struct seq_file *m, void *unused) | |||
3593 | 3600 | ||
3594 | static int binder_proc_show(struct seq_file *m, void *unused) | 3601 | static int binder_proc_show(struct seq_file *m, void *unused) |
3595 | { | 3602 | { |
3603 | struct binder_proc *itr; | ||
3596 | struct binder_proc *proc = m->private; | 3604 | struct binder_proc *proc = m->private; |
3597 | int do_lock = !binder_debug_no_lock; | 3605 | int do_lock = !binder_debug_no_lock; |
3606 | bool valid_proc = false; | ||
3598 | 3607 | ||
3599 | if (do_lock) | 3608 | if (do_lock) |
3600 | binder_lock(__func__); | 3609 | binder_lock(__func__); |
3601 | seq_puts(m, "binder proc state:\n"); | 3610 | |
3602 | print_binder_proc(m, proc, 1); | 3611 | hlist_for_each_entry(itr, &binder_procs, proc_node) { |
3612 | if (itr == proc) { | ||
3613 | valid_proc = true; | ||
3614 | break; | ||
3615 | } | ||
3616 | } | ||
3617 | if (valid_proc) { | ||
3618 | seq_puts(m, "binder proc state:\n"); | ||
3619 | print_binder_proc(m, proc, 1); | ||
3620 | } | ||
3603 | if (do_lock) | 3621 | if (do_lock) |
3604 | binder_unlock(__func__); | 3622 | binder_unlock(__func__); |
3605 | return 0; | 3623 | return 0; |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index f3f7215ad378..773fc3099769 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -258,7 +258,7 @@ static void __fw_free_buf(struct kref *ref) | |||
258 | vunmap(buf->data); | 258 | vunmap(buf->data); |
259 | for (i = 0; i < buf->nr_pages; i++) | 259 | for (i = 0; i < buf->nr_pages; i++) |
260 | __free_page(buf->pages[i]); | 260 | __free_page(buf->pages[i]); |
261 | kfree(buf->pages); | 261 | vfree(buf->pages); |
262 | } else | 262 | } else |
263 | #endif | 263 | #endif |
264 | vfree(buf->data); | 264 | vfree(buf->data); |
@@ -635,7 +635,7 @@ static ssize_t firmware_loading_store(struct device *dev, | |||
635 | if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) { | 635 | if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) { |
636 | for (i = 0; i < fw_buf->nr_pages; i++) | 636 | for (i = 0; i < fw_buf->nr_pages; i++) |
637 | __free_page(fw_buf->pages[i]); | 637 | __free_page(fw_buf->pages[i]); |
638 | kfree(fw_buf->pages); | 638 | vfree(fw_buf->pages); |
639 | fw_buf->pages = NULL; | 639 | fw_buf->pages = NULL; |
640 | fw_buf->page_array_size = 0; | 640 | fw_buf->page_array_size = 0; |
641 | fw_buf->nr_pages = 0; | 641 | fw_buf->nr_pages = 0; |
@@ -746,8 +746,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) | |||
746 | buf->page_array_size * 2); | 746 | buf->page_array_size * 2); |
747 | struct page **new_pages; | 747 | struct page **new_pages; |
748 | 748 | ||
749 | new_pages = kmalloc(new_array_size * sizeof(void *), | 749 | new_pages = vmalloc(new_array_size * sizeof(void *)); |
750 | GFP_KERNEL); | ||
751 | if (!new_pages) { | 750 | if (!new_pages) { |
752 | fw_load_abort(fw_priv); | 751 | fw_load_abort(fw_priv); |
753 | return -ENOMEM; | 752 | return -ENOMEM; |
@@ -756,7 +755,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) | |||
756 | buf->page_array_size * sizeof(void *)); | 755 | buf->page_array_size * sizeof(void *)); |
757 | memset(&new_pages[buf->page_array_size], 0, sizeof(void *) * | 756 | memset(&new_pages[buf->page_array_size], 0, sizeof(void *) * |
758 | (new_array_size - buf->page_array_size)); | 757 | (new_array_size - buf->page_array_size)); |
759 | kfree(buf->pages); | 758 | vfree(buf->pages); |
760 | buf->pages = new_pages; | 759 | buf->pages = new_pages; |
761 | buf->page_array_size = new_array_size; | 760 | buf->page_array_size = new_array_size; |
762 | } | 761 | } |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index a043107da2af..3ec0766ed5e9 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -328,7 +328,8 @@ config JS_RTC | |||
328 | 328 | ||
329 | config GEN_RTC | 329 | config GEN_RTC |
330 | tristate "Generic /dev/rtc emulation" | 330 | tristate "Generic /dev/rtc emulation" |
331 | depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML | 331 | depends on RTC!=y |
332 | depends on ALPHA || M68K || MN10300 || PARISC || PPC || X86 | ||
332 | ---help--- | 333 | ---help--- |
333 | If you say Y here and create a character special file /dev/rtc with | 334 | If you say Y here and create a character special file /dev/rtc with |
334 | major number 10 and minor number 135 using mknod ("man mknod"), you | 335 | major number 10 and minor number 135 using mknod ("man mknod"), you |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 4f6f94c43412..71025c2f6bbb 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -695,7 +695,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) | |||
695 | offset += file->f_pos; | 695 | offset += file->f_pos; |
696 | case SEEK_SET: | 696 | case SEEK_SET: |
697 | /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ | 697 | /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ |
698 | if (IS_ERR_VALUE((unsigned long long)offset)) { | 698 | if ((unsigned long long)offset >= -MAX_ERRNO) { |
699 | ret = -EOVERFLOW; | 699 | ret = -EOVERFLOW; |
700 | break; | 700 | break; |
701 | } | 701 | } |
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 01292328a456..678fa97e41fb 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c | |||
@@ -496,12 +496,12 @@ static void pc_set_checksum(void) | |||
496 | 496 | ||
497 | #ifdef CONFIG_PROC_FS | 497 | #ifdef CONFIG_PROC_FS |
498 | 498 | ||
499 | static char *floppy_types[] = { | 499 | static const char * const floppy_types[] = { |
500 | "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", | 500 | "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", |
501 | "3.5'' 2.88M", "3.5'' 2.88M" | 501 | "3.5'' 2.88M", "3.5'' 2.88M" |
502 | }; | 502 | }; |
503 | 503 | ||
504 | static char *gfx_types[] = { | 504 | static const char * const gfx_types[] = { |
505 | "EGA, VGA, ... (with BIOS)", | 505 | "EGA, VGA, ... (with BIOS)", |
506 | "CGA (40 cols)", | 506 | "CGA (40 cols)", |
507 | "CGA (80 cols)", | 507 | "CGA (80 cols)", |
@@ -602,7 +602,7 @@ static void atari_set_checksum(void) | |||
602 | 602 | ||
603 | static struct { | 603 | static struct { |
604 | unsigned char val; | 604 | unsigned char val; |
605 | char *name; | 605 | const char *name; |
606 | } boot_prefs[] = { | 606 | } boot_prefs[] = { |
607 | { 0x80, "TOS" }, | 607 | { 0x80, "TOS" }, |
608 | { 0x40, "ASV" }, | 608 | { 0x40, "ASV" }, |
@@ -611,7 +611,7 @@ static struct { | |||
611 | { 0x00, "unspecified" } | 611 | { 0x00, "unspecified" } |
612 | }; | 612 | }; |
613 | 613 | ||
614 | static char *languages[] = { | 614 | static const char * const languages[] = { |
615 | "English (US)", | 615 | "English (US)", |
616 | "German", | 616 | "German", |
617 | "French", | 617 | "French", |
@@ -623,7 +623,7 @@ static char *languages[] = { | |||
623 | "Swiss (German)" | 623 | "Swiss (German)" |
624 | }; | 624 | }; |
625 | 625 | ||
626 | static char *dateformat[] = { | 626 | static const char * const dateformat[] = { |
627 | "MM%cDD%cYY", | 627 | "MM%cDD%cYY", |
628 | "DD%cMM%cYY", | 628 | "DD%cMM%cYY", |
629 | "YY%cMM%cDD", | 629 | "YY%cMM%cDD", |
@@ -634,7 +634,7 @@ static char *dateformat[] = { | |||
634 | "7 (undefined)" | 634 | "7 (undefined)" |
635 | }; | 635 | }; |
636 | 636 | ||
637 | static char *colors[] = { | 637 | static const char * const colors[] = { |
638 | "2", "4", "16", "256", "65536", "??", "??", "??" | 638 | "2", "4", "16", "256", "65536", "??", "??", "??" |
639 | }; | 639 | }; |
640 | 640 | ||
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index 76c490fa0511..0e184426db98 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c | |||
@@ -129,10 +129,9 @@ static void button_consume_callbacks (int bpcount) | |||
129 | 129 | ||
130 | static void button_sequence_finished (unsigned long parameters) | 130 | static void button_sequence_finished (unsigned long parameters) |
131 | { | 131 | { |
132 | #ifdef CONFIG_NWBUTTON_REBOOT /* Reboot using button is enabled */ | 132 | if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) && |
133 | if (button_press_count == reboot_count) | 133 | button_press_count == reboot_count) |
134 | kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */ | 134 | kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */ |
135 | #endif /* CONFIG_NWBUTTON_REBOOT */ | ||
136 | button_consume_callbacks (button_press_count); | 135 | button_consume_callbacks (button_press_count); |
137 | bcount = sprintf (button_output_buffer, "%d\n", button_press_count); | 136 | bcount = sprintf (button_output_buffer, "%d\n", button_press_count); |
138 | button_press_count = 0; /* Reset the button press counter */ | 137 | button_press_count = 0; /* Reset the button press counter */ |
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index ae0b42b66e55..d23368874710 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c | |||
@@ -69,12 +69,13 @@ | |||
69 | #include <linux/ppdev.h> | 69 | #include <linux/ppdev.h> |
70 | #include <linux/mutex.h> | 70 | #include <linux/mutex.h> |
71 | #include <linux/uaccess.h> | 71 | #include <linux/uaccess.h> |
72 | #include <linux/compat.h> | ||
72 | 73 | ||
73 | #define PP_VERSION "ppdev: user-space parallel port driver" | 74 | #define PP_VERSION "ppdev: user-space parallel port driver" |
74 | #define CHRDEV "ppdev" | 75 | #define CHRDEV "ppdev" |
75 | 76 | ||
76 | struct pp_struct { | 77 | struct pp_struct { |
77 | struct pardevice * pdev; | 78 | struct pardevice *pdev; |
78 | wait_queue_head_t irq_wait; | 79 | wait_queue_head_t irq_wait; |
79 | atomic_t irqc; | 80 | atomic_t irqc; |
80 | unsigned int flags; | 81 | unsigned int flags; |
@@ -98,18 +99,26 @@ struct pp_struct { | |||
98 | #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) | 99 | #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) |
99 | 100 | ||
100 | static DEFINE_MUTEX(pp_do_mutex); | 101 | static DEFINE_MUTEX(pp_do_mutex); |
101 | static inline void pp_enable_irq (struct pp_struct *pp) | 102 | |
103 | /* define fixed sized ioctl cmd for y2038 migration */ | ||
104 | #define PPGETTIME32 _IOR(PP_IOCTL, 0x95, s32[2]) | ||
105 | #define PPSETTIME32 _IOW(PP_IOCTL, 0x96, s32[2]) | ||
106 | #define PPGETTIME64 _IOR(PP_IOCTL, 0x95, s64[2]) | ||
107 | #define PPSETTIME64 _IOW(PP_IOCTL, 0x96, s64[2]) | ||
108 | |||
109 | static inline void pp_enable_irq(struct pp_struct *pp) | ||
102 | { | 110 | { |
103 | struct parport *port = pp->pdev->port; | 111 | struct parport *port = pp->pdev->port; |
104 | port->ops->enable_irq (port); | 112 | |
113 | port->ops->enable_irq(port); | ||
105 | } | 114 | } |
106 | 115 | ||
107 | static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | 116 | static ssize_t pp_read(struct file *file, char __user *buf, size_t count, |
108 | loff_t * ppos) | 117 | loff_t *ppos) |
109 | { | 118 | { |
110 | unsigned int minor = iminor(file_inode(file)); | 119 | unsigned int minor = iminor(file_inode(file)); |
111 | struct pp_struct *pp = file->private_data; | 120 | struct pp_struct *pp = file->private_data; |
112 | char * kbuffer; | 121 | char *kbuffer; |
113 | ssize_t bytes_read = 0; | 122 | ssize_t bytes_read = 0; |
114 | struct parport *pport; | 123 | struct parport *pport; |
115 | int mode; | 124 | int mode; |
@@ -125,16 +134,15 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | |||
125 | return 0; | 134 | return 0; |
126 | 135 | ||
127 | kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); | 136 | kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); |
128 | if (!kbuffer) { | 137 | if (!kbuffer) |
129 | return -ENOMEM; | 138 | return -ENOMEM; |
130 | } | ||
131 | pport = pp->pdev->port; | 139 | pport = pp->pdev->port; |
132 | mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); | 140 | mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); |
133 | 141 | ||
134 | parport_set_timeout (pp->pdev, | 142 | parport_set_timeout(pp->pdev, |
135 | (file->f_flags & O_NONBLOCK) ? | 143 | (file->f_flags & O_NONBLOCK) ? |
136 | PARPORT_INACTIVITY_O_NONBLOCK : | 144 | PARPORT_INACTIVITY_O_NONBLOCK : |
137 | pp->default_inactivity); | 145 | pp->default_inactivity); |
138 | 146 | ||
139 | while (bytes_read == 0) { | 147 | while (bytes_read == 0) { |
140 | ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); | 148 | ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); |
@@ -144,20 +152,17 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | |||
144 | int flags = 0; | 152 | int flags = 0; |
145 | size_t (*fn)(struct parport *, void *, size_t, int); | 153 | size_t (*fn)(struct parport *, void *, size_t, int); |
146 | 154 | ||
147 | if (pp->flags & PP_W91284PIC) { | 155 | if (pp->flags & PP_W91284PIC) |
148 | flags |= PARPORT_W91284PIC; | 156 | flags |= PARPORT_W91284PIC; |
149 | } | 157 | if (pp->flags & PP_FASTREAD) |
150 | if (pp->flags & PP_FASTREAD) { | ||
151 | flags |= PARPORT_EPP_FAST; | 158 | flags |= PARPORT_EPP_FAST; |
152 | } | 159 | if (pport->ieee1284.mode & IEEE1284_ADDR) |
153 | if (pport->ieee1284.mode & IEEE1284_ADDR) { | ||
154 | fn = pport->ops->epp_read_addr; | 160 | fn = pport->ops->epp_read_addr; |
155 | } else { | 161 | else |
156 | fn = pport->ops->epp_read_data; | 162 | fn = pport->ops->epp_read_data; |
157 | } | ||
158 | bytes_read = (*fn)(pport, kbuffer, need, flags); | 163 | bytes_read = (*fn)(pport, kbuffer, need, flags); |
159 | } else { | 164 | } else { |
160 | bytes_read = parport_read (pport, kbuffer, need); | 165 | bytes_read = parport_read(pport, kbuffer, need); |
161 | } | 166 | } |
162 | 167 | ||
163 | if (bytes_read != 0) | 168 | if (bytes_read != 0) |
@@ -168,7 +173,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | |||
168 | break; | 173 | break; |
169 | } | 174 | } |
170 | 175 | ||
171 | if (signal_pending (current)) { | 176 | if (signal_pending(current)) { |
172 | bytes_read = -ERESTARTSYS; | 177 | bytes_read = -ERESTARTSYS; |
173 | break; | 178 | break; |
174 | } | 179 | } |
@@ -176,22 +181,22 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | |||
176 | cond_resched(); | 181 | cond_resched(); |
177 | } | 182 | } |
178 | 183 | ||
179 | parport_set_timeout (pp->pdev, pp->default_inactivity); | 184 | parport_set_timeout(pp->pdev, pp->default_inactivity); |
180 | 185 | ||
181 | if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read)) | 186 | if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read)) |
182 | bytes_read = -EFAULT; | 187 | bytes_read = -EFAULT; |
183 | 188 | ||
184 | kfree (kbuffer); | 189 | kfree(kbuffer); |
185 | pp_enable_irq (pp); | 190 | pp_enable_irq(pp); |
186 | return bytes_read; | 191 | return bytes_read; |
187 | } | 192 | } |
188 | 193 | ||
189 | static ssize_t pp_write (struct file * file, const char __user * buf, | 194 | static ssize_t pp_write(struct file *file, const char __user *buf, |
190 | size_t count, loff_t * ppos) | 195 | size_t count, loff_t *ppos) |
191 | { | 196 | { |
192 | unsigned int minor = iminor(file_inode(file)); | 197 | unsigned int minor = iminor(file_inode(file)); |
193 | struct pp_struct *pp = file->private_data; | 198 | struct pp_struct *pp = file->private_data; |
194 | char * kbuffer; | 199 | char *kbuffer; |
195 | ssize_t bytes_written = 0; | 200 | ssize_t bytes_written = 0; |
196 | ssize_t wrote; | 201 | ssize_t wrote; |
197 | int mode; | 202 | int mode; |
@@ -204,21 +209,21 @@ static ssize_t pp_write (struct file * file, const char __user * buf, | |||
204 | } | 209 | } |
205 | 210 | ||
206 | kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); | 211 | kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); |
207 | if (!kbuffer) { | 212 | if (!kbuffer) |
208 | return -ENOMEM; | 213 | return -ENOMEM; |
209 | } | 214 | |
210 | pport = pp->pdev->port; | 215 | pport = pp->pdev->port; |
211 | mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); | 216 | mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); |
212 | 217 | ||
213 | parport_set_timeout (pp->pdev, | 218 | parport_set_timeout(pp->pdev, |
214 | (file->f_flags & O_NONBLOCK) ? | 219 | (file->f_flags & O_NONBLOCK) ? |
215 | PARPORT_INACTIVITY_O_NONBLOCK : | 220 | PARPORT_INACTIVITY_O_NONBLOCK : |
216 | pp->default_inactivity); | 221 | pp->default_inactivity); |
217 | 222 | ||
218 | while (bytes_written < count) { | 223 | while (bytes_written < count) { |
219 | ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); | 224 | ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); |
220 | 225 | ||
221 | if (copy_from_user (kbuffer, buf + bytes_written, n)) { | 226 | if (copy_from_user(kbuffer, buf + bytes_written, n)) { |
222 | bytes_written = -EFAULT; | 227 | bytes_written = -EFAULT; |
223 | break; | 228 | break; |
224 | } | 229 | } |
@@ -226,20 +231,19 @@ static ssize_t pp_write (struct file * file, const char __user * buf, | |||
226 | if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { | 231 | if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { |
227 | /* do a fast EPP write */ | 232 | /* do a fast EPP write */ |
228 | if (pport->ieee1284.mode & IEEE1284_ADDR) { | 233 | if (pport->ieee1284.mode & IEEE1284_ADDR) { |
229 | wrote = pport->ops->epp_write_addr (pport, | 234 | wrote = pport->ops->epp_write_addr(pport, |
230 | kbuffer, n, PARPORT_EPP_FAST); | 235 | kbuffer, n, PARPORT_EPP_FAST); |
231 | } else { | 236 | } else { |
232 | wrote = pport->ops->epp_write_data (pport, | 237 | wrote = pport->ops->epp_write_data(pport, |
233 | kbuffer, n, PARPORT_EPP_FAST); | 238 | kbuffer, n, PARPORT_EPP_FAST); |
234 | } | 239 | } |
235 | } else { | 240 | } else { |
236 | wrote = parport_write (pp->pdev->port, kbuffer, n); | 241 | wrote = parport_write(pp->pdev->port, kbuffer, n); |
237 | } | 242 | } |
238 | 243 | ||
239 | if (wrote <= 0) { | 244 | if (wrote <= 0) { |
240 | if (!bytes_written) { | 245 | if (!bytes_written) |
241 | bytes_written = wrote; | 246 | bytes_written = wrote; |
242 | } | ||
243 | break; | 247 | break; |
244 | } | 248 | } |
245 | 249 | ||
@@ -251,67 +255,69 @@ static ssize_t pp_write (struct file * file, const char __user * buf, | |||
251 | break; | 255 | break; |
252 | } | 256 | } |
253 | 257 | ||
254 | if (signal_pending (current)) | 258 | if (signal_pending(current)) |
255 | break; | 259 | break; |
256 | 260 | ||
257 | cond_resched(); | 261 | cond_resched(); |
258 | } | 262 | } |
259 | 263 | ||
260 | parport_set_timeout (pp->pdev, pp->default_inactivity); | 264 | parport_set_timeout(pp->pdev, pp->default_inactivity); |
261 | 265 | ||
262 | kfree (kbuffer); | 266 | kfree(kbuffer); |
263 | pp_enable_irq (pp); | 267 | pp_enable_irq(pp); |
264 | return bytes_written; | 268 | return bytes_written; |
265 | } | 269 | } |
266 | 270 | ||
267 | static void pp_irq (void *private) | 271 | static void pp_irq(void *private) |
268 | { | 272 | { |
269 | struct pp_struct *pp = private; | 273 | struct pp_struct *pp = private; |
270 | 274 | ||
271 | if (pp->irqresponse) { | 275 | if (pp->irqresponse) { |
272 | parport_write_control (pp->pdev->port, pp->irqctl); | 276 | parport_write_control(pp->pdev->port, pp->irqctl); |
273 | pp->irqresponse = 0; | 277 | pp->irqresponse = 0; |
274 | } | 278 | } |
275 | 279 | ||
276 | atomic_inc (&pp->irqc); | 280 | atomic_inc(&pp->irqc); |
277 | wake_up_interruptible (&pp->irq_wait); | 281 | wake_up_interruptible(&pp->irq_wait); |
278 | } | 282 | } |
279 | 283 | ||
280 | static int register_device (int minor, struct pp_struct *pp) | 284 | static int register_device(int minor, struct pp_struct *pp) |
281 | { | 285 | { |
282 | struct parport *port; | 286 | struct parport *port; |
283 | struct pardevice * pdev = NULL; | 287 | struct pardevice *pdev = NULL; |
284 | char *name; | 288 | char *name; |
285 | int fl; | 289 | struct pardev_cb ppdev_cb; |
286 | 290 | ||
287 | name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); | 291 | name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); |
288 | if (name == NULL) | 292 | if (name == NULL) |
289 | return -ENOMEM; | 293 | return -ENOMEM; |
290 | 294 | ||
291 | port = parport_find_number (minor); | 295 | port = parport_find_number(minor); |
292 | if (!port) { | 296 | if (!port) { |
293 | printk (KERN_WARNING "%s: no associated port!\n", name); | 297 | printk(KERN_WARNING "%s: no associated port!\n", name); |
294 | kfree (name); | 298 | kfree(name); |
295 | return -ENXIO; | 299 | return -ENXIO; |
296 | } | 300 | } |
297 | 301 | ||
298 | fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; | 302 | memset(&ppdev_cb, 0, sizeof(ppdev_cb)); |
299 | pdev = parport_register_device (port, name, NULL, | 303 | ppdev_cb.irq_func = pp_irq; |
300 | NULL, pp_irq, fl, pp); | 304 | ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; |
301 | parport_put_port (port); | 305 | ppdev_cb.private = pp; |
306 | pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); | ||
307 | parport_put_port(port); | ||
302 | 308 | ||
303 | if (!pdev) { | 309 | if (!pdev) { |
304 | printk (KERN_WARNING "%s: failed to register device!\n", name); | 310 | printk(KERN_WARNING "%s: failed to register device!\n", name); |
305 | kfree (name); | 311 | kfree(name); |
306 | return -ENXIO; | 312 | return -ENXIO; |
307 | } | 313 | } |
308 | 314 | ||
309 | pp->pdev = pdev; | 315 | pp->pdev = pdev; |
310 | pr_debug("%s: registered pardevice\n", name); | 316 | dev_dbg(&pdev->dev, "registered pardevice\n"); |
311 | return 0; | 317 | return 0; |
312 | } | 318 | } |
313 | 319 | ||
314 | static enum ieee1284_phase init_phase (int mode) | 320 | static enum ieee1284_phase init_phase(int mode) |
315 | { | 321 | { |
316 | switch (mode & ~(IEEE1284_DEVICEID | 322 | switch (mode & ~(IEEE1284_DEVICEID |
317 | | IEEE1284_ADDR)) { | 323 | | IEEE1284_ADDR)) { |
@@ -322,11 +328,27 @@ static enum ieee1284_phase init_phase (int mode) | |||
322 | return IEEE1284_PH_FWD_IDLE; | 328 | return IEEE1284_PH_FWD_IDLE; |
323 | } | 329 | } |
324 | 330 | ||
331 | static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec) | ||
332 | { | ||
333 | long to_jiffies; | ||
334 | |||
335 | if ((tv_sec < 0) || (tv_usec < 0)) | ||
336 | return -EINVAL; | ||
337 | |||
338 | to_jiffies = usecs_to_jiffies(tv_usec); | ||
339 | to_jiffies += tv_sec * HZ; | ||
340 | if (to_jiffies <= 0) | ||
341 | return -EINVAL; | ||
342 | |||
343 | pdev->timeout = to_jiffies; | ||
344 | return 0; | ||
345 | } | ||
346 | |||
325 | static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 347 | static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
326 | { | 348 | { |
327 | unsigned int minor = iminor(file_inode(file)); | 349 | unsigned int minor = iminor(file_inode(file)); |
328 | struct pp_struct *pp = file->private_data; | 350 | struct pp_struct *pp = file->private_data; |
329 | struct parport * port; | 351 | struct parport *port; |
330 | void __user *argp = (void __user *)arg; | 352 | void __user *argp = (void __user *)arg; |
331 | 353 | ||
332 | /* First handle the cases that don't take arguments. */ | 354 | /* First handle the cases that don't take arguments. */ |
@@ -337,19 +359,19 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
337 | int ret; | 359 | int ret; |
338 | 360 | ||
339 | if (pp->flags & PP_CLAIMED) { | 361 | if (pp->flags & PP_CLAIMED) { |
340 | pr_debug(CHRDEV "%x: you've already got it!\n", minor); | 362 | dev_dbg(&pp->pdev->dev, "you've already got it!\n"); |
341 | return -EINVAL; | 363 | return -EINVAL; |
342 | } | 364 | } |
343 | 365 | ||
344 | /* Deferred device registration. */ | 366 | /* Deferred device registration. */ |
345 | if (!pp->pdev) { | 367 | if (!pp->pdev) { |
346 | int err = register_device (minor, pp); | 368 | int err = register_device(minor, pp); |
347 | if (err) { | 369 | |
370 | if (err) | ||
348 | return err; | 371 | return err; |
349 | } | ||
350 | } | 372 | } |
351 | 373 | ||
352 | ret = parport_claim_or_block (pp->pdev); | 374 | ret = parport_claim_or_block(pp->pdev); |
353 | if (ret < 0) | 375 | if (ret < 0) |
354 | return ret; | 376 | return ret; |
355 | 377 | ||
@@ -357,7 +379,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
357 | 379 | ||
358 | /* For interrupt-reporting to work, we need to be | 380 | /* For interrupt-reporting to work, we need to be |
359 | * informed of each interrupt. */ | 381 | * informed of each interrupt. */ |
360 | pp_enable_irq (pp); | 382 | pp_enable_irq(pp); |
361 | 383 | ||
362 | /* We may need to fix up the state machine. */ | 384 | /* We may need to fix up the state machine. */ |
363 | info = &pp->pdev->port->ieee1284; | 385 | info = &pp->pdev->port->ieee1284; |
@@ -365,15 +387,15 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
365 | pp->saved_state.phase = info->phase; | 387 | pp->saved_state.phase = info->phase; |
366 | info->mode = pp->state.mode; | 388 | info->mode = pp->state.mode; |
367 | info->phase = pp->state.phase; | 389 | info->phase = pp->state.phase; |
368 | pp->default_inactivity = parport_set_timeout (pp->pdev, 0); | 390 | pp->default_inactivity = parport_set_timeout(pp->pdev, 0); |
369 | parport_set_timeout (pp->pdev, pp->default_inactivity); | 391 | parport_set_timeout(pp->pdev, pp->default_inactivity); |
370 | 392 | ||
371 | return 0; | 393 | return 0; |
372 | } | 394 | } |
373 | case PPEXCL: | 395 | case PPEXCL: |
374 | if (pp->pdev) { | 396 | if (pp->pdev) { |
375 | pr_debug(CHRDEV "%x: too late for PPEXCL; " | 397 | dev_dbg(&pp->pdev->dev, |
376 | "already registered\n", minor); | 398 | "too late for PPEXCL; already registered\n"); |
377 | if (pp->flags & PP_EXCL) | 399 | if (pp->flags & PP_EXCL) |
378 | /* But it's not really an error. */ | 400 | /* But it's not really an error. */ |
379 | return 0; | 401 | return 0; |
@@ -388,11 +410,12 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
388 | case PPSETMODE: | 410 | case PPSETMODE: |
389 | { | 411 | { |
390 | int mode; | 412 | int mode; |
391 | if (copy_from_user (&mode, argp, sizeof (mode))) | 413 | |
414 | if (copy_from_user(&mode, argp, sizeof(mode))) | ||
392 | return -EFAULT; | 415 | return -EFAULT; |
393 | /* FIXME: validate mode */ | 416 | /* FIXME: validate mode */ |
394 | pp->state.mode = mode; | 417 | pp->state.mode = mode; |
395 | pp->state.phase = init_phase (mode); | 418 | pp->state.phase = init_phase(mode); |
396 | 419 | ||
397 | if (pp->flags & PP_CLAIMED) { | 420 | if (pp->flags & PP_CLAIMED) { |
398 | pp->pdev->port->ieee1284.mode = mode; | 421 | pp->pdev->port->ieee1284.mode = mode; |
@@ -405,28 +428,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
405 | { | 428 | { |
406 | int mode; | 429 | int mode; |
407 | 430 | ||
408 | if (pp->flags & PP_CLAIMED) { | 431 | if (pp->flags & PP_CLAIMED) |
409 | mode = pp->pdev->port->ieee1284.mode; | 432 | mode = pp->pdev->port->ieee1284.mode; |
410 | } else { | 433 | else |
411 | mode = pp->state.mode; | 434 | mode = pp->state.mode; |
412 | } | 435 | |
413 | if (copy_to_user (argp, &mode, sizeof (mode))) { | 436 | if (copy_to_user(argp, &mode, sizeof(mode))) |
414 | return -EFAULT; | 437 | return -EFAULT; |
415 | } | ||
416 | return 0; | 438 | return 0; |
417 | } | 439 | } |
418 | case PPSETPHASE: | 440 | case PPSETPHASE: |
419 | { | 441 | { |
420 | int phase; | 442 | int phase; |
421 | if (copy_from_user (&phase, argp, sizeof (phase))) { | 443 | |
444 | if (copy_from_user(&phase, argp, sizeof(phase))) | ||
422 | return -EFAULT; | 445 | return -EFAULT; |
423 | } | 446 | |
424 | /* FIXME: validate phase */ | 447 | /* FIXME: validate phase */ |
425 | pp->state.phase = phase; | 448 | pp->state.phase = phase; |
426 | 449 | ||
427 | if (pp->flags & PP_CLAIMED) { | 450 | if (pp->flags & PP_CLAIMED) |
428 | pp->pdev->port->ieee1284.phase = phase; | 451 | pp->pdev->port->ieee1284.phase = phase; |
429 | } | ||
430 | 452 | ||
431 | return 0; | 453 | return 0; |
432 | } | 454 | } |
@@ -434,38 +456,34 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
434 | { | 456 | { |
435 | int phase; | 457 | int phase; |
436 | 458 | ||
437 | if (pp->flags & PP_CLAIMED) { | 459 | if (pp->flags & PP_CLAIMED) |
438 | phase = pp->pdev->port->ieee1284.phase; | 460 | phase = pp->pdev->port->ieee1284.phase; |
439 | } else { | 461 | else |
440 | phase = pp->state.phase; | 462 | phase = pp->state.phase; |
441 | } | 463 | if (copy_to_user(argp, &phase, sizeof(phase))) |
442 | if (copy_to_user (argp, &phase, sizeof (phase))) { | ||
443 | return -EFAULT; | 464 | return -EFAULT; |
444 | } | ||
445 | return 0; | 465 | return 0; |
446 | } | 466 | } |
447 | case PPGETMODES: | 467 | case PPGETMODES: |
448 | { | 468 | { |
449 | unsigned int modes; | 469 | unsigned int modes; |
450 | 470 | ||
451 | port = parport_find_number (minor); | 471 | port = parport_find_number(minor); |
452 | if (!port) | 472 | if (!port) |
453 | return -ENODEV; | 473 | return -ENODEV; |
454 | 474 | ||
455 | modes = port->modes; | 475 | modes = port->modes; |
456 | parport_put_port(port); | 476 | parport_put_port(port); |
457 | if (copy_to_user (argp, &modes, sizeof (modes))) { | 477 | if (copy_to_user(argp, &modes, sizeof(modes))) |
458 | return -EFAULT; | 478 | return -EFAULT; |
459 | } | ||
460 | return 0; | 479 | return 0; |
461 | } | 480 | } |
462 | case PPSETFLAGS: | 481 | case PPSETFLAGS: |
463 | { | 482 | { |
464 | int uflags; | 483 | int uflags; |
465 | 484 | ||
466 | if (copy_from_user (&uflags, argp, sizeof (uflags))) { | 485 | if (copy_from_user(&uflags, argp, sizeof(uflags))) |
467 | return -EFAULT; | 486 | return -EFAULT; |
468 | } | ||
469 | pp->flags &= ~PP_FLAGMASK; | 487 | pp->flags &= ~PP_FLAGMASK; |
470 | pp->flags |= (uflags & PP_FLAGMASK); | 488 | pp->flags |= (uflags & PP_FLAGMASK); |
471 | return 0; | 489 | return 0; |
@@ -475,9 +493,8 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
475 | int uflags; | 493 | int uflags; |
476 | 494 | ||
477 | uflags = pp->flags & PP_FLAGMASK; | 495 | uflags = pp->flags & PP_FLAGMASK; |
478 | if (copy_to_user (argp, &uflags, sizeof (uflags))) { | 496 | if (copy_to_user(argp, &uflags, sizeof(uflags))) |
479 | return -EFAULT; | 497 | return -EFAULT; |
480 | } | ||
481 | return 0; | 498 | return 0; |
482 | } | 499 | } |
483 | } /* end switch() */ | 500 | } /* end switch() */ |
@@ -495,27 +512,28 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
495 | unsigned char reg; | 512 | unsigned char reg; |
496 | unsigned char mask; | 513 | unsigned char mask; |
497 | int mode; | 514 | int mode; |
515 | s32 time32[2]; | ||
516 | s64 time64[2]; | ||
517 | struct timespec64 ts; | ||
498 | int ret; | 518 | int ret; |
499 | struct timeval par_timeout; | ||
500 | long to_jiffies; | ||
501 | 519 | ||
502 | case PPRSTATUS: | 520 | case PPRSTATUS: |
503 | reg = parport_read_status (port); | 521 | reg = parport_read_status(port); |
504 | if (copy_to_user (argp, ®, sizeof (reg))) | 522 | if (copy_to_user(argp, ®, sizeof(reg))) |
505 | return -EFAULT; | 523 | return -EFAULT; |
506 | return 0; | 524 | return 0; |
507 | case PPRDATA: | 525 | case PPRDATA: |
508 | reg = parport_read_data (port); | 526 | reg = parport_read_data(port); |
509 | if (copy_to_user (argp, ®, sizeof (reg))) | 527 | if (copy_to_user(argp, ®, sizeof(reg))) |
510 | return -EFAULT; | 528 | return -EFAULT; |
511 | return 0; | 529 | return 0; |
512 | case PPRCONTROL: | 530 | case PPRCONTROL: |
513 | reg = parport_read_control (port); | 531 | reg = parport_read_control(port); |
514 | if (copy_to_user (argp, ®, sizeof (reg))) | 532 | if (copy_to_user(argp, ®, sizeof(reg))) |
515 | return -EFAULT; | 533 | return -EFAULT; |
516 | return 0; | 534 | return 0; |
517 | case PPYIELD: | 535 | case PPYIELD: |
518 | parport_yield_blocking (pp->pdev); | 536 | parport_yield_blocking(pp->pdev); |
519 | return 0; | 537 | return 0; |
520 | 538 | ||
521 | case PPRELEASE: | 539 | case PPRELEASE: |
@@ -525,45 +543,45 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
525 | pp->state.phase = info->phase; | 543 | pp->state.phase = info->phase; |
526 | info->mode = pp->saved_state.mode; | 544 | info->mode = pp->saved_state.mode; |
527 | info->phase = pp->saved_state.phase; | 545 | info->phase = pp->saved_state.phase; |
528 | parport_release (pp->pdev); | 546 | parport_release(pp->pdev); |
529 | pp->flags &= ~PP_CLAIMED; | 547 | pp->flags &= ~PP_CLAIMED; |
530 | return 0; | 548 | return 0; |
531 | 549 | ||
532 | case PPWCONTROL: | 550 | case PPWCONTROL: |
533 | if (copy_from_user (®, argp, sizeof (reg))) | 551 | if (copy_from_user(®, argp, sizeof(reg))) |
534 | return -EFAULT; | 552 | return -EFAULT; |
535 | parport_write_control (port, reg); | 553 | parport_write_control(port, reg); |
536 | return 0; | 554 | return 0; |
537 | 555 | ||
538 | case PPWDATA: | 556 | case PPWDATA: |
539 | if (copy_from_user (®, argp, sizeof (reg))) | 557 | if (copy_from_user(®, argp, sizeof(reg))) |
540 | return -EFAULT; | 558 | return -EFAULT; |
541 | parport_write_data (port, reg); | 559 | parport_write_data(port, reg); |
542 | return 0; | 560 | return 0; |
543 | 561 | ||
544 | case PPFCONTROL: | 562 | case PPFCONTROL: |
545 | if (copy_from_user (&mask, argp, | 563 | if (copy_from_user(&mask, argp, |
546 | sizeof (mask))) | 564 | sizeof(mask))) |
547 | return -EFAULT; | 565 | return -EFAULT; |
548 | if (copy_from_user (®, 1 + (unsigned char __user *) arg, | 566 | if (copy_from_user(®, 1 + (unsigned char __user *) arg, |
549 | sizeof (reg))) | 567 | sizeof(reg))) |
550 | return -EFAULT; | 568 | return -EFAULT; |
551 | parport_frob_control (port, mask, reg); | 569 | parport_frob_control(port, mask, reg); |
552 | return 0; | 570 | return 0; |
553 | 571 | ||
554 | case PPDATADIR: | 572 | case PPDATADIR: |
555 | if (copy_from_user (&mode, argp, sizeof (mode))) | 573 | if (copy_from_user(&mode, argp, sizeof(mode))) |
556 | return -EFAULT; | 574 | return -EFAULT; |
557 | if (mode) | 575 | if (mode) |
558 | port->ops->data_reverse (port); | 576 | port->ops->data_reverse(port); |
559 | else | 577 | else |
560 | port->ops->data_forward (port); | 578 | port->ops->data_forward(port); |
561 | return 0; | 579 | return 0; |
562 | 580 | ||
563 | case PPNEGOT: | 581 | case PPNEGOT: |
564 | if (copy_from_user (&mode, argp, sizeof (mode))) | 582 | if (copy_from_user(&mode, argp, sizeof(mode))) |
565 | return -EFAULT; | 583 | return -EFAULT; |
566 | switch ((ret = parport_negotiate (port, mode))) { | 584 | switch ((ret = parport_negotiate(port, mode))) { |
567 | case 0: break; | 585 | case 0: break; |
568 | case -1: /* handshake failed, peripheral not IEEE 1284 */ | 586 | case -1: /* handshake failed, peripheral not IEEE 1284 */ |
569 | ret = -EIO; | 587 | ret = -EIO; |
@@ -572,11 +590,11 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
572 | ret = -ENXIO; | 590 | ret = -ENXIO; |
573 | break; | 591 | break; |
574 | } | 592 | } |
575 | pp_enable_irq (pp); | 593 | pp_enable_irq(pp); |
576 | return ret; | 594 | return ret; |
577 | 595 | ||
578 | case PPWCTLONIRQ: | 596 | case PPWCTLONIRQ: |
579 | if (copy_from_user (®, argp, sizeof (reg))) | 597 | if (copy_from_user(®, argp, sizeof(reg))) |
580 | return -EFAULT; | 598 | return -EFAULT; |
581 | 599 | ||
582 | /* Remember what to set the control lines to, for next | 600 | /* Remember what to set the control lines to, for next |
@@ -586,39 +604,50 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
586 | return 0; | 604 | return 0; |
587 | 605 | ||
588 | case PPCLRIRQ: | 606 | case PPCLRIRQ: |
589 | ret = atomic_read (&pp->irqc); | 607 | ret = atomic_read(&pp->irqc); |
590 | if (copy_to_user (argp, &ret, sizeof (ret))) | 608 | if (copy_to_user(argp, &ret, sizeof(ret))) |
591 | return -EFAULT; | 609 | return -EFAULT; |
592 | atomic_sub (ret, &pp->irqc); | 610 | atomic_sub(ret, &pp->irqc); |
593 | return 0; | 611 | return 0; |
594 | 612 | ||
595 | case PPSETTIME: | 613 | case PPSETTIME32: |
596 | if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) { | 614 | if (copy_from_user(time32, argp, sizeof(time32))) |
597 | return -EFAULT; | 615 | return -EFAULT; |
598 | } | 616 | |
599 | /* Convert to jiffies, place in pp->pdev->timeout */ | 617 | return pp_set_timeout(pp->pdev, time32[0], time32[1]); |
600 | if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) { | 618 | |
601 | return -EINVAL; | 619 | case PPSETTIME64: |
602 | } | 620 | if (copy_from_user(time64, argp, sizeof(time64))) |
603 | to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ); | 621 | return -EFAULT; |
604 | to_jiffies += par_timeout.tv_sec * (long)HZ; | 622 | |
605 | if (to_jiffies <= 0) { | 623 | return pp_set_timeout(pp->pdev, time64[0], time64[1]); |
624 | |||
625 | case PPGETTIME32: | ||
626 | jiffies_to_timespec64(pp->pdev->timeout, &ts); | ||
627 | time32[0] = ts.tv_sec; | ||
628 | time32[1] = ts.tv_nsec / NSEC_PER_USEC; | ||
629 | if ((time32[0] < 0) || (time32[1] < 0)) | ||
606 | return -EINVAL; | 630 | return -EINVAL; |
607 | } | 631 | |
608 | pp->pdev->timeout = to_jiffies; | 632 | if (copy_to_user(argp, time32, sizeof(time32))) |
633 | return -EFAULT; | ||
634 | |||
609 | return 0; | 635 | return 0; |
610 | 636 | ||
611 | case PPGETTIME: | 637 | case PPGETTIME64: |
612 | to_jiffies = pp->pdev->timeout; | 638 | jiffies_to_timespec64(pp->pdev->timeout, &ts); |
613 | memset(&par_timeout, 0, sizeof(par_timeout)); | 639 | time64[0] = ts.tv_sec; |
614 | par_timeout.tv_sec = to_jiffies / HZ; | 640 | time64[1] = ts.tv_nsec / NSEC_PER_USEC; |
615 | par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ); | 641 | if ((time64[0] < 0) || (time64[1] < 0)) |
616 | if (copy_to_user (argp, &par_timeout, sizeof(struct timeval))) | 642 | return -EINVAL; |
643 | |||
644 | if (copy_to_user(argp, time64, sizeof(time64))) | ||
617 | return -EFAULT; | 645 | return -EFAULT; |
646 | |||
618 | return 0; | 647 | return 0; |
619 | 648 | ||
620 | default: | 649 | default: |
621 | pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd); | 650 | dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd); |
622 | return -EINVAL; | 651 | return -EINVAL; |
623 | } | 652 | } |
624 | 653 | ||
@@ -629,13 +658,22 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
629 | static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 658 | static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
630 | { | 659 | { |
631 | long ret; | 660 | long ret; |
661 | |||
632 | mutex_lock(&pp_do_mutex); | 662 | mutex_lock(&pp_do_mutex); |
633 | ret = pp_do_ioctl(file, cmd, arg); | 663 | ret = pp_do_ioctl(file, cmd, arg); |
634 | mutex_unlock(&pp_do_mutex); | 664 | mutex_unlock(&pp_do_mutex); |
635 | return ret; | 665 | return ret; |
636 | } | 666 | } |
637 | 667 | ||
638 | static int pp_open (struct inode * inode, struct file * file) | 668 | #ifdef CONFIG_COMPAT |
669 | static long pp_compat_ioctl(struct file *file, unsigned int cmd, | ||
670 | unsigned long arg) | ||
671 | { | ||
672 | return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); | ||
673 | } | ||
674 | #endif | ||
675 | |||
676 | static int pp_open(struct inode *inode, struct file *file) | ||
639 | { | 677 | { |
640 | unsigned int minor = iminor(inode); | 678 | unsigned int minor = iminor(inode); |
641 | struct pp_struct *pp; | 679 | struct pp_struct *pp; |
@@ -643,16 +681,16 @@ static int pp_open (struct inode * inode, struct file * file) | |||
643 | if (minor >= PARPORT_MAX) | 681 | if (minor >= PARPORT_MAX) |
644 | return -ENXIO; | 682 | return -ENXIO; |
645 | 683 | ||
646 | pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL); | 684 | pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL); |
647 | if (!pp) | 685 | if (!pp) |
648 | return -ENOMEM; | 686 | return -ENOMEM; |
649 | 687 | ||
650 | pp->state.mode = IEEE1284_MODE_COMPAT; | 688 | pp->state.mode = IEEE1284_MODE_COMPAT; |
651 | pp->state.phase = init_phase (pp->state.mode); | 689 | pp->state.phase = init_phase(pp->state.mode); |
652 | pp->flags = 0; | 690 | pp->flags = 0; |
653 | pp->irqresponse = 0; | 691 | pp->irqresponse = 0; |
654 | atomic_set (&pp->irqc, 0); | 692 | atomic_set(&pp->irqc, 0); |
655 | init_waitqueue_head (&pp->irq_wait); | 693 | init_waitqueue_head(&pp->irq_wait); |
656 | 694 | ||
657 | /* Defer the actual device registration until the first claim. | 695 | /* Defer the actual device registration until the first claim. |
658 | * That way, we know whether or not the driver wants to have | 696 | * That way, we know whether or not the driver wants to have |
@@ -664,7 +702,7 @@ static int pp_open (struct inode * inode, struct file * file) | |||
664 | return 0; | 702 | return 0; |
665 | } | 703 | } |
666 | 704 | ||
667 | static int pp_release (struct inode * inode, struct file * file) | 705 | static int pp_release(struct inode *inode, struct file *file) |
668 | { | 706 | { |
669 | unsigned int minor = iminor(inode); | 707 | unsigned int minor = iminor(inode); |
670 | struct pp_struct *pp = file->private_data; | 708 | struct pp_struct *pp = file->private_data; |
@@ -673,10 +711,10 @@ static int pp_release (struct inode * inode, struct file * file) | |||
673 | compat_negot = 0; | 711 | compat_negot = 0; |
674 | if (!(pp->flags & PP_CLAIMED) && pp->pdev && | 712 | if (!(pp->flags & PP_CLAIMED) && pp->pdev && |
675 | (pp->state.mode != IEEE1284_MODE_COMPAT)) { | 713 | (pp->state.mode != IEEE1284_MODE_COMPAT)) { |
676 | struct ieee1284_info *info; | 714 | struct ieee1284_info *info; |
677 | 715 | ||
678 | /* parport released, but not in compatibility mode */ | 716 | /* parport released, but not in compatibility mode */ |
679 | parport_claim_or_block (pp->pdev); | 717 | parport_claim_or_block(pp->pdev); |
680 | pp->flags |= PP_CLAIMED; | 718 | pp->flags |= PP_CLAIMED; |
681 | info = &pp->pdev->port->ieee1284; | 719 | info = &pp->pdev->port->ieee1284; |
682 | pp->saved_state.mode = info->mode; | 720 | pp->saved_state.mode = info->mode; |
@@ -689,9 +727,9 @@ static int pp_release (struct inode * inode, struct file * file) | |||
689 | compat_negot = 2; | 727 | compat_negot = 2; |
690 | } | 728 | } |
691 | if (compat_negot) { | 729 | if (compat_negot) { |
692 | parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); | 730 | parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT); |
693 | pr_debug(CHRDEV "%x: negotiated back to compatibility " | 731 | dev_dbg(&pp->pdev->dev, |
694 | "mode because user-space forgot\n", minor); | 732 | "negotiated back to compatibility mode because user-space forgot\n"); |
695 | } | 733 | } |
696 | 734 | ||
697 | if (pp->flags & PP_CLAIMED) { | 735 | if (pp->flags & PP_CLAIMED) { |
@@ -702,7 +740,7 @@ static int pp_release (struct inode * inode, struct file * file) | |||
702 | pp->state.phase = info->phase; | 740 | pp->state.phase = info->phase; |
703 | info->mode = pp->saved_state.mode; | 741 | info->mode = pp->saved_state.mode; |
704 | info->phase = pp->saved_state.phase; | 742 | info->phase = pp->saved_state.phase; |
705 | parport_release (pp->pdev); | 743 | parport_release(pp->pdev); |
706 | if (compat_negot != 1) { | 744 | if (compat_negot != 1) { |
707 | pr_debug(CHRDEV "%x: released pardevice " | 745 | pr_debug(CHRDEV "%x: released pardevice " |
708 | "because user-space forgot\n", minor); | 746 | "because user-space forgot\n", minor); |
@@ -711,25 +749,26 @@ static int pp_release (struct inode * inode, struct file * file) | |||
711 | 749 | ||
712 | if (pp->pdev) { | 750 | if (pp->pdev) { |
713 | const char *name = pp->pdev->name; | 751 | const char *name = pp->pdev->name; |
714 | parport_unregister_device (pp->pdev); | 752 | |
715 | kfree (name); | 753 | parport_unregister_device(pp->pdev); |
754 | kfree(name); | ||
716 | pp->pdev = NULL; | 755 | pp->pdev = NULL; |
717 | pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); | 756 | pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); |
718 | } | 757 | } |
719 | 758 | ||
720 | kfree (pp); | 759 | kfree(pp); |
721 | 760 | ||
722 | return 0; | 761 | return 0; |
723 | } | 762 | } |
724 | 763 | ||
725 | /* No kernel lock held - fine */ | 764 | /* No kernel lock held - fine */ |
726 | static unsigned int pp_poll (struct file * file, poll_table * wait) | 765 | static unsigned int pp_poll(struct file *file, poll_table *wait) |
727 | { | 766 | { |
728 | struct pp_struct *pp = file->private_data; | 767 | struct pp_struct *pp = file->private_data; |
729 | unsigned int mask = 0; | 768 | unsigned int mask = 0; |
730 | 769 | ||
731 | poll_wait (file, &pp->irq_wait, wait); | 770 | poll_wait(file, &pp->irq_wait, wait); |
732 | if (atomic_read (&pp->irqc)) | 771 | if (atomic_read(&pp->irqc)) |
733 | mask |= POLLIN | POLLRDNORM; | 772 | mask |= POLLIN | POLLRDNORM; |
734 | 773 | ||
735 | return mask; | 774 | return mask; |
@@ -744,6 +783,9 @@ static const struct file_operations pp_fops = { | |||
744 | .write = pp_write, | 783 | .write = pp_write, |
745 | .poll = pp_poll, | 784 | .poll = pp_poll, |
746 | .unlocked_ioctl = pp_ioctl, | 785 | .unlocked_ioctl = pp_ioctl, |
786 | #ifdef CONFIG_COMPAT | ||
787 | .compat_ioctl = pp_compat_ioctl, | ||
788 | #endif | ||
747 | .open = pp_open, | 789 | .open = pp_open, |
748 | .release = pp_release, | 790 | .release = pp_release, |
749 | }; | 791 | }; |
@@ -759,19 +801,32 @@ static void pp_detach(struct parport *port) | |||
759 | device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); | 801 | device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); |
760 | } | 802 | } |
761 | 803 | ||
804 | static int pp_probe(struct pardevice *par_dev) | ||
805 | { | ||
806 | struct device_driver *drv = par_dev->dev.driver; | ||
807 | int len = strlen(drv->name); | ||
808 | |||
809 | if (strncmp(par_dev->name, drv->name, len)) | ||
810 | return -ENODEV; | ||
811 | |||
812 | return 0; | ||
813 | } | ||
814 | |||
762 | static struct parport_driver pp_driver = { | 815 | static struct parport_driver pp_driver = { |
763 | .name = CHRDEV, | 816 | .name = CHRDEV, |
764 | .attach = pp_attach, | 817 | .probe = pp_probe, |
818 | .match_port = pp_attach, | ||
765 | .detach = pp_detach, | 819 | .detach = pp_detach, |
820 | .devmodel = true, | ||
766 | }; | 821 | }; |
767 | 822 | ||
768 | static int __init ppdev_init (void) | 823 | static int __init ppdev_init(void) |
769 | { | 824 | { |
770 | int err = 0; | 825 | int err = 0; |
771 | 826 | ||
772 | if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) { | 827 | if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) { |
773 | printk (KERN_WARNING CHRDEV ": unable to get major %d\n", | 828 | printk(KERN_WARNING CHRDEV ": unable to get major %d\n", |
774 | PP_MAJOR); | 829 | PP_MAJOR); |
775 | return -EIO; | 830 | return -EIO; |
776 | } | 831 | } |
777 | ppdev_class = class_create(THIS_MODULE, CHRDEV); | 832 | ppdev_class = class_create(THIS_MODULE, CHRDEV); |
@@ -781,11 +836,11 @@ static int __init ppdev_init (void) | |||
781 | } | 836 | } |
782 | err = parport_register_driver(&pp_driver); | 837 | err = parport_register_driver(&pp_driver); |
783 | if (err < 0) { | 838 | if (err < 0) { |
784 | printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); | 839 | printk(KERN_WARNING CHRDEV ": unable to register with parport\n"); |
785 | goto out_class; | 840 | goto out_class; |
786 | } | 841 | } |
787 | 842 | ||
788 | printk (KERN_INFO PP_VERSION "\n"); | 843 | printk(KERN_INFO PP_VERSION "\n"); |
789 | goto out; | 844 | goto out; |
790 | 845 | ||
791 | out_class: | 846 | out_class: |
@@ -796,12 +851,12 @@ out: | |||
796 | return err; | 851 | return err; |
797 | } | 852 | } |
798 | 853 | ||
799 | static void __exit ppdev_cleanup (void) | 854 | static void __exit ppdev_cleanup(void) |
800 | { | 855 | { |
801 | /* Clean up all parport stuff */ | 856 | /* Clean up all parport stuff */ |
802 | parport_unregister_driver(&pp_driver); | 857 | parport_unregister_driver(&pp_driver); |
803 | class_destroy(ppdev_class); | 858 | class_destroy(ppdev_class); |
804 | unregister_chrdev (PP_MAJOR, CHRDEV); | 859 | unregister_chrdev(PP_MAJOR, CHRDEV); |
805 | } | 860 | } |
806 | 861 | ||
807 | module_init(ppdev_init); | 862 | module_init(ppdev_init); |
diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 9b9809b709a5..e83b2adc014a 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c | |||
@@ -334,10 +334,8 @@ static int __init raw_init(void) | |||
334 | 334 | ||
335 | cdev_init(&raw_cdev, &raw_fops); | 335 | cdev_init(&raw_cdev, &raw_fops); |
336 | ret = cdev_add(&raw_cdev, dev, max_raw_minors); | 336 | ret = cdev_add(&raw_cdev, dev, max_raw_minors); |
337 | if (ret) { | 337 | if (ret) |
338 | goto error_region; | 338 | goto error_region; |
339 | } | ||
340 | |||
341 | raw_class = class_create(THIS_MODULE, "raw"); | 339 | raw_class = class_create(THIS_MODULE, "raw"); |
342 | if (IS_ERR(raw_class)) { | 340 | if (IS_ERR(raw_class)) { |
343 | printk(KERN_ERR "Error creating raw class.\n"); | 341 | printk(KERN_ERR "Error creating raw class.\n"); |
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c index 77d6c127e691..dcd19f3f182e 100644 --- a/drivers/char/xillybus/xillybus_core.c +++ b/drivers/char/xillybus/xillybus_core.c | |||
@@ -509,7 +509,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep, | |||
509 | channel->log2_element_size = ((format > 2) ? | 509 | channel->log2_element_size = ((format > 2) ? |
510 | 2 : format); | 510 | 2 : format); |
511 | 511 | ||
512 | bytebufsize = channel->rd_buf_size = bufsize * | 512 | bytebufsize = bufsize * |
513 | (1 << channel->log2_element_size); | 513 | (1 << channel->log2_element_size); |
514 | 514 | ||
515 | buffers = devm_kcalloc(dev, bufnum, | 515 | buffers = devm_kcalloc(dev, bufnum, |
@@ -523,6 +523,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep, | |||
523 | 523 | ||
524 | if (!is_writebuf) { | 524 | if (!is_writebuf) { |
525 | channel->num_rd_buffers = bufnum; | 525 | channel->num_rd_buffers = bufnum; |
526 | channel->rd_buf_size = bytebufsize; | ||
526 | channel->rd_allow_partial = allowpartial; | 527 | channel->rd_allow_partial = allowpartial; |
527 | channel->rd_synchronous = synchronous; | 528 | channel->rd_synchronous = synchronous; |
528 | channel->rd_exclusive_open = exclusive_open; | 529 | channel->rd_exclusive_open = exclusive_open; |
@@ -533,6 +534,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep, | |||
533 | bufnum, bytebufsize); | 534 | bufnum, bytebufsize); |
534 | } else if (channelnum > 0) { | 535 | } else if (channelnum > 0) { |
535 | channel->num_wr_buffers = bufnum; | 536 | channel->num_wr_buffers = bufnum; |
537 | channel->wr_buf_size = bytebufsize; | ||
536 | 538 | ||
537 | channel->seekable = seekable; | 539 | channel->seekable = seekable; |
538 | channel->wr_supports_nonempty = supports_nonempty; | 540 | channel->wr_supports_nonempty = supports_nonempty; |
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c index c121d01a5cd6..1d8e0a57bd51 100644 --- a/drivers/extcon/extcon-arizona.c +++ b/drivers/extcon/extcon-arizona.c | |||
@@ -185,7 +185,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info, | |||
185 | break; | 185 | break; |
186 | }; | 186 | }; |
187 | 187 | ||
188 | mutex_lock(&arizona->dapm->card->dapm_mutex); | 188 | snd_soc_dapm_mutex_lock(arizona->dapm); |
189 | 189 | ||
190 | arizona->hpdet_clamp = clamp; | 190 | arizona->hpdet_clamp = clamp; |
191 | 191 | ||
@@ -227,7 +227,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info, | |||
227 | ret); | 227 | ret); |
228 | } | 228 | } |
229 | 229 | ||
230 | mutex_unlock(&arizona->dapm->card->dapm_mutex); | 230 | snd_soc_dapm_mutex_unlock(arizona->dapm); |
231 | } | 231 | } |
232 | 232 | ||
233 | static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode) | 233 | static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode) |
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c index 279ff8f6637d..d023789f0fda 100644 --- a/drivers/extcon/extcon-gpio.c +++ b/drivers/extcon/extcon-gpio.c | |||
@@ -126,7 +126,7 @@ static int gpio_extcon_probe(struct platform_device *pdev) | |||
126 | INIT_DELAYED_WORK(&data->work, gpio_extcon_work); | 126 | INIT_DELAYED_WORK(&data->work, gpio_extcon_work); |
127 | 127 | ||
128 | /* | 128 | /* |
129 | * Request the interrput of gpio to detect whether external connector | 129 | * Request the interrupt of gpio to detect whether external connector |
130 | * is attached or detached. | 130 | * is attached or detached. |
131 | */ | 131 | */ |
132 | ret = devm_request_any_context_irq(&pdev->dev, data->irq, | 132 | ret = devm_request_any_context_irq(&pdev->dev, data->irq, |
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c index b30ab97ce75f..852a7112f451 100644 --- a/drivers/extcon/extcon-max14577.c +++ b/drivers/extcon/extcon-max14577.c | |||
@@ -150,6 +150,7 @@ enum max14577_muic_acc_type { | |||
150 | 150 | ||
151 | static const unsigned int max14577_extcon_cable[] = { | 151 | static const unsigned int max14577_extcon_cable[] = { |
152 | EXTCON_USB, | 152 | EXTCON_USB, |
153 | EXTCON_CHG_USB_SDP, | ||
153 | EXTCON_CHG_USB_DCP, | 154 | EXTCON_CHG_USB_DCP, |
154 | EXTCON_CHG_USB_FAST, | 155 | EXTCON_CHG_USB_FAST, |
155 | EXTCON_CHG_USB_SLOW, | 156 | EXTCON_CHG_USB_SLOW, |
@@ -454,6 +455,8 @@ static int max14577_muic_chg_handler(struct max14577_muic_info *info) | |||
454 | return ret; | 455 | return ret; |
455 | 456 | ||
456 | extcon_set_cable_state_(info->edev, EXTCON_USB, attached); | 457 | extcon_set_cable_state_(info->edev, EXTCON_USB, attached); |
458 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP, | ||
459 | attached); | ||
457 | break; | 460 | break; |
458 | case MAX14577_CHARGER_TYPE_DEDICATED_CHG: | 461 | case MAX14577_CHARGER_TYPE_DEDICATED_CHG: |
459 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP, | 462 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP, |
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c index fdf8f5d4d4e9..f17cb76b567c 100644 --- a/drivers/extcon/extcon-max77693.c +++ b/drivers/extcon/extcon-max77693.c | |||
@@ -204,6 +204,7 @@ enum max77693_muic_acc_type { | |||
204 | static const unsigned int max77693_extcon_cable[] = { | 204 | static const unsigned int max77693_extcon_cable[] = { |
205 | EXTCON_USB, | 205 | EXTCON_USB, |
206 | EXTCON_USB_HOST, | 206 | EXTCON_USB_HOST, |
207 | EXTCON_CHG_USB_SDP, | ||
207 | EXTCON_CHG_USB_DCP, | 208 | EXTCON_CHG_USB_DCP, |
208 | EXTCON_CHG_USB_FAST, | 209 | EXTCON_CHG_USB_FAST, |
209 | EXTCON_CHG_USB_SLOW, | 210 | EXTCON_CHG_USB_SLOW, |
@@ -512,8 +513,11 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info, | |||
512 | break; | 513 | break; |
513 | case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */ | 514 | case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */ |
514 | dock_id = EXTCON_DOCK; | 515 | dock_id = EXTCON_DOCK; |
515 | if (!attached) | 516 | if (!attached) { |
516 | extcon_set_cable_state_(info->edev, EXTCON_USB, false); | 517 | extcon_set_cable_state_(info->edev, EXTCON_USB, false); |
518 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP, | ||
519 | false); | ||
520 | } | ||
517 | break; | 521 | break; |
518 | default: | 522 | default: |
519 | dev_err(info->dev, "failed to detect %s dock device\n", | 523 | dev_err(info->dev, "failed to detect %s dock device\n", |
@@ -601,6 +605,8 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info) | |||
601 | if (ret < 0) | 605 | if (ret < 0) |
602 | return ret; | 606 | return ret; |
603 | extcon_set_cable_state_(info->edev, EXTCON_USB, attached); | 607 | extcon_set_cable_state_(info->edev, EXTCON_USB, attached); |
608 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP, | ||
609 | attached); | ||
604 | break; | 610 | break; |
605 | case MAX77693_MUIC_GND_MHL: | 611 | case MAX77693_MUIC_GND_MHL: |
606 | case MAX77693_MUIC_GND_MHL_VB: | 612 | case MAX77693_MUIC_GND_MHL_VB: |
@@ -830,6 +836,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info) | |||
830 | */ | 836 | */ |
831 | extcon_set_cable_state_(info->edev, EXTCON_USB, | 837 | extcon_set_cable_state_(info->edev, EXTCON_USB, |
832 | attached); | 838 | attached); |
839 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP, | ||
840 | attached); | ||
833 | 841 | ||
834 | if (!cable_attached) | 842 | if (!cable_attached) |
835 | extcon_set_cable_state_(info->edev, EXTCON_DOCK, | 843 | extcon_set_cable_state_(info->edev, EXTCON_DOCK, |
@@ -899,6 +907,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info) | |||
899 | 907 | ||
900 | extcon_set_cable_state_(info->edev, EXTCON_USB, | 908 | extcon_set_cable_state_(info->edev, EXTCON_USB, |
901 | attached); | 909 | attached); |
910 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP, | ||
911 | attached); | ||
902 | break; | 912 | break; |
903 | case MAX77693_CHARGER_TYPE_DEDICATED_CHG: | 913 | case MAX77693_CHARGER_TYPE_DEDICATED_CHG: |
904 | /* Only TA cable */ | 914 | /* Only TA cable */ |
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c index 74dfb7f4f277..b188bd650efa 100644 --- a/drivers/extcon/extcon-max77843.c +++ b/drivers/extcon/extcon-max77843.c | |||
@@ -122,6 +122,7 @@ enum max77843_muic_charger_type { | |||
122 | static const unsigned int max77843_extcon_cable[] = { | 122 | static const unsigned int max77843_extcon_cable[] = { |
123 | EXTCON_USB, | 123 | EXTCON_USB, |
124 | EXTCON_USB_HOST, | 124 | EXTCON_USB_HOST, |
125 | EXTCON_CHG_USB_SDP, | ||
125 | EXTCON_CHG_USB_DCP, | 126 | EXTCON_CHG_USB_DCP, |
126 | EXTCON_CHG_USB_CDP, | 127 | EXTCON_CHG_USB_CDP, |
127 | EXTCON_CHG_USB_FAST, | 128 | EXTCON_CHG_USB_FAST, |
@@ -486,6 +487,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info) | |||
486 | return ret; | 487 | return ret; |
487 | 488 | ||
488 | extcon_set_cable_state_(info->edev, EXTCON_USB, attached); | 489 | extcon_set_cable_state_(info->edev, EXTCON_USB, attached); |
490 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP, | ||
491 | attached); | ||
489 | break; | 492 | break; |
490 | case MAX77843_MUIC_CHG_DOWNSTREAM: | 493 | case MAX77843_MUIC_CHG_DOWNSTREAM: |
491 | ret = max77843_muic_set_path(info, | 494 | ret = max77843_muic_set_path(info, |
@@ -803,7 +806,7 @@ static int max77843_muic_probe(struct platform_device *pdev) | |||
803 | /* Clear IRQ bits before request IRQs */ | 806 | /* Clear IRQ bits before request IRQs */ |
804 | ret = regmap_bulk_read(max77843->regmap_muic, | 807 | ret = regmap_bulk_read(max77843->regmap_muic, |
805 | MAX77843_MUIC_REG_INT1, info->status, | 808 | MAX77843_MUIC_REG_INT1, info->status, |
806 | MAX77843_MUIC_IRQ_NUM); | 809 | MAX77843_MUIC_STATUS_NUM); |
807 | if (ret) { | 810 | if (ret) { |
808 | dev_err(&pdev->dev, "Failed to Clear IRQ bits\n"); | 811 | dev_err(&pdev->dev, "Failed to Clear IRQ bits\n"); |
809 | goto err_muic_irq; | 812 | goto err_muic_irq; |
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c index b2b13b3dce14..9a89320d09a8 100644 --- a/drivers/extcon/extcon-max8997.c +++ b/drivers/extcon/extcon-max8997.c | |||
@@ -148,6 +148,7 @@ struct max8997_muic_info { | |||
148 | static const unsigned int max8997_extcon_cable[] = { | 148 | static const unsigned int max8997_extcon_cable[] = { |
149 | EXTCON_USB, | 149 | EXTCON_USB, |
150 | EXTCON_USB_HOST, | 150 | EXTCON_USB_HOST, |
151 | EXTCON_CHG_USB_SDP, | ||
151 | EXTCON_CHG_USB_DCP, | 152 | EXTCON_CHG_USB_DCP, |
152 | EXTCON_CHG_USB_FAST, | 153 | EXTCON_CHG_USB_FAST, |
153 | EXTCON_CHG_USB_SLOW, | 154 | EXTCON_CHG_USB_SLOW, |
@@ -334,6 +335,8 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info, | |||
334 | break; | 335 | break; |
335 | case MAX8997_USB_DEVICE: | 336 | case MAX8997_USB_DEVICE: |
336 | extcon_set_cable_state_(info->edev, EXTCON_USB, attached); | 337 | extcon_set_cable_state_(info->edev, EXTCON_USB, attached); |
338 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP, | ||
339 | attached); | ||
337 | break; | 340 | break; |
338 | default: | 341 | default: |
339 | dev_err(info->dev, "failed to detect %s usb cable\n", | 342 | dev_err(info->dev, "failed to detect %s usb cable\n", |
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 93c30a885740..841a4b586395 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c | |||
@@ -216,11 +216,23 @@ static int palmas_usb_probe(struct platform_device *pdev) | |||
216 | return PTR_ERR(palmas_usb->id_gpiod); | 216 | return PTR_ERR(palmas_usb->id_gpiod); |
217 | } | 217 | } |
218 | 218 | ||
219 | palmas_usb->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus", | ||
220 | GPIOD_IN); | ||
221 | if (IS_ERR(palmas_usb->vbus_gpiod)) { | ||
222 | dev_err(&pdev->dev, "failed to get vbus gpio\n"); | ||
223 | return PTR_ERR(palmas_usb->vbus_gpiod); | ||
224 | } | ||
225 | |||
219 | if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) { | 226 | if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) { |
220 | palmas_usb->enable_id_detection = false; | 227 | palmas_usb->enable_id_detection = false; |
221 | palmas_usb->enable_gpio_id_detection = true; | 228 | palmas_usb->enable_gpio_id_detection = true; |
222 | } | 229 | } |
223 | 230 | ||
231 | if (palmas_usb->enable_vbus_detection && palmas_usb->vbus_gpiod) { | ||
232 | palmas_usb->enable_vbus_detection = false; | ||
233 | palmas_usb->enable_gpio_vbus_detection = true; | ||
234 | } | ||
235 | |||
224 | if (palmas_usb->enable_gpio_id_detection) { | 236 | if (palmas_usb->enable_gpio_id_detection) { |
225 | u32 debounce; | 237 | u32 debounce; |
226 | 238 | ||
@@ -266,7 +278,7 @@ static int palmas_usb_probe(struct platform_device *pdev) | |||
266 | palmas_usb->id_irq, | 278 | palmas_usb->id_irq, |
267 | NULL, palmas_id_irq_handler, | 279 | NULL, palmas_id_irq_handler, |
268 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | | 280 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | |
269 | IRQF_ONESHOT | IRQF_EARLY_RESUME, | 281 | IRQF_ONESHOT, |
270 | "palmas_usb_id", palmas_usb); | 282 | "palmas_usb_id", palmas_usb); |
271 | if (status < 0) { | 283 | if (status < 0) { |
272 | dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", | 284 | dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", |
@@ -304,13 +316,47 @@ static int palmas_usb_probe(struct platform_device *pdev) | |||
304 | palmas_usb->vbus_irq, NULL, | 316 | palmas_usb->vbus_irq, NULL, |
305 | palmas_vbus_irq_handler, | 317 | palmas_vbus_irq_handler, |
306 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | | 318 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | |
307 | IRQF_ONESHOT | IRQF_EARLY_RESUME, | 319 | IRQF_ONESHOT, |
308 | "palmas_usb_vbus", palmas_usb); | 320 | "palmas_usb_vbus", palmas_usb); |
309 | if (status < 0) { | 321 | if (status < 0) { |
310 | dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", | 322 | dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", |
311 | palmas_usb->vbus_irq, status); | 323 | palmas_usb->vbus_irq, status); |
312 | return status; | 324 | return status; |
313 | } | 325 | } |
326 | } else if (palmas_usb->enable_gpio_vbus_detection) { | ||
327 | /* remux GPIO_1 as VBUSDET */ | ||
328 | status = palmas_update_bits(palmas, | ||
329 | PALMAS_PU_PD_OD_BASE, | ||
330 | PALMAS_PRIMARY_SECONDARY_PAD1, | ||
331 | PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK, | ||
332 | (1 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT)); | ||
333 | if (status < 0) { | ||
334 | dev_err(&pdev->dev, "can't remux GPIO1\n"); | ||
335 | return status; | ||
336 | } | ||
337 | |||
338 | palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data, | ||
339 | PALMAS_VBUS_OTG_IRQ); | ||
340 | palmas_usb->gpio_vbus_irq = gpiod_to_irq(palmas_usb->vbus_gpiod); | ||
341 | if (palmas_usb->gpio_vbus_irq < 0) { | ||
342 | dev_err(&pdev->dev, "failed to get vbus irq\n"); | ||
343 | return palmas_usb->gpio_vbus_irq; | ||
344 | } | ||
345 | status = devm_request_threaded_irq(&pdev->dev, | ||
346 | palmas_usb->gpio_vbus_irq, | ||
347 | NULL, | ||
348 | palmas_vbus_irq_handler, | ||
349 | IRQF_TRIGGER_FALLING | | ||
350 | IRQF_TRIGGER_RISING | | ||
351 | IRQF_ONESHOT | | ||
352 | IRQF_EARLY_RESUME, | ||
353 | "palmas_usb_vbus", | ||
354 | palmas_usb); | ||
355 | if (status < 0) { | ||
356 | dev_err(&pdev->dev, | ||
357 | "failed to request handler for vbus irq\n"); | ||
358 | return status; | ||
359 | } | ||
314 | } | 360 | } |
315 | 361 | ||
316 | palmas_enable_irq(palmas_usb); | 362 | palmas_enable_irq(palmas_usb); |
@@ -337,6 +383,8 @@ static int palmas_usb_suspend(struct device *dev) | |||
337 | if (device_may_wakeup(dev)) { | 383 | if (device_may_wakeup(dev)) { |
338 | if (palmas_usb->enable_vbus_detection) | 384 | if (palmas_usb->enable_vbus_detection) |
339 | enable_irq_wake(palmas_usb->vbus_irq); | 385 | enable_irq_wake(palmas_usb->vbus_irq); |
386 | if (palmas_usb->enable_gpio_vbus_detection) | ||
387 | enable_irq_wake(palmas_usb->gpio_vbus_irq); | ||
340 | if (palmas_usb->enable_id_detection) | 388 | if (palmas_usb->enable_id_detection) |
341 | enable_irq_wake(palmas_usb->id_irq); | 389 | enable_irq_wake(palmas_usb->id_irq); |
342 | if (palmas_usb->enable_gpio_id_detection) | 390 | if (palmas_usb->enable_gpio_id_detection) |
@@ -352,6 +400,8 @@ static int palmas_usb_resume(struct device *dev) | |||
352 | if (device_may_wakeup(dev)) { | 400 | if (device_may_wakeup(dev)) { |
353 | if (palmas_usb->enable_vbus_detection) | 401 | if (palmas_usb->enable_vbus_detection) |
354 | disable_irq_wake(palmas_usb->vbus_irq); | 402 | disable_irq_wake(palmas_usb->vbus_irq); |
403 | if (palmas_usb->enable_gpio_vbus_detection) | ||
404 | disable_irq_wake(palmas_usb->gpio_vbus_irq); | ||
355 | if (palmas_usb->enable_id_detection) | 405 | if (palmas_usb->enable_id_detection) |
356 | disable_irq_wake(palmas_usb->id_irq); | 406 | disable_irq_wake(palmas_usb->id_irq); |
357 | if (palmas_usb->enable_gpio_id_detection) | 407 | if (palmas_usb->enable_gpio_id_detection) |
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c index e1bb82809bef..97e074d70eca 100644 --- a/drivers/extcon/extcon-rt8973a.c +++ b/drivers/extcon/extcon-rt8973a.c | |||
@@ -93,6 +93,7 @@ static struct reg_data rt8973a_reg_data[] = { | |||
93 | static const unsigned int rt8973a_extcon_cable[] = { | 93 | static const unsigned int rt8973a_extcon_cable[] = { |
94 | EXTCON_USB, | 94 | EXTCON_USB, |
95 | EXTCON_USB_HOST, | 95 | EXTCON_USB_HOST, |
96 | EXTCON_CHG_USB_SDP, | ||
96 | EXTCON_CHG_USB_DCP, | 97 | EXTCON_CHG_USB_DCP, |
97 | EXTCON_JIG, | 98 | EXTCON_JIG, |
98 | EXTCON_NONE, | 99 | EXTCON_NONE, |
@@ -398,6 +399,9 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info, | |||
398 | 399 | ||
399 | /* Change the state of external accessory */ | 400 | /* Change the state of external accessory */ |
400 | extcon_set_cable_state_(info->edev, id, attached); | 401 | extcon_set_cable_state_(info->edev, id, attached); |
402 | if (id == EXTCON_USB) | ||
403 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP, | ||
404 | attached); | ||
401 | 405 | ||
402 | return 0; | 406 | return 0; |
403 | } | 407 | } |
@@ -663,7 +667,7 @@ MODULE_DEVICE_TABLE(of, rt8973a_dt_match); | |||
663 | #ifdef CONFIG_PM_SLEEP | 667 | #ifdef CONFIG_PM_SLEEP |
664 | static int rt8973a_muic_suspend(struct device *dev) | 668 | static int rt8973a_muic_suspend(struct device *dev) |
665 | { | 669 | { |
666 | struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); | 670 | struct i2c_client *i2c = to_i2c_client(dev); |
667 | struct rt8973a_muic_info *info = i2c_get_clientdata(i2c); | 671 | struct rt8973a_muic_info *info = i2c_get_clientdata(i2c); |
668 | 672 | ||
669 | enable_irq_wake(info->irq); | 673 | enable_irq_wake(info->irq); |
@@ -673,7 +677,7 @@ static int rt8973a_muic_suspend(struct device *dev) | |||
673 | 677 | ||
674 | static int rt8973a_muic_resume(struct device *dev) | 678 | static int rt8973a_muic_resume(struct device *dev) |
675 | { | 679 | { |
676 | struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); | 680 | struct i2c_client *i2c = to_i2c_client(dev); |
677 | struct rt8973a_muic_info *info = i2c_get_clientdata(i2c); | 681 | struct rt8973a_muic_info *info = i2c_get_clientdata(i2c); |
678 | 682 | ||
679 | disable_irq_wake(info->irq); | 683 | disable_irq_wake(info->irq); |
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c index 7aac3cc7efd7..df769a17e736 100644 --- a/drivers/extcon/extcon-sm5502.c +++ b/drivers/extcon/extcon-sm5502.c | |||
@@ -95,6 +95,7 @@ static struct reg_data sm5502_reg_data[] = { | |||
95 | static const unsigned int sm5502_extcon_cable[] = { | 95 | static const unsigned int sm5502_extcon_cable[] = { |
96 | EXTCON_USB, | 96 | EXTCON_USB, |
97 | EXTCON_USB_HOST, | 97 | EXTCON_USB_HOST, |
98 | EXTCON_CHG_USB_SDP, | ||
98 | EXTCON_CHG_USB_DCP, | 99 | EXTCON_CHG_USB_DCP, |
99 | EXTCON_NONE, | 100 | EXTCON_NONE, |
100 | }; | 101 | }; |
@@ -411,6 +412,9 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info, | |||
411 | 412 | ||
412 | /* Change the state of external accessory */ | 413 | /* Change the state of external accessory */ |
413 | extcon_set_cable_state_(info->edev, id, attached); | 414 | extcon_set_cable_state_(info->edev, id, attached); |
415 | if (id == EXTCON_USB) | ||
416 | extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP, | ||
417 | attached); | ||
414 | 418 | ||
415 | return 0; | 419 | return 0; |
416 | } | 420 | } |
@@ -655,7 +659,7 @@ MODULE_DEVICE_TABLE(of, sm5502_dt_match); | |||
655 | #ifdef CONFIG_PM_SLEEP | 659 | #ifdef CONFIG_PM_SLEEP |
656 | static int sm5502_muic_suspend(struct device *dev) | 660 | static int sm5502_muic_suspend(struct device *dev) |
657 | { | 661 | { |
658 | struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); | 662 | struct i2c_client *i2c = to_i2c_client(dev); |
659 | struct sm5502_muic_info *info = i2c_get_clientdata(i2c); | 663 | struct sm5502_muic_info *info = i2c_get_clientdata(i2c); |
660 | 664 | ||
661 | enable_irq_wake(info->irq); | 665 | enable_irq_wake(info->irq); |
@@ -665,7 +669,7 @@ static int sm5502_muic_suspend(struct device *dev) | |||
665 | 669 | ||
666 | static int sm5502_muic_resume(struct device *dev) | 670 | static int sm5502_muic_resume(struct device *dev) |
667 | { | 671 | { |
668 | struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); | 672 | struct i2c_client *i2c = to_i2c_client(dev); |
669 | struct sm5502_muic_info *info = i2c_get_clientdata(i2c); | 673 | struct sm5502_muic_info *info = i2c_get_clientdata(i2c); |
670 | 674 | ||
671 | disable_irq_wake(info->irq); | 675 | disable_irq_wake(info->irq); |
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 1161d68a1863..56dd261f7142 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
@@ -219,6 +219,21 @@ error0: | |||
219 | } | 219 | } |
220 | EXPORT_SYMBOL_GPL(vmbus_open); | 220 | EXPORT_SYMBOL_GPL(vmbus_open); |
221 | 221 | ||
222 | /* Used for Hyper-V Socket: a guest client's connect() to the host */ | ||
223 | int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, | ||
224 | const uuid_le *shv_host_servie_id) | ||
225 | { | ||
226 | struct vmbus_channel_tl_connect_request conn_msg; | ||
227 | |||
228 | memset(&conn_msg, 0, sizeof(conn_msg)); | ||
229 | conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST; | ||
230 | conn_msg.guest_endpoint_id = *shv_guest_servie_id; | ||
231 | conn_msg.host_service_id = *shv_host_servie_id; | ||
232 | |||
233 | return vmbus_post_msg(&conn_msg, sizeof(conn_msg)); | ||
234 | } | ||
235 | EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request); | ||
236 | |||
222 | /* | 237 | /* |
223 | * create_gpadl_header - Creates a gpadl for the specified buffer | 238 | * create_gpadl_header - Creates a gpadl for the specified buffer |
224 | */ | 239 | */ |
@@ -624,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, | |||
624 | u64 aligned_data = 0; | 639 | u64 aligned_data = 0; |
625 | int ret; | 640 | int ret; |
626 | bool signal = false; | 641 | bool signal = false; |
642 | bool lock = channel->acquire_ring_lock; | ||
627 | int num_vecs = ((bufferlen != 0) ? 3 : 1); | 643 | int num_vecs = ((bufferlen != 0) ? 3 : 1); |
628 | 644 | ||
629 | 645 | ||
@@ -643,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, | |||
643 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); | 659 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); |
644 | 660 | ||
645 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs, | 661 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs, |
646 | &signal); | 662 | &signal, lock); |
647 | 663 | ||
648 | /* | 664 | /* |
649 | * Signalling the host is conditional on many factors: | 665 | * Signalling the host is conditional on many factors: |
@@ -659,6 +675,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, | |||
659 | * If we cannot write to the ring-buffer; signal the host | 675 | * If we cannot write to the ring-buffer; signal the host |
660 | * even if we may not have written anything. This is a rare | 676 | * even if we may not have written anything. This is a rare |
661 | * enough condition that it should not matter. | 677 | * enough condition that it should not matter. |
678 | * NOTE: in this case, the hvsock channel is an exception, because | ||
679 | * it looks like the host side's hvsock implementation has a | ||
680 | * throttling mechanism which can hurt performance otherwise. | ||
662 | */ | 681 | */ |
663 | 682 | ||
664 | if (channel->signal_policy) | 683 | if (channel->signal_policy) |
@@ -666,7 +685,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, | |||
666 | else | 685 | else |
667 | kick_q = true; | 686 | kick_q = true; |
668 | 687 | ||
669 | if (((ret == 0) && kick_q && signal) || (ret)) | 688 | if (((ret == 0) && kick_q && signal) || |
689 | (ret && !is_hvsock_channel(channel))) | ||
670 | vmbus_setevent(channel); | 690 | vmbus_setevent(channel); |
671 | 691 | ||
672 | return ret; | 692 | return ret; |
@@ -719,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, | |||
719 | struct kvec bufferlist[3]; | 739 | struct kvec bufferlist[3]; |
720 | u64 aligned_data = 0; | 740 | u64 aligned_data = 0; |
721 | bool signal = false; | 741 | bool signal = false; |
742 | bool lock = channel->acquire_ring_lock; | ||
722 | 743 | ||
723 | if (pagecount > MAX_PAGE_BUFFER_COUNT) | 744 | if (pagecount > MAX_PAGE_BUFFER_COUNT) |
724 | return -EINVAL; | 745 | return -EINVAL; |
@@ -755,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, | |||
755 | bufferlist[2].iov_base = &aligned_data; | 776 | bufferlist[2].iov_base = &aligned_data; |
756 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); | 777 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); |
757 | 778 | ||
758 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); | 779 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, |
780 | &signal, lock); | ||
759 | 781 | ||
760 | /* | 782 | /* |
761 | * Signalling the host is conditional on many factors: | 783 | * Signalling the host is conditional on many factors: |
@@ -818,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, | |||
818 | struct kvec bufferlist[3]; | 840 | struct kvec bufferlist[3]; |
819 | u64 aligned_data = 0; | 841 | u64 aligned_data = 0; |
820 | bool signal = false; | 842 | bool signal = false; |
843 | bool lock = channel->acquire_ring_lock; | ||
821 | 844 | ||
822 | packetlen = desc_size + bufferlen; | 845 | packetlen = desc_size + bufferlen; |
823 | packetlen_aligned = ALIGN(packetlen, sizeof(u64)); | 846 | packetlen_aligned = ALIGN(packetlen, sizeof(u64)); |
@@ -837,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, | |||
837 | bufferlist[2].iov_base = &aligned_data; | 860 | bufferlist[2].iov_base = &aligned_data; |
838 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); | 861 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); |
839 | 862 | ||
840 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); | 863 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, |
864 | &signal, lock); | ||
841 | 865 | ||
842 | if (ret == 0 && signal) | 866 | if (ret == 0 && signal) |
843 | vmbus_setevent(channel); | 867 | vmbus_setevent(channel); |
@@ -862,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, | |||
862 | struct kvec bufferlist[3]; | 886 | struct kvec bufferlist[3]; |
863 | u64 aligned_data = 0; | 887 | u64 aligned_data = 0; |
864 | bool signal = false; | 888 | bool signal = false; |
889 | bool lock = channel->acquire_ring_lock; | ||
865 | u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, | 890 | u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, |
866 | multi_pagebuffer->len); | 891 | multi_pagebuffer->len); |
867 | 892 | ||
@@ -900,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, | |||
900 | bufferlist[2].iov_base = &aligned_data; | 925 | bufferlist[2].iov_base = &aligned_data; |
901 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); | 926 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); |
902 | 927 | ||
903 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); | 928 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, |
929 | &signal, lock); | ||
904 | 930 | ||
905 | if (ret == 0 && signal) | 931 | if (ret == 0 && signal) |
906 | vmbus_setevent(channel); | 932 | vmbus_setevent(channel); |
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 1c1ad47042c5..38b682bab85a 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
@@ -28,12 +28,127 @@ | |||
28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/completion.h> | 30 | #include <linux/completion.h> |
31 | #include <linux/delay.h> | ||
31 | #include <linux/hyperv.h> | 32 | #include <linux/hyperv.h> |
32 | 33 | ||
33 | #include "hyperv_vmbus.h" | 34 | #include "hyperv_vmbus.h" |
34 | 35 | ||
35 | static void init_vp_index(struct vmbus_channel *channel, | 36 | static void init_vp_index(struct vmbus_channel *channel, u16 dev_type); |
36 | const uuid_le *type_guid); | 37 | |
38 | static const struct vmbus_device vmbus_devs[] = { | ||
39 | /* IDE */ | ||
40 | { .dev_type = HV_IDE, | ||
41 | HV_IDE_GUID, | ||
42 | .perf_device = true, | ||
43 | }, | ||
44 | |||
45 | /* SCSI */ | ||
46 | { .dev_type = HV_SCSI, | ||
47 | HV_SCSI_GUID, | ||
48 | .perf_device = true, | ||
49 | }, | ||
50 | |||
51 | /* Fibre Channel */ | ||
52 | { .dev_type = HV_FC, | ||
53 | HV_SYNTHFC_GUID, | ||
54 | .perf_device = true, | ||
55 | }, | ||
56 | |||
57 | /* Synthetic NIC */ | ||
58 | { .dev_type = HV_NIC, | ||
59 | HV_NIC_GUID, | ||
60 | .perf_device = true, | ||
61 | }, | ||
62 | |||
63 | /* Network Direct */ | ||
64 | { .dev_type = HV_ND, | ||
65 | HV_ND_GUID, | ||
66 | .perf_device = true, | ||
67 | }, | ||
68 | |||
69 | /* PCIE */ | ||
70 | { .dev_type = HV_PCIE, | ||
71 | HV_PCIE_GUID, | ||
72 | .perf_device = true, | ||
73 | }, | ||
74 | |||
75 | /* Synthetic Frame Buffer */ | ||
76 | { .dev_type = HV_FB, | ||
77 | HV_SYNTHVID_GUID, | ||
78 | .perf_device = false, | ||
79 | }, | ||
80 | |||
81 | /* Synthetic Keyboard */ | ||
82 | { .dev_type = HV_KBD, | ||
83 | HV_KBD_GUID, | ||
84 | .perf_device = false, | ||
85 | }, | ||
86 | |||
87 | /* Synthetic MOUSE */ | ||
88 | { .dev_type = HV_MOUSE, | ||
89 | HV_MOUSE_GUID, | ||
90 | .perf_device = false, | ||
91 | }, | ||
92 | |||
93 | /* KVP */ | ||
94 | { .dev_type = HV_KVP, | ||
95 | HV_KVP_GUID, | ||
96 | .perf_device = false, | ||
97 | }, | ||
98 | |||
99 | /* Time Synch */ | ||
100 | { .dev_type = HV_TS, | ||
101 | HV_TS_GUID, | ||
102 | .perf_device = false, | ||
103 | }, | ||
104 | |||
105 | /* Heartbeat */ | ||
106 | { .dev_type = HV_HB, | ||
107 | HV_HEART_BEAT_GUID, | ||
108 | .perf_device = false, | ||
109 | }, | ||
110 | |||
111 | /* Shutdown */ | ||
112 | { .dev_type = HV_SHUTDOWN, | ||
113 | HV_SHUTDOWN_GUID, | ||
114 | .perf_device = false, | ||
115 | }, | ||
116 | |||
117 | /* File copy */ | ||
118 | { .dev_type = HV_FCOPY, | ||
119 | HV_FCOPY_GUID, | ||
120 | .perf_device = false, | ||
121 | }, | ||
122 | |||
123 | /* Backup */ | ||
124 | { .dev_type = HV_BACKUP, | ||
125 | HV_VSS_GUID, | ||
126 | .perf_device = false, | ||
127 | }, | ||
128 | |||
129 | /* Dynamic Memory */ | ||
130 | { .dev_type = HV_DM, | ||
131 | HV_DM_GUID, | ||
132 | .perf_device = false, | ||
133 | }, | ||
134 | |||
135 | /* Unknown GUID */ | ||
136 | { .dev_type = HV_UNKOWN, | ||
137 | .perf_device = false, | ||
138 | }, | ||
139 | }; | ||
140 | |||
141 | static u16 hv_get_dev_type(const uuid_le *guid) | ||
142 | { | ||
143 | u16 i; | ||
144 | |||
145 | for (i = HV_IDE; i < HV_UNKOWN; i++) { | ||
146 | if (!uuid_le_cmp(*guid, vmbus_devs[i].guid)) | ||
147 | return i; | ||
148 | } | ||
149 | pr_info("Unknown GUID: %pUl\n", guid); | ||
150 | return i; | ||
151 | } | ||
37 | 152 | ||
38 | /** | 153 | /** |
39 | * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message | 154 | * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message |
@@ -144,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void) | |||
144 | return NULL; | 259 | return NULL; |
145 | 260 | ||
146 | channel->id = atomic_inc_return(&chan_num); | 261 | channel->id = atomic_inc_return(&chan_num); |
262 | channel->acquire_ring_lock = true; | ||
147 | spin_lock_init(&channel->inbound_lock); | 263 | spin_lock_init(&channel->inbound_lock); |
148 | spin_lock_init(&channel->lock); | 264 | spin_lock_init(&channel->lock); |
149 | 265 | ||
@@ -195,6 +311,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | |||
195 | vmbus_release_relid(relid); | 311 | vmbus_release_relid(relid); |
196 | 312 | ||
197 | BUG_ON(!channel->rescind); | 313 | BUG_ON(!channel->rescind); |
314 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); | ||
198 | 315 | ||
199 | if (channel->target_cpu != get_cpu()) { | 316 | if (channel->target_cpu != get_cpu()) { |
200 | put_cpu(); | 317 | put_cpu(); |
@@ -206,9 +323,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | |||
206 | } | 323 | } |
207 | 324 | ||
208 | if (channel->primary_channel == NULL) { | 325 | if (channel->primary_channel == NULL) { |
209 | mutex_lock(&vmbus_connection.channel_mutex); | ||
210 | list_del(&channel->listentry); | 326 | list_del(&channel->listentry); |
211 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
212 | 327 | ||
213 | primary_channel = channel; | 328 | primary_channel = channel; |
214 | } else { | 329 | } else { |
@@ -251,6 +366,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
251 | struct vmbus_channel *channel; | 366 | struct vmbus_channel *channel; |
252 | bool fnew = true; | 367 | bool fnew = true; |
253 | unsigned long flags; | 368 | unsigned long flags; |
369 | u16 dev_type; | ||
370 | int ret; | ||
254 | 371 | ||
255 | /* Make sure this is a new offer */ | 372 | /* Make sure this is a new offer */ |
256 | mutex_lock(&vmbus_connection.channel_mutex); | 373 | mutex_lock(&vmbus_connection.channel_mutex); |
@@ -288,7 +405,9 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
288 | goto err_free_chan; | 405 | goto err_free_chan; |
289 | } | 406 | } |
290 | 407 | ||
291 | init_vp_index(newchannel, &newchannel->offermsg.offer.if_type); | 408 | dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type); |
409 | |||
410 | init_vp_index(newchannel, dev_type); | ||
292 | 411 | ||
293 | if (newchannel->target_cpu != get_cpu()) { | 412 | if (newchannel->target_cpu != get_cpu()) { |
294 | put_cpu(); | 413 | put_cpu(); |
@@ -325,12 +444,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
325 | if (!newchannel->device_obj) | 444 | if (!newchannel->device_obj) |
326 | goto err_deq_chan; | 445 | goto err_deq_chan; |
327 | 446 | ||
447 | newchannel->device_obj->device_id = dev_type; | ||
328 | /* | 448 | /* |
329 | * Add the new device to the bus. This will kick off device-driver | 449 | * Add the new device to the bus. This will kick off device-driver |
330 | * binding which eventually invokes the device driver's AddDevice() | 450 | * binding which eventually invokes the device driver's AddDevice() |
331 | * method. | 451 | * method. |
332 | */ | 452 | */ |
333 | if (vmbus_device_register(newchannel->device_obj) != 0) { | 453 | mutex_lock(&vmbus_connection.channel_mutex); |
454 | ret = vmbus_device_register(newchannel->device_obj); | ||
455 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
456 | |||
457 | if (ret != 0) { | ||
334 | pr_err("unable to add child device object (relid %d)\n", | 458 | pr_err("unable to add child device object (relid %d)\n", |
335 | newchannel->offermsg.child_relid); | 459 | newchannel->offermsg.child_relid); |
336 | kfree(newchannel->device_obj); | 460 | kfree(newchannel->device_obj); |
@@ -358,37 +482,6 @@ err_free_chan: | |||
358 | free_channel(newchannel); | 482 | free_channel(newchannel); |
359 | } | 483 | } |
360 | 484 | ||
361 | enum { | ||
362 | IDE = 0, | ||
363 | SCSI, | ||
364 | FC, | ||
365 | NIC, | ||
366 | ND_NIC, | ||
367 | PCIE, | ||
368 | MAX_PERF_CHN, | ||
369 | }; | ||
370 | |||
371 | /* | ||
372 | * This is an array of device_ids (device types) that are performance critical. | ||
373 | * We attempt to distribute the interrupt load for these devices across | ||
374 | * all available CPUs. | ||
375 | */ | ||
376 | static const struct hv_vmbus_device_id hp_devs[] = { | ||
377 | /* IDE */ | ||
378 | { HV_IDE_GUID, }, | ||
379 | /* Storage - SCSI */ | ||
380 | { HV_SCSI_GUID, }, | ||
381 | /* Storage - FC */ | ||
382 | { HV_SYNTHFC_GUID, }, | ||
383 | /* Network */ | ||
384 | { HV_NIC_GUID, }, | ||
385 | /* NetworkDirect Guest RDMA */ | ||
386 | { HV_ND_GUID, }, | ||
387 | /* PCI Express Pass Through */ | ||
388 | { HV_PCIE_GUID, }, | ||
389 | }; | ||
390 | |||
391 | |||
392 | /* | 485 | /* |
393 | * We use this state to statically distribute the channel interrupt load. | 486 | * We use this state to statically distribute the channel interrupt load. |
394 | */ | 487 | */ |
@@ -405,22 +498,15 @@ static int next_numa_node_id; | |||
405 | * For pre-win8 hosts or non-performance critical channels we assign the | 498 | * For pre-win8 hosts or non-performance critical channels we assign the |
406 | * first CPU in the first NUMA node. | 499 | * first CPU in the first NUMA node. |
407 | */ | 500 | */ |
408 | static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid) | 501 | static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) |
409 | { | 502 | { |
410 | u32 cur_cpu; | 503 | u32 cur_cpu; |
411 | int i; | 504 | bool perf_chn = vmbus_devs[dev_type].perf_device; |
412 | bool perf_chn = false; | ||
413 | struct vmbus_channel *primary = channel->primary_channel; | 505 | struct vmbus_channel *primary = channel->primary_channel; |
414 | int next_node; | 506 | int next_node; |
415 | struct cpumask available_mask; | 507 | struct cpumask available_mask; |
416 | struct cpumask *alloced_mask; | 508 | struct cpumask *alloced_mask; |
417 | 509 | ||
418 | for (i = IDE; i < MAX_PERF_CHN; i++) { | ||
419 | if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) { | ||
420 | perf_chn = true; | ||
421 | break; | ||
422 | } | ||
423 | } | ||
424 | if ((vmbus_proto_version == VERSION_WS2008) || | 510 | if ((vmbus_proto_version == VERSION_WS2008) || |
425 | (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) { | 511 | (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) { |
426 | /* | 512 | /* |
@@ -469,6 +555,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui | |||
469 | cpumask_of_node(primary->numa_node)); | 555 | cpumask_of_node(primary->numa_node)); |
470 | 556 | ||
471 | cur_cpu = -1; | 557 | cur_cpu = -1; |
558 | |||
559 | /* | ||
560 | * Normally Hyper-V host doesn't create more subchannels than there | ||
561 | * are VCPUs on the node but it is possible when not all present VCPUs | ||
562 | * on the node are initialized by the guest. Clear the | ||
563 | * alloced_cpus_in_node to start over. | ||
564 | */ | ||
565 | if (cpumask_equal(&primary->alloced_cpus_in_node, | ||
566 | cpumask_of_node(primary->numa_node))) | ||
567 | cpumask_clear(&primary->alloced_cpus_in_node); | ||
568 | |||
472 | while (true) { | 569 | while (true) { |
473 | cur_cpu = cpumask_next(cur_cpu, &available_mask); | 570 | cur_cpu = cpumask_next(cur_cpu, &available_mask); |
474 | if (cur_cpu >= nr_cpu_ids) { | 571 | if (cur_cpu >= nr_cpu_ids) { |
@@ -498,6 +595,32 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui | |||
498 | channel->target_vp = hv_context.vp_index[cur_cpu]; | 595 | channel->target_vp = hv_context.vp_index[cur_cpu]; |
499 | } | 596 | } |
500 | 597 | ||
598 | static void vmbus_wait_for_unload(void) | ||
599 | { | ||
600 | int cpu = smp_processor_id(); | ||
601 | void *page_addr = hv_context.synic_message_page[cpu]; | ||
602 | struct hv_message *msg = (struct hv_message *)page_addr + | ||
603 | VMBUS_MESSAGE_SINT; | ||
604 | struct vmbus_channel_message_header *hdr; | ||
605 | bool unloaded = false; | ||
606 | |||
607 | while (1) { | ||
608 | if (READ_ONCE(msg->header.message_type) == HVMSG_NONE) { | ||
609 | mdelay(10); | ||
610 | continue; | ||
611 | } | ||
612 | |||
613 | hdr = (struct vmbus_channel_message_header *)msg->u.payload; | ||
614 | if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE) | ||
615 | unloaded = true; | ||
616 | |||
617 | vmbus_signal_eom(msg); | ||
618 | |||
619 | if (unloaded) | ||
620 | break; | ||
621 | } | ||
622 | } | ||
623 | |||
501 | /* | 624 | /* |
502 | * vmbus_unload_response - Handler for the unload response. | 625 | * vmbus_unload_response - Handler for the unload response. |
503 | */ | 626 | */ |
@@ -510,7 +633,7 @@ static void vmbus_unload_response(struct vmbus_channel_message_header *hdr) | |||
510 | complete(&vmbus_connection.unload_event); | 633 | complete(&vmbus_connection.unload_event); |
511 | } | 634 | } |
512 | 635 | ||
513 | void vmbus_initiate_unload(void) | 636 | void vmbus_initiate_unload(bool crash) |
514 | { | 637 | { |
515 | struct vmbus_channel_message_header hdr; | 638 | struct vmbus_channel_message_header hdr; |
516 | 639 | ||
@@ -523,7 +646,14 @@ void vmbus_initiate_unload(void) | |||
523 | hdr.msgtype = CHANNELMSG_UNLOAD; | 646 | hdr.msgtype = CHANNELMSG_UNLOAD; |
524 | vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header)); | 647 | vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header)); |
525 | 648 | ||
526 | wait_for_completion(&vmbus_connection.unload_event); | 649 | /* |
650 | * vmbus_initiate_unload() is also called on crash and the crash can be | ||
651 | * happening in an interrupt context, where scheduling is impossible. | ||
652 | */ | ||
653 | if (!crash) | ||
654 | wait_for_completion(&vmbus_connection.unload_event); | ||
655 | else | ||
656 | vmbus_wait_for_unload(); | ||
527 | } | 657 | } |
528 | 658 | ||
529 | /* | 659 | /* |
@@ -592,6 +722,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
592 | struct device *dev; | 722 | struct device *dev; |
593 | 723 | ||
594 | rescind = (struct vmbus_channel_rescind_offer *)hdr; | 724 | rescind = (struct vmbus_channel_rescind_offer *)hdr; |
725 | |||
726 | mutex_lock(&vmbus_connection.channel_mutex); | ||
595 | channel = relid2channel(rescind->child_relid); | 727 | channel = relid2channel(rescind->child_relid); |
596 | 728 | ||
597 | if (channel == NULL) { | 729 | if (channel == NULL) { |
@@ -600,7 +732,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
600 | * vmbus_process_offer(), we have already invoked | 732 | * vmbus_process_offer(), we have already invoked |
601 | * vmbus_release_relid() on error. | 733 | * vmbus_release_relid() on error. |
602 | */ | 734 | */ |
603 | return; | 735 | goto out; |
604 | } | 736 | } |
605 | 737 | ||
606 | spin_lock_irqsave(&channel->lock, flags); | 738 | spin_lock_irqsave(&channel->lock, flags); |
@@ -608,6 +740,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
608 | spin_unlock_irqrestore(&channel->lock, flags); | 740 | spin_unlock_irqrestore(&channel->lock, flags); |
609 | 741 | ||
610 | if (channel->device_obj) { | 742 | if (channel->device_obj) { |
743 | if (channel->chn_rescind_callback) { | ||
744 | channel->chn_rescind_callback(channel); | ||
745 | goto out; | ||
746 | } | ||
611 | /* | 747 | /* |
612 | * We will have to unregister this device from the | 748 | * We will have to unregister this device from the |
613 | * driver core. | 749 | * driver core. |
@@ -621,7 +757,24 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
621 | hv_process_channel_removal(channel, | 757 | hv_process_channel_removal(channel, |
622 | channel->offermsg.child_relid); | 758 | channel->offermsg.child_relid); |
623 | } | 759 | } |
760 | |||
761 | out: | ||
762 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
763 | } | ||
764 | |||
765 | void vmbus_hvsock_device_unregister(struct vmbus_channel *channel) | ||
766 | { | ||
767 | mutex_lock(&vmbus_connection.channel_mutex); | ||
768 | |||
769 | BUG_ON(!is_hvsock_channel(channel)); | ||
770 | |||
771 | channel->rescind = true; | ||
772 | vmbus_device_unregister(channel->device_obj); | ||
773 | |||
774 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
624 | } | 775 | } |
776 | EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister); | ||
777 | |||
625 | 778 | ||
626 | /* | 779 | /* |
627 | * vmbus_onoffers_delivered - | 780 | * vmbus_onoffers_delivered - |
@@ -825,6 +978,10 @@ struct vmbus_channel_message_table_entry | |||
825 | {CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response}, | 978 | {CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response}, |
826 | {CHANNELMSG_UNLOAD, 0, NULL}, | 979 | {CHANNELMSG_UNLOAD, 0, NULL}, |
827 | {CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response}, | 980 | {CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response}, |
981 | {CHANNELMSG_18, 0, NULL}, | ||
982 | {CHANNELMSG_19, 0, NULL}, | ||
983 | {CHANNELMSG_20, 0, NULL}, | ||
984 | {CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL}, | ||
828 | }; | 985 | }; |
829 | 986 | ||
830 | /* | 987 | /* |
@@ -973,3 +1130,10 @@ bool vmbus_are_subchannels_present(struct vmbus_channel *primary) | |||
973 | return ret; | 1130 | return ret; |
974 | } | 1131 | } |
975 | EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present); | 1132 | EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present); |
1133 | |||
1134 | void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, | ||
1135 | void (*chn_rescind_cb)(struct vmbus_channel *)) | ||
1136 | { | ||
1137 | channel->chn_rescind_callback = chn_rescind_cb; | ||
1138 | } | ||
1139 | EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback); | ||
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 3dc5a9c7fad6..d02f1373dd98 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c | |||
@@ -88,8 +88,16 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, | |||
88 | * This has been the behavior pre-win8. This is not | 88 | * This has been the behavior pre-win8. This is not |
89 | * perf issue and having all channel messages delivered on CPU 0 | 89 | * perf issue and having all channel messages delivered on CPU 0 |
90 | * would be ok. | 90 | * would be ok. |
91 | * For post win8 hosts, we support receiving channel messages on | ||
92 | * all the CPUs. This is needed for kexec to work correctly, where | ||
93 | * the CPU attempting to connect may not be CPU 0. | ||
91 | */ | 94 | */ |
92 | msg->target_vcpu = 0; | 95 | if (version >= VERSION_WIN8_1) { |
96 | msg->target_vcpu = hv_context.vp_index[get_cpu()]; | ||
97 | put_cpu(); | ||
98 | } else { | ||
99 | msg->target_vcpu = 0; | ||
100 | } | ||
93 | 101 | ||
94 | /* | 102 | /* |
95 | * Add to list before we send the request since we may | 103 | * Add to list before we send the request since we may |
@@ -236,7 +244,7 @@ void vmbus_disconnect(void) | |||
236 | /* | 244 | /* |
237 | * First send the unload request to the host. | 245 | * First send the unload request to the host. |
238 | */ | 246 | */ |
239 | vmbus_initiate_unload(); | 247 | vmbus_initiate_unload(false); |
240 | 248 | ||
241 | if (vmbus_connection.work_queue) { | 249 | if (vmbus_connection.work_queue) { |
242 | drain_workqueue(vmbus_connection.work_queue); | 250 | drain_workqueue(vmbus_connection.work_queue); |
@@ -288,7 +296,8 @@ struct vmbus_channel *relid2channel(u32 relid) | |||
288 | struct list_head *cur, *tmp; | 296 | struct list_head *cur, *tmp; |
289 | struct vmbus_channel *cur_sc; | 297 | struct vmbus_channel *cur_sc; |
290 | 298 | ||
291 | mutex_lock(&vmbus_connection.channel_mutex); | 299 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); |
300 | |||
292 | list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { | 301 | list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { |
293 | if (channel->offermsg.child_relid == relid) { | 302 | if (channel->offermsg.child_relid == relid) { |
294 | found_channel = channel; | 303 | found_channel = channel; |
@@ -307,7 +316,6 @@ struct vmbus_channel *relid2channel(u32 relid) | |||
307 | } | 316 | } |
308 | } | 317 | } |
309 | } | 318 | } |
310 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
311 | 319 | ||
312 | return found_channel; | 320 | return found_channel; |
313 | } | 321 | } |
@@ -474,7 +482,7 @@ int vmbus_post_msg(void *buffer, size_t buflen) | |||
474 | /* | 482 | /* |
475 | * vmbus_set_event - Send an event notification to the parent | 483 | * vmbus_set_event - Send an event notification to the parent |
476 | */ | 484 | */ |
477 | int vmbus_set_event(struct vmbus_channel *channel) | 485 | void vmbus_set_event(struct vmbus_channel *channel) |
478 | { | 486 | { |
479 | u32 child_relid = channel->offermsg.child_relid; | 487 | u32 child_relid = channel->offermsg.child_relid; |
480 | 488 | ||
@@ -485,5 +493,5 @@ int vmbus_set_event(struct vmbus_channel *channel) | |||
485 | (child_relid >> 5)); | 493 | (child_relid >> 5)); |
486 | } | 494 | } |
487 | 495 | ||
488 | return hv_signal_event(channel->sig_event); | 496 | hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL); |
489 | } | 497 | } |
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 11bca51ef5ff..a1c086ba3b9a 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c | |||
@@ -204,6 +204,8 @@ int hv_init(void) | |||
204 | sizeof(int) * NR_CPUS); | 204 | sizeof(int) * NR_CPUS); |
205 | memset(hv_context.event_dpc, 0, | 205 | memset(hv_context.event_dpc, 0, |
206 | sizeof(void *) * NR_CPUS); | 206 | sizeof(void *) * NR_CPUS); |
207 | memset(hv_context.msg_dpc, 0, | ||
208 | sizeof(void *) * NR_CPUS); | ||
207 | memset(hv_context.clk_evt, 0, | 209 | memset(hv_context.clk_evt, 0, |
208 | sizeof(void *) * NR_CPUS); | 210 | sizeof(void *) * NR_CPUS); |
209 | 211 | ||
@@ -295,8 +297,14 @@ void hv_cleanup(void) | |||
295 | * Cleanup the TSC page based CS. | 297 | * Cleanup the TSC page based CS. |
296 | */ | 298 | */ |
297 | if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { | 299 | if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { |
298 | clocksource_change_rating(&hyperv_cs_tsc, 10); | 300 | /* |
299 | clocksource_unregister(&hyperv_cs_tsc); | 301 | * Crash can happen in an interrupt context and unregistering |
302 | * a clocksource is impossible and redundant in this case. | ||
303 | */ | ||
304 | if (!oops_in_progress) { | ||
305 | clocksource_change_rating(&hyperv_cs_tsc, 10); | ||
306 | clocksource_unregister(&hyperv_cs_tsc); | ||
307 | } | ||
300 | 308 | ||
301 | hypercall_msr.as_uint64 = 0; | 309 | hypercall_msr.as_uint64 = 0; |
302 | wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64); | 310 | wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64); |
@@ -337,22 +345,6 @@ int hv_post_message(union hv_connection_id connection_id, | |||
337 | return status & 0xFFFF; | 345 | return status & 0xFFFF; |
338 | } | 346 | } |
339 | 347 | ||
340 | |||
341 | /* | ||
342 | * hv_signal_event - | ||
343 | * Signal an event on the specified connection using the hypervisor event IPC. | ||
344 | * | ||
345 | * This involves a hypercall. | ||
346 | */ | ||
347 | int hv_signal_event(void *con_id) | ||
348 | { | ||
349 | u64 status; | ||
350 | |||
351 | status = hv_do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL); | ||
352 | |||
353 | return status & 0xFFFF; | ||
354 | } | ||
355 | |||
356 | static int hv_ce_set_next_event(unsigned long delta, | 348 | static int hv_ce_set_next_event(unsigned long delta, |
357 | struct clock_event_device *evt) | 349 | struct clock_event_device *evt) |
358 | { | 350 | { |
@@ -425,6 +417,13 @@ int hv_synic_alloc(void) | |||
425 | } | 417 | } |
426 | tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu); | 418 | tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu); |
427 | 419 | ||
420 | hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC); | ||
421 | if (hv_context.msg_dpc[cpu] == NULL) { | ||
422 | pr_err("Unable to allocate event dpc\n"); | ||
423 | goto err; | ||
424 | } | ||
425 | tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu); | ||
426 | |||
428 | hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC); | 427 | hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC); |
429 | if (hv_context.clk_evt[cpu] == NULL) { | 428 | if (hv_context.clk_evt[cpu] == NULL) { |
430 | pr_err("Unable to allocate clock event device\n"); | 429 | pr_err("Unable to allocate clock event device\n"); |
@@ -466,6 +465,7 @@ err: | |||
466 | static void hv_synic_free_cpu(int cpu) | 465 | static void hv_synic_free_cpu(int cpu) |
467 | { | 466 | { |
468 | kfree(hv_context.event_dpc[cpu]); | 467 | kfree(hv_context.event_dpc[cpu]); |
468 | kfree(hv_context.msg_dpc[cpu]); | ||
469 | kfree(hv_context.clk_evt[cpu]); | 469 | kfree(hv_context.clk_evt[cpu]); |
470 | if (hv_context.synic_event_page[cpu]) | 470 | if (hv_context.synic_event_page[cpu]) |
471 | free_page((unsigned long)hv_context.synic_event_page[cpu]); | 471 | free_page((unsigned long)hv_context.synic_event_page[cpu]); |
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index c37a71e13de0..23c70799ad8a 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c | |||
@@ -251,7 +251,6 @@ void hv_fcopy_onchannelcallback(void *context) | |||
251 | */ | 251 | */ |
252 | 252 | ||
253 | fcopy_transaction.recv_len = recvlen; | 253 | fcopy_transaction.recv_len = recvlen; |
254 | fcopy_transaction.recv_channel = channel; | ||
255 | fcopy_transaction.recv_req_id = requestid; | 254 | fcopy_transaction.recv_req_id = requestid; |
256 | fcopy_transaction.fcopy_msg = fcopy_msg; | 255 | fcopy_transaction.fcopy_msg = fcopy_msg; |
257 | 256 | ||
@@ -317,6 +316,7 @@ static void fcopy_on_reset(void) | |||
317 | int hv_fcopy_init(struct hv_util_service *srv) | 316 | int hv_fcopy_init(struct hv_util_service *srv) |
318 | { | 317 | { |
319 | recv_buffer = srv->recv_buffer; | 318 | recv_buffer = srv->recv_buffer; |
319 | fcopy_transaction.recv_channel = srv->channel; | ||
320 | 320 | ||
321 | /* | 321 | /* |
322 | * When this driver loads, the user level daemon that | 322 | * When this driver loads, the user level daemon that |
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index d4ab81bcd515..9b9b370fe22a 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c | |||
@@ -639,7 +639,6 @@ void hv_kvp_onchannelcallback(void *context) | |||
639 | */ | 639 | */ |
640 | 640 | ||
641 | kvp_transaction.recv_len = recvlen; | 641 | kvp_transaction.recv_len = recvlen; |
642 | kvp_transaction.recv_channel = channel; | ||
643 | kvp_transaction.recv_req_id = requestid; | 642 | kvp_transaction.recv_req_id = requestid; |
644 | kvp_transaction.kvp_msg = kvp_msg; | 643 | kvp_transaction.kvp_msg = kvp_msg; |
645 | 644 | ||
@@ -688,6 +687,7 @@ int | |||
688 | hv_kvp_init(struct hv_util_service *srv) | 687 | hv_kvp_init(struct hv_util_service *srv) |
689 | { | 688 | { |
690 | recv_buffer = srv->recv_buffer; | 689 | recv_buffer = srv->recv_buffer; |
690 | kvp_transaction.recv_channel = srv->channel; | ||
691 | 691 | ||
692 | /* | 692 | /* |
693 | * When this driver loads, the user level daemon that | 693 | * When this driver loads, the user level daemon that |
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index 67def4a831c8..3fba14e88f03 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c | |||
@@ -263,7 +263,6 @@ void hv_vss_onchannelcallback(void *context) | |||
263 | */ | 263 | */ |
264 | 264 | ||
265 | vss_transaction.recv_len = recvlen; | 265 | vss_transaction.recv_len = recvlen; |
266 | vss_transaction.recv_channel = channel; | ||
267 | vss_transaction.recv_req_id = requestid; | 266 | vss_transaction.recv_req_id = requestid; |
268 | vss_transaction.msg = (struct hv_vss_msg *)vss_msg; | 267 | vss_transaction.msg = (struct hv_vss_msg *)vss_msg; |
269 | 268 | ||
@@ -337,6 +336,7 @@ hv_vss_init(struct hv_util_service *srv) | |||
337 | return -ENOTSUPP; | 336 | return -ENOTSUPP; |
338 | } | 337 | } |
339 | recv_buffer = srv->recv_buffer; | 338 | recv_buffer = srv->recv_buffer; |
339 | vss_transaction.recv_channel = srv->channel; | ||
340 | 340 | ||
341 | /* | 341 | /* |
342 | * When this driver loads, the user level daemon that | 342 | * When this driver loads, the user level daemon that |
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 7994ec2e4151..d5acaa2d8e61 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c | |||
@@ -322,6 +322,7 @@ static int util_probe(struct hv_device *dev, | |||
322 | srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL); | 322 | srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL); |
323 | if (!srv->recv_buffer) | 323 | if (!srv->recv_buffer) |
324 | return -ENOMEM; | 324 | return -ENOMEM; |
325 | srv->channel = dev->channel; | ||
325 | if (srv->util_init) { | 326 | if (srv->util_init) { |
326 | ret = srv->util_init(srv); | 327 | ret = srv->util_init(srv); |
327 | if (ret) { | 328 | if (ret) { |
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index 4f42c0e20c20..9a9983fa4531 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c | |||
@@ -310,6 +310,9 @@ struct hvutil_transport *hvutil_transport_init(const char *name, | |||
310 | return hvt; | 310 | return hvt; |
311 | 311 | ||
312 | err_free_hvt: | 312 | err_free_hvt: |
313 | spin_lock(&hvt_list_lock); | ||
314 | list_del(&hvt->list); | ||
315 | spin_unlock(&hvt_list_lock); | ||
313 | kfree(hvt); | 316 | kfree(hvt); |
314 | return NULL; | 317 | return NULL; |
315 | } | 318 | } |
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 2f8c0f40930b..12321b93a756 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h | |||
@@ -443,10 +443,11 @@ struct hv_context { | |||
443 | u32 vp_index[NR_CPUS]; | 443 | u32 vp_index[NR_CPUS]; |
444 | /* | 444 | /* |
445 | * Starting with win8, we can take channel interrupts on any CPU; | 445 | * Starting with win8, we can take channel interrupts on any CPU; |
446 | * we will manage the tasklet that handles events on a per CPU | 446 | * we will manage the tasklet that handles events messages on a per CPU |
447 | * basis. | 447 | * basis. |
448 | */ | 448 | */ |
449 | struct tasklet_struct *event_dpc[NR_CPUS]; | 449 | struct tasklet_struct *event_dpc[NR_CPUS]; |
450 | struct tasklet_struct *msg_dpc[NR_CPUS]; | ||
450 | /* | 451 | /* |
451 | * To optimize the mapping of relid to channel, maintain | 452 | * To optimize the mapping of relid to channel, maintain |
452 | * per-cpu list of the channels based on their CPU affinity. | 453 | * per-cpu list of the channels based on their CPU affinity. |
@@ -495,8 +496,6 @@ extern int hv_post_message(union hv_connection_id connection_id, | |||
495 | enum hv_message_type message_type, | 496 | enum hv_message_type message_type, |
496 | void *payload, size_t payload_size); | 497 | void *payload, size_t payload_size); |
497 | 498 | ||
498 | extern int hv_signal_event(void *con_id); | ||
499 | |||
500 | extern int hv_synic_alloc(void); | 499 | extern int hv_synic_alloc(void); |
501 | 500 | ||
502 | extern void hv_synic_free(void); | 501 | extern void hv_synic_free(void); |
@@ -525,7 +524,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info); | |||
525 | 524 | ||
526 | int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, | 525 | int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, |
527 | struct kvec *kv_list, | 526 | struct kvec *kv_list, |
528 | u32 kv_count, bool *signal); | 527 | u32 kv_count, bool *signal, bool lock); |
529 | 528 | ||
530 | int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | 529 | int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, |
531 | void *buffer, u32 buflen, u32 *buffer_actual_len, | 530 | void *buffer, u32 buflen, u32 *buffer_actual_len, |
@@ -620,6 +619,30 @@ struct vmbus_channel_message_table_entry { | |||
620 | extern struct vmbus_channel_message_table_entry | 619 | extern struct vmbus_channel_message_table_entry |
621 | channel_message_table[CHANNELMSG_COUNT]; | 620 | channel_message_table[CHANNELMSG_COUNT]; |
622 | 621 | ||
622 | /* Free the message slot and signal end-of-message if required */ | ||
623 | static inline void vmbus_signal_eom(struct hv_message *msg) | ||
624 | { | ||
625 | msg->header.message_type = HVMSG_NONE; | ||
626 | |||
627 | /* | ||
628 | * Make sure the write to MessageType (ie set to | ||
629 | * HVMSG_NONE) happens before we read the | ||
630 | * MessagePending and EOMing. Otherwise, the EOMing | ||
631 | * will not deliver any more messages since there is | ||
632 | * no empty slot | ||
633 | */ | ||
634 | mb(); | ||
635 | |||
636 | if (msg->header.message_flags.msg_pending) { | ||
637 | /* | ||
638 | * This will cause message queue rescan to | ||
639 | * possibly deliver another msg from the | ||
640 | * hypervisor | ||
641 | */ | ||
642 | wrmsrl(HV_X64_MSR_EOM, 0); | ||
643 | } | ||
644 | } | ||
645 | |||
623 | /* General vmbus interface */ | 646 | /* General vmbus interface */ |
624 | 647 | ||
625 | struct hv_device *vmbus_device_create(const uuid_le *type, | 648 | struct hv_device *vmbus_device_create(const uuid_le *type, |
@@ -644,9 +667,10 @@ void vmbus_disconnect(void); | |||
644 | 667 | ||
645 | int vmbus_post_msg(void *buffer, size_t buflen); | 668 | int vmbus_post_msg(void *buffer, size_t buflen); |
646 | 669 | ||
647 | int vmbus_set_event(struct vmbus_channel *channel); | 670 | void vmbus_set_event(struct vmbus_channel *channel); |
648 | 671 | ||
649 | void vmbus_on_event(unsigned long data); | 672 | void vmbus_on_event(unsigned long data); |
673 | void vmbus_on_msg_dpc(unsigned long data); | ||
650 | 674 | ||
651 | int hv_kvp_init(struct hv_util_service *); | 675 | int hv_kvp_init(struct hv_util_service *); |
652 | void hv_kvp_deinit(void); | 676 | void hv_kvp_deinit(void); |
@@ -659,7 +683,7 @@ void hv_vss_onchannelcallback(void *); | |||
659 | int hv_fcopy_init(struct hv_util_service *); | 683 | int hv_fcopy_init(struct hv_util_service *); |
660 | void hv_fcopy_deinit(void); | 684 | void hv_fcopy_deinit(void); |
661 | void hv_fcopy_onchannelcallback(void *); | 685 | void hv_fcopy_onchannelcallback(void *); |
662 | void vmbus_initiate_unload(void); | 686 | void vmbus_initiate_unload(bool crash); |
663 | 687 | ||
664 | static inline void hv_poll_channel(struct vmbus_channel *channel, | 688 | static inline void hv_poll_channel(struct vmbus_channel *channel, |
665 | void (*cb)(void *)) | 689 | void (*cb)(void *)) |
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index b53702ce692f..5613e2b5cff7 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c | |||
@@ -314,7 +314,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) | |||
314 | 314 | ||
315 | /* Write to the ring buffer. */ | 315 | /* Write to the ring buffer. */ |
316 | int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, | 316 | int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, |
317 | struct kvec *kv_list, u32 kv_count, bool *signal) | 317 | struct kvec *kv_list, u32 kv_count, bool *signal, bool lock) |
318 | { | 318 | { |
319 | int i = 0; | 319 | int i = 0; |
320 | u32 bytes_avail_towrite; | 320 | u32 bytes_avail_towrite; |
@@ -324,14 +324,15 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, | |||
324 | u32 next_write_location; | 324 | u32 next_write_location; |
325 | u32 old_write; | 325 | u32 old_write; |
326 | u64 prev_indices = 0; | 326 | u64 prev_indices = 0; |
327 | unsigned long flags; | 327 | unsigned long flags = 0; |
328 | 328 | ||
329 | for (i = 0; i < kv_count; i++) | 329 | for (i = 0; i < kv_count; i++) |
330 | totalbytes_towrite += kv_list[i].iov_len; | 330 | totalbytes_towrite += kv_list[i].iov_len; |
331 | 331 | ||
332 | totalbytes_towrite += sizeof(u64); | 332 | totalbytes_towrite += sizeof(u64); |
333 | 333 | ||
334 | spin_lock_irqsave(&outring_info->ring_lock, flags); | 334 | if (lock) |
335 | spin_lock_irqsave(&outring_info->ring_lock, flags); | ||
335 | 336 | ||
336 | hv_get_ringbuffer_availbytes(outring_info, | 337 | hv_get_ringbuffer_availbytes(outring_info, |
337 | &bytes_avail_toread, | 338 | &bytes_avail_toread, |
@@ -343,7 +344,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, | |||
343 | * is empty since the read index == write index. | 344 | * is empty since the read index == write index. |
344 | */ | 345 | */ |
345 | if (bytes_avail_towrite <= totalbytes_towrite) { | 346 | if (bytes_avail_towrite <= totalbytes_towrite) { |
346 | spin_unlock_irqrestore(&outring_info->ring_lock, flags); | 347 | if (lock) |
348 | spin_unlock_irqrestore(&outring_info->ring_lock, flags); | ||
347 | return -EAGAIN; | 349 | return -EAGAIN; |
348 | } | 350 | } |
349 | 351 | ||
@@ -374,7 +376,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, | |||
374 | hv_set_next_write_location(outring_info, next_write_location); | 376 | hv_set_next_write_location(outring_info, next_write_location); |
375 | 377 | ||
376 | 378 | ||
377 | spin_unlock_irqrestore(&outring_info->ring_lock, flags); | 379 | if (lock) |
380 | spin_unlock_irqrestore(&outring_info->ring_lock, flags); | ||
378 | 381 | ||
379 | *signal = hv_need_to_signal(old_write, outring_info); | 382 | *signal = hv_need_to_signal(old_write, outring_info); |
380 | return 0; | 383 | return 0; |
@@ -388,7 +391,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
388 | u32 bytes_avail_toread; | 391 | u32 bytes_avail_toread; |
389 | u32 next_read_location = 0; | 392 | u32 next_read_location = 0; |
390 | u64 prev_indices = 0; | 393 | u64 prev_indices = 0; |
391 | unsigned long flags; | ||
392 | struct vmpacket_descriptor desc; | 394 | struct vmpacket_descriptor desc; |
393 | u32 offset; | 395 | u32 offset; |
394 | u32 packetlen; | 396 | u32 packetlen; |
@@ -397,7 +399,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
397 | if (buflen <= 0) | 399 | if (buflen <= 0) |
398 | return -EINVAL; | 400 | return -EINVAL; |
399 | 401 | ||
400 | spin_lock_irqsave(&inring_info->ring_lock, flags); | ||
401 | 402 | ||
402 | *buffer_actual_len = 0; | 403 | *buffer_actual_len = 0; |
403 | *requestid = 0; | 404 | *requestid = 0; |
@@ -412,7 +413,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
412 | * No error is set when there is even no header, drivers are | 413 | * No error is set when there is even no header, drivers are |
413 | * supposed to analyze buffer_actual_len. | 414 | * supposed to analyze buffer_actual_len. |
414 | */ | 415 | */ |
415 | goto out_unlock; | 416 | return ret; |
416 | } | 417 | } |
417 | 418 | ||
418 | next_read_location = hv_get_next_read_location(inring_info); | 419 | next_read_location = hv_get_next_read_location(inring_info); |
@@ -425,15 +426,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
425 | *buffer_actual_len = packetlen; | 426 | *buffer_actual_len = packetlen; |
426 | *requestid = desc.trans_id; | 427 | *requestid = desc.trans_id; |
427 | 428 | ||
428 | if (bytes_avail_toread < packetlen + offset) { | 429 | if (bytes_avail_toread < packetlen + offset) |
429 | ret = -EAGAIN; | 430 | return -EAGAIN; |
430 | goto out_unlock; | ||
431 | } | ||
432 | 431 | ||
433 | if (packetlen > buflen) { | 432 | if (packetlen > buflen) |
434 | ret = -ENOBUFS; | 433 | return -ENOBUFS; |
435 | goto out_unlock; | ||
436 | } | ||
437 | 434 | ||
438 | next_read_location = | 435 | next_read_location = |
439 | hv_get_next_readlocation_withoffset(inring_info, offset); | 436 | hv_get_next_readlocation_withoffset(inring_info, offset); |
@@ -460,7 +457,5 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
460 | 457 | ||
461 | *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); | 458 | *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); |
462 | 459 | ||
463 | out_unlock: | ||
464 | spin_unlock_irqrestore(&inring_info->ring_lock, flags); | ||
465 | return ret; | 460 | return ret; |
466 | } | 461 | } |
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 328e4c3808e0..64713ff47e36 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
@@ -45,7 +45,6 @@ | |||
45 | 45 | ||
46 | static struct acpi_device *hv_acpi_dev; | 46 | static struct acpi_device *hv_acpi_dev; |
47 | 47 | ||
48 | static struct tasklet_struct msg_dpc; | ||
49 | static struct completion probe_event; | 48 | static struct completion probe_event; |
50 | 49 | ||
51 | 50 | ||
@@ -477,6 +476,24 @@ static ssize_t channel_vp_mapping_show(struct device *dev, | |||
477 | } | 476 | } |
478 | static DEVICE_ATTR_RO(channel_vp_mapping); | 477 | static DEVICE_ATTR_RO(channel_vp_mapping); |
479 | 478 | ||
479 | static ssize_t vendor_show(struct device *dev, | ||
480 | struct device_attribute *dev_attr, | ||
481 | char *buf) | ||
482 | { | ||
483 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
484 | return sprintf(buf, "0x%x\n", hv_dev->vendor_id); | ||
485 | } | ||
486 | static DEVICE_ATTR_RO(vendor); | ||
487 | |||
488 | static ssize_t device_show(struct device *dev, | ||
489 | struct device_attribute *dev_attr, | ||
490 | char *buf) | ||
491 | { | ||
492 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
493 | return sprintf(buf, "0x%x\n", hv_dev->device_id); | ||
494 | } | ||
495 | static DEVICE_ATTR_RO(device); | ||
496 | |||
480 | /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */ | 497 | /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */ |
481 | static struct attribute *vmbus_attrs[] = { | 498 | static struct attribute *vmbus_attrs[] = { |
482 | &dev_attr_id.attr, | 499 | &dev_attr_id.attr, |
@@ -502,6 +519,8 @@ static struct attribute *vmbus_attrs[] = { | |||
502 | &dev_attr_in_read_bytes_avail.attr, | 519 | &dev_attr_in_read_bytes_avail.attr, |
503 | &dev_attr_in_write_bytes_avail.attr, | 520 | &dev_attr_in_write_bytes_avail.attr, |
504 | &dev_attr_channel_vp_mapping.attr, | 521 | &dev_attr_channel_vp_mapping.attr, |
522 | &dev_attr_vendor.attr, | ||
523 | &dev_attr_device.attr, | ||
505 | NULL, | 524 | NULL, |
506 | }; | 525 | }; |
507 | ATTRIBUTE_GROUPS(vmbus); | 526 | ATTRIBUTE_GROUPS(vmbus); |
@@ -562,6 +581,10 @@ static int vmbus_match(struct device *device, struct device_driver *driver) | |||
562 | struct hv_driver *drv = drv_to_hv_drv(driver); | 581 | struct hv_driver *drv = drv_to_hv_drv(driver); |
563 | struct hv_device *hv_dev = device_to_hv_device(device); | 582 | struct hv_device *hv_dev = device_to_hv_device(device); |
564 | 583 | ||
584 | /* The hv_sock driver handles all hv_sock offers. */ | ||
585 | if (is_hvsock_channel(hv_dev->channel)) | ||
586 | return drv->hvsock; | ||
587 | |||
565 | if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type)) | 588 | if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type)) |
566 | return 1; | 589 | return 1; |
567 | 590 | ||
@@ -685,28 +708,10 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu) | |||
685 | if (dev->event_handler) | 708 | if (dev->event_handler) |
686 | dev->event_handler(dev); | 709 | dev->event_handler(dev); |
687 | 710 | ||
688 | msg->header.message_type = HVMSG_NONE; | 711 | vmbus_signal_eom(msg); |
689 | |||
690 | /* | ||
691 | * Make sure the write to MessageType (ie set to | ||
692 | * HVMSG_NONE) happens before we read the | ||
693 | * MessagePending and EOMing. Otherwise, the EOMing | ||
694 | * will not deliver any more messages since there is | ||
695 | * no empty slot | ||
696 | */ | ||
697 | mb(); | ||
698 | |||
699 | if (msg->header.message_flags.msg_pending) { | ||
700 | /* | ||
701 | * This will cause message queue rescan to | ||
702 | * possibly deliver another msg from the | ||
703 | * hypervisor | ||
704 | */ | ||
705 | wrmsrl(HV_X64_MSR_EOM, 0); | ||
706 | } | ||
707 | } | 712 | } |
708 | 713 | ||
709 | static void vmbus_on_msg_dpc(unsigned long data) | 714 | void vmbus_on_msg_dpc(unsigned long data) |
710 | { | 715 | { |
711 | int cpu = smp_processor_id(); | 716 | int cpu = smp_processor_id(); |
712 | void *page_addr = hv_context.synic_message_page[cpu]; | 717 | void *page_addr = hv_context.synic_message_page[cpu]; |
@@ -716,52 +721,32 @@ static void vmbus_on_msg_dpc(unsigned long data) | |||
716 | struct vmbus_channel_message_table_entry *entry; | 721 | struct vmbus_channel_message_table_entry *entry; |
717 | struct onmessage_work_context *ctx; | 722 | struct onmessage_work_context *ctx; |
718 | 723 | ||
719 | while (1) { | 724 | if (msg->header.message_type == HVMSG_NONE) |
720 | if (msg->header.message_type == HVMSG_NONE) | 725 | /* no msg */ |
721 | /* no msg */ | 726 | return; |
722 | break; | ||
723 | 727 | ||
724 | hdr = (struct vmbus_channel_message_header *)msg->u.payload; | 728 | hdr = (struct vmbus_channel_message_header *)msg->u.payload; |
725 | 729 | ||
726 | if (hdr->msgtype >= CHANNELMSG_COUNT) { | 730 | if (hdr->msgtype >= CHANNELMSG_COUNT) { |
727 | WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype); | 731 | WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype); |
728 | goto msg_handled; | 732 | goto msg_handled; |
729 | } | 733 | } |
730 | 734 | ||
731 | entry = &channel_message_table[hdr->msgtype]; | 735 | entry = &channel_message_table[hdr->msgtype]; |
732 | if (entry->handler_type == VMHT_BLOCKING) { | 736 | if (entry->handler_type == VMHT_BLOCKING) { |
733 | ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); | 737 | ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); |
734 | if (ctx == NULL) | 738 | if (ctx == NULL) |
735 | continue; | 739 | return; |
736 | 740 | ||
737 | INIT_WORK(&ctx->work, vmbus_onmessage_work); | 741 | INIT_WORK(&ctx->work, vmbus_onmessage_work); |
738 | memcpy(&ctx->msg, msg, sizeof(*msg)); | 742 | memcpy(&ctx->msg, msg, sizeof(*msg)); |
739 | 743 | ||
740 | queue_work(vmbus_connection.work_queue, &ctx->work); | 744 | queue_work(vmbus_connection.work_queue, &ctx->work); |
741 | } else | 745 | } else |
742 | entry->message_handler(hdr); | 746 | entry->message_handler(hdr); |
743 | 747 | ||
744 | msg_handled: | 748 | msg_handled: |
745 | msg->header.message_type = HVMSG_NONE; | 749 | vmbus_signal_eom(msg); |
746 | |||
747 | /* | ||
748 | * Make sure the write to MessageType (ie set to | ||
749 | * HVMSG_NONE) happens before we read the | ||
750 | * MessagePending and EOMing. Otherwise, the EOMing | ||
751 | * will not deliver any more messages since there is | ||
752 | * no empty slot | ||
753 | */ | ||
754 | mb(); | ||
755 | |||
756 | if (msg->header.message_flags.msg_pending) { | ||
757 | /* | ||
758 | * This will cause message queue rescan to | ||
759 | * possibly deliver another msg from the | ||
760 | * hypervisor | ||
761 | */ | ||
762 | wrmsrl(HV_X64_MSR_EOM, 0); | ||
763 | } | ||
764 | } | ||
765 | } | 750 | } |
766 | 751 | ||
767 | static void vmbus_isr(void) | 752 | static void vmbus_isr(void) |
@@ -814,7 +799,7 @@ static void vmbus_isr(void) | |||
814 | if (msg->header.message_type == HVMSG_TIMER_EXPIRED) | 799 | if (msg->header.message_type == HVMSG_TIMER_EXPIRED) |
815 | hv_process_timer_expiration(msg, cpu); | 800 | hv_process_timer_expiration(msg, cpu); |
816 | else | 801 | else |
817 | tasklet_schedule(&msg_dpc); | 802 | tasklet_schedule(hv_context.msg_dpc[cpu]); |
818 | } | 803 | } |
819 | } | 804 | } |
820 | 805 | ||
@@ -838,8 +823,6 @@ static int vmbus_bus_init(void) | |||
838 | return ret; | 823 | return ret; |
839 | } | 824 | } |
840 | 825 | ||
841 | tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0); | ||
842 | |||
843 | ret = bus_register(&hv_bus); | 826 | ret = bus_register(&hv_bus); |
844 | if (ret) | 827 | if (ret) |
845 | goto err_cleanup; | 828 | goto err_cleanup; |
@@ -957,6 +940,7 @@ struct hv_device *vmbus_device_create(const uuid_le *type, | |||
957 | memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le)); | 940 | memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le)); |
958 | memcpy(&child_device_obj->dev_instance, instance, | 941 | memcpy(&child_device_obj->dev_instance, instance, |
959 | sizeof(uuid_le)); | 942 | sizeof(uuid_le)); |
943 | child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */ | ||
960 | 944 | ||
961 | 945 | ||
962 | return child_device_obj; | 946 | return child_device_obj; |
@@ -1268,7 +1252,7 @@ static void hv_kexec_handler(void) | |||
1268 | int cpu; | 1252 | int cpu; |
1269 | 1253 | ||
1270 | hv_synic_clockevents_cleanup(); | 1254 | hv_synic_clockevents_cleanup(); |
1271 | vmbus_initiate_unload(); | 1255 | vmbus_initiate_unload(false); |
1272 | for_each_online_cpu(cpu) | 1256 | for_each_online_cpu(cpu) |
1273 | smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); | 1257 | smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1); |
1274 | hv_cleanup(); | 1258 | hv_cleanup(); |
@@ -1276,7 +1260,7 @@ static void hv_kexec_handler(void) | |||
1276 | 1260 | ||
1277 | static void hv_crash_handler(struct pt_regs *regs) | 1261 | static void hv_crash_handler(struct pt_regs *regs) |
1278 | { | 1262 | { |
1279 | vmbus_initiate_unload(); | 1263 | vmbus_initiate_unload(true); |
1280 | /* | 1264 | /* |
1281 | * In crash handler we can't schedule synic cleanup for all CPUs, | 1265 | * In crash handler we can't schedule synic cleanup for all CPUs, |
1282 | * doing the cleanup for current CPU only. This should be sufficient | 1266 | * doing the cleanup for current CPU only. This should be sufficient |
@@ -1334,7 +1318,8 @@ static void __exit vmbus_exit(void) | |||
1334 | hv_synic_clockevents_cleanup(); | 1318 | hv_synic_clockevents_cleanup(); |
1335 | vmbus_disconnect(); | 1319 | vmbus_disconnect(); |
1336 | hv_remove_vmbus_irq(); | 1320 | hv_remove_vmbus_irq(); |
1337 | tasklet_kill(&msg_dpc); | 1321 | for_each_online_cpu(cpu) |
1322 | tasklet_kill(hv_context.msg_dpc[cpu]); | ||
1338 | vmbus_free_channels(); | 1323 | vmbus_free_channels(); |
1339 | if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { | 1324 | if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { |
1340 | unregister_die_notifier(&hyperv_die_block); | 1325 | unregister_die_notifier(&hyperv_die_block); |
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index c85935f3525a..db0541031c72 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig | |||
@@ -4,6 +4,7 @@ | |||
4 | menuconfig CORESIGHT | 4 | menuconfig CORESIGHT |
5 | bool "CoreSight Tracing Support" | 5 | bool "CoreSight Tracing Support" |
6 | select ARM_AMBA | 6 | select ARM_AMBA |
7 | select PERF_EVENTS | ||
7 | help | 8 | help |
8 | This framework provides a kernel interface for the CoreSight debug | 9 | This framework provides a kernel interface for the CoreSight debug |
9 | and trace drivers to register themselves with. It's intended to build | 10 | and trace drivers to register themselves with. It's intended to build |
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile index 99f8e5f6256e..cf8c6d689747 100644 --- a/drivers/hwtracing/coresight/Makefile +++ b/drivers/hwtracing/coresight/Makefile | |||
@@ -8,6 +8,8 @@ obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o | |||
8 | obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o | 8 | obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o |
9 | obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \ | 9 | obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \ |
10 | coresight-replicator.o | 10 | coresight-replicator.o |
11 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o | 11 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \ |
12 | coresight-etm3x-sysfs.o \ | ||
13 | coresight-etm-perf.o | ||
12 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o | 14 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o |
13 | obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o | 15 | obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o |
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 77d0f9c1118d..acbce79934d6 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
2 | * | 2 | * |
3 | * Description: CoreSight Embedded Trace Buffer driver | ||
4 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
@@ -10,8 +12,8 @@ | |||
10 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
11 | */ | 13 | */ |
12 | 14 | ||
15 | #include <asm/local.h> | ||
13 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | 17 | #include <linux/init.h> |
16 | #include <linux/types.h> | 18 | #include <linux/types.h> |
17 | #include <linux/device.h> | 19 | #include <linux/device.h> |
@@ -27,6 +29,11 @@ | |||
27 | #include <linux/coresight.h> | 29 | #include <linux/coresight.h> |
28 | #include <linux/amba/bus.h> | 30 | #include <linux/amba/bus.h> |
29 | #include <linux/clk.h> | 31 | #include <linux/clk.h> |
32 | #include <linux/circ_buf.h> | ||
33 | #include <linux/mm.h> | ||
34 | #include <linux/perf_event.h> | ||
35 | |||
36 | #include <asm/local.h> | ||
30 | 37 | ||
31 | #include "coresight-priv.h" | 38 | #include "coresight-priv.h" |
32 | 39 | ||
@@ -64,6 +71,26 @@ | |||
64 | #define ETB_FRAME_SIZE_WORDS 4 | 71 | #define ETB_FRAME_SIZE_WORDS 4 |
65 | 72 | ||
66 | /** | 73 | /** |
74 | * struct cs_buffers - keep track of a recording session's specifics | ||
75 | * @cur: index of the current buffer | ||
76 | * @nr_pages: max number of pages granted to us | ||
77 | * @offset: offset within the current buffer | ||
78 | * @data_size: how much we collected in this run | ||
79 | * @lost: other than zero if we had a HW buffer wrap around | ||
80 | * @snapshot: is this run in snapshot mode | ||
82 | * @data_pages: a handle to the ring buffer | ||
82 | */ | ||
83 | struct cs_buffers { | ||
84 | unsigned int cur; | ||
85 | unsigned int nr_pages; | ||
86 | unsigned long offset; | ||
87 | local_t data_size; | ||
88 | local_t lost; | ||
89 | bool snapshot; | ||
90 | void **data_pages; | ||
91 | }; | ||
92 | |||
93 | /** | ||
67 | * struct etb_drvdata - specifics associated to an ETB component | 94 | * struct etb_drvdata - specifics associated to an ETB component |
68 | * @base: memory mapped base address for this component. | 95 | * @base: memory mapped base address for this component. |
69 | * @dev: the device entity associated to this component. | 96 | * @dev: the device entity associated to this component. |
@@ -71,10 +98,10 @@ | |||
71 | * @csdev: component vitals needed by the framework. | 98 | * @csdev: component vitals needed by the framework. |
72 | * @miscdev: specifics to handle "/dev/xyz.etb" entry. | 99 | * @miscdev: specifics to handle "/dev/xyz.etb" entry. |
73 | * @spinlock: only one at a time pls. | 100 | * @spinlock: only one at a time pls. |
74 | * @in_use: synchronise user space access to etb buffer. | 101 | * @reading: synchronise user space access to etb buffer. |
102 | * @mode: this ETB is being used. | ||
75 | * @buf: area of memory where ETB buffer content gets sent. | 103 | * @buf: area of memory where ETB buffer content gets sent. |
76 | * @buffer_depth: size of @buf. | 104 | * @buffer_depth: size of @buf. |
77 | * @enable: this ETB is being used. | ||
78 | * @trigger_cntr: amount of words to store after a trigger. | 105 | * @trigger_cntr: amount of words to store after a trigger. |
79 | */ | 106 | */ |
80 | struct etb_drvdata { | 107 | struct etb_drvdata { |
@@ -84,10 +111,10 @@ struct etb_drvdata { | |||
84 | struct coresight_device *csdev; | 111 | struct coresight_device *csdev; |
85 | struct miscdevice miscdev; | 112 | struct miscdevice miscdev; |
86 | spinlock_t spinlock; | 113 | spinlock_t spinlock; |
87 | atomic_t in_use; | 114 | local_t reading; |
115 | local_t mode; | ||
88 | u8 *buf; | 116 | u8 *buf; |
89 | u32 buffer_depth; | 117 | u32 buffer_depth; |
90 | bool enable; | ||
91 | u32 trigger_cntr; | 118 | u32 trigger_cntr; |
92 | }; | 119 | }; |
93 | 120 | ||
@@ -132,18 +159,31 @@ static void etb_enable_hw(struct etb_drvdata *drvdata) | |||
132 | CS_LOCK(drvdata->base); | 159 | CS_LOCK(drvdata->base); |
133 | } | 160 | } |
134 | 161 | ||
135 | static int etb_enable(struct coresight_device *csdev) | 162 | static int etb_enable(struct coresight_device *csdev, u32 mode) |
136 | { | 163 | { |
137 | struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 164 | u32 val; |
138 | unsigned long flags; | 165 | unsigned long flags; |
166 | struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
139 | 167 | ||
140 | pm_runtime_get_sync(drvdata->dev); | 168 | val = local_cmpxchg(&drvdata->mode, |
169 | CS_MODE_DISABLED, mode); | ||
170 | /* | ||
171 | * When accessing from Perf, a HW buffer can be handled | ||
172 | * by a single trace entity. In sysFS mode many tracers | ||
173 | * can be logging to the same HW buffer. | ||
174 | */ | ||
175 | if (val == CS_MODE_PERF) | ||
176 | return -EBUSY; | ||
177 | |||
178 | /* Nothing to do, the tracer is already enabled. */ | ||
179 | if (val == CS_MODE_SYSFS) | ||
180 | goto out; | ||
141 | 181 | ||
142 | spin_lock_irqsave(&drvdata->spinlock, flags); | 182 | spin_lock_irqsave(&drvdata->spinlock, flags); |
143 | etb_enable_hw(drvdata); | 183 | etb_enable_hw(drvdata); |
144 | drvdata->enable = true; | ||
145 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 184 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
146 | 185 | ||
186 | out: | ||
147 | dev_info(drvdata->dev, "ETB enabled\n"); | 187 | dev_info(drvdata->dev, "ETB enabled\n"); |
148 | return 0; | 188 | return 0; |
149 | } | 189 | } |
@@ -244,17 +284,225 @@ static void etb_disable(struct coresight_device *csdev) | |||
244 | spin_lock_irqsave(&drvdata->spinlock, flags); | 284 | spin_lock_irqsave(&drvdata->spinlock, flags); |
245 | etb_disable_hw(drvdata); | 285 | etb_disable_hw(drvdata); |
246 | etb_dump_hw(drvdata); | 286 | etb_dump_hw(drvdata); |
247 | drvdata->enable = false; | ||
248 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 287 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
249 | 288 | ||
250 | pm_runtime_put(drvdata->dev); | 289 | local_set(&drvdata->mode, CS_MODE_DISABLED); |
251 | 290 | ||
252 | dev_info(drvdata->dev, "ETB disabled\n"); | 291 | dev_info(drvdata->dev, "ETB disabled\n"); |
253 | } | 292 | } |
254 | 293 | ||
294 | static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu, | ||
295 | void **pages, int nr_pages, bool overwrite) | ||
296 | { | ||
297 | int node; | ||
298 | struct cs_buffers *buf; | ||
299 | |||
300 | if (cpu == -1) | ||
301 | cpu = smp_processor_id(); | ||
302 | node = cpu_to_node(cpu); | ||
303 | |||
304 | buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node); | ||
305 | if (!buf) | ||
306 | return NULL; | ||
307 | |||
308 | buf->snapshot = overwrite; | ||
309 | buf->nr_pages = nr_pages; | ||
310 | buf->data_pages = pages; | ||
311 | |||
312 | return buf; | ||
313 | } | ||
314 | |||
315 | static void etb_free_buffer(void *config) | ||
316 | { | ||
317 | struct cs_buffers *buf = config; | ||
318 | |||
319 | kfree(buf); | ||
320 | } | ||
321 | |||
322 | static int etb_set_buffer(struct coresight_device *csdev, | ||
323 | struct perf_output_handle *handle, | ||
324 | void *sink_config) | ||
325 | { | ||
326 | int ret = 0; | ||
327 | unsigned long head; | ||
328 | struct cs_buffers *buf = sink_config; | ||
329 | |||
330 | /* wrap head around to the amount of space we have */ | ||
331 | head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); | ||
332 | |||
333 | /* find the page to write to */ | ||
334 | buf->cur = head / PAGE_SIZE; | ||
335 | |||
336 | /* and offset within that page */ | ||
337 | buf->offset = head % PAGE_SIZE; | ||
338 | |||
339 | local_set(&buf->data_size, 0); | ||
340 | |||
341 | return ret; | ||
342 | } | ||
343 | |||
344 | static unsigned long etb_reset_buffer(struct coresight_device *csdev, | ||
345 | struct perf_output_handle *handle, | ||
346 | void *sink_config, bool *lost) | ||
347 | { | ||
348 | unsigned long size = 0; | ||
349 | struct cs_buffers *buf = sink_config; | ||
350 | |||
351 | if (buf) { | ||
352 | /* | ||
353 | * In snapshot mode ->data_size holds the new address of the | ||
354 | * ring buffer's head. The size itself is the whole address | ||
355 | * range since we want the latest information. | ||
356 | */ | ||
357 | if (buf->snapshot) | ||
358 | handle->head = local_xchg(&buf->data_size, | ||
359 | buf->nr_pages << PAGE_SHIFT); | ||
360 | |||
361 | /* | ||
362 | * Tell the tracer PMU how much we got in this run and if | ||
363 | * something went wrong along the way. Nobody else can use | ||
364 | * this cs_buffers instance until we are done. As such | ||
365 | * resetting parameters here and squaring off with the ring | ||
366 | * buffer API in the tracer PMU is fine. | ||
367 | */ | ||
368 | *lost = !!local_xchg(&buf->lost, 0); | ||
369 | size = local_xchg(&buf->data_size, 0); | ||
370 | } | ||
371 | |||
372 | return size; | ||
373 | } | ||
374 | |||
375 | static void etb_update_buffer(struct coresight_device *csdev, | ||
376 | struct perf_output_handle *handle, | ||
377 | void *sink_config) | ||
378 | { | ||
379 | int i, cur; | ||
380 | u8 *buf_ptr; | ||
381 | u32 read_ptr, write_ptr, capacity; | ||
382 | u32 status, read_data, to_read; | ||
383 | unsigned long offset; | ||
384 | struct cs_buffers *buf = sink_config; | ||
385 | struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
386 | |||
387 | if (!buf) | ||
388 | return; | ||
389 | |||
390 | capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS; | ||
391 | |||
392 | CS_UNLOCK(drvdata->base); | ||
393 | etb_disable_hw(drvdata); | ||
394 | |||
395 | /* unit is in words, not bytes */ | ||
396 | read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); | ||
397 | write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); | ||
398 | |||
399 | /* | ||
400 | * Entries should be aligned to the frame size. If they are not | ||
401 | * go back to the last alignement point to give decoding tools a | ||
402 | * chance to fix things. | ||
403 | */ | ||
404 | if (write_ptr % ETB_FRAME_SIZE_WORDS) { | ||
405 | dev_err(drvdata->dev, | ||
406 | "write_ptr: %lu not aligned to formatter frame size\n", | ||
407 | (unsigned long)write_ptr); | ||
408 | |||
409 | write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1); | ||
410 | local_inc(&buf->lost); | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * Get a hold of the status register and see if a wrap around | ||
415 | * has occurred. If so adjust things accordingly. Otherwise | ||
416 | * start at the beginning and go until the write pointer has | ||
417 | * been reached. | ||
418 | */ | ||
419 | status = readl_relaxed(drvdata->base + ETB_STATUS_REG); | ||
420 | if (status & ETB_STATUS_RAM_FULL) { | ||
421 | local_inc(&buf->lost); | ||
422 | to_read = capacity; | ||
423 | read_ptr = write_ptr; | ||
424 | } else { | ||
425 | to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth); | ||
426 | to_read *= ETB_FRAME_SIZE_WORDS; | ||
427 | } | ||
428 | |||
429 | /* | ||
430 | * Make sure we don't overwrite data that hasn't been consumed yet. | ||
431 | * It is entirely possible that the HW buffer has more data than the | ||
432 | * ring buffer can currently handle. If so adjust the start address | ||
433 | * to take only the last traces. | ||
434 | * | ||
435 | * In snapshot mode we are looking to get the latest traces only and as | ||
436 | * such, we don't care about not overwriting data that hasn't been | ||
437 | * processed by user space. | ||
438 | */ | ||
439 | if (!buf->snapshot && to_read > handle->size) { | ||
440 | u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1); | ||
441 | |||
442 | /* The new read pointer must be frame size aligned */ | ||
443 | to_read -= handle->size & mask; | ||
444 | /* | ||
445 | * Move the RAM read pointer up, keeping in mind that | ||
446 | * everything is in frame size units. | ||
447 | */ | ||
448 | read_ptr = (write_ptr + drvdata->buffer_depth) - | ||
449 | to_read / ETB_FRAME_SIZE_WORDS; | ||
450 | /* Wrap around if need be*/ | ||
451 | read_ptr &= ~(drvdata->buffer_depth - 1); | ||
452 | /* let the decoder know we've skipped ahead */ | ||
453 | local_inc(&buf->lost); | ||
454 | } | ||
455 | |||
456 | /* finally tell HW where we want to start reading from */ | ||
457 | writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER); | ||
458 | |||
459 | cur = buf->cur; | ||
460 | offset = buf->offset; | ||
461 | for (i = 0; i < to_read; i += 4) { | ||
462 | buf_ptr = buf->data_pages[cur] + offset; | ||
463 | read_data = readl_relaxed(drvdata->base + | ||
464 | ETB_RAM_READ_DATA_REG); | ||
465 | *buf_ptr++ = read_data >> 0; | ||
466 | *buf_ptr++ = read_data >> 8; | ||
467 | *buf_ptr++ = read_data >> 16; | ||
468 | *buf_ptr++ = read_data >> 24; | ||
469 | |||
470 | offset += 4; | ||
471 | if (offset >= PAGE_SIZE) { | ||
472 | offset = 0; | ||
473 | cur++; | ||
474 | /* wrap around at the end of the buffer */ | ||
475 | cur &= buf->nr_pages - 1; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | /* reset ETB buffer for next run */ | ||
480 | writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER); | ||
481 | writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER); | ||
482 | |||
483 | /* | ||
484 | * In snapshot mode all we have to do is communicate to | ||
485 | * perf_aux_output_end() the address of the current head. In full | ||
486 | * trace mode the same function expects a size to move rb->aux_head | ||
487 | * forward. | ||
488 | */ | ||
489 | if (buf->snapshot) | ||
490 | local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); | ||
491 | else | ||
492 | local_add(to_read, &buf->data_size); | ||
493 | |||
494 | etb_enable_hw(drvdata); | ||
495 | CS_LOCK(drvdata->base); | ||
496 | } | ||
497 | |||
255 | static const struct coresight_ops_sink etb_sink_ops = { | 498 | static const struct coresight_ops_sink etb_sink_ops = { |
256 | .enable = etb_enable, | 499 | .enable = etb_enable, |
257 | .disable = etb_disable, | 500 | .disable = etb_disable, |
501 | .alloc_buffer = etb_alloc_buffer, | ||
502 | .free_buffer = etb_free_buffer, | ||
503 | .set_buffer = etb_set_buffer, | ||
504 | .reset_buffer = etb_reset_buffer, | ||
505 | .update_buffer = etb_update_buffer, | ||
258 | }; | 506 | }; |
259 | 507 | ||
260 | static const struct coresight_ops etb_cs_ops = { | 508 | static const struct coresight_ops etb_cs_ops = { |
@@ -266,7 +514,7 @@ static void etb_dump(struct etb_drvdata *drvdata) | |||
266 | unsigned long flags; | 514 | unsigned long flags; |
267 | 515 | ||
268 | spin_lock_irqsave(&drvdata->spinlock, flags); | 516 | spin_lock_irqsave(&drvdata->spinlock, flags); |
269 | if (drvdata->enable) { | 517 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { |
270 | etb_disable_hw(drvdata); | 518 | etb_disable_hw(drvdata); |
271 | etb_dump_hw(drvdata); | 519 | etb_dump_hw(drvdata); |
272 | etb_enable_hw(drvdata); | 520 | etb_enable_hw(drvdata); |
@@ -281,7 +529,7 @@ static int etb_open(struct inode *inode, struct file *file) | |||
281 | struct etb_drvdata *drvdata = container_of(file->private_data, | 529 | struct etb_drvdata *drvdata = container_of(file->private_data, |
282 | struct etb_drvdata, miscdev); | 530 | struct etb_drvdata, miscdev); |
283 | 531 | ||
284 | if (atomic_cmpxchg(&drvdata->in_use, 0, 1)) | 532 | if (local_cmpxchg(&drvdata->reading, 0, 1)) |
285 | return -EBUSY; | 533 | return -EBUSY; |
286 | 534 | ||
287 | dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); | 535 | dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); |
@@ -317,7 +565,7 @@ static int etb_release(struct inode *inode, struct file *file) | |||
317 | { | 565 | { |
318 | struct etb_drvdata *drvdata = container_of(file->private_data, | 566 | struct etb_drvdata *drvdata = container_of(file->private_data, |
319 | struct etb_drvdata, miscdev); | 567 | struct etb_drvdata, miscdev); |
320 | atomic_set(&drvdata->in_use, 0); | 568 | local_set(&drvdata->reading, 0); |
321 | 569 | ||
322 | dev_dbg(drvdata->dev, "%s: released\n", __func__); | 570 | dev_dbg(drvdata->dev, "%s: released\n", __func__); |
323 | return 0; | 571 | return 0; |
@@ -489,15 +737,6 @@ err_misc_register: | |||
489 | return ret; | 737 | return ret; |
490 | } | 738 | } |
491 | 739 | ||
492 | static int etb_remove(struct amba_device *adev) | ||
493 | { | ||
494 | struct etb_drvdata *drvdata = amba_get_drvdata(adev); | ||
495 | |||
496 | misc_deregister(&drvdata->miscdev); | ||
497 | coresight_unregister(drvdata->csdev); | ||
498 | return 0; | ||
499 | } | ||
500 | |||
501 | #ifdef CONFIG_PM | 740 | #ifdef CONFIG_PM |
502 | static int etb_runtime_suspend(struct device *dev) | 741 | static int etb_runtime_suspend(struct device *dev) |
503 | { | 742 | { |
@@ -537,14 +776,10 @@ static struct amba_driver etb_driver = { | |||
537 | .name = "coresight-etb10", | 776 | .name = "coresight-etb10", |
538 | .owner = THIS_MODULE, | 777 | .owner = THIS_MODULE, |
539 | .pm = &etb_dev_pm_ops, | 778 | .pm = &etb_dev_pm_ops, |
779 | .suppress_bind_attrs = true, | ||
540 | 780 | ||
541 | }, | 781 | }, |
542 | .probe = etb_probe, | 782 | .probe = etb_probe, |
543 | .remove = etb_remove, | ||
544 | .id_table = etb_ids, | 783 | .id_table = etb_ids, |
545 | }; | 784 | }; |
546 | 785 | builtin_amba_driver(etb_driver); | |
547 | module_amba_driver(etb_driver); | ||
548 | |||
549 | MODULE_LICENSE("GPL v2"); | ||
550 | MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c new file mode 100644 index 000000000000..755125f7917f --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c | |||
@@ -0,0 +1,393 @@ | |||
1 | /* | ||
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | ||
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include <linux/coresight.h> | ||
19 | #include <linux/coresight-pmu.h> | ||
20 | #include <linux/cpumask.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/list.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/perf_event.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/workqueue.h> | ||
29 | |||
30 | #include "coresight-priv.h" | ||
31 | |||
32 | static struct pmu etm_pmu; | ||
33 | static bool etm_perf_up; | ||
34 | |||
35 | /** | ||
36 | * struct etm_event_data - Coresight specifics associated to an event | ||
37 | * @work: Handle to free allocated memory outside IRQ context. | ||
38 | * @mask: Hold the CPU(s) this event was set for. | ||
39 | * @snk_config: The sink configuration. | ||
40 | * @path: An array of path, each slot for one CPU. | ||
41 | */ | ||
42 | struct etm_event_data { | ||
43 | struct work_struct work; | ||
44 | cpumask_t mask; | ||
45 | void *snk_config; | ||
46 | struct list_head **path; | ||
47 | }; | ||
48 | |||
49 | static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle); | ||
50 | static DEFINE_PER_CPU(struct coresight_device *, csdev_src); | ||
51 | |||
52 | /* ETMv3.5/PTM's ETMCR is 'config' */ | ||
53 | PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC)); | ||
54 | PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS)); | ||
55 | |||
56 | static struct attribute *etm_config_formats_attr[] = { | ||
57 | &format_attr_cycacc.attr, | ||
58 | &format_attr_timestamp.attr, | ||
59 | NULL, | ||
60 | }; | ||
61 | |||
62 | static struct attribute_group etm_pmu_format_group = { | ||
63 | .name = "format", | ||
64 | .attrs = etm_config_formats_attr, | ||
65 | }; | ||
66 | |||
67 | static const struct attribute_group *etm_pmu_attr_groups[] = { | ||
68 | &etm_pmu_format_group, | ||
69 | NULL, | ||
70 | }; | ||
71 | |||
72 | static void etm_event_read(struct perf_event *event) {} | ||
73 | |||
74 | static int etm_event_init(struct perf_event *event) | ||
75 | { | ||
76 | if (event->attr.type != etm_pmu.type) | ||
77 | return -ENOENT; | ||
78 | |||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static void free_event_data(struct work_struct *work) | ||
83 | { | ||
84 | int cpu; | ||
85 | cpumask_t *mask; | ||
86 | struct etm_event_data *event_data; | ||
87 | struct coresight_device *sink; | ||
88 | |||
89 | event_data = container_of(work, struct etm_event_data, work); | ||
90 | mask = &event_data->mask; | ||
91 | /* | ||
92 | * First deal with the sink configuration. See comment in | ||
93 | * etm_setup_aux() about why we take the first available path. | ||
94 | */ | ||
95 | if (event_data->snk_config) { | ||
96 | cpu = cpumask_first(mask); | ||
97 | sink = coresight_get_sink(event_data->path[cpu]); | ||
98 | if (sink_ops(sink)->free_buffer) | ||
99 | sink_ops(sink)->free_buffer(event_data->snk_config); | ||
100 | } | ||
101 | |||
102 | for_each_cpu(cpu, mask) { | ||
103 | if (event_data->path[cpu]) | ||
104 | coresight_release_path(event_data->path[cpu]); | ||
105 | } | ||
106 | |||
107 | kfree(event_data->path); | ||
108 | kfree(event_data); | ||
109 | } | ||
110 | |||
111 | static void *alloc_event_data(int cpu) | ||
112 | { | ||
113 | int size; | ||
114 | cpumask_t *mask; | ||
115 | struct etm_event_data *event_data; | ||
116 | |||
117 | /* First get memory for the session's data */ | ||
118 | event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL); | ||
119 | if (!event_data) | ||
120 | return NULL; | ||
121 | |||
122 | /* Make sure nothing disappears under us */ | ||
123 | get_online_cpus(); | ||
124 | size = num_online_cpus(); | ||
125 | |||
126 | mask = &event_data->mask; | ||
127 | if (cpu != -1) | ||
128 | cpumask_set_cpu(cpu, mask); | ||
129 | else | ||
130 | cpumask_copy(mask, cpu_online_mask); | ||
131 | put_online_cpus(); | ||
132 | |||
133 | /* | ||
134 | * Each CPU has a single path between source and destination. As such | ||
135 | * allocate an array using CPU numbers as indexes. That way a path | ||
136 | * for any CPU can easily be accessed at any given time. We proceed | ||
137 | * the same way for sessions involving a single CPU. The cost of | ||
138 | * unused memory when dealing with single CPU trace scenarios is small | ||
139 | * compared to the cost of searching through an optimized array. | ||
140 | */ | ||
141 | event_data->path = kcalloc(size, | ||
142 | sizeof(struct list_head *), GFP_KERNEL); | ||
143 | if (!event_data->path) { | ||
144 | kfree(event_data); | ||
145 | return NULL; | ||
146 | } | ||
147 | |||
148 | return event_data; | ||
149 | } | ||
150 | |||
151 | static void etm_free_aux(void *data) | ||
152 | { | ||
153 | struct etm_event_data *event_data = data; | ||
154 | |||
155 | schedule_work(&event_data->work); | ||
156 | } | ||
157 | |||
158 | static void *etm_setup_aux(int event_cpu, void **pages, | ||
159 | int nr_pages, bool overwrite) | ||
160 | { | ||
161 | int cpu; | ||
162 | cpumask_t *mask; | ||
163 | struct coresight_device *sink; | ||
164 | struct etm_event_data *event_data = NULL; | ||
165 | |||
166 | event_data = alloc_event_data(event_cpu); | ||
167 | if (!event_data) | ||
168 | return NULL; | ||
169 | |||
170 | INIT_WORK(&event_data->work, free_event_data); | ||
171 | |||
172 | mask = &event_data->mask; | ||
173 | |||
174 | /* Setup the path for each CPU in a trace session */ | ||
175 | for_each_cpu(cpu, mask) { | ||
176 | struct coresight_device *csdev; | ||
177 | |||
178 | csdev = per_cpu(csdev_src, cpu); | ||
179 | if (!csdev) | ||
180 | goto err; | ||
181 | |||
182 | /* | ||
183 | * Building a path doesn't enable it, it simply builds a | ||
184 | * list of devices from source to sink that can be | ||
185 | * referenced later when the path is actually needed. | ||
186 | */ | ||
187 | event_data->path[cpu] = coresight_build_path(csdev); | ||
188 | if (!event_data->path[cpu]) | ||
189 | goto err; | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * In theory nothing prevent tracers in a trace session from being | ||
194 | * associated with different sinks, nor having a sink per tracer. But | ||
195 | * until we have HW with this kind of topology and a way to convey | ||
196 | * sink assignement from the perf cmd line we need to assume tracers | ||
197 | * in a trace session are using the same sink. Therefore pick the sink | ||
198 | * found at the end of the first available path. | ||
199 | */ | ||
200 | cpu = cpumask_first(mask); | ||
201 | /* Grab the sink at the end of the path */ | ||
202 | sink = coresight_get_sink(event_data->path[cpu]); | ||
203 | if (!sink) | ||
204 | goto err; | ||
205 | |||
206 | if (!sink_ops(sink)->alloc_buffer) | ||
207 | goto err; | ||
208 | |||
209 | /* Get the AUX specific data from the sink buffer */ | ||
210 | event_data->snk_config = | ||
211 | sink_ops(sink)->alloc_buffer(sink, cpu, pages, | ||
212 | nr_pages, overwrite); | ||
213 | if (!event_data->snk_config) | ||
214 | goto err; | ||
215 | |||
216 | out: | ||
217 | return event_data; | ||
218 | |||
219 | err: | ||
220 | etm_free_aux(event_data); | ||
221 | event_data = NULL; | ||
222 | goto out; | ||
223 | } | ||
224 | |||
225 | static void etm_event_start(struct perf_event *event, int flags) | ||
226 | { | ||
227 | int cpu = smp_processor_id(); | ||
228 | struct etm_event_data *event_data; | ||
229 | struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); | ||
230 | struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); | ||
231 | |||
232 | if (!csdev) | ||
233 | goto fail; | ||
234 | |||
235 | /* | ||
236 | * Deal with the ring buffer API and get a handle on the | ||
237 | * session's information. | ||
238 | */ | ||
239 | event_data = perf_aux_output_begin(handle, event); | ||
240 | if (!event_data) | ||
241 | goto fail; | ||
242 | |||
243 | /* We need a sink, no need to continue without one */ | ||
244 | sink = coresight_get_sink(event_data->path[cpu]); | ||
245 | if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer)) | ||
246 | goto fail_end_stop; | ||
247 | |||
248 | /* Configure the sink */ | ||
249 | if (sink_ops(sink)->set_buffer(sink, handle, | ||
250 | event_data->snk_config)) | ||
251 | goto fail_end_stop; | ||
252 | |||
253 | /* Nothing will happen without a path */ | ||
254 | if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF)) | ||
255 | goto fail_end_stop; | ||
256 | |||
257 | /* Tell the perf core the event is alive */ | ||
258 | event->hw.state = 0; | ||
259 | |||
260 | /* Finally enable the tracer */ | ||
261 | if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF)) | ||
262 | goto fail_end_stop; | ||
263 | |||
264 | out: | ||
265 | return; | ||
266 | |||
267 | fail_end_stop: | ||
268 | perf_aux_output_end(handle, 0, true); | ||
269 | fail: | ||
270 | event->hw.state = PERF_HES_STOPPED; | ||
271 | goto out; | ||
272 | } | ||
273 | |||
274 | static void etm_event_stop(struct perf_event *event, int mode) | ||
275 | { | ||
276 | bool lost; | ||
277 | int cpu = smp_processor_id(); | ||
278 | unsigned long size; | ||
279 | struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); | ||
280 | struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); | ||
281 | struct etm_event_data *event_data = perf_get_aux(handle); | ||
282 | |||
283 | if (event->hw.state == PERF_HES_STOPPED) | ||
284 | return; | ||
285 | |||
286 | if (!csdev) | ||
287 | return; | ||
288 | |||
289 | sink = coresight_get_sink(event_data->path[cpu]); | ||
290 | if (!sink) | ||
291 | return; | ||
292 | |||
293 | /* stop tracer */ | ||
294 | source_ops(csdev)->disable(csdev); | ||
295 | |||
296 | /* tell the core */ | ||
297 | event->hw.state = PERF_HES_STOPPED; | ||
298 | |||
299 | if (mode & PERF_EF_UPDATE) { | ||
300 | if (WARN_ON_ONCE(handle->event != event)) | ||
301 | return; | ||
302 | |||
303 | /* update trace information */ | ||
304 | if (!sink_ops(sink)->update_buffer) | ||
305 | return; | ||
306 | |||
307 | sink_ops(sink)->update_buffer(sink, handle, | ||
308 | event_data->snk_config); | ||
309 | |||
310 | if (!sink_ops(sink)->reset_buffer) | ||
311 | return; | ||
312 | |||
313 | size = sink_ops(sink)->reset_buffer(sink, handle, | ||
314 | event_data->snk_config, | ||
315 | &lost); | ||
316 | |||
317 | perf_aux_output_end(handle, size, lost); | ||
318 | } | ||
319 | |||
320 | /* Disabling the path make its elements available to other sessions */ | ||
321 | coresight_disable_path(event_data->path[cpu]); | ||
322 | } | ||
323 | |||
324 | static int etm_event_add(struct perf_event *event, int mode) | ||
325 | { | ||
326 | int ret = 0; | ||
327 | struct hw_perf_event *hwc = &event->hw; | ||
328 | |||
329 | if (mode & PERF_EF_START) { | ||
330 | etm_event_start(event, 0); | ||
331 | if (hwc->state & PERF_HES_STOPPED) | ||
332 | ret = -EINVAL; | ||
333 | } else { | ||
334 | hwc->state = PERF_HES_STOPPED; | ||
335 | } | ||
336 | |||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | static void etm_event_del(struct perf_event *event, int mode) | ||
341 | { | ||
342 | etm_event_stop(event, PERF_EF_UPDATE); | ||
343 | } | ||
344 | |||
345 | int etm_perf_symlink(struct coresight_device *csdev, bool link) | ||
346 | { | ||
347 | char entry[sizeof("cpu9999999")]; | ||
348 | int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev); | ||
349 | struct device *pmu_dev = etm_pmu.dev; | ||
350 | struct device *cs_dev = &csdev->dev; | ||
351 | |||
352 | sprintf(entry, "cpu%d", cpu); | ||
353 | |||
354 | if (!etm_perf_up) | ||
355 | return -EPROBE_DEFER; | ||
356 | |||
357 | if (link) { | ||
358 | ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry); | ||
359 | if (ret) | ||
360 | return ret; | ||
361 | per_cpu(csdev_src, cpu) = csdev; | ||
362 | } else { | ||
363 | sysfs_remove_link(&pmu_dev->kobj, entry); | ||
364 | per_cpu(csdev_src, cpu) = NULL; | ||
365 | } | ||
366 | |||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static int __init etm_perf_init(void) | ||
371 | { | ||
372 | int ret; | ||
373 | |||
374 | etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE; | ||
375 | |||
376 | etm_pmu.attr_groups = etm_pmu_attr_groups; | ||
377 | etm_pmu.task_ctx_nr = perf_sw_context; | ||
378 | etm_pmu.read = etm_event_read; | ||
379 | etm_pmu.event_init = etm_event_init; | ||
380 | etm_pmu.setup_aux = etm_setup_aux; | ||
381 | etm_pmu.free_aux = etm_free_aux; | ||
382 | etm_pmu.start = etm_event_start; | ||
383 | etm_pmu.stop = etm_event_stop; | ||
384 | etm_pmu.add = etm_event_add; | ||
385 | etm_pmu.del = etm_event_del; | ||
386 | |||
387 | ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1); | ||
388 | if (ret == 0) | ||
389 | etm_perf_up = true; | ||
390 | |||
391 | return ret; | ||
392 | } | ||
393 | device_initcall(etm_perf_init); | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h new file mode 100644 index 000000000000..87f5a134eb6f --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm-perf.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | ||
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef _CORESIGHT_ETM_PERF_H | ||
19 | #define _CORESIGHT_ETM_PERF_H | ||
20 | |||
21 | struct coresight_device; | ||
22 | |||
23 | #ifdef CONFIG_CORESIGHT | ||
24 | int etm_perf_symlink(struct coresight_device *csdev, bool link); | ||
25 | |||
26 | #else | ||
27 | static inline int etm_perf_symlink(struct coresight_device *csdev, bool link) | ||
28 | { return -EINVAL; } | ||
29 | |||
30 | #endif /* CONFIG_CORESIGHT */ | ||
31 | |||
32 | #endif | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h index b4481eb29304..51597cb2c08a 100644 --- a/drivers/hwtracing/coresight/coresight-etm.h +++ b/drivers/hwtracing/coresight/coresight-etm.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #ifndef _CORESIGHT_CORESIGHT_ETM_H | 13 | #ifndef _CORESIGHT_CORESIGHT_ETM_H |
14 | #define _CORESIGHT_CORESIGHT_ETM_H | 14 | #define _CORESIGHT_CORESIGHT_ETM_H |
15 | 15 | ||
16 | #include <asm/local.h> | ||
16 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
17 | #include "coresight-priv.h" | 18 | #include "coresight-priv.h" |
18 | 19 | ||
@@ -109,7 +110,10 @@ | |||
109 | #define ETM_MODE_STALL BIT(2) | 110 | #define ETM_MODE_STALL BIT(2) |
110 | #define ETM_MODE_TIMESTAMP BIT(3) | 111 | #define ETM_MODE_TIMESTAMP BIT(3) |
111 | #define ETM_MODE_CTXID BIT(4) | 112 | #define ETM_MODE_CTXID BIT(4) |
112 | #define ETM_MODE_ALL 0x1f | 113 | #define ETM_MODE_ALL (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \ |
114 | ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \ | ||
115 | ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \ | ||
116 | ETM_MODE_EXCL_USER) | ||
113 | 117 | ||
114 | #define ETM_SQR_MASK 0x3 | 118 | #define ETM_SQR_MASK 0x3 |
115 | #define ETM_TRACEID_MASK 0x3f | 119 | #define ETM_TRACEID_MASK 0x3f |
@@ -136,35 +140,16 @@ | |||
136 | #define ETM_DEFAULT_EVENT_VAL (ETM_HARD_WIRE_RES_A | \ | 140 | #define ETM_DEFAULT_EVENT_VAL (ETM_HARD_WIRE_RES_A | \ |
137 | ETM_ADD_COMP_0 | \ | 141 | ETM_ADD_COMP_0 | \ |
138 | ETM_EVENT_NOT_A) | 142 | ETM_EVENT_NOT_A) |
143 | |||
139 | /** | 144 | /** |
140 | * struct etm_drvdata - specifics associated to an ETM component | 145 | * struct etm_config - configuration information related to an ETM |
141 | * @base: memory mapped base address for this component. | ||
142 | * @dev: the device entity associated to this component. | ||
143 | * @atclk: optional clock for the core parts of the ETM. | ||
144 | * @csdev: component vitals needed by the framework. | ||
145 | * @spinlock: only one at a time pls. | ||
146 | * @cpu: the cpu this component is affined to. | ||
147 | * @port_size: port size as reported by ETMCR bit 4-6 and 21. | ||
148 | * @arch: ETM/PTM version number. | ||
149 | * @use_cpu14: true if management registers need to be accessed via CP14. | ||
150 | * @enable: is this ETM/PTM currently tracing. | ||
151 | * @sticky_enable: true if ETM base configuration has been done. | ||
152 | * @boot_enable:true if we should start tracing at boot time. | ||
153 | * @os_unlock: true if access to management registers is allowed. | ||
154 | * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR. | ||
155 | * @nr_cntr: Number of counters as found in ETMCCR bit 13-15. | ||
156 | * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19. | ||
157 | * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22. | ||
158 | * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25. | ||
159 | * @etmccr: value of register ETMCCR. | ||
160 | * @etmccer: value of register ETMCCER. | ||
161 | * @traceid: value of the current ID for this component. | ||
162 | * @mode: controls various modes supported by this ETM/PTM. | 146 | * @mode: controls various modes supported by this ETM/PTM. |
163 | * @ctrl: used in conjunction with @mode. | 147 | * @ctrl: used in conjunction with @mode. |
164 | * @trigger_event: setting for register ETMTRIGGER. | 148 | * @trigger_event: setting for register ETMTRIGGER. |
165 | * @startstop_ctrl: setting for register ETMTSSCR. | 149 | * @startstop_ctrl: setting for register ETMTSSCR. |
166 | * @enable_event: setting for register ETMTEEVR. | 150 | * @enable_event: setting for register ETMTEEVR. |
167 | * @enable_ctrl1: setting for register ETMTECR1. | 151 | * @enable_ctrl1: setting for register ETMTECR1. |
152 | * @enable_ctrl2: setting for register ETMTECR2. | ||
168 | * @fifofull_level: setting for register ETMFFLR. | 153 | * @fifofull_level: setting for register ETMFFLR. |
169 | * @addr_idx: index for the address comparator selection. | 154 | * @addr_idx: index for the address comparator selection. |
170 | * @addr_val: value for address comparator register. | 155 | * @addr_val: value for address comparator register. |
@@ -189,36 +174,16 @@ | |||
189 | * @ctxid_mask: mask applicable to all the context IDs. | 174 | * @ctxid_mask: mask applicable to all the context IDs. |
190 | * @sync_freq: Synchronisation frequency. | 175 | * @sync_freq: Synchronisation frequency. |
191 | * @timestamp_event: Defines an event that requests the insertion | 176 | * @timestamp_event: Defines an event that requests the insertion |
192 | of a timestamp into the trace stream. | 177 | * of a timestamp into the trace stream. |
193 | */ | 178 | */ |
194 | struct etm_drvdata { | 179 | struct etm_config { |
195 | void __iomem *base; | ||
196 | struct device *dev; | ||
197 | struct clk *atclk; | ||
198 | struct coresight_device *csdev; | ||
199 | spinlock_t spinlock; | ||
200 | int cpu; | ||
201 | int port_size; | ||
202 | u8 arch; | ||
203 | bool use_cp14; | ||
204 | bool enable; | ||
205 | bool sticky_enable; | ||
206 | bool boot_enable; | ||
207 | bool os_unlock; | ||
208 | u8 nr_addr_cmp; | ||
209 | u8 nr_cntr; | ||
210 | u8 nr_ext_inp; | ||
211 | u8 nr_ext_out; | ||
212 | u8 nr_ctxid_cmp; | ||
213 | u32 etmccr; | ||
214 | u32 etmccer; | ||
215 | u32 traceid; | ||
216 | u32 mode; | 180 | u32 mode; |
217 | u32 ctrl; | 181 | u32 ctrl; |
218 | u32 trigger_event; | 182 | u32 trigger_event; |
219 | u32 startstop_ctrl; | 183 | u32 startstop_ctrl; |
220 | u32 enable_event; | 184 | u32 enable_event; |
221 | u32 enable_ctrl1; | 185 | u32 enable_ctrl1; |
186 | u32 enable_ctrl2; | ||
222 | u32 fifofull_level; | 187 | u32 fifofull_level; |
223 | u8 addr_idx; | 188 | u8 addr_idx; |
224 | u32 addr_val[ETM_MAX_ADDR_CMP]; | 189 | u32 addr_val[ETM_MAX_ADDR_CMP]; |
@@ -244,6 +209,56 @@ struct etm_drvdata { | |||
244 | u32 timestamp_event; | 209 | u32 timestamp_event; |
245 | }; | 210 | }; |
246 | 211 | ||
212 | /** | ||
213 | * struct etm_drvdata - specifics associated to an ETM component | ||
214 | * @base: memory mapped base address for this component. | ||
215 | * @dev: the device entity associated to this component. | ||
216 | * @atclk: optional clock for the core parts of the ETM. | ||
217 | * @csdev: component vitals needed by the framework. | ||
218 | * @spinlock: only one at a time pls. | ||
219 | * @cpu: the cpu this component is affined to. | ||
220 | * @port_size: port size as reported by ETMCR bit 4-6 and 21. | ||
221 | * @arch: ETM/PTM version number. | ||
222 | * @use_cp14: true if management registers need to be accessed via CP14. | ||
223 | * @mode: this tracer's mode, i.e sysFS, Perf or disabled. | ||
224 | * @sticky_enable: true if ETM base configuration has been done. | ||
225 | * @boot_enable: true if we should start tracing at boot time. | ||
226 | * @os_unlock: true if access to management registers is allowed. | ||
227 | * @nr_addr_cmp: Number of pairs of address comparators as found in ETMCCR. | ||
228 | * @nr_cntr: Number of counters as found in ETMCCR bit 13-15. | ||
229 | * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19. | ||
230 | * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22. | ||
231 | * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25. | ||
232 | * @etmccr: value of register ETMCCR. | ||
233 | * @etmccer: value of register ETMCCER. | ||
234 | * @traceid: value of the current ID for this component. | ||
235 | * @config: structure holding configuration parameters. | ||
236 | */ | ||
237 | struct etm_drvdata { | ||
238 | void __iomem *base; | ||
239 | struct device *dev; | ||
240 | struct clk *atclk; | ||
241 | struct coresight_device *csdev; | ||
242 | spinlock_t spinlock; | ||
243 | int cpu; | ||
244 | int port_size; | ||
245 | u8 arch; | ||
246 | bool use_cp14; | ||
247 | local_t mode; | ||
248 | bool sticky_enable; | ||
249 | bool boot_enable; | ||
250 | bool os_unlock; | ||
251 | u8 nr_addr_cmp; | ||
252 | u8 nr_cntr; | ||
253 | u8 nr_ext_inp; | ||
254 | u8 nr_ext_out; | ||
255 | u8 nr_ctxid_cmp; | ||
256 | u32 etmccr; | ||
257 | u32 etmccer; | ||
258 | u32 traceid; | ||
259 | struct etm_config config; | ||
260 | }; | ||
261 | |||
247 | enum etm_addr_type { | 262 | enum etm_addr_type { |
248 | ETM_ADDR_TYPE_NONE, | 263 | ETM_ADDR_TYPE_NONE, |
249 | ETM_ADDR_TYPE_SINGLE, | 264 | ETM_ADDR_TYPE_SINGLE, |
@@ -251,4 +266,39 @@ enum etm_addr_type { | |||
251 | ETM_ADDR_TYPE_START, | 266 | ETM_ADDR_TYPE_START, |
252 | ETM_ADDR_TYPE_STOP, | 267 | ETM_ADDR_TYPE_STOP, |
253 | }; | 268 | }; |
269 | |||
270 | static inline void etm_writel(struct etm_drvdata *drvdata, | ||
271 | u32 val, u32 off) | ||
272 | { | ||
273 | if (drvdata->use_cp14) { | ||
274 | if (etm_writel_cp14(off, val)) { | ||
275 | dev_err(drvdata->dev, | ||
276 | "invalid CP14 access to ETM reg: %#x", off); | ||
277 | } | ||
278 | } else { | ||
279 | writel_relaxed(val, drvdata->base + off); | ||
280 | } | ||
281 | } | ||
282 | |||
283 | static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off) | ||
284 | { | ||
285 | u32 val; | ||
286 | |||
287 | if (drvdata->use_cp14) { | ||
288 | if (etm_readl_cp14(off, &val)) { | ||
289 | dev_err(drvdata->dev, | ||
290 | "invalid CP14 access to ETM reg: %#x", off); | ||
291 | } | ||
292 | } else { | ||
293 | val = readl_relaxed(drvdata->base + off); | ||
294 | } | ||
295 | |||
296 | return val; | ||
297 | } | ||
298 | |||
299 | extern const struct attribute_group *coresight_etm_groups[]; | ||
300 | int etm_get_trace_id(struct etm_drvdata *drvdata); | ||
301 | void etm_set_default(struct etm_config *config); | ||
302 | void etm_config_trace_mode(struct etm_config *config); | ||
303 | struct etm_config *get_etm_config(struct etm_drvdata *drvdata); | ||
254 | #endif | 304 | #endif |
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c new file mode 100644 index 000000000000..cbb4046c1070 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c | |||
@@ -0,0 +1,1272 @@ | |||
1 | /* | ||
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | ||
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include <linux/pm_runtime.h> | ||
19 | #include <linux/sysfs.h> | ||
20 | #include "coresight-etm.h" | ||
21 | |||
22 | static ssize_t nr_addr_cmp_show(struct device *dev, | ||
23 | struct device_attribute *attr, char *buf) | ||
24 | { | ||
25 | unsigned long val; | ||
26 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
27 | |||
28 | val = drvdata->nr_addr_cmp; | ||
29 | return sprintf(buf, "%#lx\n", val); | ||
30 | } | ||
31 | static DEVICE_ATTR_RO(nr_addr_cmp); | ||
32 | |||
33 | static ssize_t nr_cntr_show(struct device *dev, | ||
34 | struct device_attribute *attr, char *buf) | ||
35 | { unsigned long val; | ||
36 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
37 | |||
38 | val = drvdata->nr_cntr; | ||
39 | return sprintf(buf, "%#lx\n", val); | ||
40 | } | ||
41 | static DEVICE_ATTR_RO(nr_cntr); | ||
42 | |||
43 | static ssize_t nr_ctxid_cmp_show(struct device *dev, | ||
44 | struct device_attribute *attr, char *buf) | ||
45 | { | ||
46 | unsigned long val; | ||
47 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
48 | |||
49 | val = drvdata->nr_ctxid_cmp; | ||
50 | return sprintf(buf, "%#lx\n", val); | ||
51 | } | ||
52 | static DEVICE_ATTR_RO(nr_ctxid_cmp); | ||
53 | |||
54 | static ssize_t etmsr_show(struct device *dev, | ||
55 | struct device_attribute *attr, char *buf) | ||
56 | { | ||
57 | unsigned long flags, val; | ||
58 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
59 | |||
60 | pm_runtime_get_sync(drvdata->dev); | ||
61 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
62 | CS_UNLOCK(drvdata->base); | ||
63 | |||
64 | val = etm_readl(drvdata, ETMSR); | ||
65 | |||
66 | CS_LOCK(drvdata->base); | ||
67 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
68 | pm_runtime_put(drvdata->dev); | ||
69 | |||
70 | return sprintf(buf, "%#lx\n", val); | ||
71 | } | ||
72 | static DEVICE_ATTR_RO(etmsr); | ||
73 | |||
74 | static ssize_t reset_store(struct device *dev, | ||
75 | struct device_attribute *attr, | ||
76 | const char *buf, size_t size) | ||
77 | { | ||
78 | int i, ret; | ||
79 | unsigned long val; | ||
80 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
81 | struct etm_config *config = &drvdata->config; | ||
82 | |||
83 | ret = kstrtoul(buf, 16, &val); | ||
84 | if (ret) | ||
85 | return ret; | ||
86 | |||
87 | if (val) { | ||
88 | spin_lock(&drvdata->spinlock); | ||
89 | memset(config, 0, sizeof(struct etm_config)); | ||
90 | config->mode = ETM_MODE_EXCLUDE; | ||
91 | config->trigger_event = ETM_DEFAULT_EVENT_VAL; | ||
92 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { | ||
93 | config->addr_type[i] = ETM_ADDR_TYPE_NONE; | ||
94 | } | ||
95 | |||
96 | etm_set_default(config); | ||
97 | spin_unlock(&drvdata->spinlock); | ||
98 | } | ||
99 | |||
100 | return size; | ||
101 | } | ||
102 | static DEVICE_ATTR_WO(reset); | ||
103 | |||
104 | static ssize_t mode_show(struct device *dev, | ||
105 | struct device_attribute *attr, char *buf) | ||
106 | { | ||
107 | unsigned long val; | ||
108 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
109 | struct etm_config *config = &drvdata->config; | ||
110 | |||
111 | val = config->mode; | ||
112 | return sprintf(buf, "%#lx\n", val); | ||
113 | } | ||
114 | |||
115 | static ssize_t mode_store(struct device *dev, | ||
116 | struct device_attribute *attr, | ||
117 | const char *buf, size_t size) | ||
118 | { | ||
119 | int ret; | ||
120 | unsigned long val; | ||
121 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
122 | struct etm_config *config = &drvdata->config; | ||
123 | |||
124 | ret = kstrtoul(buf, 16, &val); | ||
125 | if (ret) | ||
126 | return ret; | ||
127 | |||
128 | spin_lock(&drvdata->spinlock); | ||
129 | config->mode = val & ETM_MODE_ALL; | ||
130 | |||
131 | if (config->mode & ETM_MODE_EXCLUDE) | ||
132 | config->enable_ctrl1 |= ETMTECR1_INC_EXC; | ||
133 | else | ||
134 | config->enable_ctrl1 &= ~ETMTECR1_INC_EXC; | ||
135 | |||
136 | if (config->mode & ETM_MODE_CYCACC) | ||
137 | config->ctrl |= ETMCR_CYC_ACC; | ||
138 | else | ||
139 | config->ctrl &= ~ETMCR_CYC_ACC; | ||
140 | |||
141 | if (config->mode & ETM_MODE_STALL) { | ||
142 | if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) { | ||
143 | dev_warn(drvdata->dev, "stall mode not supported\n"); | ||
144 | ret = -EINVAL; | ||
145 | goto err_unlock; | ||
146 | } | ||
147 | config->ctrl |= ETMCR_STALL_MODE; | ||
148 | } else | ||
149 | config->ctrl &= ~ETMCR_STALL_MODE; | ||
150 | |||
151 | if (config->mode & ETM_MODE_TIMESTAMP) { | ||
152 | if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) { | ||
153 | dev_warn(drvdata->dev, "timestamp not supported\n"); | ||
154 | ret = -EINVAL; | ||
155 | goto err_unlock; | ||
156 | } | ||
157 | config->ctrl |= ETMCR_TIMESTAMP_EN; | ||
158 | } else | ||
159 | config->ctrl &= ~ETMCR_TIMESTAMP_EN; | ||
160 | |||
161 | if (config->mode & ETM_MODE_CTXID) | ||
162 | config->ctrl |= ETMCR_CTXID_SIZE; | ||
163 | else | ||
164 | config->ctrl &= ~ETMCR_CTXID_SIZE; | ||
165 | |||
166 | if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) | ||
167 | etm_config_trace_mode(config); | ||
168 | |||
169 | spin_unlock(&drvdata->spinlock); | ||
170 | |||
171 | return size; | ||
172 | |||
173 | err_unlock: | ||
174 | spin_unlock(&drvdata->spinlock); | ||
175 | return ret; | ||
176 | } | ||
177 | static DEVICE_ATTR_RW(mode); | ||
178 | |||
179 | static ssize_t trigger_event_show(struct device *dev, | ||
180 | struct device_attribute *attr, char *buf) | ||
181 | { | ||
182 | unsigned long val; | ||
183 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
184 | struct etm_config *config = &drvdata->config; | ||
185 | |||
186 | val = config->trigger_event; | ||
187 | return sprintf(buf, "%#lx\n", val); | ||
188 | } | ||
189 | |||
190 | static ssize_t trigger_event_store(struct device *dev, | ||
191 | struct device_attribute *attr, | ||
192 | const char *buf, size_t size) | ||
193 | { | ||
194 | int ret; | ||
195 | unsigned long val; | ||
196 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
197 | struct etm_config *config = &drvdata->config; | ||
198 | |||
199 | ret = kstrtoul(buf, 16, &val); | ||
200 | if (ret) | ||
201 | return ret; | ||
202 | |||
203 | config->trigger_event = val & ETM_EVENT_MASK; | ||
204 | |||
205 | return size; | ||
206 | } | ||
207 | static DEVICE_ATTR_RW(trigger_event); | ||
208 | |||
209 | static ssize_t enable_event_show(struct device *dev, | ||
210 | struct device_attribute *attr, char *buf) | ||
211 | { | ||
212 | unsigned long val; | ||
213 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
214 | struct etm_config *config = &drvdata->config; | ||
215 | |||
216 | val = config->enable_event; | ||
217 | return sprintf(buf, "%#lx\n", val); | ||
218 | } | ||
219 | |||
220 | static ssize_t enable_event_store(struct device *dev, | ||
221 | struct device_attribute *attr, | ||
222 | const char *buf, size_t size) | ||
223 | { | ||
224 | int ret; | ||
225 | unsigned long val; | ||
226 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
227 | struct etm_config *config = &drvdata->config; | ||
228 | |||
229 | ret = kstrtoul(buf, 16, &val); | ||
230 | if (ret) | ||
231 | return ret; | ||
232 | |||
233 | config->enable_event = val & ETM_EVENT_MASK; | ||
234 | |||
235 | return size; | ||
236 | } | ||
237 | static DEVICE_ATTR_RW(enable_event); | ||
238 | |||
239 | static ssize_t fifofull_level_show(struct device *dev, | ||
240 | struct device_attribute *attr, char *buf) | ||
241 | { | ||
242 | unsigned long val; | ||
243 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
244 | struct etm_config *config = &drvdata->config; | ||
245 | |||
246 | val = config->fifofull_level; | ||
247 | return sprintf(buf, "%#lx\n", val); | ||
248 | } | ||
249 | |||
250 | static ssize_t fifofull_level_store(struct device *dev, | ||
251 | struct device_attribute *attr, | ||
252 | const char *buf, size_t size) | ||
253 | { | ||
254 | int ret; | ||
255 | unsigned long val; | ||
256 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
257 | struct etm_config *config = &drvdata->config; | ||
258 | |||
259 | ret = kstrtoul(buf, 16, &val); | ||
260 | if (ret) | ||
261 | return ret; | ||
262 | |||
263 | config->fifofull_level = val; | ||
264 | |||
265 | return size; | ||
266 | } | ||
267 | static DEVICE_ATTR_RW(fifofull_level); | ||
268 | |||
269 | static ssize_t addr_idx_show(struct device *dev, | ||
270 | struct device_attribute *attr, char *buf) | ||
271 | { | ||
272 | unsigned long val; | ||
273 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
274 | struct etm_config *config = &drvdata->config; | ||
275 | |||
276 | val = config->addr_idx; | ||
277 | return sprintf(buf, "%#lx\n", val); | ||
278 | } | ||
279 | |||
280 | static ssize_t addr_idx_store(struct device *dev, | ||
281 | struct device_attribute *attr, | ||
282 | const char *buf, size_t size) | ||
283 | { | ||
284 | int ret; | ||
285 | unsigned long val; | ||
286 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
287 | struct etm_config *config = &drvdata->config; | ||
288 | |||
289 | ret = kstrtoul(buf, 16, &val); | ||
290 | if (ret) | ||
291 | return ret; | ||
292 | |||
293 | if (val >= drvdata->nr_addr_cmp) | ||
294 | return -EINVAL; | ||
295 | |||
296 | /* | ||
297 | * Use spinlock to ensure index doesn't change while it gets | ||
298 | * dereferenced multiple times within a spinlock block elsewhere. | ||
299 | */ | ||
300 | spin_lock(&drvdata->spinlock); | ||
301 | config->addr_idx = val; | ||
302 | spin_unlock(&drvdata->spinlock); | ||
303 | |||
304 | return size; | ||
305 | } | ||
306 | static DEVICE_ATTR_RW(addr_idx); | ||
307 | |||
308 | static ssize_t addr_single_show(struct device *dev, | ||
309 | struct device_attribute *attr, char *buf) | ||
310 | { | ||
311 | u8 idx; | ||
312 | unsigned long val; | ||
313 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
314 | struct etm_config *config = &drvdata->config; | ||
315 | |||
316 | spin_lock(&drvdata->spinlock); | ||
317 | idx = config->addr_idx; | ||
318 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
319 | config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
320 | spin_unlock(&drvdata->spinlock); | ||
321 | return -EINVAL; | ||
322 | } | ||
323 | |||
324 | val = config->addr_val[idx]; | ||
325 | spin_unlock(&drvdata->spinlock); | ||
326 | |||
327 | return sprintf(buf, "%#lx\n", val); | ||
328 | } | ||
329 | |||
330 | static ssize_t addr_single_store(struct device *dev, | ||
331 | struct device_attribute *attr, | ||
332 | const char *buf, size_t size) | ||
333 | { | ||
334 | u8 idx; | ||
335 | int ret; | ||
336 | unsigned long val; | ||
337 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
338 | struct etm_config *config = &drvdata->config; | ||
339 | |||
340 | ret = kstrtoul(buf, 16, &val); | ||
341 | if (ret) | ||
342 | return ret; | ||
343 | |||
344 | spin_lock(&drvdata->spinlock); | ||
345 | idx = config->addr_idx; | ||
346 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
347 | config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
348 | spin_unlock(&drvdata->spinlock); | ||
349 | return -EINVAL; | ||
350 | } | ||
351 | |||
352 | config->addr_val[idx] = val; | ||
353 | config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; | ||
354 | spin_unlock(&drvdata->spinlock); | ||
355 | |||
356 | return size; | ||
357 | } | ||
358 | static DEVICE_ATTR_RW(addr_single); | ||
359 | |||
360 | static ssize_t addr_range_show(struct device *dev, | ||
361 | struct device_attribute *attr, char *buf) | ||
362 | { | ||
363 | u8 idx; | ||
364 | unsigned long val1, val2; | ||
365 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
366 | struct etm_config *config = &drvdata->config; | ||
367 | |||
368 | spin_lock(&drvdata->spinlock); | ||
369 | idx = config->addr_idx; | ||
370 | if (idx % 2 != 0) { | ||
371 | spin_unlock(&drvdata->spinlock); | ||
372 | return -EPERM; | ||
373 | } | ||
374 | if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
375 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
376 | (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
377 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
378 | spin_unlock(&drvdata->spinlock); | ||
379 | return -EPERM; | ||
380 | } | ||
381 | |||
382 | val1 = config->addr_val[idx]; | ||
383 | val2 = config->addr_val[idx + 1]; | ||
384 | spin_unlock(&drvdata->spinlock); | ||
385 | |||
386 | return sprintf(buf, "%#lx %#lx\n", val1, val2); | ||
387 | } | ||
388 | |||
389 | static ssize_t addr_range_store(struct device *dev, | ||
390 | struct device_attribute *attr, | ||
391 | const char *buf, size_t size) | ||
392 | { | ||
393 | u8 idx; | ||
394 | unsigned long val1, val2; | ||
395 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
396 | struct etm_config *config = &drvdata->config; | ||
397 | |||
398 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
399 | return -EINVAL; | ||
400 | /* Lower address comparator cannot have a higher address value */ | ||
401 | if (val1 > val2) | ||
402 | return -EINVAL; | ||
403 | |||
404 | spin_lock(&drvdata->spinlock); | ||
405 | idx = config->addr_idx; | ||
406 | if (idx % 2 != 0) { | ||
407 | spin_unlock(&drvdata->spinlock); | ||
408 | return -EPERM; | ||
409 | } | ||
410 | if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
411 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
412 | (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
413 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
414 | spin_unlock(&drvdata->spinlock); | ||
415 | return -EPERM; | ||
416 | } | ||
417 | |||
418 | config->addr_val[idx] = val1; | ||
419 | config->addr_type[idx] = ETM_ADDR_TYPE_RANGE; | ||
420 | config->addr_val[idx + 1] = val2; | ||
421 | config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; | ||
422 | config->enable_ctrl1 |= (1 << (idx/2)); | ||
423 | spin_unlock(&drvdata->spinlock); | ||
424 | |||
425 | return size; | ||
426 | } | ||
427 | static DEVICE_ATTR_RW(addr_range); | ||
428 | |||
429 | static ssize_t addr_start_show(struct device *dev, | ||
430 | struct device_attribute *attr, char *buf) | ||
431 | { | ||
432 | u8 idx; | ||
433 | unsigned long val; | ||
434 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
435 | struct etm_config *config = &drvdata->config; | ||
436 | |||
437 | spin_lock(&drvdata->spinlock); | ||
438 | idx = config->addr_idx; | ||
439 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
440 | config->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
441 | spin_unlock(&drvdata->spinlock); | ||
442 | return -EPERM; | ||
443 | } | ||
444 | |||
445 | val = config->addr_val[idx]; | ||
446 | spin_unlock(&drvdata->spinlock); | ||
447 | |||
448 | return sprintf(buf, "%#lx\n", val); | ||
449 | } | ||
450 | |||
451 | static ssize_t addr_start_store(struct device *dev, | ||
452 | struct device_attribute *attr, | ||
453 | const char *buf, size_t size) | ||
454 | { | ||
455 | u8 idx; | ||
456 | int ret; | ||
457 | unsigned long val; | ||
458 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
459 | struct etm_config *config = &drvdata->config; | ||
460 | |||
461 | ret = kstrtoul(buf, 16, &val); | ||
462 | if (ret) | ||
463 | return ret; | ||
464 | |||
465 | spin_lock(&drvdata->spinlock); | ||
466 | idx = config->addr_idx; | ||
467 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
468 | config->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
469 | spin_unlock(&drvdata->spinlock); | ||
470 | return -EPERM; | ||
471 | } | ||
472 | |||
473 | config->addr_val[idx] = val; | ||
474 | config->addr_type[idx] = ETM_ADDR_TYPE_START; | ||
475 | config->startstop_ctrl |= (1 << idx); | ||
476 | config->enable_ctrl1 |= BIT(25); | ||
477 | spin_unlock(&drvdata->spinlock); | ||
478 | |||
479 | return size; | ||
480 | } | ||
481 | static DEVICE_ATTR_RW(addr_start); | ||
482 | |||
483 | static ssize_t addr_stop_show(struct device *dev, | ||
484 | struct device_attribute *attr, char *buf) | ||
485 | { | ||
486 | u8 idx; | ||
487 | unsigned long val; | ||
488 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
489 | struct etm_config *config = &drvdata->config; | ||
490 | |||
491 | spin_lock(&drvdata->spinlock); | ||
492 | idx = config->addr_idx; | ||
493 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
494 | config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
495 | spin_unlock(&drvdata->spinlock); | ||
496 | return -EPERM; | ||
497 | } | ||
498 | |||
499 | val = config->addr_val[idx]; | ||
500 | spin_unlock(&drvdata->spinlock); | ||
501 | |||
502 | return sprintf(buf, "%#lx\n", val); | ||
503 | } | ||
504 | |||
505 | static ssize_t addr_stop_store(struct device *dev, | ||
506 | struct device_attribute *attr, | ||
507 | const char *buf, size_t size) | ||
508 | { | ||
509 | u8 idx; | ||
510 | int ret; | ||
511 | unsigned long val; | ||
512 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
513 | struct etm_config *config = &drvdata->config; | ||
514 | |||
515 | ret = kstrtoul(buf, 16, &val); | ||
516 | if (ret) | ||
517 | return ret; | ||
518 | |||
519 | spin_lock(&drvdata->spinlock); | ||
520 | idx = config->addr_idx; | ||
521 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
522 | config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
523 | spin_unlock(&drvdata->spinlock); | ||
524 | return -EPERM; | ||
525 | } | ||
526 | |||
527 | config->addr_val[idx] = val; | ||
528 | config->addr_type[idx] = ETM_ADDR_TYPE_STOP; | ||
529 | config->startstop_ctrl |= (1 << (idx + 16)); | ||
530 | config->enable_ctrl1 |= ETMTECR1_START_STOP; | ||
531 | spin_unlock(&drvdata->spinlock); | ||
532 | |||
533 | return size; | ||
534 | } | ||
535 | static DEVICE_ATTR_RW(addr_stop); | ||
536 | |||
537 | static ssize_t addr_acctype_show(struct device *dev, | ||
538 | struct device_attribute *attr, char *buf) | ||
539 | { | ||
540 | unsigned long val; | ||
541 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
542 | struct etm_config *config = &drvdata->config; | ||
543 | |||
544 | spin_lock(&drvdata->spinlock); | ||
545 | val = config->addr_acctype[config->addr_idx]; | ||
546 | spin_unlock(&drvdata->spinlock); | ||
547 | |||
548 | return sprintf(buf, "%#lx\n", val); | ||
549 | } | ||
550 | |||
551 | static ssize_t addr_acctype_store(struct device *dev, | ||
552 | struct device_attribute *attr, | ||
553 | const char *buf, size_t size) | ||
554 | { | ||
555 | int ret; | ||
556 | unsigned long val; | ||
557 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
558 | struct etm_config *config = &drvdata->config; | ||
559 | |||
560 | ret = kstrtoul(buf, 16, &val); | ||
561 | if (ret) | ||
562 | return ret; | ||
563 | |||
564 | spin_lock(&drvdata->spinlock); | ||
565 | config->addr_acctype[config->addr_idx] = val; | ||
566 | spin_unlock(&drvdata->spinlock); | ||
567 | |||
568 | return size; | ||
569 | } | ||
570 | static DEVICE_ATTR_RW(addr_acctype); | ||
571 | |||
572 | static ssize_t cntr_idx_show(struct device *dev, | ||
573 | struct device_attribute *attr, char *buf) | ||
574 | { | ||
575 | unsigned long val; | ||
576 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
577 | struct etm_config *config = &drvdata->config; | ||
578 | |||
579 | val = config->cntr_idx; | ||
580 | return sprintf(buf, "%#lx\n", val); | ||
581 | } | ||
582 | |||
583 | static ssize_t cntr_idx_store(struct device *dev, | ||
584 | struct device_attribute *attr, | ||
585 | const char *buf, size_t size) | ||
586 | { | ||
587 | int ret; | ||
588 | unsigned long val; | ||
589 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
590 | struct etm_config *config = &drvdata->config; | ||
591 | |||
592 | ret = kstrtoul(buf, 16, &val); | ||
593 | if (ret) | ||
594 | return ret; | ||
595 | |||
596 | if (val >= drvdata->nr_cntr) | ||
597 | return -EINVAL; | ||
598 | /* | ||
599 | * Use spinlock to ensure index doesn't change while it gets | ||
600 | * dereferenced multiple times within a spinlock block elsewhere. | ||
601 | */ | ||
602 | spin_lock(&drvdata->spinlock); | ||
603 | config->cntr_idx = val; | ||
604 | spin_unlock(&drvdata->spinlock); | ||
605 | |||
606 | return size; | ||
607 | } | ||
608 | static DEVICE_ATTR_RW(cntr_idx); | ||
609 | |||
610 | static ssize_t cntr_rld_val_show(struct device *dev, | ||
611 | struct device_attribute *attr, char *buf) | ||
612 | { | ||
613 | unsigned long val; | ||
614 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
615 | struct etm_config *config = &drvdata->config; | ||
616 | |||
617 | spin_lock(&drvdata->spinlock); | ||
618 | val = config->cntr_rld_val[config->cntr_idx]; | ||
619 | spin_unlock(&drvdata->spinlock); | ||
620 | |||
621 | return sprintf(buf, "%#lx\n", val); | ||
622 | } | ||
623 | |||
624 | static ssize_t cntr_rld_val_store(struct device *dev, | ||
625 | struct device_attribute *attr, | ||
626 | const char *buf, size_t size) | ||
627 | { | ||
628 | int ret; | ||
629 | unsigned long val; | ||
630 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
631 | struct etm_config *config = &drvdata->config; | ||
632 | |||
633 | ret = kstrtoul(buf, 16, &val); | ||
634 | if (ret) | ||
635 | return ret; | ||
636 | |||
637 | spin_lock(&drvdata->spinlock); | ||
638 | config->cntr_rld_val[config->cntr_idx] = val; | ||
639 | spin_unlock(&drvdata->spinlock); | ||
640 | |||
641 | return size; | ||
642 | } | ||
643 | static DEVICE_ATTR_RW(cntr_rld_val); | ||
644 | |||
645 | static ssize_t cntr_event_show(struct device *dev, | ||
646 | struct device_attribute *attr, char *buf) | ||
647 | { | ||
648 | unsigned long val; | ||
649 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
650 | struct etm_config *config = &drvdata->config; | ||
651 | |||
652 | spin_lock(&drvdata->spinlock); | ||
653 | val = config->cntr_event[config->cntr_idx]; | ||
654 | spin_unlock(&drvdata->spinlock); | ||
655 | |||
656 | return sprintf(buf, "%#lx\n", val); | ||
657 | } | ||
658 | |||
659 | static ssize_t cntr_event_store(struct device *dev, | ||
660 | struct device_attribute *attr, | ||
661 | const char *buf, size_t size) | ||
662 | { | ||
663 | int ret; | ||
664 | unsigned long val; | ||
665 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
666 | struct etm_config *config = &drvdata->config; | ||
667 | |||
668 | ret = kstrtoul(buf, 16, &val); | ||
669 | if (ret) | ||
670 | return ret; | ||
671 | |||
672 | spin_lock(&drvdata->spinlock); | ||
673 | config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK; | ||
674 | spin_unlock(&drvdata->spinlock); | ||
675 | |||
676 | return size; | ||
677 | } | ||
678 | static DEVICE_ATTR_RW(cntr_event); | ||
679 | |||
680 | static ssize_t cntr_rld_event_show(struct device *dev, | ||
681 | struct device_attribute *attr, char *buf) | ||
682 | { | ||
683 | unsigned long val; | ||
684 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
685 | struct etm_config *config = &drvdata->config; | ||
686 | |||
687 | spin_lock(&drvdata->spinlock); | ||
688 | val = config->cntr_rld_event[config->cntr_idx]; | ||
689 | spin_unlock(&drvdata->spinlock); | ||
690 | |||
691 | return sprintf(buf, "%#lx\n", val); | ||
692 | } | ||
693 | |||
694 | static ssize_t cntr_rld_event_store(struct device *dev, | ||
695 | struct device_attribute *attr, | ||
696 | const char *buf, size_t size) | ||
697 | { | ||
698 | int ret; | ||
699 | unsigned long val; | ||
700 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
701 | struct etm_config *config = &drvdata->config; | ||
702 | |||
703 | ret = kstrtoul(buf, 16, &val); | ||
704 | if (ret) | ||
705 | return ret; | ||
706 | |||
707 | spin_lock(&drvdata->spinlock); | ||
708 | config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK; | ||
709 | spin_unlock(&drvdata->spinlock); | ||
710 | |||
711 | return size; | ||
712 | } | ||
713 | static DEVICE_ATTR_RW(cntr_rld_event); | ||
714 | |||
715 | static ssize_t cntr_val_show(struct device *dev, | ||
716 | struct device_attribute *attr, char *buf) | ||
717 | { | ||
718 | int i, ret = 0; | ||
719 | u32 val; | ||
720 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
721 | struct etm_config *config = &drvdata->config; | ||
722 | |||
723 | if (!local_read(&drvdata->mode)) { | ||
724 | spin_lock(&drvdata->spinlock); | ||
725 | for (i = 0; i < drvdata->nr_cntr; i++) | ||
726 | ret += sprintf(buf, "counter %d: %x\n", | ||
727 | i, config->cntr_val[i]); | ||
728 | spin_unlock(&drvdata->spinlock); | ||
729 | return ret; | ||
730 | } | ||
731 | |||
732 | for (i = 0; i < drvdata->nr_cntr; i++) { | ||
733 | val = etm_readl(drvdata, ETMCNTVRn(i)); | ||
734 | ret += sprintf(buf, "counter %d: %x\n", i, val); | ||
735 | } | ||
736 | |||
737 | return ret; | ||
738 | } | ||
739 | |||
740 | static ssize_t cntr_val_store(struct device *dev, | ||
741 | struct device_attribute *attr, | ||
742 | const char *buf, size_t size) | ||
743 | { | ||
744 | int ret; | ||
745 | unsigned long val; | ||
746 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
747 | struct etm_config *config = &drvdata->config; | ||
748 | |||
749 | ret = kstrtoul(buf, 16, &val); | ||
750 | if (ret) | ||
751 | return ret; | ||
752 | |||
753 | spin_lock(&drvdata->spinlock); | ||
754 | config->cntr_val[config->cntr_idx] = val; | ||
755 | spin_unlock(&drvdata->spinlock); | ||
756 | |||
757 | return size; | ||
758 | } | ||
759 | static DEVICE_ATTR_RW(cntr_val); | ||
760 | |||
761 | static ssize_t seq_12_event_show(struct device *dev, | ||
762 | struct device_attribute *attr, char *buf) | ||
763 | { | ||
764 | unsigned long val; | ||
765 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
766 | struct etm_config *config = &drvdata->config; | ||
767 | |||
768 | val = config->seq_12_event; | ||
769 | return sprintf(buf, "%#lx\n", val); | ||
770 | } | ||
771 | |||
772 | static ssize_t seq_12_event_store(struct device *dev, | ||
773 | struct device_attribute *attr, | ||
774 | const char *buf, size_t size) | ||
775 | { | ||
776 | int ret; | ||
777 | unsigned long val; | ||
778 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
779 | struct etm_config *config = &drvdata->config; | ||
780 | |||
781 | ret = kstrtoul(buf, 16, &val); | ||
782 | if (ret) | ||
783 | return ret; | ||
784 | |||
785 | config->seq_12_event = val & ETM_EVENT_MASK; | ||
786 | return size; | ||
787 | } | ||
788 | static DEVICE_ATTR_RW(seq_12_event); | ||
789 | |||
790 | static ssize_t seq_21_event_show(struct device *dev, | ||
791 | struct device_attribute *attr, char *buf) | ||
792 | { | ||
793 | unsigned long val; | ||
794 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
795 | struct etm_config *config = &drvdata->config; | ||
796 | |||
797 | val = config->seq_21_event; | ||
798 | return sprintf(buf, "%#lx\n", val); | ||
799 | } | ||
800 | |||
801 | static ssize_t seq_21_event_store(struct device *dev, | ||
802 | struct device_attribute *attr, | ||
803 | const char *buf, size_t size) | ||
804 | { | ||
805 | int ret; | ||
806 | unsigned long val; | ||
807 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
808 | struct etm_config *config = &drvdata->config; | ||
809 | |||
810 | ret = kstrtoul(buf, 16, &val); | ||
811 | if (ret) | ||
812 | return ret; | ||
813 | |||
814 | config->seq_21_event = val & ETM_EVENT_MASK; | ||
815 | return size; | ||
816 | } | ||
817 | static DEVICE_ATTR_RW(seq_21_event); | ||
818 | |||
819 | static ssize_t seq_23_event_show(struct device *dev, | ||
820 | struct device_attribute *attr, char *buf) | ||
821 | { | ||
822 | unsigned long val; | ||
823 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
824 | struct etm_config *config = &drvdata->config; | ||
825 | |||
826 | val = config->seq_23_event; | ||
827 | return sprintf(buf, "%#lx\n", val); | ||
828 | } | ||
829 | |||
830 | static ssize_t seq_23_event_store(struct device *dev, | ||
831 | struct device_attribute *attr, | ||
832 | const char *buf, size_t size) | ||
833 | { | ||
834 | int ret; | ||
835 | unsigned long val; | ||
836 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
837 | struct etm_config *config = &drvdata->config; | ||
838 | |||
839 | ret = kstrtoul(buf, 16, &val); | ||
840 | if (ret) | ||
841 | return ret; | ||
842 | |||
843 | config->seq_23_event = val & ETM_EVENT_MASK; | ||
844 | return size; | ||
845 | } | ||
846 | static DEVICE_ATTR_RW(seq_23_event); | ||
847 | |||
848 | static ssize_t seq_31_event_show(struct device *dev, | ||
849 | struct device_attribute *attr, char *buf) | ||
850 | { | ||
851 | unsigned long val; | ||
852 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
853 | struct etm_config *config = &drvdata->config; | ||
854 | |||
855 | val = config->seq_31_event; | ||
856 | return sprintf(buf, "%#lx\n", val); | ||
857 | } | ||
858 | |||
859 | static ssize_t seq_31_event_store(struct device *dev, | ||
860 | struct device_attribute *attr, | ||
861 | const char *buf, size_t size) | ||
862 | { | ||
863 | int ret; | ||
864 | unsigned long val; | ||
865 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
866 | struct etm_config *config = &drvdata->config; | ||
867 | |||
868 | ret = kstrtoul(buf, 16, &val); | ||
869 | if (ret) | ||
870 | return ret; | ||
871 | |||
872 | config->seq_31_event = val & ETM_EVENT_MASK; | ||
873 | return size; | ||
874 | } | ||
875 | static DEVICE_ATTR_RW(seq_31_event); | ||
876 | |||
877 | static ssize_t seq_32_event_show(struct device *dev, | ||
878 | struct device_attribute *attr, char *buf) | ||
879 | { | ||
880 | unsigned long val; | ||
881 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
882 | struct etm_config *config = &drvdata->config; | ||
883 | |||
884 | val = config->seq_32_event; | ||
885 | return sprintf(buf, "%#lx\n", val); | ||
886 | } | ||
887 | |||
888 | static ssize_t seq_32_event_store(struct device *dev, | ||
889 | struct device_attribute *attr, | ||
890 | const char *buf, size_t size) | ||
891 | { | ||
892 | int ret; | ||
893 | unsigned long val; | ||
894 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
895 | struct etm_config *config = &drvdata->config; | ||
896 | |||
897 | ret = kstrtoul(buf, 16, &val); | ||
898 | if (ret) | ||
899 | return ret; | ||
900 | |||
901 | config->seq_32_event = val & ETM_EVENT_MASK; | ||
902 | return size; | ||
903 | } | ||
904 | static DEVICE_ATTR_RW(seq_32_event); | ||
905 | |||
906 | static ssize_t seq_13_event_show(struct device *dev, | ||
907 | struct device_attribute *attr, char *buf) | ||
908 | { | ||
909 | unsigned long val; | ||
910 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
911 | struct etm_config *config = &drvdata->config; | ||
912 | |||
913 | val = config->seq_13_event; | ||
914 | return sprintf(buf, "%#lx\n", val); | ||
915 | } | ||
916 | |||
917 | static ssize_t seq_13_event_store(struct device *dev, | ||
918 | struct device_attribute *attr, | ||
919 | const char *buf, size_t size) | ||
920 | { | ||
921 | int ret; | ||
922 | unsigned long val; | ||
923 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
924 | struct etm_config *config = &drvdata->config; | ||
925 | |||
926 | ret = kstrtoul(buf, 16, &val); | ||
927 | if (ret) | ||
928 | return ret; | ||
929 | |||
930 | config->seq_13_event = val & ETM_EVENT_MASK; | ||
931 | return size; | ||
932 | } | ||
933 | static DEVICE_ATTR_RW(seq_13_event); | ||
934 | |||
935 | static ssize_t seq_curr_state_show(struct device *dev, | ||
936 | struct device_attribute *attr, char *buf) | ||
937 | { | ||
938 | unsigned long val, flags; | ||
939 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
940 | struct etm_config *config = &drvdata->config; | ||
941 | |||
942 | if (!local_read(&drvdata->mode)) { | ||
943 | val = config->seq_curr_state; | ||
944 | goto out; | ||
945 | } | ||
946 | |||
947 | pm_runtime_get_sync(drvdata->dev); | ||
948 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
949 | |||
950 | CS_UNLOCK(drvdata->base); | ||
951 | val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); | ||
952 | CS_LOCK(drvdata->base); | ||
953 | |||
954 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
955 | pm_runtime_put(drvdata->dev); | ||
956 | out: | ||
957 | return sprintf(buf, "%#lx\n", val); | ||
958 | } | ||
959 | |||
960 | static ssize_t seq_curr_state_store(struct device *dev, | ||
961 | struct device_attribute *attr, | ||
962 | const char *buf, size_t size) | ||
963 | { | ||
964 | int ret; | ||
965 | unsigned long val; | ||
966 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
967 | struct etm_config *config = &drvdata->config; | ||
968 | |||
969 | ret = kstrtoul(buf, 16, &val); | ||
970 | if (ret) | ||
971 | return ret; | ||
972 | |||
973 | if (val > ETM_SEQ_STATE_MAX_VAL) | ||
974 | return -EINVAL; | ||
975 | |||
976 | config->seq_curr_state = val; | ||
977 | |||
978 | return size; | ||
979 | } | ||
980 | static DEVICE_ATTR_RW(seq_curr_state); | ||
981 | |||
982 | static ssize_t ctxid_idx_show(struct device *dev, | ||
983 | struct device_attribute *attr, char *buf) | ||
984 | { | ||
985 | unsigned long val; | ||
986 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
987 | struct etm_config *config = &drvdata->config; | ||
988 | |||
989 | val = config->ctxid_idx; | ||
990 | return sprintf(buf, "%#lx\n", val); | ||
991 | } | ||
992 | |||
993 | static ssize_t ctxid_idx_store(struct device *dev, | ||
994 | struct device_attribute *attr, | ||
995 | const char *buf, size_t size) | ||
996 | { | ||
997 | int ret; | ||
998 | unsigned long val; | ||
999 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1000 | struct etm_config *config = &drvdata->config; | ||
1001 | |||
1002 | ret = kstrtoul(buf, 16, &val); | ||
1003 | if (ret) | ||
1004 | return ret; | ||
1005 | |||
1006 | if (val >= drvdata->nr_ctxid_cmp) | ||
1007 | return -EINVAL; | ||
1008 | |||
1009 | /* | ||
1010 | * Use spinlock to ensure index doesn't change while it gets | ||
1011 | * dereferenced multiple times within a spinlock block elsewhere. | ||
1012 | */ | ||
1013 | spin_lock(&drvdata->spinlock); | ||
1014 | config->ctxid_idx = val; | ||
1015 | spin_unlock(&drvdata->spinlock); | ||
1016 | |||
1017 | return size; | ||
1018 | } | ||
1019 | static DEVICE_ATTR_RW(ctxid_idx); | ||
1020 | |||
1021 | static ssize_t ctxid_pid_show(struct device *dev, | ||
1022 | struct device_attribute *attr, char *buf) | ||
1023 | { | ||
1024 | unsigned long val; | ||
1025 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1026 | struct etm_config *config = &drvdata->config; | ||
1027 | |||
1028 | spin_lock(&drvdata->spinlock); | ||
1029 | val = config->ctxid_vpid[config->ctxid_idx]; | ||
1030 | spin_unlock(&drvdata->spinlock); | ||
1031 | |||
1032 | return sprintf(buf, "%#lx\n", val); | ||
1033 | } | ||
1034 | |||
1035 | static ssize_t ctxid_pid_store(struct device *dev, | ||
1036 | struct device_attribute *attr, | ||
1037 | const char *buf, size_t size) | ||
1038 | { | ||
1039 | int ret; | ||
1040 | unsigned long vpid, pid; | ||
1041 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1042 | struct etm_config *config = &drvdata->config; | ||
1043 | |||
1044 | ret = kstrtoul(buf, 16, &vpid); | ||
1045 | if (ret) | ||
1046 | return ret; | ||
1047 | |||
1048 | pid = coresight_vpid_to_pid(vpid); | ||
1049 | |||
1050 | spin_lock(&drvdata->spinlock); | ||
1051 | config->ctxid_pid[config->ctxid_idx] = pid; | ||
1052 | config->ctxid_vpid[config->ctxid_idx] = vpid; | ||
1053 | spin_unlock(&drvdata->spinlock); | ||
1054 | |||
1055 | return size; | ||
1056 | } | ||
1057 | static DEVICE_ATTR_RW(ctxid_pid); | ||
1058 | |||
1059 | static ssize_t ctxid_mask_show(struct device *dev, | ||
1060 | struct device_attribute *attr, char *buf) | ||
1061 | { | ||
1062 | unsigned long val; | ||
1063 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1064 | struct etm_config *config = &drvdata->config; | ||
1065 | |||
1066 | val = config->ctxid_mask; | ||
1067 | return sprintf(buf, "%#lx\n", val); | ||
1068 | } | ||
1069 | |||
1070 | static ssize_t ctxid_mask_store(struct device *dev, | ||
1071 | struct device_attribute *attr, | ||
1072 | const char *buf, size_t size) | ||
1073 | { | ||
1074 | int ret; | ||
1075 | unsigned long val; | ||
1076 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1077 | struct etm_config *config = &drvdata->config; | ||
1078 | |||
1079 | ret = kstrtoul(buf, 16, &val); | ||
1080 | if (ret) | ||
1081 | return ret; | ||
1082 | |||
1083 | config->ctxid_mask = val; | ||
1084 | return size; | ||
1085 | } | ||
1086 | static DEVICE_ATTR_RW(ctxid_mask); | ||
1087 | |||
1088 | static ssize_t sync_freq_show(struct device *dev, | ||
1089 | struct device_attribute *attr, char *buf) | ||
1090 | { | ||
1091 | unsigned long val; | ||
1092 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1093 | struct etm_config *config = &drvdata->config; | ||
1094 | |||
1095 | val = config->sync_freq; | ||
1096 | return sprintf(buf, "%#lx\n", val); | ||
1097 | } | ||
1098 | |||
1099 | static ssize_t sync_freq_store(struct device *dev, | ||
1100 | struct device_attribute *attr, | ||
1101 | const char *buf, size_t size) | ||
1102 | { | ||
1103 | int ret; | ||
1104 | unsigned long val; | ||
1105 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1106 | struct etm_config *config = &drvdata->config; | ||
1107 | |||
1108 | ret = kstrtoul(buf, 16, &val); | ||
1109 | if (ret) | ||
1110 | return ret; | ||
1111 | |||
1112 | config->sync_freq = val & ETM_SYNC_MASK; | ||
1113 | return size; | ||
1114 | } | ||
1115 | static DEVICE_ATTR_RW(sync_freq); | ||
1116 | |||
1117 | static ssize_t timestamp_event_show(struct device *dev, | ||
1118 | struct device_attribute *attr, char *buf) | ||
1119 | { | ||
1120 | unsigned long val; | ||
1121 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1122 | struct etm_config *config = &drvdata->config; | ||
1123 | |||
1124 | val = config->timestamp_event; | ||
1125 | return sprintf(buf, "%#lx\n", val); | ||
1126 | } | ||
1127 | |||
1128 | static ssize_t timestamp_event_store(struct device *dev, | ||
1129 | struct device_attribute *attr, | ||
1130 | const char *buf, size_t size) | ||
1131 | { | ||
1132 | int ret; | ||
1133 | unsigned long val; | ||
1134 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1135 | struct etm_config *config = &drvdata->config; | ||
1136 | |||
1137 | ret = kstrtoul(buf, 16, &val); | ||
1138 | if (ret) | ||
1139 | return ret; | ||
1140 | |||
1141 | config->timestamp_event = val & ETM_EVENT_MASK; | ||
1142 | return size; | ||
1143 | } | ||
1144 | static DEVICE_ATTR_RW(timestamp_event); | ||
1145 | |||
1146 | static ssize_t cpu_show(struct device *dev, | ||
1147 | struct device_attribute *attr, char *buf) | ||
1148 | { | ||
1149 | int val; | ||
1150 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1151 | |||
1152 | val = drvdata->cpu; | ||
1153 | return scnprintf(buf, PAGE_SIZE, "%d\n", val); | ||
1154 | |||
1155 | } | ||
1156 | static DEVICE_ATTR_RO(cpu); | ||
1157 | |||
1158 | static ssize_t traceid_show(struct device *dev, | ||
1159 | struct device_attribute *attr, char *buf) | ||
1160 | { | ||
1161 | unsigned long val; | ||
1162 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1163 | |||
1164 | val = etm_get_trace_id(drvdata); | ||
1165 | |||
1166 | return sprintf(buf, "%#lx\n", val); | ||
1167 | } | ||
1168 | |||
1169 | static ssize_t traceid_store(struct device *dev, | ||
1170 | struct device_attribute *attr, | ||
1171 | const char *buf, size_t size) | ||
1172 | { | ||
1173 | int ret; | ||
1174 | unsigned long val; | ||
1175 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1176 | |||
1177 | ret = kstrtoul(buf, 16, &val); | ||
1178 | if (ret) | ||
1179 | return ret; | ||
1180 | |||
1181 | drvdata->traceid = val & ETM_TRACEID_MASK; | ||
1182 | return size; | ||
1183 | } | ||
1184 | static DEVICE_ATTR_RW(traceid); | ||
1185 | |||
1186 | static struct attribute *coresight_etm_attrs[] = { | ||
1187 | &dev_attr_nr_addr_cmp.attr, | ||
1188 | &dev_attr_nr_cntr.attr, | ||
1189 | &dev_attr_nr_ctxid_cmp.attr, | ||
1190 | &dev_attr_etmsr.attr, | ||
1191 | &dev_attr_reset.attr, | ||
1192 | &dev_attr_mode.attr, | ||
1193 | &dev_attr_trigger_event.attr, | ||
1194 | &dev_attr_enable_event.attr, | ||
1195 | &dev_attr_fifofull_level.attr, | ||
1196 | &dev_attr_addr_idx.attr, | ||
1197 | &dev_attr_addr_single.attr, | ||
1198 | &dev_attr_addr_range.attr, | ||
1199 | &dev_attr_addr_start.attr, | ||
1200 | &dev_attr_addr_stop.attr, | ||
1201 | &dev_attr_addr_acctype.attr, | ||
1202 | &dev_attr_cntr_idx.attr, | ||
1203 | &dev_attr_cntr_rld_val.attr, | ||
1204 | &dev_attr_cntr_event.attr, | ||
1205 | &dev_attr_cntr_rld_event.attr, | ||
1206 | &dev_attr_cntr_val.attr, | ||
1207 | &dev_attr_seq_12_event.attr, | ||
1208 | &dev_attr_seq_21_event.attr, | ||
1209 | &dev_attr_seq_23_event.attr, | ||
1210 | &dev_attr_seq_31_event.attr, | ||
1211 | &dev_attr_seq_32_event.attr, | ||
1212 | &dev_attr_seq_13_event.attr, | ||
1213 | &dev_attr_seq_curr_state.attr, | ||
1214 | &dev_attr_ctxid_idx.attr, | ||
1215 | &dev_attr_ctxid_pid.attr, | ||
1216 | &dev_attr_ctxid_mask.attr, | ||
1217 | &dev_attr_sync_freq.attr, | ||
1218 | &dev_attr_timestamp_event.attr, | ||
1219 | &dev_attr_traceid.attr, | ||
1220 | &dev_attr_cpu.attr, | ||
1221 | NULL, | ||
1222 | }; | ||
1223 | |||
1224 | #define coresight_simple_func(name, offset) \ | ||
1225 | static ssize_t name##_show(struct device *_dev, \ | ||
1226 | struct device_attribute *attr, char *buf) \ | ||
1227 | { \ | ||
1228 | struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \ | ||
1229 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ | ||
1230 | readl_relaxed(drvdata->base + offset)); \ | ||
1231 | } \ | ||
1232 | DEVICE_ATTR_RO(name) | ||
1233 | |||
1234 | coresight_simple_func(etmccr, ETMCCR); | ||
1235 | coresight_simple_func(etmccer, ETMCCER); | ||
1236 | coresight_simple_func(etmscr, ETMSCR); | ||
1237 | coresight_simple_func(etmidr, ETMIDR); | ||
1238 | coresight_simple_func(etmcr, ETMCR); | ||
1239 | coresight_simple_func(etmtraceidr, ETMTRACEIDR); | ||
1240 | coresight_simple_func(etmteevr, ETMTEEVR); | ||
1241 | coresight_simple_func(etmtssvr, ETMTSSCR); | ||
1242 | coresight_simple_func(etmtecr1, ETMTECR1); | ||
1243 | coresight_simple_func(etmtecr2, ETMTECR2); | ||
1244 | |||
1245 | static struct attribute *coresight_etm_mgmt_attrs[] = { | ||
1246 | &dev_attr_etmccr.attr, | ||
1247 | &dev_attr_etmccer.attr, | ||
1248 | &dev_attr_etmscr.attr, | ||
1249 | &dev_attr_etmidr.attr, | ||
1250 | &dev_attr_etmcr.attr, | ||
1251 | &dev_attr_etmtraceidr.attr, | ||
1252 | &dev_attr_etmteevr.attr, | ||
1253 | &dev_attr_etmtssvr.attr, | ||
1254 | &dev_attr_etmtecr1.attr, | ||
1255 | &dev_attr_etmtecr2.attr, | ||
1256 | NULL, | ||
1257 | }; | ||
1258 | |||
1259 | static const struct attribute_group coresight_etm_group = { | ||
1260 | .attrs = coresight_etm_attrs, | ||
1261 | }; | ||
1262 | |||
1263 | static const struct attribute_group coresight_etm_mgmt_group = { | ||
1264 | .attrs = coresight_etm_mgmt_attrs, | ||
1265 | .name = "mgmt", | ||
1266 | }; | ||
1267 | |||
1268 | const struct attribute_group *coresight_etm_groups[] = { | ||
1269 | &coresight_etm_group, | ||
1270 | &coresight_etm_mgmt_group, | ||
1271 | NULL, | ||
1272 | }; | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c index d630b7ece735..d83ab82672e4 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x.c +++ b/drivers/hwtracing/coresight/coresight-etm3x.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
2 | * | 2 | * |
3 | * Description: CoreSight Program Flow Trace driver | ||
4 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
@@ -11,7 +13,7 @@ | |||
11 | */ | 13 | */ |
12 | 14 | ||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | 16 | #include <linux/moduleparam.h> |
15 | #include <linux/init.h> | 17 | #include <linux/init.h> |
16 | #include <linux/types.h> | 18 | #include <linux/types.h> |
17 | #include <linux/device.h> | 19 | #include <linux/device.h> |
@@ -27,14 +29,21 @@ | |||
27 | #include <linux/cpu.h> | 29 | #include <linux/cpu.h> |
28 | #include <linux/of.h> | 30 | #include <linux/of.h> |
29 | #include <linux/coresight.h> | 31 | #include <linux/coresight.h> |
32 | #include <linux/coresight-pmu.h> | ||
30 | #include <linux/amba/bus.h> | 33 | #include <linux/amba/bus.h> |
31 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
32 | #include <linux/uaccess.h> | 35 | #include <linux/uaccess.h> |
33 | #include <linux/clk.h> | 36 | #include <linux/clk.h> |
37 | #include <linux/perf_event.h> | ||
34 | #include <asm/sections.h> | 38 | #include <asm/sections.h> |
35 | 39 | ||
36 | #include "coresight-etm.h" | 40 | #include "coresight-etm.h" |
41 | #include "coresight-etm-perf.h" | ||
37 | 42 | ||
43 | /* | ||
44 | * Not really modular but using module_param is the easiest way to | ||
45 | * remain consistent with existing use cases for now. | ||
46 | */ | ||
38 | static int boot_enable; | 47 | static int boot_enable; |
39 | module_param_named(boot_enable, boot_enable, int, S_IRUGO); | 48 | module_param_named(boot_enable, boot_enable, int, S_IRUGO); |
40 | 49 | ||
@@ -42,45 +51,16 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO); | |||
42 | static int etm_count; | 51 | static int etm_count; |
43 | static struct etm_drvdata *etmdrvdata[NR_CPUS]; | 52 | static struct etm_drvdata *etmdrvdata[NR_CPUS]; |
44 | 53 | ||
45 | static inline void etm_writel(struct etm_drvdata *drvdata, | ||
46 | u32 val, u32 off) | ||
47 | { | ||
48 | if (drvdata->use_cp14) { | ||
49 | if (etm_writel_cp14(off, val)) { | ||
50 | dev_err(drvdata->dev, | ||
51 | "invalid CP14 access to ETM reg: %#x", off); | ||
52 | } | ||
53 | } else { | ||
54 | writel_relaxed(val, drvdata->base + off); | ||
55 | } | ||
56 | } | ||
57 | |||
58 | static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off) | ||
59 | { | ||
60 | u32 val; | ||
61 | |||
62 | if (drvdata->use_cp14) { | ||
63 | if (etm_readl_cp14(off, &val)) { | ||
64 | dev_err(drvdata->dev, | ||
65 | "invalid CP14 access to ETM reg: %#x", off); | ||
66 | } | ||
67 | } else { | ||
68 | val = readl_relaxed(drvdata->base + off); | ||
69 | } | ||
70 | |||
71 | return val; | ||
72 | } | ||
73 | |||
74 | /* | 54 | /* |
75 | * Memory mapped writes to clear os lock are not supported on some processors | 55 | * Memory mapped writes to clear os lock are not supported on some processors |
76 | * and OS lock must be unlocked before any memory mapped access on such | 56 | * and OS lock must be unlocked before any memory mapped access on such |
77 | * processors, otherwise memory mapped reads/writes will be invalid. | 57 | * processors, otherwise memory mapped reads/writes will be invalid. |
78 | */ | 58 | */ |
79 | static void etm_os_unlock(void *info) | 59 | static void etm_os_unlock(struct etm_drvdata *drvdata) |
80 | { | 60 | { |
81 | struct etm_drvdata *drvdata = (struct etm_drvdata *)info; | ||
82 | /* Writing any value to ETMOSLAR unlocks the trace registers */ | 61 | /* Writing any value to ETMOSLAR unlocks the trace registers */ |
83 | etm_writel(drvdata, 0x0, ETMOSLAR); | 62 | etm_writel(drvdata, 0x0, ETMOSLAR); |
63 | drvdata->os_unlock = true; | ||
84 | isb(); | 64 | isb(); |
85 | } | 65 | } |
86 | 66 | ||
@@ -215,36 +195,156 @@ static void etm_clr_prog(struct etm_drvdata *drvdata) | |||
215 | } | 195 | } |
216 | } | 196 | } |
217 | 197 | ||
218 | static void etm_set_default(struct etm_drvdata *drvdata) | 198 | void etm_set_default(struct etm_config *config) |
219 | { | 199 | { |
220 | int i; | 200 | int i; |
221 | 201 | ||
222 | drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL; | 202 | if (WARN_ON_ONCE(!config)) |
223 | drvdata->enable_event = ETM_HARD_WIRE_RES_A; | 203 | return; |
224 | 204 | ||
225 | drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL; | 205 | /* |
226 | drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL; | 206 | * Taken verbatim from the TRM: |
227 | drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL; | 207 | * |
228 | drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL; | 208 | * To trace all memory: |
229 | drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL; | 209 | * set bit [24] in register 0x009, the ETMTECR1, to 1 |
230 | drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL; | 210 | * set all other bits in register 0x009, the ETMTECR1, to 0 |
231 | drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL; | 211 | * set all bits in register 0x007, the ETMTECR2, to 0 |
212 | * set register 0x008, the ETMTEEVR, to 0x6F (TRUE). | ||
213 | */ | ||
214 | config->enable_ctrl1 = BIT(24); | ||
215 | config->enable_ctrl2 = 0x0; | ||
216 | config->enable_event = ETM_HARD_WIRE_RES_A; | ||
232 | 217 | ||
233 | for (i = 0; i < drvdata->nr_cntr; i++) { | 218 | config->trigger_event = ETM_DEFAULT_EVENT_VAL; |
234 | drvdata->cntr_rld_val[i] = 0x0; | 219 | config->enable_event = ETM_HARD_WIRE_RES_A; |
235 | drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL; | 220 | |
236 | drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL; | 221 | config->seq_12_event = ETM_DEFAULT_EVENT_VAL; |
237 | drvdata->cntr_val[i] = 0x0; | 222 | config->seq_21_event = ETM_DEFAULT_EVENT_VAL; |
223 | config->seq_23_event = ETM_DEFAULT_EVENT_VAL; | ||
224 | config->seq_31_event = ETM_DEFAULT_EVENT_VAL; | ||
225 | config->seq_32_event = ETM_DEFAULT_EVENT_VAL; | ||
226 | config->seq_13_event = ETM_DEFAULT_EVENT_VAL; | ||
227 | config->timestamp_event = ETM_DEFAULT_EVENT_VAL; | ||
228 | |||
229 | for (i = 0; i < ETM_MAX_CNTR; i++) { | ||
230 | config->cntr_rld_val[i] = 0x0; | ||
231 | config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL; | ||
232 | config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL; | ||
233 | config->cntr_val[i] = 0x0; | ||
238 | } | 234 | } |
239 | 235 | ||
240 | drvdata->seq_curr_state = 0x0; | 236 | config->seq_curr_state = 0x0; |
241 | drvdata->ctxid_idx = 0x0; | 237 | config->ctxid_idx = 0x0; |
242 | for (i = 0; i < drvdata->nr_ctxid_cmp; i++) { | 238 | for (i = 0; i < ETM_MAX_CTXID_CMP; i++) { |
243 | drvdata->ctxid_pid[i] = 0x0; | 239 | config->ctxid_pid[i] = 0x0; |
244 | drvdata->ctxid_vpid[i] = 0x0; | 240 | config->ctxid_vpid[i] = 0x0; |
245 | } | 241 | } |
246 | 242 | ||
247 | drvdata->ctxid_mask = 0x0; | 243 | config->ctxid_mask = 0x0; |
244 | } | ||
245 | |||
246 | void etm_config_trace_mode(struct etm_config *config) | ||
247 | { | ||
248 | u32 flags, mode; | ||
249 | |||
250 | mode = config->mode; | ||
251 | |||
252 | mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER); | ||
253 | |||
254 | /* excluding kernel AND user space doesn't make sense */ | ||
255 | if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) | ||
256 | return; | ||
257 | |||
258 | /* nothing to do if neither flags are set */ | ||
259 | if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER)) | ||
260 | return; | ||
261 | |||
262 | flags = (1 << 0 | /* instruction execute */ | ||
263 | 3 << 3 | /* ARM instruction */ | ||
264 | 0 << 5 | /* No data value comparison */ | ||
265 | 0 << 7 | /* No exact mach */ | ||
266 | 0 << 8); /* Ignore context ID */ | ||
267 | |||
268 | /* No need to worry about single address comparators. */ | ||
269 | config->enable_ctrl2 = 0x0; | ||
270 | |||
271 | /* Bit 0 is address range comparator 1 */ | ||
272 | config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1; | ||
273 | |||
274 | /* | ||
275 | * On ETMv3.5: | ||
276 | * ETMACTRn[13,11] == Non-secure state comparison control | ||
277 | * ETMACTRn[12,10] == Secure state comparison control | ||
278 | * | ||
279 | * b00 == Match in all modes in this state | ||
280 | * b01 == Do not match in any more in this state | ||
281 | * b10 == Match in all modes excepts user mode in this state | ||
282 | * b11 == Match only in user mode in this state | ||
283 | */ | ||
284 | |||
285 | /* Tracing in secure mode is not supported at this time */ | ||
286 | flags |= (0 << 12 | 1 << 10); | ||
287 | |||
288 | if (mode & ETM_MODE_EXCL_USER) { | ||
289 | /* exclude user, match all modes except user mode */ | ||
290 | flags |= (1 << 13 | 0 << 11); | ||
291 | } else { | ||
292 | /* exclude kernel, match only in user mode */ | ||
293 | flags |= (1 << 13 | 1 << 11); | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * The ETMEEVR register is already set to "hard wire A". As such | ||
298 | * all there is to do is setup an address comparator that spans | ||
299 | * the entire address range and configure the state and mode bits. | ||
300 | */ | ||
301 | config->addr_val[0] = (u32) 0x0; | ||
302 | config->addr_val[1] = (u32) ~0x0; | ||
303 | config->addr_acctype[0] = flags; | ||
304 | config->addr_acctype[1] = flags; | ||
305 | config->addr_type[0] = ETM_ADDR_TYPE_RANGE; | ||
306 | config->addr_type[1] = ETM_ADDR_TYPE_RANGE; | ||
307 | } | ||
308 | |||
309 | #define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN) | ||
310 | |||
311 | static int etm_parse_event_config(struct etm_drvdata *drvdata, | ||
312 | struct perf_event_attr *attr) | ||
313 | { | ||
314 | struct etm_config *config = &drvdata->config; | ||
315 | |||
316 | if (!attr) | ||
317 | return -EINVAL; | ||
318 | |||
319 | /* Clear configuration from previous run */ | ||
320 | memset(config, 0, sizeof(struct etm_config)); | ||
321 | |||
322 | if (attr->exclude_kernel) | ||
323 | config->mode = ETM_MODE_EXCL_KERN; | ||
324 | |||
325 | if (attr->exclude_user) | ||
326 | config->mode = ETM_MODE_EXCL_USER; | ||
327 | |||
328 | /* Always start from the default config */ | ||
329 | etm_set_default(config); | ||
330 | |||
331 | /* | ||
332 | * By default the tracers are configured to trace the whole address | ||
333 | * range. Narrow the field only if requested by user space. | ||
334 | */ | ||
335 | if (config->mode) | ||
336 | etm_config_trace_mode(config); | ||
337 | |||
338 | /* | ||
339 | * At this time only cycle accurate and timestamp options are | ||
340 | * available. | ||
341 | */ | ||
342 | if (attr->config & ~ETM3X_SUPPORTED_OPTIONS) | ||
343 | return -EINVAL; | ||
344 | |||
345 | config->ctrl = attr->config; | ||
346 | |||
347 | return 0; | ||
248 | } | 348 | } |
249 | 349 | ||
250 | static void etm_enable_hw(void *info) | 350 | static void etm_enable_hw(void *info) |
@@ -252,6 +352,7 @@ static void etm_enable_hw(void *info) | |||
252 | int i; | 352 | int i; |
253 | u32 etmcr; | 353 | u32 etmcr; |
254 | struct etm_drvdata *drvdata = info; | 354 | struct etm_drvdata *drvdata = info; |
355 | struct etm_config *config = &drvdata->config; | ||
255 | 356 | ||
256 | CS_UNLOCK(drvdata->base); | 357 | CS_UNLOCK(drvdata->base); |
257 | 358 | ||
@@ -265,65 +366,74 @@ static void etm_enable_hw(void *info) | |||
265 | etm_set_prog(drvdata); | 366 | etm_set_prog(drvdata); |
266 | 367 | ||
267 | etmcr = etm_readl(drvdata, ETMCR); | 368 | etmcr = etm_readl(drvdata, ETMCR); |
268 | etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG); | 369 | /* Clear setting from a previous run if need be */ |
370 | etmcr &= ~ETM3X_SUPPORTED_OPTIONS; | ||
269 | etmcr |= drvdata->port_size; | 371 | etmcr |= drvdata->port_size; |
270 | etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR); | 372 | etmcr |= ETMCR_ETM_EN; |
271 | etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER); | 373 | etm_writel(drvdata, config->ctrl | etmcr, ETMCR); |
272 | etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR); | 374 | etm_writel(drvdata, config->trigger_event, ETMTRIGGER); |
273 | etm_writel(drvdata, drvdata->enable_event, ETMTEEVR); | 375 | etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR); |
274 | etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1); | 376 | etm_writel(drvdata, config->enable_event, ETMTEEVR); |
275 | etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR); | 377 | etm_writel(drvdata, config->enable_ctrl1, ETMTECR1); |
378 | etm_writel(drvdata, config->fifofull_level, ETMFFLR); | ||
276 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { | 379 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { |
277 | etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i)); | 380 | etm_writel(drvdata, config->addr_val[i], ETMACVRn(i)); |
278 | etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i)); | 381 | etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i)); |
279 | } | 382 | } |
280 | for (i = 0; i < drvdata->nr_cntr; i++) { | 383 | for (i = 0; i < drvdata->nr_cntr; i++) { |
281 | etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i)); | 384 | etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i)); |
282 | etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i)); | 385 | etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i)); |
283 | etm_writel(drvdata, drvdata->cntr_rld_event[i], | 386 | etm_writel(drvdata, config->cntr_rld_event[i], |
284 | ETMCNTRLDEVRn(i)); | 387 | ETMCNTRLDEVRn(i)); |
285 | etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i)); | 388 | etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i)); |
286 | } | 389 | } |
287 | etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR); | 390 | etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR); |
288 | etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR); | 391 | etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR); |
289 | etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR); | 392 | etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR); |
290 | etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR); | 393 | etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR); |
291 | etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR); | 394 | etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR); |
292 | etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR); | 395 | etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR); |
293 | etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR); | 396 | etm_writel(drvdata, config->seq_curr_state, ETMSQR); |
294 | for (i = 0; i < drvdata->nr_ext_out; i++) | 397 | for (i = 0; i < drvdata->nr_ext_out; i++) |
295 | etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i)); | 398 | etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i)); |
296 | for (i = 0; i < drvdata->nr_ctxid_cmp; i++) | 399 | for (i = 0; i < drvdata->nr_ctxid_cmp; i++) |
297 | etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i)); | 400 | etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i)); |
298 | etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR); | 401 | etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR); |
299 | etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR); | 402 | etm_writel(drvdata, config->sync_freq, ETMSYNCFR); |
300 | /* No external input selected */ | 403 | /* No external input selected */ |
301 | etm_writel(drvdata, 0x0, ETMEXTINSELR); | 404 | etm_writel(drvdata, 0x0, ETMEXTINSELR); |
302 | etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR); | 405 | etm_writel(drvdata, config->timestamp_event, ETMTSEVR); |
303 | /* No auxiliary control selected */ | 406 | /* No auxiliary control selected */ |
304 | etm_writel(drvdata, 0x0, ETMAUXCR); | 407 | etm_writel(drvdata, 0x0, ETMAUXCR); |
305 | etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR); | 408 | etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR); |
306 | /* No VMID comparator value selected */ | 409 | /* No VMID comparator value selected */ |
307 | etm_writel(drvdata, 0x0, ETMVMIDCVR); | 410 | etm_writel(drvdata, 0x0, ETMVMIDCVR); |
308 | 411 | ||
309 | /* Ensures trace output is enabled from this ETM */ | ||
310 | etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR); | ||
311 | |||
312 | etm_clr_prog(drvdata); | 412 | etm_clr_prog(drvdata); |
313 | CS_LOCK(drvdata->base); | 413 | CS_LOCK(drvdata->base); |
314 | 414 | ||
315 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); | 415 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); |
316 | } | 416 | } |
317 | 417 | ||
318 | static int etm_trace_id(struct coresight_device *csdev) | 418 | static int etm_cpu_id(struct coresight_device *csdev) |
319 | { | 419 | { |
320 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 420 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
421 | |||
422 | return drvdata->cpu; | ||
423 | } | ||
424 | |||
425 | int etm_get_trace_id(struct etm_drvdata *drvdata) | ||
426 | { | ||
321 | unsigned long flags; | 427 | unsigned long flags; |
322 | int trace_id = -1; | 428 | int trace_id = -1; |
323 | 429 | ||
324 | if (!drvdata->enable) | 430 | if (!drvdata) |
431 | goto out; | ||
432 | |||
433 | if (!local_read(&drvdata->mode)) | ||
325 | return drvdata->traceid; | 434 | return drvdata->traceid; |
326 | pm_runtime_get_sync(csdev->dev.parent); | 435 | |
436 | pm_runtime_get_sync(drvdata->dev); | ||
327 | 437 | ||
328 | spin_lock_irqsave(&drvdata->spinlock, flags); | 438 | spin_lock_irqsave(&drvdata->spinlock, flags); |
329 | 439 | ||
@@ -332,17 +442,41 @@ static int etm_trace_id(struct coresight_device *csdev) | |||
332 | CS_LOCK(drvdata->base); | 442 | CS_LOCK(drvdata->base); |
333 | 443 | ||
334 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 444 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
335 | pm_runtime_put(csdev->dev.parent); | 445 | pm_runtime_put(drvdata->dev); |
336 | 446 | ||
447 | out: | ||
337 | return trace_id; | 448 | return trace_id; |
449 | |||
450 | } | ||
451 | |||
452 | static int etm_trace_id(struct coresight_device *csdev) | ||
453 | { | ||
454 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
455 | |||
456 | return etm_get_trace_id(drvdata); | ||
338 | } | 457 | } |
339 | 458 | ||
340 | static int etm_enable(struct coresight_device *csdev) | 459 | static int etm_enable_perf(struct coresight_device *csdev, |
460 | struct perf_event_attr *attr) | ||
461 | { | ||
462 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
463 | |||
464 | if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) | ||
465 | return -EINVAL; | ||
466 | |||
467 | /* Configure the tracer based on the session's specifics */ | ||
468 | etm_parse_event_config(drvdata, attr); | ||
469 | /* And enable it */ | ||
470 | etm_enable_hw(drvdata); | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | static int etm_enable_sysfs(struct coresight_device *csdev) | ||
341 | { | 476 | { |
342 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 477 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
343 | int ret; | 478 | int ret; |
344 | 479 | ||
345 | pm_runtime_get_sync(csdev->dev.parent); | ||
346 | spin_lock(&drvdata->spinlock); | 480 | spin_lock(&drvdata->spinlock); |
347 | 481 | ||
348 | /* | 482 | /* |
@@ -357,16 +491,45 @@ static int etm_enable(struct coresight_device *csdev) | |||
357 | goto err; | 491 | goto err; |
358 | } | 492 | } |
359 | 493 | ||
360 | drvdata->enable = true; | ||
361 | drvdata->sticky_enable = true; | 494 | drvdata->sticky_enable = true; |
362 | |||
363 | spin_unlock(&drvdata->spinlock); | 495 | spin_unlock(&drvdata->spinlock); |
364 | 496 | ||
365 | dev_info(drvdata->dev, "ETM tracing enabled\n"); | 497 | dev_info(drvdata->dev, "ETM tracing enabled\n"); |
366 | return 0; | 498 | return 0; |
499 | |||
367 | err: | 500 | err: |
368 | spin_unlock(&drvdata->spinlock); | 501 | spin_unlock(&drvdata->spinlock); |
369 | pm_runtime_put(csdev->dev.parent); | 502 | return ret; |
503 | } | ||
504 | |||
505 | static int etm_enable(struct coresight_device *csdev, | ||
506 | struct perf_event_attr *attr, u32 mode) | ||
507 | { | ||
508 | int ret; | ||
509 | u32 val; | ||
510 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
511 | |||
512 | val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode); | ||
513 | |||
514 | /* Someone is already using the tracer */ | ||
515 | if (val) | ||
516 | return -EBUSY; | ||
517 | |||
518 | switch (mode) { | ||
519 | case CS_MODE_SYSFS: | ||
520 | ret = etm_enable_sysfs(csdev); | ||
521 | break; | ||
522 | case CS_MODE_PERF: | ||
523 | ret = etm_enable_perf(csdev, attr); | ||
524 | break; | ||
525 | default: | ||
526 | ret = -EINVAL; | ||
527 | } | ||
528 | |||
529 | /* The tracer didn't start */ | ||
530 | if (ret) | ||
531 | local_set(&drvdata->mode, CS_MODE_DISABLED); | ||
532 | |||
370 | return ret; | 533 | return ret; |
371 | } | 534 | } |
372 | 535 | ||
@@ -374,18 +537,16 @@ static void etm_disable_hw(void *info) | |||
374 | { | 537 | { |
375 | int i; | 538 | int i; |
376 | struct etm_drvdata *drvdata = info; | 539 | struct etm_drvdata *drvdata = info; |
540 | struct etm_config *config = &drvdata->config; | ||
377 | 541 | ||
378 | CS_UNLOCK(drvdata->base); | 542 | CS_UNLOCK(drvdata->base); |
379 | etm_set_prog(drvdata); | 543 | etm_set_prog(drvdata); |
380 | 544 | ||
381 | /* Program trace enable to low by using always false event */ | ||
382 | etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR); | ||
383 | |||
384 | /* Read back sequencer and counters for post trace analysis */ | 545 | /* Read back sequencer and counters for post trace analysis */ |
385 | drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); | 546 | config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); |
386 | 547 | ||
387 | for (i = 0; i < drvdata->nr_cntr; i++) | 548 | for (i = 0; i < drvdata->nr_cntr; i++) |
388 | drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); | 549 | config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); |
389 | 550 | ||
390 | etm_set_pwrdwn(drvdata); | 551 | etm_set_pwrdwn(drvdata); |
391 | CS_LOCK(drvdata->base); | 552 | CS_LOCK(drvdata->base); |
@@ -393,7 +554,28 @@ static void etm_disable_hw(void *info) | |||
393 | dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); | 554 | dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); |
394 | } | 555 | } |
395 | 556 | ||
396 | static void etm_disable(struct coresight_device *csdev) | 557 | static void etm_disable_perf(struct coresight_device *csdev) |
558 | { | ||
559 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
560 | |||
561 | if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) | ||
562 | return; | ||
563 | |||
564 | CS_UNLOCK(drvdata->base); | ||
565 | |||
566 | /* Setting the prog bit disables tracing immediately */ | ||
567 | etm_set_prog(drvdata); | ||
568 | |||
569 | /* | ||
570 | * There is no way to know when the tracer will be used again so | ||
571 | * power down the tracer. | ||
572 | */ | ||
573 | etm_set_pwrdwn(drvdata); | ||
574 | |||
575 | CS_LOCK(drvdata->base); | ||
576 | } | ||
577 | |||
578 | static void etm_disable_sysfs(struct coresight_device *csdev) | ||
397 | { | 579 | { |
398 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 580 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
399 | 581 | ||
@@ -411,1235 +593,52 @@ static void etm_disable(struct coresight_device *csdev) | |||
411 | * ensures that register writes occur when cpu is powered. | 593 | * ensures that register writes occur when cpu is powered. |
412 | */ | 594 | */ |
413 | smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1); | 595 | smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1); |
414 | drvdata->enable = false; | ||
415 | 596 | ||
416 | spin_unlock(&drvdata->spinlock); | 597 | spin_unlock(&drvdata->spinlock); |
417 | put_online_cpus(); | 598 | put_online_cpus(); |
418 | pm_runtime_put(csdev->dev.parent); | ||
419 | 599 | ||
420 | dev_info(drvdata->dev, "ETM tracing disabled\n"); | 600 | dev_info(drvdata->dev, "ETM tracing disabled\n"); |
421 | } | 601 | } |
422 | 602 | ||
423 | static const struct coresight_ops_source etm_source_ops = { | 603 | static void etm_disable(struct coresight_device *csdev) |
424 | .trace_id = etm_trace_id, | ||
425 | .enable = etm_enable, | ||
426 | .disable = etm_disable, | ||
427 | }; | ||
428 | |||
429 | static const struct coresight_ops etm_cs_ops = { | ||
430 | .source_ops = &etm_source_ops, | ||
431 | }; | ||
432 | |||
433 | static ssize_t nr_addr_cmp_show(struct device *dev, | ||
434 | struct device_attribute *attr, char *buf) | ||
435 | { | ||
436 | unsigned long val; | ||
437 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
438 | |||
439 | val = drvdata->nr_addr_cmp; | ||
440 | return sprintf(buf, "%#lx\n", val); | ||
441 | } | ||
442 | static DEVICE_ATTR_RO(nr_addr_cmp); | ||
443 | |||
444 | static ssize_t nr_cntr_show(struct device *dev, | ||
445 | struct device_attribute *attr, char *buf) | ||
446 | { unsigned long val; | ||
447 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
448 | |||
449 | val = drvdata->nr_cntr; | ||
450 | return sprintf(buf, "%#lx\n", val); | ||
451 | } | ||
452 | static DEVICE_ATTR_RO(nr_cntr); | ||
453 | |||
454 | static ssize_t nr_ctxid_cmp_show(struct device *dev, | ||
455 | struct device_attribute *attr, char *buf) | ||
456 | { | ||
457 | unsigned long val; | ||
458 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
459 | |||
460 | val = drvdata->nr_ctxid_cmp; | ||
461 | return sprintf(buf, "%#lx\n", val); | ||
462 | } | ||
463 | static DEVICE_ATTR_RO(nr_ctxid_cmp); | ||
464 | |||
465 | static ssize_t etmsr_show(struct device *dev, | ||
466 | struct device_attribute *attr, char *buf) | ||
467 | { | ||
468 | unsigned long flags, val; | ||
469 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
470 | |||
471 | pm_runtime_get_sync(drvdata->dev); | ||
472 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
473 | CS_UNLOCK(drvdata->base); | ||
474 | |||
475 | val = etm_readl(drvdata, ETMSR); | ||
476 | |||
477 | CS_LOCK(drvdata->base); | ||
478 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
479 | pm_runtime_put(drvdata->dev); | ||
480 | |||
481 | return sprintf(buf, "%#lx\n", val); | ||
482 | } | ||
483 | static DEVICE_ATTR_RO(etmsr); | ||
484 | |||
485 | static ssize_t reset_store(struct device *dev, | ||
486 | struct device_attribute *attr, | ||
487 | const char *buf, size_t size) | ||
488 | { | ||
489 | int i, ret; | ||
490 | unsigned long val; | ||
491 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
492 | |||
493 | ret = kstrtoul(buf, 16, &val); | ||
494 | if (ret) | ||
495 | return ret; | ||
496 | |||
497 | if (val) { | ||
498 | spin_lock(&drvdata->spinlock); | ||
499 | drvdata->mode = ETM_MODE_EXCLUDE; | ||
500 | drvdata->ctrl = 0x0; | ||
501 | drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL; | ||
502 | drvdata->startstop_ctrl = 0x0; | ||
503 | drvdata->addr_idx = 0x0; | ||
504 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { | ||
505 | drvdata->addr_val[i] = 0x0; | ||
506 | drvdata->addr_acctype[i] = 0x0; | ||
507 | drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE; | ||
508 | } | ||
509 | drvdata->cntr_idx = 0x0; | ||
510 | |||
511 | etm_set_default(drvdata); | ||
512 | spin_unlock(&drvdata->spinlock); | ||
513 | } | ||
514 | |||
515 | return size; | ||
516 | } | ||
517 | static DEVICE_ATTR_WO(reset); | ||
518 | |||
519 | static ssize_t mode_show(struct device *dev, | ||
520 | struct device_attribute *attr, char *buf) | ||
521 | { | ||
522 | unsigned long val; | ||
523 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
524 | |||
525 | val = drvdata->mode; | ||
526 | return sprintf(buf, "%#lx\n", val); | ||
527 | } | ||
528 | |||
529 | static ssize_t mode_store(struct device *dev, | ||
530 | struct device_attribute *attr, | ||
531 | const char *buf, size_t size) | ||
532 | { | ||
533 | int ret; | ||
534 | unsigned long val; | ||
535 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
536 | |||
537 | ret = kstrtoul(buf, 16, &val); | ||
538 | if (ret) | ||
539 | return ret; | ||
540 | |||
541 | spin_lock(&drvdata->spinlock); | ||
542 | drvdata->mode = val & ETM_MODE_ALL; | ||
543 | |||
544 | if (drvdata->mode & ETM_MODE_EXCLUDE) | ||
545 | drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC; | ||
546 | else | ||
547 | drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC; | ||
548 | |||
549 | if (drvdata->mode & ETM_MODE_CYCACC) | ||
550 | drvdata->ctrl |= ETMCR_CYC_ACC; | ||
551 | else | ||
552 | drvdata->ctrl &= ~ETMCR_CYC_ACC; | ||
553 | |||
554 | if (drvdata->mode & ETM_MODE_STALL) { | ||
555 | if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) { | ||
556 | dev_warn(drvdata->dev, "stall mode not supported\n"); | ||
557 | ret = -EINVAL; | ||
558 | goto err_unlock; | ||
559 | } | ||
560 | drvdata->ctrl |= ETMCR_STALL_MODE; | ||
561 | } else | ||
562 | drvdata->ctrl &= ~ETMCR_STALL_MODE; | ||
563 | |||
564 | if (drvdata->mode & ETM_MODE_TIMESTAMP) { | ||
565 | if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) { | ||
566 | dev_warn(drvdata->dev, "timestamp not supported\n"); | ||
567 | ret = -EINVAL; | ||
568 | goto err_unlock; | ||
569 | } | ||
570 | drvdata->ctrl |= ETMCR_TIMESTAMP_EN; | ||
571 | } else | ||
572 | drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN; | ||
573 | |||
574 | if (drvdata->mode & ETM_MODE_CTXID) | ||
575 | drvdata->ctrl |= ETMCR_CTXID_SIZE; | ||
576 | else | ||
577 | drvdata->ctrl &= ~ETMCR_CTXID_SIZE; | ||
578 | spin_unlock(&drvdata->spinlock); | ||
579 | |||
580 | return size; | ||
581 | |||
582 | err_unlock: | ||
583 | spin_unlock(&drvdata->spinlock); | ||
584 | return ret; | ||
585 | } | ||
586 | static DEVICE_ATTR_RW(mode); | ||
587 | |||
588 | static ssize_t trigger_event_show(struct device *dev, | ||
589 | struct device_attribute *attr, char *buf) | ||
590 | { | ||
591 | unsigned long val; | ||
592 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
593 | |||
594 | val = drvdata->trigger_event; | ||
595 | return sprintf(buf, "%#lx\n", val); | ||
596 | } | ||
597 | |||
598 | static ssize_t trigger_event_store(struct device *dev, | ||
599 | struct device_attribute *attr, | ||
600 | const char *buf, size_t size) | ||
601 | { | ||
602 | int ret; | ||
603 | unsigned long val; | ||
604 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
605 | |||
606 | ret = kstrtoul(buf, 16, &val); | ||
607 | if (ret) | ||
608 | return ret; | ||
609 | |||
610 | drvdata->trigger_event = val & ETM_EVENT_MASK; | ||
611 | |||
612 | return size; | ||
613 | } | ||
614 | static DEVICE_ATTR_RW(trigger_event); | ||
615 | |||
616 | static ssize_t enable_event_show(struct device *dev, | ||
617 | struct device_attribute *attr, char *buf) | ||
618 | { | ||
619 | unsigned long val; | ||
620 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
621 | |||
622 | val = drvdata->enable_event; | ||
623 | return sprintf(buf, "%#lx\n", val); | ||
624 | } | ||
625 | |||
626 | static ssize_t enable_event_store(struct device *dev, | ||
627 | struct device_attribute *attr, | ||
628 | const char *buf, size_t size) | ||
629 | { | ||
630 | int ret; | ||
631 | unsigned long val; | ||
632 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
633 | |||
634 | ret = kstrtoul(buf, 16, &val); | ||
635 | if (ret) | ||
636 | return ret; | ||
637 | |||
638 | drvdata->enable_event = val & ETM_EVENT_MASK; | ||
639 | |||
640 | return size; | ||
641 | } | ||
642 | static DEVICE_ATTR_RW(enable_event); | ||
643 | |||
644 | static ssize_t fifofull_level_show(struct device *dev, | ||
645 | struct device_attribute *attr, char *buf) | ||
646 | { | ||
647 | unsigned long val; | ||
648 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
649 | |||
650 | val = drvdata->fifofull_level; | ||
651 | return sprintf(buf, "%#lx\n", val); | ||
652 | } | ||
653 | |||
654 | static ssize_t fifofull_level_store(struct device *dev, | ||
655 | struct device_attribute *attr, | ||
656 | const char *buf, size_t size) | ||
657 | { | ||
658 | int ret; | ||
659 | unsigned long val; | ||
660 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
661 | |||
662 | ret = kstrtoul(buf, 16, &val); | ||
663 | if (ret) | ||
664 | return ret; | ||
665 | |||
666 | drvdata->fifofull_level = val; | ||
667 | |||
668 | return size; | ||
669 | } | ||
670 | static DEVICE_ATTR_RW(fifofull_level); | ||
671 | |||
672 | static ssize_t addr_idx_show(struct device *dev, | ||
673 | struct device_attribute *attr, char *buf) | ||
674 | { | ||
675 | unsigned long val; | ||
676 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
677 | |||
678 | val = drvdata->addr_idx; | ||
679 | return sprintf(buf, "%#lx\n", val); | ||
680 | } | ||
681 | |||
682 | static ssize_t addr_idx_store(struct device *dev, | ||
683 | struct device_attribute *attr, | ||
684 | const char *buf, size_t size) | ||
685 | { | ||
686 | int ret; | ||
687 | unsigned long val; | ||
688 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
689 | |||
690 | ret = kstrtoul(buf, 16, &val); | ||
691 | if (ret) | ||
692 | return ret; | ||
693 | |||
694 | if (val >= drvdata->nr_addr_cmp) | ||
695 | return -EINVAL; | ||
696 | |||
697 | /* | ||
698 | * Use spinlock to ensure index doesn't change while it gets | ||
699 | * dereferenced multiple times within a spinlock block elsewhere. | ||
700 | */ | ||
701 | spin_lock(&drvdata->spinlock); | ||
702 | drvdata->addr_idx = val; | ||
703 | spin_unlock(&drvdata->spinlock); | ||
704 | |||
705 | return size; | ||
706 | } | ||
707 | static DEVICE_ATTR_RW(addr_idx); | ||
708 | |||
709 | static ssize_t addr_single_show(struct device *dev, | ||
710 | struct device_attribute *attr, char *buf) | ||
711 | { | ||
712 | u8 idx; | ||
713 | unsigned long val; | ||
714 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
715 | |||
716 | spin_lock(&drvdata->spinlock); | ||
717 | idx = drvdata->addr_idx; | ||
718 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
719 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
720 | spin_unlock(&drvdata->spinlock); | ||
721 | return -EINVAL; | ||
722 | } | ||
723 | |||
724 | val = drvdata->addr_val[idx]; | ||
725 | spin_unlock(&drvdata->spinlock); | ||
726 | |||
727 | return sprintf(buf, "%#lx\n", val); | ||
728 | } | ||
729 | |||
730 | static ssize_t addr_single_store(struct device *dev, | ||
731 | struct device_attribute *attr, | ||
732 | const char *buf, size_t size) | ||
733 | { | ||
734 | u8 idx; | ||
735 | int ret; | ||
736 | unsigned long val; | ||
737 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
738 | |||
739 | ret = kstrtoul(buf, 16, &val); | ||
740 | if (ret) | ||
741 | return ret; | ||
742 | |||
743 | spin_lock(&drvdata->spinlock); | ||
744 | idx = drvdata->addr_idx; | ||
745 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
746 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
747 | spin_unlock(&drvdata->spinlock); | ||
748 | return -EINVAL; | ||
749 | } | ||
750 | |||
751 | drvdata->addr_val[idx] = val; | ||
752 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; | ||
753 | spin_unlock(&drvdata->spinlock); | ||
754 | |||
755 | return size; | ||
756 | } | ||
757 | static DEVICE_ATTR_RW(addr_single); | ||
758 | |||
759 | static ssize_t addr_range_show(struct device *dev, | ||
760 | struct device_attribute *attr, char *buf) | ||
761 | { | ||
762 | u8 idx; | ||
763 | unsigned long val1, val2; | ||
764 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
765 | |||
766 | spin_lock(&drvdata->spinlock); | ||
767 | idx = drvdata->addr_idx; | ||
768 | if (idx % 2 != 0) { | ||
769 | spin_unlock(&drvdata->spinlock); | ||
770 | return -EPERM; | ||
771 | } | ||
772 | if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
773 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
774 | (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
775 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
776 | spin_unlock(&drvdata->spinlock); | ||
777 | return -EPERM; | ||
778 | } | ||
779 | |||
780 | val1 = drvdata->addr_val[idx]; | ||
781 | val2 = drvdata->addr_val[idx + 1]; | ||
782 | spin_unlock(&drvdata->spinlock); | ||
783 | |||
784 | return sprintf(buf, "%#lx %#lx\n", val1, val2); | ||
785 | } | ||
786 | |||
787 | static ssize_t addr_range_store(struct device *dev, | ||
788 | struct device_attribute *attr, | ||
789 | const char *buf, size_t size) | ||
790 | { | ||
791 | u8 idx; | ||
792 | unsigned long val1, val2; | ||
793 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
794 | |||
795 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
796 | return -EINVAL; | ||
797 | /* Lower address comparator cannot have a higher address value */ | ||
798 | if (val1 > val2) | ||
799 | return -EINVAL; | ||
800 | |||
801 | spin_lock(&drvdata->spinlock); | ||
802 | idx = drvdata->addr_idx; | ||
803 | if (idx % 2 != 0) { | ||
804 | spin_unlock(&drvdata->spinlock); | ||
805 | return -EPERM; | ||
806 | } | ||
807 | if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
808 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
809 | (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
810 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
811 | spin_unlock(&drvdata->spinlock); | ||
812 | return -EPERM; | ||
813 | } | ||
814 | |||
815 | drvdata->addr_val[idx] = val1; | ||
816 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE; | ||
817 | drvdata->addr_val[idx + 1] = val2; | ||
818 | drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; | ||
819 | drvdata->enable_ctrl1 |= (1 << (idx/2)); | ||
820 | spin_unlock(&drvdata->spinlock); | ||
821 | |||
822 | return size; | ||
823 | } | ||
824 | static DEVICE_ATTR_RW(addr_range); | ||
825 | |||
826 | static ssize_t addr_start_show(struct device *dev, | ||
827 | struct device_attribute *attr, char *buf) | ||
828 | { | ||
829 | u8 idx; | ||
830 | unsigned long val; | ||
831 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
832 | |||
833 | spin_lock(&drvdata->spinlock); | ||
834 | idx = drvdata->addr_idx; | ||
835 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
836 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
837 | spin_unlock(&drvdata->spinlock); | ||
838 | return -EPERM; | ||
839 | } | ||
840 | |||
841 | val = drvdata->addr_val[idx]; | ||
842 | spin_unlock(&drvdata->spinlock); | ||
843 | |||
844 | return sprintf(buf, "%#lx\n", val); | ||
845 | } | ||
846 | |||
847 | static ssize_t addr_start_store(struct device *dev, | ||
848 | struct device_attribute *attr, | ||
849 | const char *buf, size_t size) | ||
850 | { | ||
851 | u8 idx; | ||
852 | int ret; | ||
853 | unsigned long val; | ||
854 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
855 | |||
856 | ret = kstrtoul(buf, 16, &val); | ||
857 | if (ret) | ||
858 | return ret; | ||
859 | |||
860 | spin_lock(&drvdata->spinlock); | ||
861 | idx = drvdata->addr_idx; | ||
862 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
863 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
864 | spin_unlock(&drvdata->spinlock); | ||
865 | return -EPERM; | ||
866 | } | ||
867 | |||
868 | drvdata->addr_val[idx] = val; | ||
869 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_START; | ||
870 | drvdata->startstop_ctrl |= (1 << idx); | ||
871 | drvdata->enable_ctrl1 |= BIT(25); | ||
872 | spin_unlock(&drvdata->spinlock); | ||
873 | |||
874 | return size; | ||
875 | } | ||
876 | static DEVICE_ATTR_RW(addr_start); | ||
877 | |||
878 | static ssize_t addr_stop_show(struct device *dev, | ||
879 | struct device_attribute *attr, char *buf) | ||
880 | { | ||
881 | u8 idx; | ||
882 | unsigned long val; | ||
883 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
884 | |||
885 | spin_lock(&drvdata->spinlock); | ||
886 | idx = drvdata->addr_idx; | ||
887 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
888 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
889 | spin_unlock(&drvdata->spinlock); | ||
890 | return -EPERM; | ||
891 | } | ||
892 | |||
893 | val = drvdata->addr_val[idx]; | ||
894 | spin_unlock(&drvdata->spinlock); | ||
895 | |||
896 | return sprintf(buf, "%#lx\n", val); | ||
897 | } | ||
898 | |||
899 | static ssize_t addr_stop_store(struct device *dev, | ||
900 | struct device_attribute *attr, | ||
901 | const char *buf, size_t size) | ||
902 | { | ||
903 | u8 idx; | ||
904 | int ret; | ||
905 | unsigned long val; | ||
906 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
907 | |||
908 | ret = kstrtoul(buf, 16, &val); | ||
909 | if (ret) | ||
910 | return ret; | ||
911 | |||
912 | spin_lock(&drvdata->spinlock); | ||
913 | idx = drvdata->addr_idx; | ||
914 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
915 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
916 | spin_unlock(&drvdata->spinlock); | ||
917 | return -EPERM; | ||
918 | } | ||
919 | |||
920 | drvdata->addr_val[idx] = val; | ||
921 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP; | ||
922 | drvdata->startstop_ctrl |= (1 << (idx + 16)); | ||
923 | drvdata->enable_ctrl1 |= ETMTECR1_START_STOP; | ||
924 | spin_unlock(&drvdata->spinlock); | ||
925 | |||
926 | return size; | ||
927 | } | ||
928 | static DEVICE_ATTR_RW(addr_stop); | ||
929 | |||
930 | static ssize_t addr_acctype_show(struct device *dev, | ||
931 | struct device_attribute *attr, char *buf) | ||
932 | { | ||
933 | unsigned long val; | ||
934 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
935 | |||
936 | spin_lock(&drvdata->spinlock); | ||
937 | val = drvdata->addr_acctype[drvdata->addr_idx]; | ||
938 | spin_unlock(&drvdata->spinlock); | ||
939 | |||
940 | return sprintf(buf, "%#lx\n", val); | ||
941 | } | ||
942 | |||
943 | static ssize_t addr_acctype_store(struct device *dev, | ||
944 | struct device_attribute *attr, | ||
945 | const char *buf, size_t size) | ||
946 | { | ||
947 | int ret; | ||
948 | unsigned long val; | ||
949 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
950 | |||
951 | ret = kstrtoul(buf, 16, &val); | ||
952 | if (ret) | ||
953 | return ret; | ||
954 | |||
955 | spin_lock(&drvdata->spinlock); | ||
956 | drvdata->addr_acctype[drvdata->addr_idx] = val; | ||
957 | spin_unlock(&drvdata->spinlock); | ||
958 | |||
959 | return size; | ||
960 | } | ||
961 | static DEVICE_ATTR_RW(addr_acctype); | ||
962 | |||
963 | static ssize_t cntr_idx_show(struct device *dev, | ||
964 | struct device_attribute *attr, char *buf) | ||
965 | { | ||
966 | unsigned long val; | ||
967 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
968 | |||
969 | val = drvdata->cntr_idx; | ||
970 | return sprintf(buf, "%#lx\n", val); | ||
971 | } | ||
972 | |||
973 | static ssize_t cntr_idx_store(struct device *dev, | ||
974 | struct device_attribute *attr, | ||
975 | const char *buf, size_t size) | ||
976 | { | ||
977 | int ret; | ||
978 | unsigned long val; | ||
979 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
980 | |||
981 | ret = kstrtoul(buf, 16, &val); | ||
982 | if (ret) | ||
983 | return ret; | ||
984 | |||
985 | if (val >= drvdata->nr_cntr) | ||
986 | return -EINVAL; | ||
987 | /* | ||
988 | * Use spinlock to ensure index doesn't change while it gets | ||
989 | * dereferenced multiple times within a spinlock block elsewhere. | ||
990 | */ | ||
991 | spin_lock(&drvdata->spinlock); | ||
992 | drvdata->cntr_idx = val; | ||
993 | spin_unlock(&drvdata->spinlock); | ||
994 | |||
995 | return size; | ||
996 | } | ||
997 | static DEVICE_ATTR_RW(cntr_idx); | ||
998 | |||
999 | static ssize_t cntr_rld_val_show(struct device *dev, | ||
1000 | struct device_attribute *attr, char *buf) | ||
1001 | { | ||
1002 | unsigned long val; | ||
1003 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1004 | |||
1005 | spin_lock(&drvdata->spinlock); | ||
1006 | val = drvdata->cntr_rld_val[drvdata->cntr_idx]; | ||
1007 | spin_unlock(&drvdata->spinlock); | ||
1008 | |||
1009 | return sprintf(buf, "%#lx\n", val); | ||
1010 | } | ||
1011 | |||
1012 | static ssize_t cntr_rld_val_store(struct device *dev, | ||
1013 | struct device_attribute *attr, | ||
1014 | const char *buf, size_t size) | ||
1015 | { | ||
1016 | int ret; | ||
1017 | unsigned long val; | ||
1018 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1019 | |||
1020 | ret = kstrtoul(buf, 16, &val); | ||
1021 | if (ret) | ||
1022 | return ret; | ||
1023 | |||
1024 | spin_lock(&drvdata->spinlock); | ||
1025 | drvdata->cntr_rld_val[drvdata->cntr_idx] = val; | ||
1026 | spin_unlock(&drvdata->spinlock); | ||
1027 | |||
1028 | return size; | ||
1029 | } | ||
1030 | static DEVICE_ATTR_RW(cntr_rld_val); | ||
1031 | |||
1032 | static ssize_t cntr_event_show(struct device *dev, | ||
1033 | struct device_attribute *attr, char *buf) | ||
1034 | { | ||
1035 | unsigned long val; | ||
1036 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1037 | |||
1038 | spin_lock(&drvdata->spinlock); | ||
1039 | val = drvdata->cntr_event[drvdata->cntr_idx]; | ||
1040 | spin_unlock(&drvdata->spinlock); | ||
1041 | |||
1042 | return sprintf(buf, "%#lx\n", val); | ||
1043 | } | ||
1044 | |||
1045 | static ssize_t cntr_event_store(struct device *dev, | ||
1046 | struct device_attribute *attr, | ||
1047 | const char *buf, size_t size) | ||
1048 | { | ||
1049 | int ret; | ||
1050 | unsigned long val; | ||
1051 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1052 | |||
1053 | ret = kstrtoul(buf, 16, &val); | ||
1054 | if (ret) | ||
1055 | return ret; | ||
1056 | |||
1057 | spin_lock(&drvdata->spinlock); | ||
1058 | drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; | ||
1059 | spin_unlock(&drvdata->spinlock); | ||
1060 | |||
1061 | return size; | ||
1062 | } | ||
1063 | static DEVICE_ATTR_RW(cntr_event); | ||
1064 | |||
1065 | static ssize_t cntr_rld_event_show(struct device *dev, | ||
1066 | struct device_attribute *attr, char *buf) | ||
1067 | { | ||
1068 | unsigned long val; | ||
1069 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1070 | |||
1071 | spin_lock(&drvdata->spinlock); | ||
1072 | val = drvdata->cntr_rld_event[drvdata->cntr_idx]; | ||
1073 | spin_unlock(&drvdata->spinlock); | ||
1074 | |||
1075 | return sprintf(buf, "%#lx\n", val); | ||
1076 | } | ||
1077 | |||
1078 | static ssize_t cntr_rld_event_store(struct device *dev, | ||
1079 | struct device_attribute *attr, | ||
1080 | const char *buf, size_t size) | ||
1081 | { | ||
1082 | int ret; | ||
1083 | unsigned long val; | ||
1084 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1085 | |||
1086 | ret = kstrtoul(buf, 16, &val); | ||
1087 | if (ret) | ||
1088 | return ret; | ||
1089 | |||
1090 | spin_lock(&drvdata->spinlock); | ||
1091 | drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; | ||
1092 | spin_unlock(&drvdata->spinlock); | ||
1093 | |||
1094 | return size; | ||
1095 | } | ||
1096 | static DEVICE_ATTR_RW(cntr_rld_event); | ||
1097 | |||
1098 | static ssize_t cntr_val_show(struct device *dev, | ||
1099 | struct device_attribute *attr, char *buf) | ||
1100 | { | ||
1101 | int i, ret = 0; | ||
1102 | u32 val; | ||
1103 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1104 | |||
1105 | if (!drvdata->enable) { | ||
1106 | spin_lock(&drvdata->spinlock); | ||
1107 | for (i = 0; i < drvdata->nr_cntr; i++) | ||
1108 | ret += sprintf(buf, "counter %d: %x\n", | ||
1109 | i, drvdata->cntr_val[i]); | ||
1110 | spin_unlock(&drvdata->spinlock); | ||
1111 | return ret; | ||
1112 | } | ||
1113 | |||
1114 | for (i = 0; i < drvdata->nr_cntr; i++) { | ||
1115 | val = etm_readl(drvdata, ETMCNTVRn(i)); | ||
1116 | ret += sprintf(buf, "counter %d: %x\n", i, val); | ||
1117 | } | ||
1118 | |||
1119 | return ret; | ||
1120 | } | ||
1121 | |||
1122 | static ssize_t cntr_val_store(struct device *dev, | ||
1123 | struct device_attribute *attr, | ||
1124 | const char *buf, size_t size) | ||
1125 | { | ||
1126 | int ret; | ||
1127 | unsigned long val; | ||
1128 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1129 | |||
1130 | ret = kstrtoul(buf, 16, &val); | ||
1131 | if (ret) | ||
1132 | return ret; | ||
1133 | |||
1134 | spin_lock(&drvdata->spinlock); | ||
1135 | drvdata->cntr_val[drvdata->cntr_idx] = val; | ||
1136 | spin_unlock(&drvdata->spinlock); | ||
1137 | |||
1138 | return size; | ||
1139 | } | ||
1140 | static DEVICE_ATTR_RW(cntr_val); | ||
1141 | |||
1142 | static ssize_t seq_12_event_show(struct device *dev, | ||
1143 | struct device_attribute *attr, char *buf) | ||
1144 | { | ||
1145 | unsigned long val; | ||
1146 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1147 | |||
1148 | val = drvdata->seq_12_event; | ||
1149 | return sprintf(buf, "%#lx\n", val); | ||
1150 | } | ||
1151 | |||
1152 | static ssize_t seq_12_event_store(struct device *dev, | ||
1153 | struct device_attribute *attr, | ||
1154 | const char *buf, size_t size) | ||
1155 | { | ||
1156 | int ret; | ||
1157 | unsigned long val; | ||
1158 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1159 | |||
1160 | ret = kstrtoul(buf, 16, &val); | ||
1161 | if (ret) | ||
1162 | return ret; | ||
1163 | |||
1164 | drvdata->seq_12_event = val & ETM_EVENT_MASK; | ||
1165 | return size; | ||
1166 | } | ||
1167 | static DEVICE_ATTR_RW(seq_12_event); | ||
1168 | |||
1169 | static ssize_t seq_21_event_show(struct device *dev, | ||
1170 | struct device_attribute *attr, char *buf) | ||
1171 | { | ||
1172 | unsigned long val; | ||
1173 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1174 | |||
1175 | val = drvdata->seq_21_event; | ||
1176 | return sprintf(buf, "%#lx\n", val); | ||
1177 | } | ||
1178 | |||
1179 | static ssize_t seq_21_event_store(struct device *dev, | ||
1180 | struct device_attribute *attr, | ||
1181 | const char *buf, size_t size) | ||
1182 | { | ||
1183 | int ret; | ||
1184 | unsigned long val; | ||
1185 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1186 | |||
1187 | ret = kstrtoul(buf, 16, &val); | ||
1188 | if (ret) | ||
1189 | return ret; | ||
1190 | |||
1191 | drvdata->seq_21_event = val & ETM_EVENT_MASK; | ||
1192 | return size; | ||
1193 | } | ||
1194 | static DEVICE_ATTR_RW(seq_21_event); | ||
1195 | |||
1196 | static ssize_t seq_23_event_show(struct device *dev, | ||
1197 | struct device_attribute *attr, char *buf) | ||
1198 | { | ||
1199 | unsigned long val; | ||
1200 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1201 | |||
1202 | val = drvdata->seq_23_event; | ||
1203 | return sprintf(buf, "%#lx\n", val); | ||
1204 | } | ||
1205 | |||
1206 | static ssize_t seq_23_event_store(struct device *dev, | ||
1207 | struct device_attribute *attr, | ||
1208 | const char *buf, size_t size) | ||
1209 | { | ||
1210 | int ret; | ||
1211 | unsigned long val; | ||
1212 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1213 | |||
1214 | ret = kstrtoul(buf, 16, &val); | ||
1215 | if (ret) | ||
1216 | return ret; | ||
1217 | |||
1218 | drvdata->seq_23_event = val & ETM_EVENT_MASK; | ||
1219 | return size; | ||
1220 | } | ||
1221 | static DEVICE_ATTR_RW(seq_23_event); | ||
1222 | |||
1223 | static ssize_t seq_31_event_show(struct device *dev, | ||
1224 | struct device_attribute *attr, char *buf) | ||
1225 | { | ||
1226 | unsigned long val; | ||
1227 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1228 | |||
1229 | val = drvdata->seq_31_event; | ||
1230 | return sprintf(buf, "%#lx\n", val); | ||
1231 | } | ||
1232 | |||
1233 | static ssize_t seq_31_event_store(struct device *dev, | ||
1234 | struct device_attribute *attr, | ||
1235 | const char *buf, size_t size) | ||
1236 | { | ||
1237 | int ret; | ||
1238 | unsigned long val; | ||
1239 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1240 | |||
1241 | ret = kstrtoul(buf, 16, &val); | ||
1242 | if (ret) | ||
1243 | return ret; | ||
1244 | |||
1245 | drvdata->seq_31_event = val & ETM_EVENT_MASK; | ||
1246 | return size; | ||
1247 | } | ||
1248 | static DEVICE_ATTR_RW(seq_31_event); | ||
1249 | |||
1250 | static ssize_t seq_32_event_show(struct device *dev, | ||
1251 | struct device_attribute *attr, char *buf) | ||
1252 | { | ||
1253 | unsigned long val; | ||
1254 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1255 | |||
1256 | val = drvdata->seq_32_event; | ||
1257 | return sprintf(buf, "%#lx\n", val); | ||
1258 | } | ||
1259 | |||
1260 | static ssize_t seq_32_event_store(struct device *dev, | ||
1261 | struct device_attribute *attr, | ||
1262 | const char *buf, size_t size) | ||
1263 | { | ||
1264 | int ret; | ||
1265 | unsigned long val; | ||
1266 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1267 | |||
1268 | ret = kstrtoul(buf, 16, &val); | ||
1269 | if (ret) | ||
1270 | return ret; | ||
1271 | |||
1272 | drvdata->seq_32_event = val & ETM_EVENT_MASK; | ||
1273 | return size; | ||
1274 | } | ||
1275 | static DEVICE_ATTR_RW(seq_32_event); | ||
1276 | |||
1277 | static ssize_t seq_13_event_show(struct device *dev, | ||
1278 | struct device_attribute *attr, char *buf) | ||
1279 | { | ||
1280 | unsigned long val; | ||
1281 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1282 | |||
1283 | val = drvdata->seq_13_event; | ||
1284 | return sprintf(buf, "%#lx\n", val); | ||
1285 | } | ||
1286 | |||
1287 | static ssize_t seq_13_event_store(struct device *dev, | ||
1288 | struct device_attribute *attr, | ||
1289 | const char *buf, size_t size) | ||
1290 | { | ||
1291 | int ret; | ||
1292 | unsigned long val; | ||
1293 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1294 | |||
1295 | ret = kstrtoul(buf, 16, &val); | ||
1296 | if (ret) | ||
1297 | return ret; | ||
1298 | |||
1299 | drvdata->seq_13_event = val & ETM_EVENT_MASK; | ||
1300 | return size; | ||
1301 | } | ||
1302 | static DEVICE_ATTR_RW(seq_13_event); | ||
1303 | |||
1304 | static ssize_t seq_curr_state_show(struct device *dev, | ||
1305 | struct device_attribute *attr, char *buf) | ||
1306 | { | ||
1307 | unsigned long val, flags; | ||
1308 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1309 | |||
1310 | if (!drvdata->enable) { | ||
1311 | val = drvdata->seq_curr_state; | ||
1312 | goto out; | ||
1313 | } | ||
1314 | |||
1315 | pm_runtime_get_sync(drvdata->dev); | ||
1316 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
1317 | |||
1318 | CS_UNLOCK(drvdata->base); | ||
1319 | val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); | ||
1320 | CS_LOCK(drvdata->base); | ||
1321 | |||
1322 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
1323 | pm_runtime_put(drvdata->dev); | ||
1324 | out: | ||
1325 | return sprintf(buf, "%#lx\n", val); | ||
1326 | } | ||
1327 | |||
1328 | static ssize_t seq_curr_state_store(struct device *dev, | ||
1329 | struct device_attribute *attr, | ||
1330 | const char *buf, size_t size) | ||
1331 | { | ||
1332 | int ret; | ||
1333 | unsigned long val; | ||
1334 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1335 | |||
1336 | ret = kstrtoul(buf, 16, &val); | ||
1337 | if (ret) | ||
1338 | return ret; | ||
1339 | |||
1340 | if (val > ETM_SEQ_STATE_MAX_VAL) | ||
1341 | return -EINVAL; | ||
1342 | |||
1343 | drvdata->seq_curr_state = val; | ||
1344 | |||
1345 | return size; | ||
1346 | } | ||
1347 | static DEVICE_ATTR_RW(seq_curr_state); | ||
1348 | |||
1349 | static ssize_t ctxid_idx_show(struct device *dev, | ||
1350 | struct device_attribute *attr, char *buf) | ||
1351 | { | ||
1352 | unsigned long val; | ||
1353 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1354 | |||
1355 | val = drvdata->ctxid_idx; | ||
1356 | return sprintf(buf, "%#lx\n", val); | ||
1357 | } | ||
1358 | |||
1359 | static ssize_t ctxid_idx_store(struct device *dev, | ||
1360 | struct device_attribute *attr, | ||
1361 | const char *buf, size_t size) | ||
1362 | { | 604 | { |
1363 | int ret; | 605 | u32 mode; |
1364 | unsigned long val; | 606 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
1365 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1366 | |||
1367 | ret = kstrtoul(buf, 16, &val); | ||
1368 | if (ret) | ||
1369 | return ret; | ||
1370 | |||
1371 | if (val >= drvdata->nr_ctxid_cmp) | ||
1372 | return -EINVAL; | ||
1373 | 607 | ||
1374 | /* | 608 | /* |
1375 | * Use spinlock to ensure index doesn't change while it gets | 609 | * For as long as the tracer isn't disabled another entity can't |
1376 | * dereferenced multiple times within a spinlock block elsewhere. | 610 | * change its status. As such we can read the status here without |
611 | * fearing it will change under us. | ||
1377 | */ | 612 | */ |
1378 | spin_lock(&drvdata->spinlock); | 613 | mode = local_read(&drvdata->mode); |
1379 | drvdata->ctxid_idx = val; | ||
1380 | spin_unlock(&drvdata->spinlock); | ||
1381 | |||
1382 | return size; | ||
1383 | } | ||
1384 | static DEVICE_ATTR_RW(ctxid_idx); | ||
1385 | |||
1386 | static ssize_t ctxid_pid_show(struct device *dev, | ||
1387 | struct device_attribute *attr, char *buf) | ||
1388 | { | ||
1389 | unsigned long val; | ||
1390 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1391 | |||
1392 | spin_lock(&drvdata->spinlock); | ||
1393 | val = drvdata->ctxid_vpid[drvdata->ctxid_idx]; | ||
1394 | spin_unlock(&drvdata->spinlock); | ||
1395 | 614 | ||
1396 | return sprintf(buf, "%#lx\n", val); | 615 | switch (mode) { |
1397 | } | 616 | case CS_MODE_DISABLED: |
1398 | 617 | break; | |
1399 | static ssize_t ctxid_pid_store(struct device *dev, | 618 | case CS_MODE_SYSFS: |
1400 | struct device_attribute *attr, | 619 | etm_disable_sysfs(csdev); |
1401 | const char *buf, size_t size) | 620 | break; |
1402 | { | 621 | case CS_MODE_PERF: |
1403 | int ret; | 622 | etm_disable_perf(csdev); |
1404 | unsigned long vpid, pid; | 623 | break; |
1405 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | 624 | default: |
1406 | 625 | WARN_ON_ONCE(mode); | |
1407 | ret = kstrtoul(buf, 16, &vpid); | 626 | return; |
1408 | if (ret) | ||
1409 | return ret; | ||
1410 | |||
1411 | pid = coresight_vpid_to_pid(vpid); | ||
1412 | |||
1413 | spin_lock(&drvdata->spinlock); | ||
1414 | drvdata->ctxid_pid[drvdata->ctxid_idx] = pid; | ||
1415 | drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid; | ||
1416 | spin_unlock(&drvdata->spinlock); | ||
1417 | |||
1418 | return size; | ||
1419 | } | ||
1420 | static DEVICE_ATTR_RW(ctxid_pid); | ||
1421 | |||
1422 | static ssize_t ctxid_mask_show(struct device *dev, | ||
1423 | struct device_attribute *attr, char *buf) | ||
1424 | { | ||
1425 | unsigned long val; | ||
1426 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1427 | |||
1428 | val = drvdata->ctxid_mask; | ||
1429 | return sprintf(buf, "%#lx\n", val); | ||
1430 | } | ||
1431 | |||
1432 | static ssize_t ctxid_mask_store(struct device *dev, | ||
1433 | struct device_attribute *attr, | ||
1434 | const char *buf, size_t size) | ||
1435 | { | ||
1436 | int ret; | ||
1437 | unsigned long val; | ||
1438 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1439 | |||
1440 | ret = kstrtoul(buf, 16, &val); | ||
1441 | if (ret) | ||
1442 | return ret; | ||
1443 | |||
1444 | drvdata->ctxid_mask = val; | ||
1445 | return size; | ||
1446 | } | ||
1447 | static DEVICE_ATTR_RW(ctxid_mask); | ||
1448 | |||
1449 | static ssize_t sync_freq_show(struct device *dev, | ||
1450 | struct device_attribute *attr, char *buf) | ||
1451 | { | ||
1452 | unsigned long val; | ||
1453 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1454 | |||
1455 | val = drvdata->sync_freq; | ||
1456 | return sprintf(buf, "%#lx\n", val); | ||
1457 | } | ||
1458 | |||
1459 | static ssize_t sync_freq_store(struct device *dev, | ||
1460 | struct device_attribute *attr, | ||
1461 | const char *buf, size_t size) | ||
1462 | { | ||
1463 | int ret; | ||
1464 | unsigned long val; | ||
1465 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1466 | |||
1467 | ret = kstrtoul(buf, 16, &val); | ||
1468 | if (ret) | ||
1469 | return ret; | ||
1470 | |||
1471 | drvdata->sync_freq = val & ETM_SYNC_MASK; | ||
1472 | return size; | ||
1473 | } | ||
1474 | static DEVICE_ATTR_RW(sync_freq); | ||
1475 | |||
1476 | static ssize_t timestamp_event_show(struct device *dev, | ||
1477 | struct device_attribute *attr, char *buf) | ||
1478 | { | ||
1479 | unsigned long val; | ||
1480 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1481 | |||
1482 | val = drvdata->timestamp_event; | ||
1483 | return sprintf(buf, "%#lx\n", val); | ||
1484 | } | ||
1485 | |||
1486 | static ssize_t timestamp_event_store(struct device *dev, | ||
1487 | struct device_attribute *attr, | ||
1488 | const char *buf, size_t size) | ||
1489 | { | ||
1490 | int ret; | ||
1491 | unsigned long val; | ||
1492 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1493 | |||
1494 | ret = kstrtoul(buf, 16, &val); | ||
1495 | if (ret) | ||
1496 | return ret; | ||
1497 | |||
1498 | drvdata->timestamp_event = val & ETM_EVENT_MASK; | ||
1499 | return size; | ||
1500 | } | ||
1501 | static DEVICE_ATTR_RW(timestamp_event); | ||
1502 | |||
1503 | static ssize_t cpu_show(struct device *dev, | ||
1504 | struct device_attribute *attr, char *buf) | ||
1505 | { | ||
1506 | int val; | ||
1507 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1508 | |||
1509 | val = drvdata->cpu; | ||
1510 | return scnprintf(buf, PAGE_SIZE, "%d\n", val); | ||
1511 | |||
1512 | } | ||
1513 | static DEVICE_ATTR_RO(cpu); | ||
1514 | |||
1515 | static ssize_t traceid_show(struct device *dev, | ||
1516 | struct device_attribute *attr, char *buf) | ||
1517 | { | ||
1518 | unsigned long val, flags; | ||
1519 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1520 | |||
1521 | if (!drvdata->enable) { | ||
1522 | val = drvdata->traceid; | ||
1523 | goto out; | ||
1524 | } | 627 | } |
1525 | 628 | ||
1526 | pm_runtime_get_sync(drvdata->dev); | 629 | if (mode) |
1527 | spin_lock_irqsave(&drvdata->spinlock, flags); | 630 | local_set(&drvdata->mode, CS_MODE_DISABLED); |
1528 | CS_UNLOCK(drvdata->base); | ||
1529 | |||
1530 | val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK); | ||
1531 | |||
1532 | CS_LOCK(drvdata->base); | ||
1533 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
1534 | pm_runtime_put(drvdata->dev); | ||
1535 | out: | ||
1536 | return sprintf(buf, "%#lx\n", val); | ||
1537 | } | ||
1538 | |||
1539 | static ssize_t traceid_store(struct device *dev, | ||
1540 | struct device_attribute *attr, | ||
1541 | const char *buf, size_t size) | ||
1542 | { | ||
1543 | int ret; | ||
1544 | unsigned long val; | ||
1545 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
1546 | |||
1547 | ret = kstrtoul(buf, 16, &val); | ||
1548 | if (ret) | ||
1549 | return ret; | ||
1550 | |||
1551 | drvdata->traceid = val & ETM_TRACEID_MASK; | ||
1552 | return size; | ||
1553 | } | 631 | } |
1554 | static DEVICE_ATTR_RW(traceid); | ||
1555 | |||
1556 | static struct attribute *coresight_etm_attrs[] = { | ||
1557 | &dev_attr_nr_addr_cmp.attr, | ||
1558 | &dev_attr_nr_cntr.attr, | ||
1559 | &dev_attr_nr_ctxid_cmp.attr, | ||
1560 | &dev_attr_etmsr.attr, | ||
1561 | &dev_attr_reset.attr, | ||
1562 | &dev_attr_mode.attr, | ||
1563 | &dev_attr_trigger_event.attr, | ||
1564 | &dev_attr_enable_event.attr, | ||
1565 | &dev_attr_fifofull_level.attr, | ||
1566 | &dev_attr_addr_idx.attr, | ||
1567 | &dev_attr_addr_single.attr, | ||
1568 | &dev_attr_addr_range.attr, | ||
1569 | &dev_attr_addr_start.attr, | ||
1570 | &dev_attr_addr_stop.attr, | ||
1571 | &dev_attr_addr_acctype.attr, | ||
1572 | &dev_attr_cntr_idx.attr, | ||
1573 | &dev_attr_cntr_rld_val.attr, | ||
1574 | &dev_attr_cntr_event.attr, | ||
1575 | &dev_attr_cntr_rld_event.attr, | ||
1576 | &dev_attr_cntr_val.attr, | ||
1577 | &dev_attr_seq_12_event.attr, | ||
1578 | &dev_attr_seq_21_event.attr, | ||
1579 | &dev_attr_seq_23_event.attr, | ||
1580 | &dev_attr_seq_31_event.attr, | ||
1581 | &dev_attr_seq_32_event.attr, | ||
1582 | &dev_attr_seq_13_event.attr, | ||
1583 | &dev_attr_seq_curr_state.attr, | ||
1584 | &dev_attr_ctxid_idx.attr, | ||
1585 | &dev_attr_ctxid_pid.attr, | ||
1586 | &dev_attr_ctxid_mask.attr, | ||
1587 | &dev_attr_sync_freq.attr, | ||
1588 | &dev_attr_timestamp_event.attr, | ||
1589 | &dev_attr_traceid.attr, | ||
1590 | &dev_attr_cpu.attr, | ||
1591 | NULL, | ||
1592 | }; | ||
1593 | |||
1594 | #define coresight_simple_func(name, offset) \ | ||
1595 | static ssize_t name##_show(struct device *_dev, \ | ||
1596 | struct device_attribute *attr, char *buf) \ | ||
1597 | { \ | ||
1598 | struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \ | ||
1599 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ | ||
1600 | readl_relaxed(drvdata->base + offset)); \ | ||
1601 | } \ | ||
1602 | DEVICE_ATTR_RO(name) | ||
1603 | |||
1604 | coresight_simple_func(etmccr, ETMCCR); | ||
1605 | coresight_simple_func(etmccer, ETMCCER); | ||
1606 | coresight_simple_func(etmscr, ETMSCR); | ||
1607 | coresight_simple_func(etmidr, ETMIDR); | ||
1608 | coresight_simple_func(etmcr, ETMCR); | ||
1609 | coresight_simple_func(etmtraceidr, ETMTRACEIDR); | ||
1610 | coresight_simple_func(etmteevr, ETMTEEVR); | ||
1611 | coresight_simple_func(etmtssvr, ETMTSSCR); | ||
1612 | coresight_simple_func(etmtecr1, ETMTECR1); | ||
1613 | coresight_simple_func(etmtecr2, ETMTECR2); | ||
1614 | |||
1615 | static struct attribute *coresight_etm_mgmt_attrs[] = { | ||
1616 | &dev_attr_etmccr.attr, | ||
1617 | &dev_attr_etmccer.attr, | ||
1618 | &dev_attr_etmscr.attr, | ||
1619 | &dev_attr_etmidr.attr, | ||
1620 | &dev_attr_etmcr.attr, | ||
1621 | &dev_attr_etmtraceidr.attr, | ||
1622 | &dev_attr_etmteevr.attr, | ||
1623 | &dev_attr_etmtssvr.attr, | ||
1624 | &dev_attr_etmtecr1.attr, | ||
1625 | &dev_attr_etmtecr2.attr, | ||
1626 | NULL, | ||
1627 | }; | ||
1628 | 632 | ||
1629 | static const struct attribute_group coresight_etm_group = { | 633 | static const struct coresight_ops_source etm_source_ops = { |
1630 | .attrs = coresight_etm_attrs, | 634 | .cpu_id = etm_cpu_id, |
1631 | }; | 635 | .trace_id = etm_trace_id, |
1632 | 636 | .enable = etm_enable, | |
1633 | 637 | .disable = etm_disable, | |
1634 | static const struct attribute_group coresight_etm_mgmt_group = { | ||
1635 | .attrs = coresight_etm_mgmt_attrs, | ||
1636 | .name = "mgmt", | ||
1637 | }; | 638 | }; |
1638 | 639 | ||
1639 | static const struct attribute_group *coresight_etm_groups[] = { | 640 | static const struct coresight_ops etm_cs_ops = { |
1640 | &coresight_etm_group, | 641 | .source_ops = &etm_source_ops, |
1641 | &coresight_etm_mgmt_group, | ||
1642 | NULL, | ||
1643 | }; | 642 | }; |
1644 | 643 | ||
1645 | static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, | 644 | static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, |
@@ -1658,7 +657,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, | |||
1658 | etmdrvdata[cpu]->os_unlock = true; | 657 | etmdrvdata[cpu]->os_unlock = true; |
1659 | } | 658 | } |
1660 | 659 | ||
1661 | if (etmdrvdata[cpu]->enable) | 660 | if (local_read(&etmdrvdata[cpu]->mode)) |
1662 | etm_enable_hw(etmdrvdata[cpu]); | 661 | etm_enable_hw(etmdrvdata[cpu]); |
1663 | spin_unlock(&etmdrvdata[cpu]->spinlock); | 662 | spin_unlock(&etmdrvdata[cpu]->spinlock); |
1664 | break; | 663 | break; |
@@ -1671,7 +670,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, | |||
1671 | 670 | ||
1672 | case CPU_DYING: | 671 | case CPU_DYING: |
1673 | spin_lock(&etmdrvdata[cpu]->spinlock); | 672 | spin_lock(&etmdrvdata[cpu]->spinlock); |
1674 | if (etmdrvdata[cpu]->enable) | 673 | if (local_read(&etmdrvdata[cpu]->mode)) |
1675 | etm_disable_hw(etmdrvdata[cpu]); | 674 | etm_disable_hw(etmdrvdata[cpu]); |
1676 | spin_unlock(&etmdrvdata[cpu]->spinlock); | 675 | spin_unlock(&etmdrvdata[cpu]->spinlock); |
1677 | break; | 676 | break; |
@@ -1707,6 +706,9 @@ static void etm_init_arch_data(void *info) | |||
1707 | u32 etmccr; | 706 | u32 etmccr; |
1708 | struct etm_drvdata *drvdata = info; | 707 | struct etm_drvdata *drvdata = info; |
1709 | 708 | ||
709 | /* Make sure all registers are accessible */ | ||
710 | etm_os_unlock(drvdata); | ||
711 | |||
1710 | CS_UNLOCK(drvdata->base); | 712 | CS_UNLOCK(drvdata->base); |
1711 | 713 | ||
1712 | /* First dummy read */ | 714 | /* First dummy read */ |
@@ -1743,40 +745,9 @@ static void etm_init_arch_data(void *info) | |||
1743 | CS_LOCK(drvdata->base); | 745 | CS_LOCK(drvdata->base); |
1744 | } | 746 | } |
1745 | 747 | ||
1746 | static void etm_init_default_data(struct etm_drvdata *drvdata) | 748 | static void etm_init_trace_id(struct etm_drvdata *drvdata) |
1747 | { | 749 | { |
1748 | /* | 750 | drvdata->traceid = coresight_get_trace_id(drvdata->cpu); |
1749 | * A trace ID of value 0 is invalid, so let's start at some | ||
1750 | * random value that fits in 7 bits and will be just as good. | ||
1751 | */ | ||
1752 | static int etm3x_traceid = 0x10; | ||
1753 | |||
1754 | u32 flags = (1 << 0 | /* instruction execute*/ | ||
1755 | 3 << 3 | /* ARM instruction */ | ||
1756 | 0 << 5 | /* No data value comparison */ | ||
1757 | 0 << 7 | /* No exact mach */ | ||
1758 | 0 << 8 | /* Ignore context ID */ | ||
1759 | 0 << 10); /* Security ignored */ | ||
1760 | |||
1761 | /* | ||
1762 | * Initial configuration only - guarantees sources handled by | ||
1763 | * this driver have a unique ID at startup time but not between | ||
1764 | * all other types of sources. For that we lean on the core | ||
1765 | * framework. | ||
1766 | */ | ||
1767 | drvdata->traceid = etm3x_traceid++; | ||
1768 | drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN); | ||
1769 | drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1; | ||
1770 | if (drvdata->nr_addr_cmp >= 2) { | ||
1771 | drvdata->addr_val[0] = (u32) _stext; | ||
1772 | drvdata->addr_val[1] = (u32) _etext; | ||
1773 | drvdata->addr_acctype[0] = flags; | ||
1774 | drvdata->addr_acctype[1] = flags; | ||
1775 | drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE; | ||
1776 | drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE; | ||
1777 | } | ||
1778 | |||
1779 | etm_set_default(drvdata); | ||
1780 | } | 751 | } |
1781 | 752 | ||
1782 | static int etm_probe(struct amba_device *adev, const struct amba_id *id) | 753 | static int etm_probe(struct amba_device *adev, const struct amba_id *id) |
@@ -1831,9 +802,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) | |||
1831 | get_online_cpus(); | 802 | get_online_cpus(); |
1832 | etmdrvdata[drvdata->cpu] = drvdata; | 803 | etmdrvdata[drvdata->cpu] = drvdata; |
1833 | 804 | ||
1834 | if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1)) | ||
1835 | drvdata->os_unlock = true; | ||
1836 | |||
1837 | if (smp_call_function_single(drvdata->cpu, | 805 | if (smp_call_function_single(drvdata->cpu, |
1838 | etm_init_arch_data, drvdata, 1)) | 806 | etm_init_arch_data, drvdata, 1)) |
1839 | dev_err(dev, "ETM arch init failed\n"); | 807 | dev_err(dev, "ETM arch init failed\n"); |
@@ -1847,7 +815,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) | |||
1847 | ret = -EINVAL; | 815 | ret = -EINVAL; |
1848 | goto err_arch_supported; | 816 | goto err_arch_supported; |
1849 | } | 817 | } |
1850 | etm_init_default_data(drvdata); | 818 | |
819 | etm_init_trace_id(drvdata); | ||
820 | etm_set_default(&drvdata->config); | ||
1851 | 821 | ||
1852 | desc->type = CORESIGHT_DEV_TYPE_SOURCE; | 822 | desc->type = CORESIGHT_DEV_TYPE_SOURCE; |
1853 | desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; | 823 | desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; |
@@ -1861,6 +831,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) | |||
1861 | goto err_arch_supported; | 831 | goto err_arch_supported; |
1862 | } | 832 | } |
1863 | 833 | ||
834 | ret = etm_perf_symlink(drvdata->csdev, true); | ||
835 | if (ret) { | ||
836 | coresight_unregister(drvdata->csdev); | ||
837 | goto err_arch_supported; | ||
838 | } | ||
839 | |||
1864 | pm_runtime_put(&adev->dev); | 840 | pm_runtime_put(&adev->dev); |
1865 | dev_info(dev, "%s initialized\n", (char *)id->data); | 841 | dev_info(dev, "%s initialized\n", (char *)id->data); |
1866 | 842 | ||
@@ -1877,17 +853,6 @@ err_arch_supported: | |||
1877 | return ret; | 853 | return ret; |
1878 | } | 854 | } |
1879 | 855 | ||
1880 | static int etm_remove(struct amba_device *adev) | ||
1881 | { | ||
1882 | struct etm_drvdata *drvdata = amba_get_drvdata(adev); | ||
1883 | |||
1884 | coresight_unregister(drvdata->csdev); | ||
1885 | if (--etm_count == 0) | ||
1886 | unregister_hotcpu_notifier(&etm_cpu_notifier); | ||
1887 | |||
1888 | return 0; | ||
1889 | } | ||
1890 | |||
1891 | #ifdef CONFIG_PM | 856 | #ifdef CONFIG_PM |
1892 | static int etm_runtime_suspend(struct device *dev) | 857 | static int etm_runtime_suspend(struct device *dev) |
1893 | { | 858 | { |
@@ -1948,13 +913,9 @@ static struct amba_driver etm_driver = { | |||
1948 | .name = "coresight-etm3x", | 913 | .name = "coresight-etm3x", |
1949 | .owner = THIS_MODULE, | 914 | .owner = THIS_MODULE, |
1950 | .pm = &etm_dev_pm_ops, | 915 | .pm = &etm_dev_pm_ops, |
916 | .suppress_bind_attrs = true, | ||
1951 | }, | 917 | }, |
1952 | .probe = etm_probe, | 918 | .probe = etm_probe, |
1953 | .remove = etm_remove, | ||
1954 | .id_table = etm_ids, | 919 | .id_table = etm_ids, |
1955 | }; | 920 | }; |
1956 | 921 | builtin_amba_driver(etm_driver); | |
1957 | module_amba_driver(etm_driver); | ||
1958 | |||
1959 | MODULE_LICENSE("GPL v2"); | ||
1960 | MODULE_DESCRIPTION("CoreSight Program Flow Trace driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index a6707642bb23..1c59bd36834c 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/module.h> | ||
19 | #include <linux/io.h> | 18 | #include <linux/io.h> |
20 | #include <linux/err.h> | 19 | #include <linux/err.h> |
21 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
@@ -32,6 +31,7 @@ | |||
32 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
33 | #include <linux/uaccess.h> | 32 | #include <linux/uaccess.h> |
34 | #include <linux/pm_runtime.h> | 33 | #include <linux/pm_runtime.h> |
34 | #include <linux/perf_event.h> | ||
35 | #include <asm/sections.h> | 35 | #include <asm/sections.h> |
36 | 36 | ||
37 | #include "coresight-etm4x.h" | 37 | #include "coresight-etm4x.h" |
@@ -63,6 +63,13 @@ static bool etm4_arch_supported(u8 arch) | |||
63 | return true; | 63 | return true; |
64 | } | 64 | } |
65 | 65 | ||
66 | static int etm4_cpu_id(struct coresight_device *csdev) | ||
67 | { | ||
68 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
69 | |||
70 | return drvdata->cpu; | ||
71 | } | ||
72 | |||
66 | static int etm4_trace_id(struct coresight_device *csdev) | 73 | static int etm4_trace_id(struct coresight_device *csdev) |
67 | { | 74 | { |
68 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 75 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
@@ -72,7 +79,6 @@ static int etm4_trace_id(struct coresight_device *csdev) | |||
72 | if (!drvdata->enable) | 79 | if (!drvdata->enable) |
73 | return drvdata->trcid; | 80 | return drvdata->trcid; |
74 | 81 | ||
75 | pm_runtime_get_sync(drvdata->dev); | ||
76 | spin_lock_irqsave(&drvdata->spinlock, flags); | 82 | spin_lock_irqsave(&drvdata->spinlock, flags); |
77 | 83 | ||
78 | CS_UNLOCK(drvdata->base); | 84 | CS_UNLOCK(drvdata->base); |
@@ -81,7 +87,6 @@ static int etm4_trace_id(struct coresight_device *csdev) | |||
81 | CS_LOCK(drvdata->base); | 87 | CS_LOCK(drvdata->base); |
82 | 88 | ||
83 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 89 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
84 | pm_runtime_put(drvdata->dev); | ||
85 | 90 | ||
86 | return trace_id; | 91 | return trace_id; |
87 | } | 92 | } |
@@ -182,12 +187,12 @@ static void etm4_enable_hw(void *info) | |||
182 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); | 187 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); |
183 | } | 188 | } |
184 | 189 | ||
185 | static int etm4_enable(struct coresight_device *csdev) | 190 | static int etm4_enable(struct coresight_device *csdev, |
191 | struct perf_event_attr *attr, u32 mode) | ||
186 | { | 192 | { |
187 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 193 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
188 | int ret; | 194 | int ret; |
189 | 195 | ||
190 | pm_runtime_get_sync(drvdata->dev); | ||
191 | spin_lock(&drvdata->spinlock); | 196 | spin_lock(&drvdata->spinlock); |
192 | 197 | ||
193 | /* | 198 | /* |
@@ -207,7 +212,6 @@ static int etm4_enable(struct coresight_device *csdev) | |||
207 | return 0; | 212 | return 0; |
208 | err: | 213 | err: |
209 | spin_unlock(&drvdata->spinlock); | 214 | spin_unlock(&drvdata->spinlock); |
210 | pm_runtime_put(drvdata->dev); | ||
211 | return ret; | 215 | return ret; |
212 | } | 216 | } |
213 | 217 | ||
@@ -256,12 +260,11 @@ static void etm4_disable(struct coresight_device *csdev) | |||
256 | spin_unlock(&drvdata->spinlock); | 260 | spin_unlock(&drvdata->spinlock); |
257 | put_online_cpus(); | 261 | put_online_cpus(); |
258 | 262 | ||
259 | pm_runtime_put(drvdata->dev); | ||
260 | |||
261 | dev_info(drvdata->dev, "ETM tracing disabled\n"); | 263 | dev_info(drvdata->dev, "ETM tracing disabled\n"); |
262 | } | 264 | } |
263 | 265 | ||
264 | static const struct coresight_ops_source etm4_source_ops = { | 266 | static const struct coresight_ops_source etm4_source_ops = { |
267 | .cpu_id = etm4_cpu_id, | ||
265 | .trace_id = etm4_trace_id, | 268 | .trace_id = etm4_trace_id, |
266 | .enable = etm4_enable, | 269 | .enable = etm4_enable, |
267 | .disable = etm4_disable, | 270 | .disable = etm4_disable, |
@@ -2219,7 +2222,7 @@ static ssize_t name##_show(struct device *_dev, \ | |||
2219 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ | 2222 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ |
2220 | readl_relaxed(drvdata->base + offset)); \ | 2223 | readl_relaxed(drvdata->base + offset)); \ |
2221 | } \ | 2224 | } \ |
2222 | DEVICE_ATTR_RO(name) | 2225 | static DEVICE_ATTR_RO(name) |
2223 | 2226 | ||
2224 | coresight_simple_func(trcoslsr, TRCOSLSR); | 2227 | coresight_simple_func(trcoslsr, TRCOSLSR); |
2225 | coresight_simple_func(trcpdcr, TRCPDCR); | 2228 | coresight_simple_func(trcpdcr, TRCPDCR); |
@@ -2684,17 +2687,6 @@ err_coresight_register: | |||
2684 | return ret; | 2687 | return ret; |
2685 | } | 2688 | } |
2686 | 2689 | ||
2687 | static int etm4_remove(struct amba_device *adev) | ||
2688 | { | ||
2689 | struct etmv4_drvdata *drvdata = amba_get_drvdata(adev); | ||
2690 | |||
2691 | coresight_unregister(drvdata->csdev); | ||
2692 | if (--etm4_count == 0) | ||
2693 | unregister_hotcpu_notifier(&etm4_cpu_notifier); | ||
2694 | |||
2695 | return 0; | ||
2696 | } | ||
2697 | |||
2698 | static struct amba_id etm4_ids[] = { | 2690 | static struct amba_id etm4_ids[] = { |
2699 | { /* ETM 4.0 - Qualcomm */ | 2691 | { /* ETM 4.0 - Qualcomm */ |
2700 | .id = 0x0003b95d, | 2692 | .id = 0x0003b95d, |
@@ -2712,10 +2704,9 @@ static struct amba_id etm4_ids[] = { | |||
2712 | static struct amba_driver etm4x_driver = { | 2704 | static struct amba_driver etm4x_driver = { |
2713 | .drv = { | 2705 | .drv = { |
2714 | .name = "coresight-etm4x", | 2706 | .name = "coresight-etm4x", |
2707 | .suppress_bind_attrs = true, | ||
2715 | }, | 2708 | }, |
2716 | .probe = etm4_probe, | 2709 | .probe = etm4_probe, |
2717 | .remove = etm4_remove, | ||
2718 | .id_table = etm4_ids, | 2710 | .id_table = etm4_ids, |
2719 | }; | 2711 | }; |
2720 | 2712 | builtin_amba_driver(etm4x_driver); | |
2721 | module_amba_driver(etm4x_driver); | ||
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 2e36bde7fcb4..0600ca30649d 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
2 | * | 2 | * |
3 | * Description: CoreSight Funnel driver | ||
4 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
@@ -11,7 +13,6 @@ | |||
11 | */ | 13 | */ |
12 | 14 | ||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
16 | #include <linux/types.h> | 17 | #include <linux/types.h> |
17 | #include <linux/device.h> | 18 | #include <linux/device.h> |
@@ -69,7 +70,6 @@ static int funnel_enable(struct coresight_device *csdev, int inport, | |||
69 | { | 70 | { |
70 | struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 71 | struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
71 | 72 | ||
72 | pm_runtime_get_sync(drvdata->dev); | ||
73 | funnel_enable_hw(drvdata, inport); | 73 | funnel_enable_hw(drvdata, inport); |
74 | 74 | ||
75 | dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); | 75 | dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); |
@@ -95,7 +95,6 @@ static void funnel_disable(struct coresight_device *csdev, int inport, | |||
95 | struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 95 | struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
96 | 96 | ||
97 | funnel_disable_hw(drvdata, inport); | 97 | funnel_disable_hw(drvdata, inport); |
98 | pm_runtime_put(drvdata->dev); | ||
99 | 98 | ||
100 | dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); | 99 | dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); |
101 | } | 100 | } |
@@ -226,14 +225,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id) | |||
226 | return 0; | 225 | return 0; |
227 | } | 226 | } |
228 | 227 | ||
229 | static int funnel_remove(struct amba_device *adev) | ||
230 | { | ||
231 | struct funnel_drvdata *drvdata = amba_get_drvdata(adev); | ||
232 | |||
233 | coresight_unregister(drvdata->csdev); | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | #ifdef CONFIG_PM | 228 | #ifdef CONFIG_PM |
238 | static int funnel_runtime_suspend(struct device *dev) | 229 | static int funnel_runtime_suspend(struct device *dev) |
239 | { | 230 | { |
@@ -273,13 +264,9 @@ static struct amba_driver funnel_driver = { | |||
273 | .name = "coresight-funnel", | 264 | .name = "coresight-funnel", |
274 | .owner = THIS_MODULE, | 265 | .owner = THIS_MODULE, |
275 | .pm = &funnel_dev_pm_ops, | 266 | .pm = &funnel_dev_pm_ops, |
267 | .suppress_bind_attrs = true, | ||
276 | }, | 268 | }, |
277 | .probe = funnel_probe, | 269 | .probe = funnel_probe, |
278 | .remove = funnel_remove, | ||
279 | .id_table = funnel_ids, | 270 | .id_table = funnel_ids, |
280 | }; | 271 | }; |
281 | 272 | builtin_amba_driver(funnel_driver); | |
282 | module_amba_driver(funnel_driver); | ||
283 | |||
284 | MODULE_LICENSE("GPL v2"); | ||
285 | MODULE_DESCRIPTION("CoreSight Funnel driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 62fcd98cc7cf..333eddaed339 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h | |||
@@ -34,6 +34,15 @@ | |||
34 | #define TIMEOUT_US 100 | 34 | #define TIMEOUT_US 100 |
35 | #define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb) | 35 | #define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb) |
36 | 36 | ||
37 | #define ETM_MODE_EXCL_KERN BIT(30) | ||
38 | #define ETM_MODE_EXCL_USER BIT(31) | ||
39 | |||
40 | enum cs_mode { | ||
41 | CS_MODE_DISABLED, | ||
42 | CS_MODE_SYSFS, | ||
43 | CS_MODE_PERF, | ||
44 | }; | ||
45 | |||
37 | static inline void CS_LOCK(void __iomem *addr) | 46 | static inline void CS_LOCK(void __iomem *addr) |
38 | { | 47 | { |
39 | do { | 48 | do { |
@@ -52,6 +61,12 @@ static inline void CS_UNLOCK(void __iomem *addr) | |||
52 | } while (0); | 61 | } while (0); |
53 | } | 62 | } |
54 | 63 | ||
64 | void coresight_disable_path(struct list_head *path); | ||
65 | int coresight_enable_path(struct list_head *path, u32 mode); | ||
66 | struct coresight_device *coresight_get_sink(struct list_head *path); | ||
67 | struct list_head *coresight_build_path(struct coresight_device *csdev); | ||
68 | void coresight_release_path(struct list_head *path); | ||
69 | |||
55 | #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X | 70 | #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X |
56 | extern int etm_readl_cp14(u32 off, unsigned int *val); | 71 | extern int etm_readl_cp14(u32 off, unsigned int *val); |
57 | extern int etm_writel_cp14(u32 off, u32 val); | 72 | extern int etm_writel_cp14(u32 off, u32 val); |
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c index 584059e9e866..700f710e4bfa 100644 --- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c +++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/coresight.h> | 16 | #include <linux/coresight.h> |
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/module.h> | ||
19 | #include <linux/err.h> | 18 | #include <linux/err.h> |
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
21 | #include <linux/io.h> | 20 | #include <linux/io.h> |
@@ -48,8 +47,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport, | |||
48 | { | 47 | { |
49 | struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent); | 48 | struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent); |
50 | 49 | ||
51 | pm_runtime_get_sync(drvdata->dev); | ||
52 | |||
53 | CS_UNLOCK(drvdata->base); | 50 | CS_UNLOCK(drvdata->base); |
54 | 51 | ||
55 | /* | 52 | /* |
@@ -86,8 +83,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport, | |||
86 | 83 | ||
87 | CS_LOCK(drvdata->base); | 84 | CS_LOCK(drvdata->base); |
88 | 85 | ||
89 | pm_runtime_put(drvdata->dev); | ||
90 | |||
91 | dev_info(drvdata->dev, "REPLICATOR disabled\n"); | 86 | dev_info(drvdata->dev, "REPLICATOR disabled\n"); |
92 | } | 87 | } |
93 | 88 | ||
@@ -156,15 +151,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id) | |||
156 | return 0; | 151 | return 0; |
157 | } | 152 | } |
158 | 153 | ||
159 | static int replicator_remove(struct amba_device *adev) | ||
160 | { | ||
161 | struct replicator_state *drvdata = amba_get_drvdata(adev); | ||
162 | |||
163 | pm_runtime_disable(&adev->dev); | ||
164 | coresight_unregister(drvdata->csdev); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | #ifdef CONFIG_PM | 154 | #ifdef CONFIG_PM |
169 | static int replicator_runtime_suspend(struct device *dev) | 155 | static int replicator_runtime_suspend(struct device *dev) |
170 | { | 156 | { |
@@ -206,10 +192,9 @@ static struct amba_driver replicator_driver = { | |||
206 | .drv = { | 192 | .drv = { |
207 | .name = "coresight-replicator-qcom", | 193 | .name = "coresight-replicator-qcom", |
208 | .pm = &replicator_dev_pm_ops, | 194 | .pm = &replicator_dev_pm_ops, |
195 | .suppress_bind_attrs = true, | ||
209 | }, | 196 | }, |
210 | .probe = replicator_probe, | 197 | .probe = replicator_probe, |
211 | .remove = replicator_remove, | ||
212 | .id_table = replicator_ids, | 198 | .id_table = replicator_ids, |
213 | }; | 199 | }; |
214 | 200 | builtin_amba_driver(replicator_driver); | |
215 | module_amba_driver(replicator_driver); | ||
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 963ac197c253..4299c0569340 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
2 | * | 2 | * |
3 | * Description: CoreSight Replicator driver | ||
4 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
@@ -11,7 +13,6 @@ | |||
11 | */ | 13 | */ |
12 | 14 | ||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | ||
15 | #include <linux/device.h> | 16 | #include <linux/device.h> |
16 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
17 | #include <linux/io.h> | 18 | #include <linux/io.h> |
@@ -41,7 +42,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport, | |||
41 | { | 42 | { |
42 | struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 43 | struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
43 | 44 | ||
44 | pm_runtime_get_sync(drvdata->dev); | ||
45 | dev_info(drvdata->dev, "REPLICATOR enabled\n"); | 45 | dev_info(drvdata->dev, "REPLICATOR enabled\n"); |
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
@@ -51,7 +51,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport, | |||
51 | { | 51 | { |
52 | struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 52 | struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
53 | 53 | ||
54 | pm_runtime_put(drvdata->dev); | ||
55 | dev_info(drvdata->dev, "REPLICATOR disabled\n"); | 54 | dev_info(drvdata->dev, "REPLICATOR disabled\n"); |
56 | } | 55 | } |
57 | 56 | ||
@@ -127,20 +126,6 @@ out_disable_pm: | |||
127 | return ret; | 126 | return ret; |
128 | } | 127 | } |
129 | 128 | ||
130 | static int replicator_remove(struct platform_device *pdev) | ||
131 | { | ||
132 | struct replicator_drvdata *drvdata = platform_get_drvdata(pdev); | ||
133 | |||
134 | coresight_unregister(drvdata->csdev); | ||
135 | pm_runtime_get_sync(&pdev->dev); | ||
136 | if (!IS_ERR(drvdata->atclk)) | ||
137 | clk_disable_unprepare(drvdata->atclk); | ||
138 | pm_runtime_put_noidle(&pdev->dev); | ||
139 | pm_runtime_disable(&pdev->dev); | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | #ifdef CONFIG_PM | 129 | #ifdef CONFIG_PM |
145 | static int replicator_runtime_suspend(struct device *dev) | 130 | static int replicator_runtime_suspend(struct device *dev) |
146 | { | 131 | { |
@@ -175,15 +160,11 @@ static const struct of_device_id replicator_match[] = { | |||
175 | 160 | ||
176 | static struct platform_driver replicator_driver = { | 161 | static struct platform_driver replicator_driver = { |
177 | .probe = replicator_probe, | 162 | .probe = replicator_probe, |
178 | .remove = replicator_remove, | ||
179 | .driver = { | 163 | .driver = { |
180 | .name = "coresight-replicator", | 164 | .name = "coresight-replicator", |
181 | .of_match_table = replicator_match, | 165 | .of_match_table = replicator_match, |
182 | .pm = &replicator_dev_pm_ops, | 166 | .pm = &replicator_dev_pm_ops, |
167 | .suppress_bind_attrs = true, | ||
183 | }, | 168 | }, |
184 | }; | 169 | }; |
185 | |||
186 | builtin_platform_driver(replicator_driver); | 170 | builtin_platform_driver(replicator_driver); |
187 | |||
188 | MODULE_LICENSE("GPL v2"); | ||
189 | MODULE_DESCRIPTION("CoreSight Replicator driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index a57c7ec1661f..1be191f5d39c 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved. |
2 | * | 2 | * |
3 | * Description: CoreSight Trace Memory Controller driver | ||
4 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
@@ -11,7 +13,6 @@ | |||
11 | */ | 13 | */ |
12 | 14 | ||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
16 | #include <linux/types.h> | 17 | #include <linux/types.h> |
17 | #include <linux/device.h> | 18 | #include <linux/device.h> |
@@ -124,7 +125,7 @@ struct tmc_drvdata { | |||
124 | bool reading; | 125 | bool reading; |
125 | char *buf; | 126 | char *buf; |
126 | dma_addr_t paddr; | 127 | dma_addr_t paddr; |
127 | void __iomem *vaddr; | 128 | void *vaddr; |
128 | u32 size; | 129 | u32 size; |
129 | bool enable; | 130 | bool enable; |
130 | enum tmc_config_type config_type; | 131 | enum tmc_config_type config_type; |
@@ -242,12 +243,9 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) | |||
242 | { | 243 | { |
243 | unsigned long flags; | 244 | unsigned long flags; |
244 | 245 | ||
245 | pm_runtime_get_sync(drvdata->dev); | ||
246 | |||
247 | spin_lock_irqsave(&drvdata->spinlock, flags); | 246 | spin_lock_irqsave(&drvdata->spinlock, flags); |
248 | if (drvdata->reading) { | 247 | if (drvdata->reading) { |
249 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 248 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
250 | pm_runtime_put(drvdata->dev); | ||
251 | return -EBUSY; | 249 | return -EBUSY; |
252 | } | 250 | } |
253 | 251 | ||
@@ -268,7 +266,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) | |||
268 | return 0; | 266 | return 0; |
269 | } | 267 | } |
270 | 268 | ||
271 | static int tmc_enable_sink(struct coresight_device *csdev) | 269 | static int tmc_enable_sink(struct coresight_device *csdev, u32 mode) |
272 | { | 270 | { |
273 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 271 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
274 | 272 | ||
@@ -381,8 +379,6 @@ out: | |||
381 | drvdata->enable = false; | 379 | drvdata->enable = false; |
382 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 380 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
383 | 381 | ||
384 | pm_runtime_put(drvdata->dev); | ||
385 | |||
386 | dev_info(drvdata->dev, "TMC disabled\n"); | 382 | dev_info(drvdata->dev, "TMC disabled\n"); |
387 | } | 383 | } |
388 | 384 | ||
@@ -766,23 +762,10 @@ err_misc_register: | |||
766 | err_devm_kzalloc: | 762 | err_devm_kzalloc: |
767 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) | 763 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) |
768 | dma_free_coherent(dev, drvdata->size, | 764 | dma_free_coherent(dev, drvdata->size, |
769 | &drvdata->paddr, GFP_KERNEL); | 765 | drvdata->vaddr, drvdata->paddr); |
770 | return ret; | 766 | return ret; |
771 | } | 767 | } |
772 | 768 | ||
773 | static int tmc_remove(struct amba_device *adev) | ||
774 | { | ||
775 | struct tmc_drvdata *drvdata = amba_get_drvdata(adev); | ||
776 | |||
777 | misc_deregister(&drvdata->miscdev); | ||
778 | coresight_unregister(drvdata->csdev); | ||
779 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) | ||
780 | dma_free_coherent(drvdata->dev, drvdata->size, | ||
781 | &drvdata->paddr, GFP_KERNEL); | ||
782 | |||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | static struct amba_id tmc_ids[] = { | 769 | static struct amba_id tmc_ids[] = { |
787 | { | 770 | { |
788 | .id = 0x0003b961, | 771 | .id = 0x0003b961, |
@@ -795,13 +778,9 @@ static struct amba_driver tmc_driver = { | |||
795 | .drv = { | 778 | .drv = { |
796 | .name = "coresight-tmc", | 779 | .name = "coresight-tmc", |
797 | .owner = THIS_MODULE, | 780 | .owner = THIS_MODULE, |
781 | .suppress_bind_attrs = true, | ||
798 | }, | 782 | }, |
799 | .probe = tmc_probe, | 783 | .probe = tmc_probe, |
800 | .remove = tmc_remove, | ||
801 | .id_table = tmc_ids, | 784 | .id_table = tmc_ids, |
802 | }; | 785 | }; |
803 | 786 | builtin_amba_driver(tmc_driver); | |
804 | module_amba_driver(tmc_driver); | ||
805 | |||
806 | MODULE_LICENSE("GPL v2"); | ||
807 | MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 7214efd10db5..8fb09d9237ab 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
2 | * | 2 | * |
3 | * Description: CoreSight Trace Port Interface Unit driver | ||
4 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
@@ -11,7 +13,6 @@ | |||
11 | */ | 13 | */ |
12 | 14 | ||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
16 | #include <linux/device.h> | 17 | #include <linux/device.h> |
17 | #include <linux/io.h> | 18 | #include <linux/io.h> |
@@ -70,11 +71,10 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata) | |||
70 | CS_LOCK(drvdata->base); | 71 | CS_LOCK(drvdata->base); |
71 | } | 72 | } |
72 | 73 | ||
73 | static int tpiu_enable(struct coresight_device *csdev) | 74 | static int tpiu_enable(struct coresight_device *csdev, u32 mode) |
74 | { | 75 | { |
75 | struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 76 | struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
76 | 77 | ||
77 | pm_runtime_get_sync(csdev->dev.parent); | ||
78 | tpiu_enable_hw(drvdata); | 78 | tpiu_enable_hw(drvdata); |
79 | 79 | ||
80 | dev_info(drvdata->dev, "TPIU enabled\n"); | 80 | dev_info(drvdata->dev, "TPIU enabled\n"); |
@@ -98,7 +98,6 @@ static void tpiu_disable(struct coresight_device *csdev) | |||
98 | struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 98 | struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
99 | 99 | ||
100 | tpiu_disable_hw(drvdata); | 100 | tpiu_disable_hw(drvdata); |
101 | pm_runtime_put(csdev->dev.parent); | ||
102 | 101 | ||
103 | dev_info(drvdata->dev, "TPIU disabled\n"); | 102 | dev_info(drvdata->dev, "TPIU disabled\n"); |
104 | } | 103 | } |
@@ -172,14 +171,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) | |||
172 | return 0; | 171 | return 0; |
173 | } | 172 | } |
174 | 173 | ||
175 | static int tpiu_remove(struct amba_device *adev) | ||
176 | { | ||
177 | struct tpiu_drvdata *drvdata = amba_get_drvdata(adev); | ||
178 | |||
179 | coresight_unregister(drvdata->csdev); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | #ifdef CONFIG_PM | 174 | #ifdef CONFIG_PM |
184 | static int tpiu_runtime_suspend(struct device *dev) | 175 | static int tpiu_runtime_suspend(struct device *dev) |
185 | { | 176 | { |
@@ -223,13 +214,9 @@ static struct amba_driver tpiu_driver = { | |||
223 | .name = "coresight-tpiu", | 214 | .name = "coresight-tpiu", |
224 | .owner = THIS_MODULE, | 215 | .owner = THIS_MODULE, |
225 | .pm = &tpiu_dev_pm_ops, | 216 | .pm = &tpiu_dev_pm_ops, |
217 | .suppress_bind_attrs = true, | ||
226 | }, | 218 | }, |
227 | .probe = tpiu_probe, | 219 | .probe = tpiu_probe, |
228 | .remove = tpiu_remove, | ||
229 | .id_table = tpiu_ids, | 220 | .id_table = tpiu_ids, |
230 | }; | 221 | }; |
231 | 222 | builtin_amba_driver(tpiu_driver); | |
232 | module_amba_driver(tpiu_driver); | ||
233 | |||
234 | MODULE_LICENSE("GPL v2"); | ||
235 | MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 93738dfbf631..2ea5961092c1 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c | |||
@@ -11,7 +11,6 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | 14 | #include <linux/init.h> |
16 | #include <linux/types.h> | 15 | #include <linux/types.h> |
17 | #include <linux/device.h> | 16 | #include <linux/device.h> |
@@ -24,11 +23,28 @@ | |||
24 | #include <linux/coresight.h> | 23 | #include <linux/coresight.h> |
25 | #include <linux/of_platform.h> | 24 | #include <linux/of_platform.h> |
26 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/pm_runtime.h> | ||
27 | 27 | ||
28 | #include "coresight-priv.h" | 28 | #include "coresight-priv.h" |
29 | 29 | ||
30 | static DEFINE_MUTEX(coresight_mutex); | 30 | static DEFINE_MUTEX(coresight_mutex); |
31 | 31 | ||
32 | /** | ||
33 | * struct coresight_node - elements of a path, from source to sink | ||
34 | * @csdev: Address of an element. | ||
35 | * @link: hook to the list. | ||
36 | */ | ||
37 | struct coresight_node { | ||
38 | struct coresight_device *csdev; | ||
39 | struct list_head link; | ||
40 | }; | ||
41 | |||
42 | /* | ||
43 | * When operating Coresight drivers from the sysFS interface, only a single | ||
44 | * path can exist from a tracer (associated to a CPU) to a sink. | ||
45 | */ | ||
46 | static DEFINE_PER_CPU(struct list_head *, sysfs_path); | ||
47 | |||
32 | static int coresight_id_match(struct device *dev, void *data) | 48 | static int coresight_id_match(struct device *dev, void *data) |
33 | { | 49 | { |
34 | int trace_id, i_trace_id; | 50 | int trace_id, i_trace_id; |
@@ -68,15 +84,12 @@ static int coresight_source_is_unique(struct coresight_device *csdev) | |||
68 | csdev, coresight_id_match); | 84 | csdev, coresight_id_match); |
69 | } | 85 | } |
70 | 86 | ||
71 | static int coresight_find_link_inport(struct coresight_device *csdev) | 87 | static int coresight_find_link_inport(struct coresight_device *csdev, |
88 | struct coresight_device *parent) | ||
72 | { | 89 | { |
73 | int i; | 90 | int i; |
74 | struct coresight_device *parent; | ||
75 | struct coresight_connection *conn; | 91 | struct coresight_connection *conn; |
76 | 92 | ||
77 | parent = container_of(csdev->path_link.next, | ||
78 | struct coresight_device, path_link); | ||
79 | |||
80 | for (i = 0; i < parent->nr_outport; i++) { | 93 | for (i = 0; i < parent->nr_outport; i++) { |
81 | conn = &parent->conns[i]; | 94 | conn = &parent->conns[i]; |
82 | if (conn->child_dev == csdev) | 95 | if (conn->child_dev == csdev) |
@@ -89,15 +102,12 @@ static int coresight_find_link_inport(struct coresight_device *csdev) | |||
89 | return 0; | 102 | return 0; |
90 | } | 103 | } |
91 | 104 | ||
92 | static int coresight_find_link_outport(struct coresight_device *csdev) | 105 | static int coresight_find_link_outport(struct coresight_device *csdev, |
106 | struct coresight_device *child) | ||
93 | { | 107 | { |
94 | int i; | 108 | int i; |
95 | struct coresight_device *child; | ||
96 | struct coresight_connection *conn; | 109 | struct coresight_connection *conn; |
97 | 110 | ||
98 | child = container_of(csdev->path_link.prev, | ||
99 | struct coresight_device, path_link); | ||
100 | |||
101 | for (i = 0; i < csdev->nr_outport; i++) { | 111 | for (i = 0; i < csdev->nr_outport; i++) { |
102 | conn = &csdev->conns[i]; | 112 | conn = &csdev->conns[i]; |
103 | if (conn->child_dev == child) | 113 | if (conn->child_dev == child) |
@@ -110,13 +120,13 @@ static int coresight_find_link_outport(struct coresight_device *csdev) | |||
110 | return 0; | 120 | return 0; |
111 | } | 121 | } |
112 | 122 | ||
113 | static int coresight_enable_sink(struct coresight_device *csdev) | 123 | static int coresight_enable_sink(struct coresight_device *csdev, u32 mode) |
114 | { | 124 | { |
115 | int ret; | 125 | int ret; |
116 | 126 | ||
117 | if (!csdev->enable) { | 127 | if (!csdev->enable) { |
118 | if (sink_ops(csdev)->enable) { | 128 | if (sink_ops(csdev)->enable) { |
119 | ret = sink_ops(csdev)->enable(csdev); | 129 | ret = sink_ops(csdev)->enable(csdev, mode); |
120 | if (ret) | 130 | if (ret) |
121 | return ret; | 131 | return ret; |
122 | } | 132 | } |
@@ -138,14 +148,19 @@ static void coresight_disable_sink(struct coresight_device *csdev) | |||
138 | } | 148 | } |
139 | } | 149 | } |
140 | 150 | ||
141 | static int coresight_enable_link(struct coresight_device *csdev) | 151 | static int coresight_enable_link(struct coresight_device *csdev, |
152 | struct coresight_device *parent, | ||
153 | struct coresight_device *child) | ||
142 | { | 154 | { |
143 | int ret; | 155 | int ret; |
144 | int link_subtype; | 156 | int link_subtype; |
145 | int refport, inport, outport; | 157 | int refport, inport, outport; |
146 | 158 | ||
147 | inport = coresight_find_link_inport(csdev); | 159 | if (!parent || !child) |
148 | outport = coresight_find_link_outport(csdev); | 160 | return -EINVAL; |
161 | |||
162 | inport = coresight_find_link_inport(csdev, parent); | ||
163 | outport = coresight_find_link_outport(csdev, child); | ||
149 | link_subtype = csdev->subtype.link_subtype; | 164 | link_subtype = csdev->subtype.link_subtype; |
150 | 165 | ||
151 | if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) | 166 | if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) |
@@ -168,14 +183,19 @@ static int coresight_enable_link(struct coresight_device *csdev) | |||
168 | return 0; | 183 | return 0; |
169 | } | 184 | } |
170 | 185 | ||
171 | static void coresight_disable_link(struct coresight_device *csdev) | 186 | static void coresight_disable_link(struct coresight_device *csdev, |
187 | struct coresight_device *parent, | ||
188 | struct coresight_device *child) | ||
172 | { | 189 | { |
173 | int i, nr_conns; | 190 | int i, nr_conns; |
174 | int link_subtype; | 191 | int link_subtype; |
175 | int refport, inport, outport; | 192 | int refport, inport, outport; |
176 | 193 | ||
177 | inport = coresight_find_link_inport(csdev); | 194 | if (!parent || !child) |
178 | outport = coresight_find_link_outport(csdev); | 195 | return; |
196 | |||
197 | inport = coresight_find_link_inport(csdev, parent); | ||
198 | outport = coresight_find_link_outport(csdev, child); | ||
179 | link_subtype = csdev->subtype.link_subtype; | 199 | link_subtype = csdev->subtype.link_subtype; |
180 | 200 | ||
181 | if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) { | 201 | if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) { |
@@ -201,7 +221,7 @@ static void coresight_disable_link(struct coresight_device *csdev) | |||
201 | csdev->enable = false; | 221 | csdev->enable = false; |
202 | } | 222 | } |
203 | 223 | ||
204 | static int coresight_enable_source(struct coresight_device *csdev) | 224 | static int coresight_enable_source(struct coresight_device *csdev, u32 mode) |
205 | { | 225 | { |
206 | int ret; | 226 | int ret; |
207 | 227 | ||
@@ -213,7 +233,7 @@ static int coresight_enable_source(struct coresight_device *csdev) | |||
213 | 233 | ||
214 | if (!csdev->enable) { | 234 | if (!csdev->enable) { |
215 | if (source_ops(csdev)->enable) { | 235 | if (source_ops(csdev)->enable) { |
216 | ret = source_ops(csdev)->enable(csdev); | 236 | ret = source_ops(csdev)->enable(csdev, NULL, mode); |
217 | if (ret) | 237 | if (ret) |
218 | return ret; | 238 | return ret; |
219 | } | 239 | } |
@@ -235,109 +255,188 @@ static void coresight_disable_source(struct coresight_device *csdev) | |||
235 | } | 255 | } |
236 | } | 256 | } |
237 | 257 | ||
238 | static int coresight_enable_path(struct list_head *path) | 258 | void coresight_disable_path(struct list_head *path) |
239 | { | 259 | { |
240 | int ret = 0; | 260 | struct coresight_node *nd; |
241 | struct coresight_device *cd; | 261 | struct coresight_device *csdev, *parent, *child; |
242 | 262 | ||
243 | /* | 263 | list_for_each_entry(nd, path, link) { |
244 | * At this point we have a full @path, from source to sink. The | 264 | csdev = nd->csdev; |
245 | * sink is the first entry and the source the last one. Go through | 265 | |
246 | * all the components and enable them one by one. | 266 | switch (csdev->type) { |
247 | */ | 267 | case CORESIGHT_DEV_TYPE_SINK: |
248 | list_for_each_entry(cd, path, path_link) { | 268 | case CORESIGHT_DEV_TYPE_LINKSINK: |
249 | if (cd == list_first_entry(path, struct coresight_device, | 269 | coresight_disable_sink(csdev); |
250 | path_link)) { | 270 | break; |
251 | ret = coresight_enable_sink(cd); | 271 | case CORESIGHT_DEV_TYPE_SOURCE: |
252 | } else if (list_is_last(&cd->path_link, path)) { | 272 | /* sources are disabled from either sysFS or Perf */ |
253 | /* | 273 | break; |
254 | * Don't enable the source just yet - this needs to | 274 | case CORESIGHT_DEV_TYPE_LINK: |
255 | * happen at the very end when all links and sink | 275 | parent = list_prev_entry(nd, link)->csdev; |
256 | * along the path have been configured properly. | 276 | child = list_next_entry(nd, link)->csdev; |
257 | */ | 277 | coresight_disable_link(csdev, parent, child); |
258 | ; | 278 | break; |
259 | } else { | 279 | default: |
260 | ret = coresight_enable_link(cd); | 280 | break; |
261 | } | 281 | } |
262 | if (ret) | ||
263 | goto err; | ||
264 | } | 282 | } |
283 | } | ||
265 | 284 | ||
266 | return 0; | 285 | int coresight_enable_path(struct list_head *path, u32 mode) |
267 | err: | 286 | { |
268 | list_for_each_entry_continue_reverse(cd, path, path_link) { | 287 | |
269 | if (cd == list_first_entry(path, struct coresight_device, | 288 | int ret = 0; |
270 | path_link)) { | 289 | struct coresight_node *nd; |
271 | coresight_disable_sink(cd); | 290 | struct coresight_device *csdev, *parent, *child; |
272 | } else if (list_is_last(&cd->path_link, path)) { | 291 | |
273 | ; | 292 | list_for_each_entry_reverse(nd, path, link) { |
274 | } else { | 293 | csdev = nd->csdev; |
275 | coresight_disable_link(cd); | 294 | |
295 | switch (csdev->type) { | ||
296 | case CORESIGHT_DEV_TYPE_SINK: | ||
297 | case CORESIGHT_DEV_TYPE_LINKSINK: | ||
298 | ret = coresight_enable_sink(csdev, mode); | ||
299 | if (ret) | ||
300 | goto err; | ||
301 | break; | ||
302 | case CORESIGHT_DEV_TYPE_SOURCE: | ||
303 | /* sources are enabled from either sysFS or Perf */ | ||
304 | break; | ||
305 | case CORESIGHT_DEV_TYPE_LINK: | ||
306 | parent = list_prev_entry(nd, link)->csdev; | ||
307 | child = list_next_entry(nd, link)->csdev; | ||
308 | ret = coresight_enable_link(csdev, parent, child); | ||
309 | if (ret) | ||
310 | goto err; | ||
311 | break; | ||
312 | default: | ||
313 | goto err; | ||
276 | } | 314 | } |
277 | } | 315 | } |
278 | 316 | ||
317 | out: | ||
279 | return ret; | 318 | return ret; |
319 | err: | ||
320 | coresight_disable_path(path); | ||
321 | goto out; | ||
280 | } | 322 | } |
281 | 323 | ||
282 | static int coresight_disable_path(struct list_head *path) | 324 | struct coresight_device *coresight_get_sink(struct list_head *path) |
283 | { | 325 | { |
284 | struct coresight_device *cd; | 326 | struct coresight_device *csdev; |
285 | 327 | ||
286 | list_for_each_entry_reverse(cd, path, path_link) { | 328 | if (!path) |
287 | if (cd == list_first_entry(path, struct coresight_device, | 329 | return NULL; |
288 | path_link)) { | 330 | |
289 | coresight_disable_sink(cd); | 331 | csdev = list_last_entry(path, struct coresight_node, link)->csdev; |
290 | } else if (list_is_last(&cd->path_link, path)) { | 332 | if (csdev->type != CORESIGHT_DEV_TYPE_SINK && |
291 | /* | 333 | csdev->type != CORESIGHT_DEV_TYPE_LINKSINK) |
292 | * The source has already been stopped, no need | 334 | return NULL; |
293 | * to do it again here. | 335 | |
294 | */ | 336 | return csdev; |
295 | ; | 337 | } |
296 | } else { | 338 | |
297 | coresight_disable_link(cd); | 339 | /** |
340 | * _coresight_build_path - recursively build a path from a @csdev to a sink. | ||
341 | * @csdev: The device to start from. | ||
342 | * @path: The list to add devices to. | ||
343 | * | ||
344 | * The tree of Coresight device is traversed until an activated sink is | ||
345 | * found. From there the sink is added to the list along with all the | ||
346 | * devices that led to that point - the end result is a list from source | ||
347 | * to sink. In that list the source is the first device and the sink the | ||
348 | * last one. | ||
349 | */ | ||
350 | static int _coresight_build_path(struct coresight_device *csdev, | ||
351 | struct list_head *path) | ||
352 | { | ||
353 | int i; | ||
354 | bool found = false; | ||
355 | struct coresight_node *node; | ||
356 | struct coresight_connection *conn; | ||
357 | |||
358 | /* An activated sink has been found. Enqueue the element */ | ||
359 | if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || | ||
360 | csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated) | ||
361 | goto out; | ||
362 | |||
363 | /* Not a sink - recursively explore each port found on this element */ | ||
364 | for (i = 0; i < csdev->nr_outport; i++) { | ||
365 | conn = &csdev->conns[i]; | ||
366 | if (_coresight_build_path(conn->child_dev, path) == 0) { | ||
367 | found = true; | ||
368 | break; | ||
298 | } | 369 | } |
299 | } | 370 | } |
300 | 371 | ||
372 | if (!found) | ||
373 | return -ENODEV; | ||
374 | |||
375 | out: | ||
376 | /* | ||
377 | * A path from this element to a sink has been found. The elements | ||
378 | * leading to the sink are already enqueued, all that is left to do | ||
379 | * is tell the PM runtime core we need this element and add a node | ||
380 | * for it. | ||
381 | */ | ||
382 | node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL); | ||
383 | if (!node) | ||
384 | return -ENOMEM; | ||
385 | |||
386 | node->csdev = csdev; | ||
387 | list_add(&node->link, path); | ||
388 | pm_runtime_get_sync(csdev->dev.parent); | ||
389 | |||
301 | return 0; | 390 | return 0; |
302 | } | 391 | } |
303 | 392 | ||
304 | static int coresight_build_paths(struct coresight_device *csdev, | 393 | struct list_head *coresight_build_path(struct coresight_device *csdev) |
305 | struct list_head *path, | ||
306 | bool enable) | ||
307 | { | 394 | { |
308 | int i, ret = -EINVAL; | 395 | struct list_head *path; |
309 | struct coresight_connection *conn; | ||
310 | 396 | ||
311 | list_add(&csdev->path_link, path); | 397 | path = kzalloc(sizeof(struct list_head), GFP_KERNEL); |
398 | if (!path) | ||
399 | return NULL; | ||
312 | 400 | ||
313 | if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || | 401 | INIT_LIST_HEAD(path); |
314 | csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && | 402 | |
315 | csdev->activated) { | 403 | if (_coresight_build_path(csdev, path)) { |
316 | if (enable) | 404 | kfree(path); |
317 | ret = coresight_enable_path(path); | 405 | path = NULL; |
318 | else | ||
319 | ret = coresight_disable_path(path); | ||
320 | } else { | ||
321 | for (i = 0; i < csdev->nr_outport; i++) { | ||
322 | conn = &csdev->conns[i]; | ||
323 | if (coresight_build_paths(conn->child_dev, | ||
324 | path, enable) == 0) | ||
325 | ret = 0; | ||
326 | } | ||
327 | } | 406 | } |
328 | 407 | ||
329 | if (list_first_entry(path, struct coresight_device, path_link) != csdev) | 408 | return path; |
330 | dev_err(&csdev->dev, "wrong device in %s\n", __func__); | 409 | } |
331 | 410 | ||
332 | list_del(&csdev->path_link); | 411 | /** |
412 | * coresight_release_path - release a previously built path. | ||
413 | * @path: the path to release. | ||
414 | * | ||
415 | * Go through all the elements of a path and 1) removed it from the list and | ||
416 | * 2) free the memory allocated for each node. | ||
417 | */ | ||
418 | void coresight_release_path(struct list_head *path) | ||
419 | { | ||
420 | struct coresight_device *csdev; | ||
421 | struct coresight_node *nd, *next; | ||
333 | 422 | ||
334 | return ret; | 423 | list_for_each_entry_safe(nd, next, path, link) { |
424 | csdev = nd->csdev; | ||
425 | |||
426 | pm_runtime_put_sync(csdev->dev.parent); | ||
427 | list_del(&nd->link); | ||
428 | kfree(nd); | ||
429 | } | ||
430 | |||
431 | kfree(path); | ||
432 | path = NULL; | ||
335 | } | 433 | } |
336 | 434 | ||
337 | int coresight_enable(struct coresight_device *csdev) | 435 | int coresight_enable(struct coresight_device *csdev) |
338 | { | 436 | { |
339 | int ret = 0; | 437 | int ret = 0; |
340 | LIST_HEAD(path); | 438 | int cpu; |
439 | struct list_head *path; | ||
341 | 440 | ||
342 | mutex_lock(&coresight_mutex); | 441 | mutex_lock(&coresight_mutex); |
343 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { | 442 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { |
@@ -348,22 +447,47 @@ int coresight_enable(struct coresight_device *csdev) | |||
348 | if (csdev->enable) | 447 | if (csdev->enable) |
349 | goto out; | 448 | goto out; |
350 | 449 | ||
351 | if (coresight_build_paths(csdev, &path, true)) { | 450 | path = coresight_build_path(csdev); |
352 | dev_err(&csdev->dev, "building path(s) failed\n"); | 451 | if (!path) { |
452 | pr_err("building path(s) failed\n"); | ||
353 | goto out; | 453 | goto out; |
354 | } | 454 | } |
355 | 455 | ||
356 | if (coresight_enable_source(csdev)) | 456 | ret = coresight_enable_path(path, CS_MODE_SYSFS); |
357 | dev_err(&csdev->dev, "source enable failed\n"); | 457 | if (ret) |
458 | goto err_path; | ||
459 | |||
460 | ret = coresight_enable_source(csdev, CS_MODE_SYSFS); | ||
461 | if (ret) | ||
462 | goto err_source; | ||
463 | |||
464 | /* | ||
465 | * When working from sysFS it is important to keep track | ||
466 | * of the paths that were created so that they can be | ||
467 | * undone in 'coresight_disable()'. Since there can only | ||
468 | * be a single session per tracer (when working from sysFS) | ||
469 | * a per-cpu variable will do just fine. | ||
470 | */ | ||
471 | cpu = source_ops(csdev)->cpu_id(csdev); | ||
472 | per_cpu(sysfs_path, cpu) = path; | ||
473 | |||
358 | out: | 474 | out: |
359 | mutex_unlock(&coresight_mutex); | 475 | mutex_unlock(&coresight_mutex); |
360 | return ret; | 476 | return ret; |
477 | |||
478 | err_source: | ||
479 | coresight_disable_path(path); | ||
480 | |||
481 | err_path: | ||
482 | coresight_release_path(path); | ||
483 | goto out; | ||
361 | } | 484 | } |
362 | EXPORT_SYMBOL_GPL(coresight_enable); | 485 | EXPORT_SYMBOL_GPL(coresight_enable); |
363 | 486 | ||
364 | void coresight_disable(struct coresight_device *csdev) | 487 | void coresight_disable(struct coresight_device *csdev) |
365 | { | 488 | { |
366 | LIST_HEAD(path); | 489 | int cpu; |
490 | struct list_head *path; | ||
367 | 491 | ||
368 | mutex_lock(&coresight_mutex); | 492 | mutex_lock(&coresight_mutex); |
369 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { | 493 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { |
@@ -373,9 +497,12 @@ void coresight_disable(struct coresight_device *csdev) | |||
373 | if (!csdev->enable) | 497 | if (!csdev->enable) |
374 | goto out; | 498 | goto out; |
375 | 499 | ||
500 | cpu = source_ops(csdev)->cpu_id(csdev); | ||
501 | path = per_cpu(sysfs_path, cpu); | ||
376 | coresight_disable_source(csdev); | 502 | coresight_disable_source(csdev); |
377 | if (coresight_build_paths(csdev, &path, false)) | 503 | coresight_disable_path(path); |
378 | dev_err(&csdev->dev, "releasing path(s) failed\n"); | 504 | coresight_release_path(path); |
505 | per_cpu(sysfs_path, cpu) = NULL; | ||
379 | 506 | ||
380 | out: | 507 | out: |
381 | mutex_unlock(&coresight_mutex); | 508 | mutex_unlock(&coresight_mutex); |
@@ -481,6 +608,8 @@ static void coresight_device_release(struct device *dev) | |||
481 | { | 608 | { |
482 | struct coresight_device *csdev = to_coresight_device(dev); | 609 | struct coresight_device *csdev = to_coresight_device(dev); |
483 | 610 | ||
611 | kfree(csdev->conns); | ||
612 | kfree(csdev->refcnt); | ||
484 | kfree(csdev); | 613 | kfree(csdev); |
485 | } | 614 | } |
486 | 615 | ||
@@ -536,7 +665,7 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev) | |||
536 | * are hooked-up with each newly added component. | 665 | * are hooked-up with each newly added component. |
537 | */ | 666 | */ |
538 | bus_for_each_dev(&coresight_bustype, NULL, | 667 | bus_for_each_dev(&coresight_bustype, NULL, |
539 | csdev, coresight_orphan_match); | 668 | csdev, coresight_orphan_match); |
540 | } | 669 | } |
541 | 670 | ||
542 | 671 | ||
@@ -568,6 +697,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev) | |||
568 | 697 | ||
569 | if (dev) { | 698 | if (dev) { |
570 | conn->child_dev = to_coresight_device(dev); | 699 | conn->child_dev = to_coresight_device(dev); |
700 | /* and put reference from 'bus_find_device()' */ | ||
701 | put_device(dev); | ||
571 | } else { | 702 | } else { |
572 | csdev->orphan = true; | 703 | csdev->orphan = true; |
573 | conn->child_dev = NULL; | 704 | conn->child_dev = NULL; |
@@ -575,6 +706,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev) | |||
575 | } | 706 | } |
576 | } | 707 | } |
577 | 708 | ||
709 | static int coresight_remove_match(struct device *dev, void *data) | ||
710 | { | ||
711 | int i; | ||
712 | struct coresight_device *csdev, *iterator; | ||
713 | struct coresight_connection *conn; | ||
714 | |||
715 | csdev = data; | ||
716 | iterator = to_coresight_device(dev); | ||
717 | |||
718 | /* No need to check oneself */ | ||
719 | if (csdev == iterator) | ||
720 | return 0; | ||
721 | |||
722 | /* | ||
723 | * Circle throuch all the connection of that component. If we find | ||
724 | * a connection whose name matches @csdev, remove it. | ||
725 | */ | ||
726 | for (i = 0; i < iterator->nr_outport; i++) { | ||
727 | conn = &iterator->conns[i]; | ||
728 | |||
729 | if (conn->child_dev == NULL) | ||
730 | continue; | ||
731 | |||
732 | if (!strcmp(dev_name(&csdev->dev), conn->child_name)) { | ||
733 | iterator->orphan = true; | ||
734 | conn->child_dev = NULL; | ||
735 | /* No need to continue */ | ||
736 | break; | ||
737 | } | ||
738 | } | ||
739 | |||
740 | /* | ||
741 | * Returning '0' ensures that all known component on the | ||
742 | * bus will be checked. | ||
743 | */ | ||
744 | return 0; | ||
745 | } | ||
746 | |||
747 | static void coresight_remove_conns(struct coresight_device *csdev) | ||
748 | { | ||
749 | bus_for_each_dev(&coresight_bustype, NULL, | ||
750 | csdev, coresight_remove_match); | ||
751 | } | ||
752 | |||
578 | /** | 753 | /** |
579 | * coresight_timeout - loop until a bit has changed to a specific state. | 754 | * coresight_timeout - loop until a bit has changed to a specific state. |
580 | * @addr: base address of the area of interest. | 755 | * @addr: base address of the area of interest. |
@@ -713,13 +888,8 @@ EXPORT_SYMBOL_GPL(coresight_register); | |||
713 | 888 | ||
714 | void coresight_unregister(struct coresight_device *csdev) | 889 | void coresight_unregister(struct coresight_device *csdev) |
715 | { | 890 | { |
716 | mutex_lock(&coresight_mutex); | 891 | /* Remove references of that device in the topology */ |
717 | 892 | coresight_remove_conns(csdev); | |
718 | kfree(csdev->conns); | ||
719 | device_unregister(&csdev->dev); | 893 | device_unregister(&csdev->dev); |
720 | |||
721 | mutex_unlock(&coresight_mutex); | ||
722 | } | 894 | } |
723 | EXPORT_SYMBOL_GPL(coresight_unregister); | 895 | EXPORT_SYMBOL_GPL(coresight_unregister); |
724 | |||
725 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c index b0973617826f..b68da1888fd5 100644 --- a/drivers/hwtracing/coresight/of_coresight.c +++ b/drivers/hwtracing/coresight/of_coresight.c | |||
@@ -10,7 +10,6 @@ | |||
10 | * GNU General Public License for more details. | 10 | * GNU General Public License for more details. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/types.h> | 13 | #include <linux/types.h> |
15 | #include <linux/err.h> | 14 | #include <linux/err.h> |
16 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
@@ -86,7 +85,7 @@ static int of_coresight_alloc_memory(struct device *dev, | |||
86 | return -ENOMEM; | 85 | return -ENOMEM; |
87 | 86 | ||
88 | /* Children connected to this component via @outports */ | 87 | /* Children connected to this component via @outports */ |
89 | pdata->child_names = devm_kzalloc(dev, pdata->nr_outport * | 88 | pdata->child_names = devm_kzalloc(dev, pdata->nr_outport * |
90 | sizeof(*pdata->child_names), | 89 | sizeof(*pdata->child_names), |
91 | GFP_KERNEL); | 90 | GFP_KERNEL); |
92 | if (!pdata->child_names) | 91 | if (!pdata->child_names) |
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig index b7a9073d968b..1b412f8a56b5 100644 --- a/drivers/hwtracing/intel_th/Kconfig +++ b/drivers/hwtracing/intel_th/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | config INTEL_TH | 1 | config INTEL_TH |
2 | tristate "Intel(R) Trace Hub controller" | 2 | tristate "Intel(R) Trace Hub controller" |
3 | depends on HAS_DMA && HAS_IOMEM | ||
3 | help | 4 | help |
4 | Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that | 5 | Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that |
5 | produce, switch and output trace data from multiple hardware and | 6 | produce, switch and output trace data from multiple hardware and |
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 165d3001c301..4272f2ce5f6e 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c | |||
@@ -124,17 +124,34 @@ static struct device_type intel_th_source_device_type = { | |||
124 | .release = intel_th_device_release, | 124 | .release = intel_th_device_release, |
125 | }; | 125 | }; |
126 | 126 | ||
127 | static struct intel_th *to_intel_th(struct intel_th_device *thdev) | ||
128 | { | ||
129 | /* | ||
130 | * subdevice tree is flat: if this one is not a switch, its | ||
131 | * parent must be | ||
132 | */ | ||
133 | if (thdev->type != INTEL_TH_SWITCH) | ||
134 | thdev = to_intel_th_hub(thdev); | ||
135 | |||
136 | if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH)) | ||
137 | return NULL; | ||
138 | |||
139 | return dev_get_drvdata(thdev->dev.parent); | ||
140 | } | ||
141 | |||
127 | static char *intel_th_output_devnode(struct device *dev, umode_t *mode, | 142 | static char *intel_th_output_devnode(struct device *dev, umode_t *mode, |
128 | kuid_t *uid, kgid_t *gid) | 143 | kuid_t *uid, kgid_t *gid) |
129 | { | 144 | { |
130 | struct intel_th_device *thdev = to_intel_th_device(dev); | 145 | struct intel_th_device *thdev = to_intel_th_device(dev); |
146 | struct intel_th *th = to_intel_th(thdev); | ||
131 | char *node; | 147 | char *node; |
132 | 148 | ||
133 | if (thdev->id >= 0) | 149 | if (thdev->id >= 0) |
134 | node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", 0, thdev->name, | 150 | node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", th->id, |
135 | thdev->id); | 151 | thdev->name, thdev->id); |
136 | else | 152 | else |
137 | node = kasprintf(GFP_KERNEL, "intel_th%d/%s", 0, thdev->name); | 153 | node = kasprintf(GFP_KERNEL, "intel_th%d/%s", th->id, |
154 | thdev->name); | ||
138 | 155 | ||
139 | return node; | 156 | return node; |
140 | } | 157 | } |
@@ -319,6 +336,7 @@ static struct intel_th_subdevice { | |||
319 | unsigned nres; | 336 | unsigned nres; |
320 | unsigned type; | 337 | unsigned type; |
321 | unsigned otype; | 338 | unsigned otype; |
339 | unsigned scrpd; | ||
322 | int id; | 340 | int id; |
323 | } intel_th_subdevices[TH_SUBDEVICE_MAX] = { | 341 | } intel_th_subdevices[TH_SUBDEVICE_MAX] = { |
324 | { | 342 | { |
@@ -352,6 +370,7 @@ static struct intel_th_subdevice { | |||
352 | .id = 0, | 370 | .id = 0, |
353 | .type = INTEL_TH_OUTPUT, | 371 | .type = INTEL_TH_OUTPUT, |
354 | .otype = GTH_MSU, | 372 | .otype = GTH_MSU, |
373 | .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED, | ||
355 | }, | 374 | }, |
356 | { | 375 | { |
357 | .nres = 2, | 376 | .nres = 2, |
@@ -371,6 +390,7 @@ static struct intel_th_subdevice { | |||
371 | .id = 1, | 390 | .id = 1, |
372 | .type = INTEL_TH_OUTPUT, | 391 | .type = INTEL_TH_OUTPUT, |
373 | .otype = GTH_MSU, | 392 | .otype = GTH_MSU, |
393 | .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED, | ||
374 | }, | 394 | }, |
375 | { | 395 | { |
376 | .nres = 2, | 396 | .nres = 2, |
@@ -403,6 +423,7 @@ static struct intel_th_subdevice { | |||
403 | .name = "pti", | 423 | .name = "pti", |
404 | .type = INTEL_TH_OUTPUT, | 424 | .type = INTEL_TH_OUTPUT, |
405 | .otype = GTH_PTI, | 425 | .otype = GTH_PTI, |
426 | .scrpd = SCRPD_PTI_IS_PRIM_DEST, | ||
406 | }, | 427 | }, |
407 | { | 428 | { |
408 | .nres = 1, | 429 | .nres = 1, |
@@ -477,6 +498,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres, | |||
477 | thdev->dev.devt = MKDEV(th->major, i); | 498 | thdev->dev.devt = MKDEV(th->major, i); |
478 | thdev->output.type = subdev->otype; | 499 | thdev->output.type = subdev->otype; |
479 | thdev->output.port = -1; | 500 | thdev->output.port = -1; |
501 | thdev->output.scratchpad = subdev->scrpd; | ||
480 | } | 502 | } |
481 | 503 | ||
482 | err = device_add(&thdev->dev); | 504 | err = device_add(&thdev->dev); |
@@ -579,6 +601,8 @@ intel_th_alloc(struct device *dev, struct resource *devres, | |||
579 | } | 601 | } |
580 | th->dev = dev; | 602 | th->dev = dev; |
581 | 603 | ||
604 | dev_set_drvdata(dev, th); | ||
605 | |||
582 | err = intel_th_populate(th, devres, ndevres, irq); | 606 | err = intel_th_populate(th, devres, ndevres, irq); |
583 | if (err) | 607 | if (err) |
584 | goto err_chrdev; | 608 | goto err_chrdev; |
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index 2dc5378ccd3a..9beea0b54231 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c | |||
@@ -146,24 +146,6 @@ gth_master_set(struct gth_device *gth, unsigned int master, int port) | |||
146 | iowrite32(val, gth->base + reg); | 146 | iowrite32(val, gth->base + reg); |
147 | } | 147 | } |
148 | 148 | ||
149 | /*static int gth_master_get(struct gth_device *gth, unsigned int master) | ||
150 | { | ||
151 | unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u); | ||
152 | unsigned int shift = (master & 0x7) * 4; | ||
153 | u32 val; | ||
154 | |||
155 | if (master >= 256) { | ||
156 | reg = REG_GTH_GSWTDEST; | ||
157 | shift = 0; | ||
158 | } | ||
159 | |||
160 | val = ioread32(gth->base + reg); | ||
161 | val &= (0xf << shift); | ||
162 | val >>= shift; | ||
163 | |||
164 | return val ? val & 0x7 : -1; | ||
165 | }*/ | ||
166 | |||
167 | static ssize_t master_attr_show(struct device *dev, | 149 | static ssize_t master_attr_show(struct device *dev, |
168 | struct device_attribute *attr, | 150 | struct device_attribute *attr, |
169 | char *buf) | 151 | char *buf) |
@@ -304,6 +286,10 @@ static int intel_th_gth_reset(struct gth_device *gth) | |||
304 | if (scratchpad & SCRPD_DEBUGGER_IN_USE) | 286 | if (scratchpad & SCRPD_DEBUGGER_IN_USE) |
305 | return -EBUSY; | 287 | return -EBUSY; |
306 | 288 | ||
289 | /* Always save/restore STH and TU registers in S0ix entry/exit */ | ||
290 | scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED; | ||
291 | iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0); | ||
292 | |||
307 | /* output ports */ | 293 | /* output ports */ |
308 | for (port = 0; port < 8; port++) { | 294 | for (port = 0; port < 8; port++) { |
309 | if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) == | 295 | if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) == |
@@ -506,6 +492,10 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, | |||
506 | if (!count) | 492 | if (!count) |
507 | dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n", | 493 | dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n", |
508 | output->port); | 494 | output->port); |
495 | |||
496 | reg = ioread32(gth->base + REG_GTH_SCRPD0); | ||
497 | reg &= ~output->scratchpad; | ||
498 | iowrite32(reg, gth->base + REG_GTH_SCRPD0); | ||
509 | } | 499 | } |
510 | 500 | ||
511 | /** | 501 | /** |
@@ -520,7 +510,7 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, | |||
520 | struct intel_th_output *output) | 510 | struct intel_th_output *output) |
521 | { | 511 | { |
522 | struct gth_device *gth = dev_get_drvdata(&thdev->dev); | 512 | struct gth_device *gth = dev_get_drvdata(&thdev->dev); |
523 | u32 scr = 0xfc0000; | 513 | u32 scr = 0xfc0000, scrpd; |
524 | int master; | 514 | int master; |
525 | 515 | ||
526 | spin_lock(>h->gth_lock); | 516 | spin_lock(>h->gth_lock); |
@@ -535,6 +525,10 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, | |||
535 | output->active = true; | 525 | output->active = true; |
536 | spin_unlock(>h->gth_lock); | 526 | spin_unlock(>h->gth_lock); |
537 | 527 | ||
528 | scrpd = ioread32(gth->base + REG_GTH_SCRPD0); | ||
529 | scrpd |= output->scratchpad; | ||
530 | iowrite32(scrpd, gth->base + REG_GTH_SCRPD0); | ||
531 | |||
538 | iowrite32(scr, gth->base + REG_GTH_SCR); | 532 | iowrite32(scr, gth->base + REG_GTH_SCR); |
539 | iowrite32(0, gth->base + REG_GTH_SCR2); | 533 | iowrite32(0, gth->base + REG_GTH_SCR2); |
540 | } | 534 | } |
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h index 3b714b7a61db..56f0d2620577 100644 --- a/drivers/hwtracing/intel_th/gth.h +++ b/drivers/hwtracing/intel_th/gth.h | |||
@@ -57,9 +57,6 @@ enum { | |||
57 | REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */ | 57 | REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */ |
58 | }; | 58 | }; |
59 | 59 | ||
60 | /* Externall debugger is using Intel TH */ | ||
61 | #define SCRPD_DEBUGGER_IN_USE BIT(24) | ||
62 | |||
63 | /* waiting for Pipeline Empty bit(s) to assert for GTH */ | 60 | /* waiting for Pipeline Empty bit(s) to assert for GTH */ |
64 | #define GTH_PLE_WAITLOOP_DEPTH 10000 | 61 | #define GTH_PLE_WAITLOOP_DEPTH 10000 |
65 | 62 | ||
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 57fd72b20fae..eedd09332db6 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h | |||
@@ -30,6 +30,7 @@ enum { | |||
30 | * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices | 30 | * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices |
31 | * @port: output port number, assigned by the switch | 31 | * @port: output port number, assigned by the switch |
32 | * @type: GTH_{MSU,CTP,PTI} | 32 | * @type: GTH_{MSU,CTP,PTI} |
33 | * @scratchpad: scratchpad bits to flag when this output is enabled | ||
33 | * @multiblock: true for multiblock output configuration | 34 | * @multiblock: true for multiblock output configuration |
34 | * @active: true when this output is enabled | 35 | * @active: true when this output is enabled |
35 | * | 36 | * |
@@ -41,6 +42,7 @@ enum { | |||
41 | struct intel_th_output { | 42 | struct intel_th_output { |
42 | int port; | 43 | int port; |
43 | unsigned int type; | 44 | unsigned int type; |
45 | unsigned int scratchpad; | ||
44 | bool multiblock; | 46 | bool multiblock; |
45 | bool active; | 47 | bool active; |
46 | }; | 48 | }; |
@@ -241,4 +243,43 @@ enum { | |||
241 | GTH_PTI = 4, /* MIPI-PTI */ | 243 | GTH_PTI = 4, /* MIPI-PTI */ |
242 | }; | 244 | }; |
243 | 245 | ||
246 | /* | ||
247 | * Scratchpad bits: tell firmware and external debuggers | ||
248 | * what we are up to. | ||
249 | */ | ||
250 | enum { | ||
251 | /* Memory is the primary destination */ | ||
252 | SCRPD_MEM_IS_PRIM_DEST = BIT(0), | ||
253 | /* XHCI DbC is the primary destination */ | ||
254 | SCRPD_DBC_IS_PRIM_DEST = BIT(1), | ||
255 | /* PTI is the primary destination */ | ||
256 | SCRPD_PTI_IS_PRIM_DEST = BIT(2), | ||
257 | /* BSSB is the primary destination */ | ||
258 | SCRPD_BSSB_IS_PRIM_DEST = BIT(3), | ||
259 | /* PTI is the alternate destination */ | ||
260 | SCRPD_PTI_IS_ALT_DEST = BIT(4), | ||
261 | /* BSSB is the alternate destination */ | ||
262 | SCRPD_BSSB_IS_ALT_DEST = BIT(5), | ||
263 | /* DeepSx exit occurred */ | ||
264 | SCRPD_DEEPSX_EXIT = BIT(6), | ||
265 | /* S4 exit occurred */ | ||
266 | SCRPD_S4_EXIT = BIT(7), | ||
267 | /* S5 exit occurred */ | ||
268 | SCRPD_S5_EXIT = BIT(8), | ||
269 | /* MSU controller 0/1 is enabled */ | ||
270 | SCRPD_MSC0_IS_ENABLED = BIT(9), | ||
271 | SCRPD_MSC1_IS_ENABLED = BIT(10), | ||
272 | /* Sx exit occurred */ | ||
273 | SCRPD_SX_EXIT = BIT(11), | ||
274 | /* Trigger Unit is enabled */ | ||
275 | SCRPD_TRIGGER_IS_ENABLED = BIT(12), | ||
276 | SCRPD_ODLA_IS_ENABLED = BIT(13), | ||
277 | SCRPD_SOCHAP_IS_ENABLED = BIT(14), | ||
278 | SCRPD_STH_IS_ENABLED = BIT(15), | ||
279 | SCRPD_DCIH_IS_ENABLED = BIT(16), | ||
280 | SCRPD_VER_IS_ENABLED = BIT(17), | ||
281 | /* External debugger is using Intel TH */ | ||
282 | SCRPD_DEBUGGER_IN_USE = BIT(24), | ||
283 | }; | ||
284 | |||
244 | #endif | 285 | #endif |
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 70ca27e45602..d9d6022c5aca 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c | |||
@@ -408,7 +408,7 @@ msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data, | |||
408 | * Second time (wrap_count==1), it's just like any other block, | 408 | * Second time (wrap_count==1), it's just like any other block, |
409 | * containing data in the range of [MSC_BDESC..data_bytes]. | 409 | * containing data in the range of [MSC_BDESC..data_bytes]. |
410 | */ | 410 | */ |
411 | if (iter->block == iter->start_block && iter->wrap_count) { | 411 | if (iter->block == iter->start_block && iter->wrap_count == 2) { |
412 | tocopy = DATA_IN_PAGE - data_bytes; | 412 | tocopy = DATA_IN_PAGE - data_bytes; |
413 | src += data_bytes; | 413 | src += data_bytes; |
414 | } | 414 | } |
@@ -1112,12 +1112,11 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf, | |||
1112 | size = msc->nr_pages << PAGE_SHIFT; | 1112 | size = msc->nr_pages << PAGE_SHIFT; |
1113 | 1113 | ||
1114 | if (!size) | 1114 | if (!size) |
1115 | return 0; | 1115 | goto put_count; |
1116 | 1116 | ||
1117 | if (off >= size) { | 1117 | if (off >= size) |
1118 | len = 0; | ||
1119 | goto put_count; | 1118 | goto put_count; |
1120 | } | 1119 | |
1121 | if (off + len >= size) | 1120 | if (off + len >= size) |
1122 | len = size - off; | 1121 | len = size - off; |
1123 | 1122 | ||
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 641e87936064..bca7a2ac00d6 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c | |||
@@ -46,8 +46,6 @@ static int intel_th_pci_probe(struct pci_dev *pdev, | |||
46 | if (IS_ERR(th)) | 46 | if (IS_ERR(th)) |
47 | return PTR_ERR(th); | 47 | return PTR_ERR(th); |
48 | 48 | ||
49 | pci_set_drvdata(pdev, th); | ||
50 | |||
51 | return 0; | 49 | return 0; |
52 | } | 50 | } |
53 | 51 | ||
@@ -67,6 +65,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = { | |||
67 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126), | 65 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126), |
68 | .driver_data = (kernel_ulong_t)0, | 66 | .driver_data = (kernel_ulong_t)0, |
69 | }, | 67 | }, |
68 | { | ||
69 | /* Apollo Lake */ | ||
70 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a8e), | ||
71 | .driver_data = (kernel_ulong_t)0, | ||
72 | }, | ||
73 | { | ||
74 | /* Broxton */ | ||
75 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80), | ||
76 | .driver_data = (kernel_ulong_t)0, | ||
77 | }, | ||
70 | { 0 }, | 78 | { 0 }, |
71 | }; | 79 | }; |
72 | 80 | ||
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index 56101c33e10f..e1aee61dd7b3 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c | |||
@@ -94,10 +94,13 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master, | |||
94 | case STP_PACKET_TRIG: | 94 | case STP_PACKET_TRIG: |
95 | if (flags & STP_PACKET_TIMESTAMPED) | 95 | if (flags & STP_PACKET_TIMESTAMPED) |
96 | reg += 4; | 96 | reg += 4; |
97 | iowrite8(*payload, sth->base + reg); | 97 | writeb_relaxed(*payload, sth->base + reg); |
98 | break; | 98 | break; |
99 | 99 | ||
100 | case STP_PACKET_MERR: | 100 | case STP_PACKET_MERR: |
101 | if (size > 4) | ||
102 | size = 4; | ||
103 | |||
101 | sth_iowrite(&out->MERR, payload, size); | 104 | sth_iowrite(&out->MERR, payload, size); |
102 | break; | 105 | break; |
103 | 106 | ||
@@ -107,8 +110,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master, | |||
107 | else | 110 | else |
108 | outp = (u64 __iomem *)&out->FLAG; | 111 | outp = (u64 __iomem *)&out->FLAG; |
109 | 112 | ||
110 | size = 1; | 113 | size = 0; |
111 | sth_iowrite(outp, payload, size); | 114 | writeb_relaxed(0, outp); |
112 | break; | 115 | break; |
113 | 116 | ||
114 | case STP_PACKET_USER: | 117 | case STP_PACKET_USER: |
@@ -129,6 +132,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master, | |||
129 | 132 | ||
130 | sth_iowrite(outp, payload, size); | 133 | sth_iowrite(outp, payload, size); |
131 | break; | 134 | break; |
135 | default: | ||
136 | return -ENOTSUPP; | ||
132 | } | 137 | } |
133 | 138 | ||
134 | return size; | 139 | return size; |
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig index 83e9f591a54b..847a39b35307 100644 --- a/drivers/hwtracing/stm/Kconfig +++ b/drivers/hwtracing/stm/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config STM | 1 | config STM |
2 | tristate "System Trace Module devices" | 2 | tristate "System Trace Module devices" |
3 | select CONFIGFS_FS | 3 | select CONFIGFS_FS |
4 | select SRCU | ||
4 | help | 5 | help |
5 | A System Trace Module (STM) is a device exporting data in System | 6 | A System Trace Module (STM) is a device exporting data in System |
6 | Trace Protocol (STP) format as defined by MIPI STP standards. | 7 | Trace Protocol (STP) format as defined by MIPI STP standards. |
@@ -8,6 +9,8 @@ config STM | |||
8 | 9 | ||
9 | Say Y here to enable System Trace Module device support. | 10 | Say Y here to enable System Trace Module device support. |
10 | 11 | ||
12 | if STM | ||
13 | |||
11 | config STM_DUMMY | 14 | config STM_DUMMY |
12 | tristate "Dummy STM driver" | 15 | tristate "Dummy STM driver" |
13 | help | 16 | help |
@@ -24,3 +27,16 @@ config STM_SOURCE_CONSOLE | |||
24 | 27 | ||
25 | If you want to send kernel console messages over STM devices, | 28 | If you want to send kernel console messages over STM devices, |
26 | say Y. | 29 | say Y. |
30 | |||
31 | config STM_SOURCE_HEARTBEAT | ||
32 | tristate "Heartbeat over STM devices" | ||
33 | help | ||
34 | This is a kernel space trace source that sends periodic | ||
35 | heartbeat messages to trace hosts over STM devices. It is | ||
36 | also useful for testing stm class drivers and the stm class | ||
37 | framework itself. | ||
38 | |||
39 | If you want to send heartbeat messages over STM devices, | ||
40 | say Y. | ||
41 | |||
42 | endif | ||
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile index f9312c38dd7a..a9ce3d487e57 100644 --- a/drivers/hwtracing/stm/Makefile +++ b/drivers/hwtracing/stm/Makefile | |||
@@ -5,5 +5,7 @@ stm_core-y := core.o policy.o | |||
5 | obj-$(CONFIG_STM_DUMMY) += dummy_stm.o | 5 | obj-$(CONFIG_STM_DUMMY) += dummy_stm.o |
6 | 6 | ||
7 | obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o | 7 | obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o |
8 | obj-$(CONFIG_STM_SOURCE_HEARTBEAT) += stm_heartbeat.o | ||
8 | 9 | ||
9 | stm_console-y := console.o | 10 | stm_console-y := console.o |
11 | stm_heartbeat-y := heartbeat.o | ||
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index b6445d9e5453..de80d45d8df9 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c | |||
@@ -113,6 +113,7 @@ struct stm_device *stm_find_device(const char *buf) | |||
113 | 113 | ||
114 | stm = to_stm_device(dev); | 114 | stm = to_stm_device(dev); |
115 | if (!try_module_get(stm->owner)) { | 115 | if (!try_module_get(stm->owner)) { |
116 | /* matches class_find_device() above */ | ||
116 | put_device(dev); | 117 | put_device(dev); |
117 | return NULL; | 118 | return NULL; |
118 | } | 119 | } |
@@ -125,7 +126,7 @@ struct stm_device *stm_find_device(const char *buf) | |||
125 | * @stm: stm device, previously acquired by stm_find_device() | 126 | * @stm: stm device, previously acquired by stm_find_device() |
126 | * | 127 | * |
127 | * This drops the module reference and device reference taken by | 128 | * This drops the module reference and device reference taken by |
128 | * stm_find_device(). | 129 | * stm_find_device() or stm_char_open(). |
129 | */ | 130 | */ |
130 | void stm_put_device(struct stm_device *stm) | 131 | void stm_put_device(struct stm_device *stm) |
131 | { | 132 | { |
@@ -185,6 +186,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output) | |||
185 | { | 186 | { |
186 | struct stp_master *master = stm_master(stm, output->master); | 187 | struct stp_master *master = stm_master(stm, output->master); |
187 | 188 | ||
189 | lockdep_assert_held(&stm->mc_lock); | ||
190 | lockdep_assert_held(&output->lock); | ||
191 | |||
188 | if (WARN_ON_ONCE(master->nr_free < output->nr_chans)) | 192 | if (WARN_ON_ONCE(master->nr_free < output->nr_chans)) |
189 | return; | 193 | return; |
190 | 194 | ||
@@ -199,6 +203,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output) | |||
199 | { | 203 | { |
200 | struct stp_master *master = stm_master(stm, output->master); | 204 | struct stp_master *master = stm_master(stm, output->master); |
201 | 205 | ||
206 | lockdep_assert_held(&stm->mc_lock); | ||
207 | lockdep_assert_held(&output->lock); | ||
208 | |||
202 | bitmap_release_region(&master->chan_map[0], output->channel, | 209 | bitmap_release_region(&master->chan_map[0], output->channel, |
203 | ilog2(output->nr_chans)); | 210 | ilog2(output->nr_chans)); |
204 | 211 | ||
@@ -233,7 +240,7 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start, | |||
233 | return -1; | 240 | return -1; |
234 | } | 241 | } |
235 | 242 | ||
236 | static unsigned int | 243 | static int |
237 | stm_find_master_chan(struct stm_device *stm, unsigned int width, | 244 | stm_find_master_chan(struct stm_device *stm, unsigned int width, |
238 | unsigned int *mstart, unsigned int mend, | 245 | unsigned int *mstart, unsigned int mend, |
239 | unsigned int *cstart, unsigned int cend) | 246 | unsigned int *cstart, unsigned int cend) |
@@ -288,12 +295,13 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, | |||
288 | } | 295 | } |
289 | 296 | ||
290 | spin_lock(&stm->mc_lock); | 297 | spin_lock(&stm->mc_lock); |
298 | spin_lock(&output->lock); | ||
291 | /* output is already assigned -- shouldn't happen */ | 299 | /* output is already assigned -- shouldn't happen */ |
292 | if (WARN_ON_ONCE(output->nr_chans)) | 300 | if (WARN_ON_ONCE(output->nr_chans)) |
293 | goto unlock; | 301 | goto unlock; |
294 | 302 | ||
295 | ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend); | 303 | ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend); |
296 | if (ret) | 304 | if (ret < 0) |
297 | goto unlock; | 305 | goto unlock; |
298 | 306 | ||
299 | output->master = midx; | 307 | output->master = midx; |
@@ -304,6 +312,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, | |||
304 | 312 | ||
305 | ret = 0; | 313 | ret = 0; |
306 | unlock: | 314 | unlock: |
315 | spin_unlock(&output->lock); | ||
307 | spin_unlock(&stm->mc_lock); | 316 | spin_unlock(&stm->mc_lock); |
308 | 317 | ||
309 | return ret; | 318 | return ret; |
@@ -312,11 +321,18 @@ unlock: | |||
312 | static void stm_output_free(struct stm_device *stm, struct stm_output *output) | 321 | static void stm_output_free(struct stm_device *stm, struct stm_output *output) |
313 | { | 322 | { |
314 | spin_lock(&stm->mc_lock); | 323 | spin_lock(&stm->mc_lock); |
324 | spin_lock(&output->lock); | ||
315 | if (output->nr_chans) | 325 | if (output->nr_chans) |
316 | stm_output_disclaim(stm, output); | 326 | stm_output_disclaim(stm, output); |
327 | spin_unlock(&output->lock); | ||
317 | spin_unlock(&stm->mc_lock); | 328 | spin_unlock(&stm->mc_lock); |
318 | } | 329 | } |
319 | 330 | ||
331 | static void stm_output_init(struct stm_output *output) | ||
332 | { | ||
333 | spin_lock_init(&output->lock); | ||
334 | } | ||
335 | |||
320 | static int major_match(struct device *dev, const void *data) | 336 | static int major_match(struct device *dev, const void *data) |
321 | { | 337 | { |
322 | unsigned int major = *(unsigned int *)data; | 338 | unsigned int major = *(unsigned int *)data; |
@@ -339,6 +355,7 @@ static int stm_char_open(struct inode *inode, struct file *file) | |||
339 | if (!stmf) | 355 | if (!stmf) |
340 | return -ENOMEM; | 356 | return -ENOMEM; |
341 | 357 | ||
358 | stm_output_init(&stmf->output); | ||
342 | stmf->stm = to_stm_device(dev); | 359 | stmf->stm = to_stm_device(dev); |
343 | 360 | ||
344 | if (!try_module_get(stmf->stm->owner)) | 361 | if (!try_module_get(stmf->stm->owner)) |
@@ -349,6 +366,8 @@ static int stm_char_open(struct inode *inode, struct file *file) | |||
349 | return nonseekable_open(inode, file); | 366 | return nonseekable_open(inode, file); |
350 | 367 | ||
351 | err_free: | 368 | err_free: |
369 | /* matches class_find_device() above */ | ||
370 | put_device(dev); | ||
352 | kfree(stmf); | 371 | kfree(stmf); |
353 | 372 | ||
354 | return err; | 373 | return err; |
@@ -357,9 +376,19 @@ err_free: | |||
357 | static int stm_char_release(struct inode *inode, struct file *file) | 376 | static int stm_char_release(struct inode *inode, struct file *file) |
358 | { | 377 | { |
359 | struct stm_file *stmf = file->private_data; | 378 | struct stm_file *stmf = file->private_data; |
379 | struct stm_device *stm = stmf->stm; | ||
380 | |||
381 | if (stm->data->unlink) | ||
382 | stm->data->unlink(stm->data, stmf->output.master, | ||
383 | stmf->output.channel); | ||
360 | 384 | ||
361 | stm_output_free(stmf->stm, &stmf->output); | 385 | stm_output_free(stm, &stmf->output); |
362 | stm_put_device(stmf->stm); | 386 | |
387 | /* | ||
388 | * matches the stm_char_open()'s | ||
389 | * class_find_device() + try_module_get() | ||
390 | */ | ||
391 | stm_put_device(stm); | ||
363 | kfree(stmf); | 392 | kfree(stmf); |
364 | 393 | ||
365 | return 0; | 394 | return 0; |
@@ -380,8 +409,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width) | |||
380 | return ret; | 409 | return ret; |
381 | } | 410 | } |
382 | 411 | ||
383 | static void stm_write(struct stm_data *data, unsigned int master, | 412 | static ssize_t stm_write(struct stm_data *data, unsigned int master, |
384 | unsigned int channel, const char *buf, size_t count) | 413 | unsigned int channel, const char *buf, size_t count) |
385 | { | 414 | { |
386 | unsigned int flags = STP_PACKET_TIMESTAMPED; | 415 | unsigned int flags = STP_PACKET_TIMESTAMPED; |
387 | const unsigned char *p = buf, nil = 0; | 416 | const unsigned char *p = buf, nil = 0; |
@@ -393,9 +422,14 @@ static void stm_write(struct stm_data *data, unsigned int master, | |||
393 | sz = data->packet(data, master, channel, STP_PACKET_DATA, flags, | 422 | sz = data->packet(data, master, channel, STP_PACKET_DATA, flags, |
394 | sz, p); | 423 | sz, p); |
395 | flags = 0; | 424 | flags = 0; |
425 | |||
426 | if (sz < 0) | ||
427 | break; | ||
396 | } | 428 | } |
397 | 429 | ||
398 | data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); | 430 | data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); |
431 | |||
432 | return pos; | ||
399 | } | 433 | } |
400 | 434 | ||
401 | static ssize_t stm_char_write(struct file *file, const char __user *buf, | 435 | static ssize_t stm_char_write(struct file *file, const char __user *buf, |
@@ -406,6 +440,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, | |||
406 | char *kbuf; | 440 | char *kbuf; |
407 | int err; | 441 | int err; |
408 | 442 | ||
443 | if (count + 1 > PAGE_SIZE) | ||
444 | count = PAGE_SIZE - 1; | ||
445 | |||
409 | /* | 446 | /* |
410 | * if no m/c have been assigned to this writer up to this | 447 | * if no m/c have been assigned to this writer up to this |
411 | * point, use "default" policy entry | 448 | * point, use "default" policy entry |
@@ -430,8 +467,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, | |||
430 | return -EFAULT; | 467 | return -EFAULT; |
431 | } | 468 | } |
432 | 469 | ||
433 | stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf, | 470 | count = stm_write(stm->data, stmf->output.master, stmf->output.channel, |
434 | count); | 471 | kbuf, count); |
435 | 472 | ||
436 | kfree(kbuf); | 473 | kfree(kbuf); |
437 | 474 | ||
@@ -515,10 +552,8 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) | |||
515 | ret = stm->data->link(stm->data, stmf->output.master, | 552 | ret = stm->data->link(stm->data, stmf->output.master, |
516 | stmf->output.channel); | 553 | stmf->output.channel); |
517 | 554 | ||
518 | if (ret) { | 555 | if (ret) |
519 | stm_output_free(stmf->stm, &stmf->output); | 556 | stm_output_free(stmf->stm, &stmf->output); |
520 | stm_put_device(stmf->stm); | ||
521 | } | ||
522 | 557 | ||
523 | err_free: | 558 | err_free: |
524 | kfree(id); | 559 | kfree(id); |
@@ -618,7 +653,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, | |||
618 | if (!stm_data->packet || !stm_data->sw_nchannels) | 653 | if (!stm_data->packet || !stm_data->sw_nchannels) |
619 | return -EINVAL; | 654 | return -EINVAL; |
620 | 655 | ||
621 | nmasters = stm_data->sw_end - stm_data->sw_start; | 656 | nmasters = stm_data->sw_end - stm_data->sw_start + 1; |
622 | stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); | 657 | stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); |
623 | if (!stm) | 658 | if (!stm) |
624 | return -ENOMEM; | 659 | return -ENOMEM; |
@@ -641,6 +676,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, | |||
641 | if (err) | 676 | if (err) |
642 | goto err_device; | 677 | goto err_device; |
643 | 678 | ||
679 | mutex_init(&stm->link_mutex); | ||
644 | spin_lock_init(&stm->link_lock); | 680 | spin_lock_init(&stm->link_lock); |
645 | INIT_LIST_HEAD(&stm->link_list); | 681 | INIT_LIST_HEAD(&stm->link_list); |
646 | 682 | ||
@@ -654,6 +690,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, | |||
654 | return 0; | 690 | return 0; |
655 | 691 | ||
656 | err_device: | 692 | err_device: |
693 | /* matches device_initialize() above */ | ||
657 | put_device(&stm->dev); | 694 | put_device(&stm->dev); |
658 | err_free: | 695 | err_free: |
659 | kfree(stm); | 696 | kfree(stm); |
@@ -662,20 +699,28 @@ err_free: | |||
662 | } | 699 | } |
663 | EXPORT_SYMBOL_GPL(stm_register_device); | 700 | EXPORT_SYMBOL_GPL(stm_register_device); |
664 | 701 | ||
665 | static void __stm_source_link_drop(struct stm_source_device *src, | 702 | static int __stm_source_link_drop(struct stm_source_device *src, |
666 | struct stm_device *stm); | 703 | struct stm_device *stm); |
667 | 704 | ||
668 | void stm_unregister_device(struct stm_data *stm_data) | 705 | void stm_unregister_device(struct stm_data *stm_data) |
669 | { | 706 | { |
670 | struct stm_device *stm = stm_data->stm; | 707 | struct stm_device *stm = stm_data->stm; |
671 | struct stm_source_device *src, *iter; | 708 | struct stm_source_device *src, *iter; |
672 | int i; | 709 | int i, ret; |
673 | 710 | ||
674 | spin_lock(&stm->link_lock); | 711 | mutex_lock(&stm->link_mutex); |
675 | list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) { | 712 | list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) { |
676 | __stm_source_link_drop(src, stm); | 713 | ret = __stm_source_link_drop(src, stm); |
714 | /* | ||
715 | * src <-> stm link must not change under the same | ||
716 | * stm::link_mutex, so complain loudly if it has; | ||
717 | * also in this situation ret!=0 means this src is | ||
718 | * not connected to this stm and it should be otherwise | ||
719 | * safe to proceed with the tear-down of stm. | ||
720 | */ | ||
721 | WARN_ON_ONCE(ret); | ||
677 | } | 722 | } |
678 | spin_unlock(&stm->link_lock); | 723 | mutex_unlock(&stm->link_mutex); |
679 | 724 | ||
680 | synchronize_srcu(&stm_source_srcu); | 725 | synchronize_srcu(&stm_source_srcu); |
681 | 726 | ||
@@ -686,7 +731,7 @@ void stm_unregister_device(struct stm_data *stm_data) | |||
686 | stp_policy_unbind(stm->policy); | 731 | stp_policy_unbind(stm->policy); |
687 | mutex_unlock(&stm->policy_mutex); | 732 | mutex_unlock(&stm->policy_mutex); |
688 | 733 | ||
689 | for (i = 0; i < stm->sw_nmasters; i++) | 734 | for (i = stm->data->sw_start; i <= stm->data->sw_end; i++) |
690 | stp_master_free(stm, i); | 735 | stp_master_free(stm, i); |
691 | 736 | ||
692 | device_unregister(&stm->dev); | 737 | device_unregister(&stm->dev); |
@@ -694,6 +739,17 @@ void stm_unregister_device(struct stm_data *stm_data) | |||
694 | } | 739 | } |
695 | EXPORT_SYMBOL_GPL(stm_unregister_device); | 740 | EXPORT_SYMBOL_GPL(stm_unregister_device); |
696 | 741 | ||
742 | /* | ||
743 | * stm::link_list access serialization uses a spinlock and a mutex; holding | ||
744 | * either of them guarantees that the list is stable; modification requires | ||
745 | * holding both of them. | ||
746 | * | ||
747 | * Lock ordering is as follows: | ||
748 | * stm::link_mutex | ||
749 | * stm::link_lock | ||
750 | * src::link_lock | ||
751 | */ | ||
752 | |||
697 | /** | 753 | /** |
698 | * stm_source_link_add() - connect an stm_source device to an stm device | 754 | * stm_source_link_add() - connect an stm_source device to an stm device |
699 | * @src: stm_source device | 755 | * @src: stm_source device |
@@ -710,6 +766,7 @@ static int stm_source_link_add(struct stm_source_device *src, | |||
710 | char *id; | 766 | char *id; |
711 | int err; | 767 | int err; |
712 | 768 | ||
769 | mutex_lock(&stm->link_mutex); | ||
713 | spin_lock(&stm->link_lock); | 770 | spin_lock(&stm->link_lock); |
714 | spin_lock(&src->link_lock); | 771 | spin_lock(&src->link_lock); |
715 | 772 | ||
@@ -719,6 +776,7 @@ static int stm_source_link_add(struct stm_source_device *src, | |||
719 | 776 | ||
720 | spin_unlock(&src->link_lock); | 777 | spin_unlock(&src->link_lock); |
721 | spin_unlock(&stm->link_lock); | 778 | spin_unlock(&stm->link_lock); |
779 | mutex_unlock(&stm->link_mutex); | ||
722 | 780 | ||
723 | id = kstrdup(src->data->name, GFP_KERNEL); | 781 | id = kstrdup(src->data->name, GFP_KERNEL); |
724 | if (id) { | 782 | if (id) { |
@@ -753,9 +811,9 @@ static int stm_source_link_add(struct stm_source_device *src, | |||
753 | 811 | ||
754 | fail_free_output: | 812 | fail_free_output: |
755 | stm_output_free(stm, &src->output); | 813 | stm_output_free(stm, &src->output); |
756 | stm_put_device(stm); | ||
757 | 814 | ||
758 | fail_detach: | 815 | fail_detach: |
816 | mutex_lock(&stm->link_mutex); | ||
759 | spin_lock(&stm->link_lock); | 817 | spin_lock(&stm->link_lock); |
760 | spin_lock(&src->link_lock); | 818 | spin_lock(&src->link_lock); |
761 | 819 | ||
@@ -764,6 +822,7 @@ fail_detach: | |||
764 | 822 | ||
765 | spin_unlock(&src->link_lock); | 823 | spin_unlock(&src->link_lock); |
766 | spin_unlock(&stm->link_lock); | 824 | spin_unlock(&stm->link_lock); |
825 | mutex_unlock(&stm->link_mutex); | ||
767 | 826 | ||
768 | return err; | 827 | return err; |
769 | } | 828 | } |
@@ -776,28 +835,55 @@ fail_detach: | |||
776 | * If @stm is @src::link, disconnect them from one another and put the | 835 | * If @stm is @src::link, disconnect them from one another and put the |
777 | * reference on the @stm device. | 836 | * reference on the @stm device. |
778 | * | 837 | * |
779 | * Caller must hold stm::link_lock. | 838 | * Caller must hold stm::link_mutex. |
780 | */ | 839 | */ |
781 | static void __stm_source_link_drop(struct stm_source_device *src, | 840 | static int __stm_source_link_drop(struct stm_source_device *src, |
782 | struct stm_device *stm) | 841 | struct stm_device *stm) |
783 | { | 842 | { |
784 | struct stm_device *link; | 843 | struct stm_device *link; |
844 | int ret = 0; | ||
845 | |||
846 | lockdep_assert_held(&stm->link_mutex); | ||
785 | 847 | ||
848 | /* for stm::link_list modification, we hold both mutex and spinlock */ | ||
849 | spin_lock(&stm->link_lock); | ||
786 | spin_lock(&src->link_lock); | 850 | spin_lock(&src->link_lock); |
787 | link = srcu_dereference_check(src->link, &stm_source_srcu, 1); | 851 | link = srcu_dereference_check(src->link, &stm_source_srcu, 1); |
788 | if (WARN_ON_ONCE(link != stm)) { | 852 | |
789 | spin_unlock(&src->link_lock); | 853 | /* |
790 | return; | 854 | * The linked device may have changed since we last looked, because |
855 | * we weren't holding the src::link_lock back then; if this is the | ||
856 | * case, tell the caller to retry. | ||
857 | */ | ||
858 | if (link != stm) { | ||
859 | ret = -EAGAIN; | ||
860 | goto unlock; | ||
791 | } | 861 | } |
792 | 862 | ||
793 | stm_output_free(link, &src->output); | 863 | stm_output_free(link, &src->output); |
794 | /* caller must hold stm::link_lock */ | ||
795 | list_del_init(&src->link_entry); | 864 | list_del_init(&src->link_entry); |
796 | /* matches stm_find_device() from stm_source_link_store() */ | 865 | /* matches stm_find_device() from stm_source_link_store() */ |
797 | stm_put_device(link); | 866 | stm_put_device(link); |
798 | rcu_assign_pointer(src->link, NULL); | 867 | rcu_assign_pointer(src->link, NULL); |
799 | 868 | ||
869 | unlock: | ||
800 | spin_unlock(&src->link_lock); | 870 | spin_unlock(&src->link_lock); |
871 | spin_unlock(&stm->link_lock); | ||
872 | |||
873 | /* | ||
874 | * Call the unlink callbacks for both source and stm, when we know | ||
875 | * that we have actually performed the unlinking. | ||
876 | */ | ||
877 | if (!ret) { | ||
878 | if (src->data->unlink) | ||
879 | src->data->unlink(src->data); | ||
880 | |||
881 | if (stm->data->unlink) | ||
882 | stm->data->unlink(stm->data, src->output.master, | ||
883 | src->output.channel); | ||
884 | } | ||
885 | |||
886 | return ret; | ||
801 | } | 887 | } |
802 | 888 | ||
803 | /** | 889 | /** |
@@ -813,21 +899,29 @@ static void __stm_source_link_drop(struct stm_source_device *src, | |||
813 | static void stm_source_link_drop(struct stm_source_device *src) | 899 | static void stm_source_link_drop(struct stm_source_device *src) |
814 | { | 900 | { |
815 | struct stm_device *stm; | 901 | struct stm_device *stm; |
816 | int idx; | 902 | int idx, ret; |
817 | 903 | ||
904 | retry: | ||
818 | idx = srcu_read_lock(&stm_source_srcu); | 905 | idx = srcu_read_lock(&stm_source_srcu); |
906 | /* | ||
907 | * The stm device will be valid for the duration of this | ||
908 | * read section, but the link may change before we grab | ||
909 | * the src::link_lock in __stm_source_link_drop(). | ||
910 | */ | ||
819 | stm = srcu_dereference(src->link, &stm_source_srcu); | 911 | stm = srcu_dereference(src->link, &stm_source_srcu); |
820 | 912 | ||
913 | ret = 0; | ||
821 | if (stm) { | 914 | if (stm) { |
822 | if (src->data->unlink) | 915 | mutex_lock(&stm->link_mutex); |
823 | src->data->unlink(src->data); | 916 | ret = __stm_source_link_drop(src, stm); |
824 | 917 | mutex_unlock(&stm->link_mutex); | |
825 | spin_lock(&stm->link_lock); | ||
826 | __stm_source_link_drop(src, stm); | ||
827 | spin_unlock(&stm->link_lock); | ||
828 | } | 918 | } |
829 | 919 | ||
830 | srcu_read_unlock(&stm_source_srcu, idx); | 920 | srcu_read_unlock(&stm_source_srcu, idx); |
921 | |||
922 | /* if it did change, retry */ | ||
923 | if (ret == -EAGAIN) | ||
924 | goto retry; | ||
831 | } | 925 | } |
832 | 926 | ||
833 | static ssize_t stm_source_link_show(struct device *dev, | 927 | static ssize_t stm_source_link_show(struct device *dev, |
@@ -862,8 +956,10 @@ static ssize_t stm_source_link_store(struct device *dev, | |||
862 | return -EINVAL; | 956 | return -EINVAL; |
863 | 957 | ||
864 | err = stm_source_link_add(src, link); | 958 | err = stm_source_link_add(src, link); |
865 | if (err) | 959 | if (err) { |
960 | /* matches the stm_find_device() above */ | ||
866 | stm_put_device(link); | 961 | stm_put_device(link); |
962 | } | ||
867 | 963 | ||
868 | return err ? : count; | 964 | return err ? : count; |
869 | } | 965 | } |
@@ -925,6 +1021,7 @@ int stm_source_register_device(struct device *parent, | |||
925 | if (err) | 1021 | if (err) |
926 | goto err; | 1022 | goto err; |
927 | 1023 | ||
1024 | stm_output_init(&src->output); | ||
928 | spin_lock_init(&src->link_lock); | 1025 | spin_lock_init(&src->link_lock); |
929 | INIT_LIST_HEAD(&src->link_entry); | 1026 | INIT_LIST_HEAD(&src->link_entry); |
930 | src->data = data; | 1027 | src->data = data; |
@@ -973,9 +1070,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan, | |||
973 | 1070 | ||
974 | stm = srcu_dereference(src->link, &stm_source_srcu); | 1071 | stm = srcu_dereference(src->link, &stm_source_srcu); |
975 | if (stm) | 1072 | if (stm) |
976 | stm_write(stm->data, src->output.master, | 1073 | count = stm_write(stm->data, src->output.master, |
977 | src->output.channel + chan, | 1074 | src->output.channel + chan, |
978 | buf, count); | 1075 | buf, count); |
979 | else | 1076 | else |
980 | count = -ENODEV; | 1077 | count = -ENODEV; |
981 | 1078 | ||
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c index 3709bef0b21f..310adf57e7a1 100644 --- a/drivers/hwtracing/stm/dummy_stm.c +++ b/drivers/hwtracing/stm/dummy_stm.c | |||
@@ -40,22 +40,75 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master, | |||
40 | return size; | 40 | return size; |
41 | } | 41 | } |
42 | 42 | ||
43 | static struct stm_data dummy_stm = { | 43 | #define DUMMY_STM_MAX 32 |
44 | .name = "dummy_stm", | 44 | |
45 | .sw_start = 0x0000, | 45 | static struct stm_data dummy_stm[DUMMY_STM_MAX]; |
46 | .sw_end = 0xffff, | 46 | |
47 | .sw_nchannels = 0xffff, | 47 | static int nr_dummies = 4; |
48 | .packet = dummy_stm_packet, | 48 | |
49 | }; | 49 | module_param(nr_dummies, int, 0600); |
50 | |||
51 | static unsigned int dummy_stm_nr; | ||
52 | |||
53 | static unsigned int fail_mode; | ||
54 | |||
55 | module_param(fail_mode, int, 0600); | ||
56 | |||
57 | static int dummy_stm_link(struct stm_data *data, unsigned int master, | ||
58 | unsigned int channel) | ||
59 | { | ||
60 | if (fail_mode && (channel & fail_mode)) | ||
61 | return -EINVAL; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
50 | 65 | ||
51 | static int dummy_stm_init(void) | 66 | static int dummy_stm_init(void) |
52 | { | 67 | { |
53 | return stm_register_device(NULL, &dummy_stm, THIS_MODULE); | 68 | int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies); |
69 | |||
70 | if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX) | ||
71 | return -EINVAL; | ||
72 | |||
73 | for (i = 0; i < __nr_dummies; i++) { | ||
74 | dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i); | ||
75 | if (!dummy_stm[i].name) | ||
76 | goto fail_unregister; | ||
77 | |||
78 | dummy_stm[i].sw_start = 0x0000; | ||
79 | dummy_stm[i].sw_end = 0xffff; | ||
80 | dummy_stm[i].sw_nchannels = 0xffff; | ||
81 | dummy_stm[i].packet = dummy_stm_packet; | ||
82 | dummy_stm[i].link = dummy_stm_link; | ||
83 | |||
84 | ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE); | ||
85 | if (ret) | ||
86 | goto fail_free; | ||
87 | } | ||
88 | |||
89 | dummy_stm_nr = __nr_dummies; | ||
90 | |||
91 | return 0; | ||
92 | |||
93 | fail_unregister: | ||
94 | for (i--; i >= 0; i--) { | ||
95 | stm_unregister_device(&dummy_stm[i]); | ||
96 | fail_free: | ||
97 | kfree(dummy_stm[i].name); | ||
98 | } | ||
99 | |||
100 | return ret; | ||
101 | |||
54 | } | 102 | } |
55 | 103 | ||
56 | static void dummy_stm_exit(void) | 104 | static void dummy_stm_exit(void) |
57 | { | 105 | { |
58 | stm_unregister_device(&dummy_stm); | 106 | int i; |
107 | |||
108 | for (i = 0; i < dummy_stm_nr; i++) { | ||
109 | stm_unregister_device(&dummy_stm[i]); | ||
110 | kfree(dummy_stm[i].name); | ||
111 | } | ||
59 | } | 112 | } |
60 | 113 | ||
61 | module_init(dummy_stm_init); | 114 | module_init(dummy_stm_init); |
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c new file mode 100644 index 000000000000..0133571b506f --- /dev/null +++ b/drivers/hwtracing/stm/heartbeat.c | |||
@@ -0,0 +1,130 @@ | |||
1 | /* | ||
2 | * Simple heartbeat STM source driver | ||
3 | * Copyright (c) 2016, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Heartbeat STM source will send repetitive messages over STM devices to a | ||
15 | * trace host. | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/hrtimer.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/stm.h> | ||
23 | |||
24 | #define STM_HEARTBEAT_MAX 32 | ||
25 | |||
26 | static int nr_devs = 4; | ||
27 | static int interval_ms = 10; | ||
28 | |||
29 | module_param(nr_devs, int, 0600); | ||
30 | module_param(interval_ms, int, 0600); | ||
31 | |||
32 | static struct stm_heartbeat { | ||
33 | struct stm_source_data data; | ||
34 | struct hrtimer hrtimer; | ||
35 | unsigned int active; | ||
36 | } stm_heartbeat[STM_HEARTBEAT_MAX]; | ||
37 | |||
38 | static unsigned int nr_instances; | ||
39 | |||
40 | static const char str[] = "heartbeat stm source driver is here to serve you"; | ||
41 | |||
42 | static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr) | ||
43 | { | ||
44 | struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat, | ||
45 | hrtimer); | ||
46 | |||
47 | stm_source_write(&heartbeat->data, 0, str, sizeof str); | ||
48 | if (heartbeat->active) | ||
49 | hrtimer_forward_now(hr, ms_to_ktime(interval_ms)); | ||
50 | |||
51 | return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART; | ||
52 | } | ||
53 | |||
54 | static int stm_heartbeat_link(struct stm_source_data *data) | ||
55 | { | ||
56 | struct stm_heartbeat *heartbeat = | ||
57 | container_of(data, struct stm_heartbeat, data); | ||
58 | |||
59 | heartbeat->active = 1; | ||
60 | hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms), | ||
61 | HRTIMER_MODE_ABS); | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | static void stm_heartbeat_unlink(struct stm_source_data *data) | ||
67 | { | ||
68 | struct stm_heartbeat *heartbeat = | ||
69 | container_of(data, struct stm_heartbeat, data); | ||
70 | |||
71 | heartbeat->active = 0; | ||
72 | hrtimer_cancel(&heartbeat->hrtimer); | ||
73 | } | ||
74 | |||
75 | static int stm_heartbeat_init(void) | ||
76 | { | ||
77 | int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs); | ||
78 | |||
79 | if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX) | ||
80 | return -EINVAL; | ||
81 | |||
82 | for (i = 0; i < __nr_instances; i++) { | ||
83 | stm_heartbeat[i].data.name = | ||
84 | kasprintf(GFP_KERNEL, "heartbeat.%d", i); | ||
85 | if (!stm_heartbeat[i].data.name) | ||
86 | goto fail_unregister; | ||
87 | |||
88 | stm_heartbeat[i].data.nr_chans = 1; | ||
89 | stm_heartbeat[i].data.link = stm_heartbeat_link; | ||
90 | stm_heartbeat[i].data.unlink = stm_heartbeat_unlink; | ||
91 | hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC, | ||
92 | HRTIMER_MODE_ABS); | ||
93 | stm_heartbeat[i].hrtimer.function = | ||
94 | stm_heartbeat_hrtimer_handler; | ||
95 | |||
96 | ret = stm_source_register_device(NULL, &stm_heartbeat[i].data); | ||
97 | if (ret) | ||
98 | goto fail_free; | ||
99 | } | ||
100 | |||
101 | nr_instances = __nr_instances; | ||
102 | |||
103 | return 0; | ||
104 | |||
105 | fail_unregister: | ||
106 | for (i--; i >= 0; i--) { | ||
107 | stm_source_unregister_device(&stm_heartbeat[i].data); | ||
108 | fail_free: | ||
109 | kfree(stm_heartbeat[i].data.name); | ||
110 | } | ||
111 | |||
112 | return ret; | ||
113 | } | ||
114 | |||
115 | static void stm_heartbeat_exit(void) | ||
116 | { | ||
117 | int i; | ||
118 | |||
119 | for (i = 0; i < nr_instances; i++) { | ||
120 | stm_source_unregister_device(&stm_heartbeat[i].data); | ||
121 | kfree(stm_heartbeat[i].data.name); | ||
122 | } | ||
123 | } | ||
124 | |||
125 | module_init(stm_heartbeat_init); | ||
126 | module_exit(stm_heartbeat_exit); | ||
127 | |||
128 | MODULE_LICENSE("GPL v2"); | ||
129 | MODULE_DESCRIPTION("stm_heartbeat driver"); | ||
130 | MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>"); | ||
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c index 11ab6d01adf6..1db189657b2b 100644 --- a/drivers/hwtracing/stm/policy.c +++ b/drivers/hwtracing/stm/policy.c | |||
@@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy) | |||
272 | { | 272 | { |
273 | struct stm_device *stm = policy->stm; | 273 | struct stm_device *stm = policy->stm; |
274 | 274 | ||
275 | /* | ||
276 | * stp_policy_release() will not call here if the policy is already | ||
277 | * unbound; other users should not either, as no link exists between | ||
278 | * this policy and anything else in that case | ||
279 | */ | ||
275 | if (WARN_ON_ONCE(!policy->stm)) | 280 | if (WARN_ON_ONCE(!policy->stm)) |
276 | return; | 281 | return; |
277 | 282 | ||
278 | mutex_lock(&stm->policy_mutex); | 283 | lockdep_assert_held(&stm->policy_mutex); |
279 | stm->policy = NULL; | ||
280 | mutex_unlock(&stm->policy_mutex); | ||
281 | 284 | ||
285 | stm->policy = NULL; | ||
282 | policy->stm = NULL; | 286 | policy->stm = NULL; |
283 | 287 | ||
284 | stm_put_device(stm); | 288 | stm_put_device(stm); |
@@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy) | |||
287 | static void stp_policy_release(struct config_item *item) | 291 | static void stp_policy_release(struct config_item *item) |
288 | { | 292 | { |
289 | struct stp_policy *policy = to_stp_policy(item); | 293 | struct stp_policy *policy = to_stp_policy(item); |
294 | struct stm_device *stm = policy->stm; | ||
290 | 295 | ||
296 | /* a policy *can* be unbound and still exist in configfs tree */ | ||
297 | if (!stm) | ||
298 | return; | ||
299 | |||
300 | mutex_lock(&stm->policy_mutex); | ||
291 | stp_policy_unbind(policy); | 301 | stp_policy_unbind(policy); |
302 | mutex_unlock(&stm->policy_mutex); | ||
303 | |||
292 | kfree(policy); | 304 | kfree(policy); |
293 | } | 305 | } |
294 | 306 | ||
@@ -320,10 +332,11 @@ stp_policies_make(struct config_group *group, const char *name) | |||
320 | 332 | ||
321 | /* | 333 | /* |
322 | * node must look like <device_name>.<policy_name>, where | 334 | * node must look like <device_name>.<policy_name>, where |
323 | * <device_name> is the name of an existing stm device and | 335 | * <device_name> is the name of an existing stm device; may |
324 | * <policy_name> is an arbitrary string | 336 | * contain dots; |
337 | * <policy_name> is an arbitrary string; may not contain dots | ||
325 | */ | 338 | */ |
326 | p = strchr(devname, '.'); | 339 | p = strrchr(devname, '.'); |
327 | if (!p) { | 340 | if (!p) { |
328 | kfree(devname); | 341 | kfree(devname); |
329 | return ERR_PTR(-EINVAL); | 342 | return ERR_PTR(-EINVAL); |
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h index 95ece0292c99..4e8c6926260f 100644 --- a/drivers/hwtracing/stm/stm.h +++ b/drivers/hwtracing/stm/stm.h | |||
@@ -45,6 +45,7 @@ struct stm_device { | |||
45 | int major; | 45 | int major; |
46 | unsigned int sw_nmasters; | 46 | unsigned int sw_nmasters; |
47 | struct stm_data *data; | 47 | struct stm_data *data; |
48 | struct mutex link_mutex; | ||
48 | spinlock_t link_lock; | 49 | spinlock_t link_lock; |
49 | struct list_head link_list; | 50 | struct list_head link_list; |
50 | /* master allocation */ | 51 | /* master allocation */ |
@@ -56,6 +57,7 @@ struct stm_device { | |||
56 | container_of((_d), struct stm_device, dev) | 57 | container_of((_d), struct stm_device, dev) |
57 | 58 | ||
58 | struct stm_output { | 59 | struct stm_output { |
60 | spinlock_t lock; | ||
59 | unsigned int master; | 61 | unsigned int master; |
60 | unsigned int channel; | 62 | unsigned int channel; |
61 | unsigned int nr_chans; | 63 | unsigned int nr_chans; |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 054fc10cb3b6..15579514d120 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -440,7 +440,7 @@ config ARM_CHARLCD | |||
440 | still useful. | 440 | still useful. |
441 | 441 | ||
442 | config BMP085 | 442 | config BMP085 |
443 | bool | 443 | tristate |
444 | depends on SYSFS | 444 | depends on SYSFS |
445 | 445 | ||
446 | config BMP085_I2C | 446 | config BMP085_I2C |
@@ -470,7 +470,7 @@ config BMP085_SPI | |||
470 | config PCH_PHUB | 470 | config PCH_PHUB |
471 | tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" | 471 | tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" |
472 | select GENERIC_NET_UTILS | 472 | select GENERIC_NET_UTILS |
473 | depends on PCI && (X86_32 || COMPILE_TEST) | 473 | depends on PCI && (X86_32 || MIPS || COMPILE_TEST) |
474 | help | 474 | help |
475 | This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of | 475 | This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of |
476 | Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded | 476 | Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded |
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c index 15e88078ba1e..fe1672747bc1 100644 --- a/drivers/misc/ad525x_dpot.c +++ b/drivers/misc/ad525x_dpot.c | |||
@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg) | |||
216 | */ | 216 | */ |
217 | value = swab16(value); | 217 | value = swab16(value); |
218 | 218 | ||
219 | if (dpot->uid == DPOT_UID(AD5271_ID)) | 219 | if (dpot->uid == DPOT_UID(AD5274_ID)) |
220 | value = value >> 2; | 220 | value = value >> 2; |
221 | return value; | 221 | return value; |
222 | default: | 222 | default: |
@@ -452,7 +452,7 @@ static ssize_t sysfs_set_reg(struct device *dev, | |||
452 | int err; | 452 | int err; |
453 | 453 | ||
454 | if (reg & DPOT_ADDR_OTP_EN) { | 454 | if (reg & DPOT_ADDR_OTP_EN) { |
455 | if (!strncmp(buf, "enabled", sizeof("enabled"))) | 455 | if (sysfs_streq(buf, "enabled")) |
456 | set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); | 456 | set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); |
457 | else | 457 | else |
458 | clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); | 458 | clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask); |
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c index a3e789b85cc8..dfb72ecfa604 100644 --- a/drivers/misc/apds990x.c +++ b/drivers/misc/apds990x.c | |||
@@ -1215,7 +1215,7 @@ static int apds990x_remove(struct i2c_client *client) | |||
1215 | #ifdef CONFIG_PM_SLEEP | 1215 | #ifdef CONFIG_PM_SLEEP |
1216 | static int apds990x_suspend(struct device *dev) | 1216 | static int apds990x_suspend(struct device *dev) |
1217 | { | 1217 | { |
1218 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1218 | struct i2c_client *client = to_i2c_client(dev); |
1219 | struct apds990x_chip *chip = i2c_get_clientdata(client); | 1219 | struct apds990x_chip *chip = i2c_get_clientdata(client); |
1220 | 1220 | ||
1221 | apds990x_chip_off(chip); | 1221 | apds990x_chip_off(chip); |
@@ -1224,7 +1224,7 @@ static int apds990x_suspend(struct device *dev) | |||
1224 | 1224 | ||
1225 | static int apds990x_resume(struct device *dev) | 1225 | static int apds990x_resume(struct device *dev) |
1226 | { | 1226 | { |
1227 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1227 | struct i2c_client *client = to_i2c_client(dev); |
1228 | struct apds990x_chip *chip = i2c_get_clientdata(client); | 1228 | struct apds990x_chip *chip = i2c_get_clientdata(client); |
1229 | 1229 | ||
1230 | /* | 1230 | /* |
@@ -1240,7 +1240,7 @@ static int apds990x_resume(struct device *dev) | |||
1240 | #ifdef CONFIG_PM | 1240 | #ifdef CONFIG_PM |
1241 | static int apds990x_runtime_suspend(struct device *dev) | 1241 | static int apds990x_runtime_suspend(struct device *dev) |
1242 | { | 1242 | { |
1243 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1243 | struct i2c_client *client = to_i2c_client(dev); |
1244 | struct apds990x_chip *chip = i2c_get_clientdata(client); | 1244 | struct apds990x_chip *chip = i2c_get_clientdata(client); |
1245 | 1245 | ||
1246 | apds990x_chip_off(chip); | 1246 | apds990x_chip_off(chip); |
@@ -1249,7 +1249,7 @@ static int apds990x_runtime_suspend(struct device *dev) | |||
1249 | 1249 | ||
1250 | static int apds990x_runtime_resume(struct device *dev) | 1250 | static int apds990x_runtime_resume(struct device *dev) |
1251 | { | 1251 | { |
1252 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1252 | struct i2c_client *client = to_i2c_client(dev); |
1253 | struct apds990x_chip *chip = i2c_get_clientdata(client); | 1253 | struct apds990x_chip *chip = i2c_get_clientdata(client); |
1254 | 1254 | ||
1255 | apds990x_chip_on(chip); | 1255 | apds990x_chip_on(chip); |
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c index c65b5ea5d5ef..b3176ee92b90 100644 --- a/drivers/misc/arm-charlcd.c +++ b/drivers/misc/arm-charlcd.c | |||
@@ -8,7 +8,6 @@ | |||
8 | * Author: Linus Walleij <triad@df.lth.se> | 8 | * Author: Linus Walleij <triad@df.lth.se> |
9 | */ | 9 | */ |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/module.h> | ||
12 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
13 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
14 | #include <linux/of.h> | 13 | #include <linux/of.h> |
@@ -328,20 +327,6 @@ out_no_resource: | |||
328 | return ret; | 327 | return ret; |
329 | } | 328 | } |
330 | 329 | ||
331 | static int __exit charlcd_remove(struct platform_device *pdev) | ||
332 | { | ||
333 | struct charlcd *lcd = platform_get_drvdata(pdev); | ||
334 | |||
335 | if (lcd) { | ||
336 | free_irq(lcd->irq, lcd); | ||
337 | iounmap(lcd->virtbase); | ||
338 | release_mem_region(lcd->phybase, lcd->physize); | ||
339 | kfree(lcd); | ||
340 | } | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | static int charlcd_suspend(struct device *dev) | 330 | static int charlcd_suspend(struct device *dev) |
346 | { | 331 | { |
347 | struct platform_device *pdev = to_platform_device(dev); | 332 | struct platform_device *pdev = to_platform_device(dev); |
@@ -376,13 +361,8 @@ static struct platform_driver charlcd_driver = { | |||
376 | .driver = { | 361 | .driver = { |
377 | .name = DRIVERNAME, | 362 | .name = DRIVERNAME, |
378 | .pm = &charlcd_pm_ops, | 363 | .pm = &charlcd_pm_ops, |
364 | .suppress_bind_attrs = true, | ||
379 | .of_match_table = of_match_ptr(charlcd_match), | 365 | .of_match_table = of_match_ptr(charlcd_match), |
380 | }, | 366 | }, |
381 | .remove = __exit_p(charlcd_remove), | ||
382 | }; | 367 | }; |
383 | 368 | builtin_platform_driver_probe(charlcd_driver, charlcd_probe); | |
384 | module_platform_driver_probe(charlcd_driver, charlcd_probe); | ||
385 | |||
386 | MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>"); | ||
387 | MODULE_DESCRIPTION("ARM Character LCD Driver"); | ||
388 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c index 753d7ecdadaa..845466e45b95 100644 --- a/drivers/misc/bh1770glc.c +++ b/drivers/misc/bh1770glc.c | |||
@@ -1323,7 +1323,7 @@ static int bh1770_remove(struct i2c_client *client) | |||
1323 | #ifdef CONFIG_PM_SLEEP | 1323 | #ifdef CONFIG_PM_SLEEP |
1324 | static int bh1770_suspend(struct device *dev) | 1324 | static int bh1770_suspend(struct device *dev) |
1325 | { | 1325 | { |
1326 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1326 | struct i2c_client *client = to_i2c_client(dev); |
1327 | struct bh1770_chip *chip = i2c_get_clientdata(client); | 1327 | struct bh1770_chip *chip = i2c_get_clientdata(client); |
1328 | 1328 | ||
1329 | bh1770_chip_off(chip); | 1329 | bh1770_chip_off(chip); |
@@ -1333,7 +1333,7 @@ static int bh1770_suspend(struct device *dev) | |||
1333 | 1333 | ||
1334 | static int bh1770_resume(struct device *dev) | 1334 | static int bh1770_resume(struct device *dev) |
1335 | { | 1335 | { |
1336 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1336 | struct i2c_client *client = to_i2c_client(dev); |
1337 | struct bh1770_chip *chip = i2c_get_clientdata(client); | 1337 | struct bh1770_chip *chip = i2c_get_clientdata(client); |
1338 | int ret = 0; | 1338 | int ret = 0; |
1339 | 1339 | ||
@@ -1361,7 +1361,7 @@ static int bh1770_resume(struct device *dev) | |||
1361 | #ifdef CONFIG_PM | 1361 | #ifdef CONFIG_PM |
1362 | static int bh1770_runtime_suspend(struct device *dev) | 1362 | static int bh1770_runtime_suspend(struct device *dev) |
1363 | { | 1363 | { |
1364 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1364 | struct i2c_client *client = to_i2c_client(dev); |
1365 | struct bh1770_chip *chip = i2c_get_clientdata(client); | 1365 | struct bh1770_chip *chip = i2c_get_clientdata(client); |
1366 | 1366 | ||
1367 | bh1770_chip_off(chip); | 1367 | bh1770_chip_off(chip); |
@@ -1371,7 +1371,7 @@ static int bh1770_runtime_suspend(struct device *dev) | |||
1371 | 1371 | ||
1372 | static int bh1770_runtime_resume(struct device *dev) | 1372 | static int bh1770_runtime_resume(struct device *dev) |
1373 | { | 1373 | { |
1374 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1374 | struct i2c_client *client = to_i2c_client(dev); |
1375 | struct bh1770_chip *chip = i2c_get_clientdata(client); | 1375 | struct bh1770_chip *chip = i2c_get_clientdata(client); |
1376 | 1376 | ||
1377 | bh1770_chip_on(chip); | 1377 | bh1770_chip_on(chip); |
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index cc8645b5369d..1922cb8f6b88 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c | |||
@@ -721,9 +721,7 @@ static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj, | |||
721 | struct bin_attribute *attr, | 721 | struct bin_attribute *attr, |
722 | char *buffer, loff_t offset, size_t count) | 722 | char *buffer, loff_t offset, size_t count) |
723 | { | 723 | { |
724 | struct c2port_device *c2dev = | 724 | struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj)); |
725 | dev_get_drvdata(container_of(kobj, | ||
726 | struct device, kobj)); | ||
727 | ssize_t ret; | 725 | ssize_t ret; |
728 | 726 | ||
729 | /* Check the device and flash access status */ | 727 | /* Check the device and flash access status */ |
@@ -838,9 +836,7 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj, | |||
838 | struct bin_attribute *attr, | 836 | struct bin_attribute *attr, |
839 | char *buffer, loff_t offset, size_t count) | 837 | char *buffer, loff_t offset, size_t count) |
840 | { | 838 | { |
841 | struct c2port_device *c2dev = | 839 | struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj)); |
842 | dev_get_drvdata(container_of(kobj, | ||
843 | struct device, kobj)); | ||
844 | int ret; | 840 | int ret; |
845 | 841 | ||
846 | /* Check the device access status */ | 842 | /* Check the device access status */ |
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 02006f7109a8..038af5d45145 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c | |||
@@ -386,8 +386,7 @@ static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj, | |||
386 | struct bin_attribute *bin_attr, char *buf, | 386 | struct bin_attribute *bin_attr, char *buf, |
387 | loff_t off, size_t count) | 387 | loff_t off, size_t count) |
388 | { | 388 | { |
389 | struct cxl_afu *afu = to_cxl_afu(container_of(kobj, | 389 | struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj)); |
390 | struct device, kobj)); | ||
391 | 390 | ||
392 | return cxl_afu_read_err_buffer(afu, buf, off, count); | 391 | return cxl_afu_read_err_buffer(afu, buf, off, count); |
393 | } | 392 | } |
@@ -467,7 +466,7 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj, | |||
467 | loff_t off, size_t count) | 466 | loff_t off, size_t count) |
468 | { | 467 | { |
469 | struct afu_config_record *cr = to_cr(kobj); | 468 | struct afu_config_record *cr = to_cr(kobj); |
470 | struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj)); | 469 | struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent)); |
471 | 470 | ||
472 | u64 i, j, val; | 471 | u64 i, j, val; |
473 | 472 | ||
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index 04f2e1fa9dd1..cfc493c2e30a 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig | |||
@@ -3,6 +3,8 @@ menu "EEPROM support" | |||
3 | config EEPROM_AT24 | 3 | config EEPROM_AT24 |
4 | tristate "I2C EEPROMs / RAMs / ROMs from most vendors" | 4 | tristate "I2C EEPROMs / RAMs / ROMs from most vendors" |
5 | depends on I2C && SYSFS | 5 | depends on I2C && SYSFS |
6 | select REGMAP | ||
7 | select NVMEM | ||
6 | help | 8 | help |
7 | Enable this driver to get read/write support to most I2C EEPROMs | 9 | Enable this driver to get read/write support to most I2C EEPROMs |
8 | and compatible devices like FRAMs, SRAMs, ROMs etc. After you | 10 | and compatible devices like FRAMs, SRAMs, ROMs etc. After you |
@@ -30,6 +32,8 @@ config EEPROM_AT24 | |||
30 | config EEPROM_AT25 | 32 | config EEPROM_AT25 |
31 | tristate "SPI EEPROMs from most vendors" | 33 | tristate "SPI EEPROMs from most vendors" |
32 | depends on SPI && SYSFS | 34 | depends on SPI && SYSFS |
35 | select REGMAP | ||
36 | select NVMEM | ||
33 | help | 37 | help |
34 | Enable this driver to get read/write support to most SPI EEPROMs, | 38 | Enable this driver to get read/write support to most SPI EEPROMs, |
35 | after you configure the board init code to know about each eeprom | 39 | after you configure the board init code to know about each eeprom |
@@ -74,6 +78,8 @@ config EEPROM_93CX6 | |||
74 | config EEPROM_93XX46 | 78 | config EEPROM_93XX46 |
75 | tristate "Microwire EEPROM 93XX46 support" | 79 | tristate "Microwire EEPROM 93XX46 support" |
76 | depends on SPI && SYSFS | 80 | depends on SPI && SYSFS |
81 | select REGMAP | ||
82 | select NVMEM | ||
77 | help | 83 | help |
78 | Driver for the microwire EEPROM chipsets 93xx46x. The driver | 84 | Driver for the microwire EEPROM chipsets 93xx46x. The driver |
79 | supports both read and write commands and also the command to | 85 | supports both read and write commands and also the command to |
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 5d7c0900fa1b..089d6943f68a 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/sysfs.h> | ||
19 | #include <linux/mod_devicetable.h> | 18 | #include <linux/mod_devicetable.h> |
20 | #include <linux/log2.h> | 19 | #include <linux/log2.h> |
21 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
@@ -23,6 +22,8 @@ | |||
23 | #include <linux/of.h> | 22 | #include <linux/of.h> |
24 | #include <linux/acpi.h> | 23 | #include <linux/acpi.h> |
25 | #include <linux/i2c.h> | 24 | #include <linux/i2c.h> |
25 | #include <linux/nvmem-provider.h> | ||
26 | #include <linux/regmap.h> | ||
26 | #include <linux/platform_data/at24.h> | 27 | #include <linux/platform_data/at24.h> |
27 | 28 | ||
28 | /* | 29 | /* |
@@ -55,7 +56,6 @@ | |||
55 | 56 | ||
56 | struct at24_data { | 57 | struct at24_data { |
57 | struct at24_platform_data chip; | 58 | struct at24_platform_data chip; |
58 | struct memory_accessor macc; | ||
59 | int use_smbus; | 59 | int use_smbus; |
60 | int use_smbus_write; | 60 | int use_smbus_write; |
61 | 61 | ||
@@ -64,12 +64,15 @@ struct at24_data { | |||
64 | * but not from changes by other I2C masters. | 64 | * but not from changes by other I2C masters. |
65 | */ | 65 | */ |
66 | struct mutex lock; | 66 | struct mutex lock; |
67 | struct bin_attribute bin; | ||
68 | 67 | ||
69 | u8 *writebuf; | 68 | u8 *writebuf; |
70 | unsigned write_max; | 69 | unsigned write_max; |
71 | unsigned num_addresses; | 70 | unsigned num_addresses; |
72 | 71 | ||
72 | struct regmap_config regmap_config; | ||
73 | struct nvmem_config nvmem_config; | ||
74 | struct nvmem_device *nvmem; | ||
75 | |||
73 | /* | 76 | /* |
74 | * Some chips tie up multiple I2C addresses; dummy devices reserve | 77 | * Some chips tie up multiple I2C addresses; dummy devices reserve |
75 | * them for us, and we'll use them with SMBus calls. | 78 | * them for us, and we'll use them with SMBus calls. |
@@ -283,17 +286,6 @@ static ssize_t at24_read(struct at24_data *at24, | |||
283 | return retval; | 286 | return retval; |
284 | } | 287 | } |
285 | 288 | ||
286 | static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj, | ||
287 | struct bin_attribute *attr, | ||
288 | char *buf, loff_t off, size_t count) | ||
289 | { | ||
290 | struct at24_data *at24; | ||
291 | |||
292 | at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); | ||
293 | return at24_read(at24, buf, off, count); | ||
294 | } | ||
295 | |||
296 | |||
297 | /* | 289 | /* |
298 | * Note that if the hardware write-protect pin is pulled high, the whole | 290 | * Note that if the hardware write-protect pin is pulled high, the whole |
299 | * chip is normally write protected. But there are plenty of product | 291 | * chip is normally write protected. But there are plenty of product |
@@ -414,40 +406,49 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off, | |||
414 | return retval; | 406 | return retval; |
415 | } | 407 | } |
416 | 408 | ||
417 | static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj, | ||
418 | struct bin_attribute *attr, | ||
419 | char *buf, loff_t off, size_t count) | ||
420 | { | ||
421 | struct at24_data *at24; | ||
422 | |||
423 | at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); | ||
424 | return at24_write(at24, buf, off, count); | ||
425 | } | ||
426 | |||
427 | /*-------------------------------------------------------------------------*/ | 409 | /*-------------------------------------------------------------------------*/ |
428 | 410 | ||
429 | /* | 411 | /* |
430 | * This lets other kernel code access the eeprom data. For example, it | 412 | * Provide a regmap interface, which is registered with the NVMEM |
431 | * might hold a board's Ethernet address, or board-specific calibration | 413 | * framework |
432 | * data generated on the manufacturing floor. | 414 | */ |
433 | */ | 415 | static int at24_regmap_read(void *context, const void *reg, size_t reg_size, |
434 | 416 | void *val, size_t val_size) | |
435 | static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf, | ||
436 | off_t offset, size_t count) | ||
437 | { | 417 | { |
438 | struct at24_data *at24 = container_of(macc, struct at24_data, macc); | 418 | struct at24_data *at24 = context; |
419 | off_t offset = *(u32 *)reg; | ||
420 | int err; | ||
439 | 421 | ||
440 | return at24_read(at24, buf, offset, count); | 422 | err = at24_read(at24, val, offset, val_size); |
423 | if (err) | ||
424 | return err; | ||
425 | return 0; | ||
441 | } | 426 | } |
442 | 427 | ||
443 | static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf, | 428 | static int at24_regmap_write(void *context, const void *data, size_t count) |
444 | off_t offset, size_t count) | ||
445 | { | 429 | { |
446 | struct at24_data *at24 = container_of(macc, struct at24_data, macc); | 430 | struct at24_data *at24 = context; |
431 | const char *buf; | ||
432 | u32 offset; | ||
433 | size_t len; | ||
434 | int err; | ||
447 | 435 | ||
448 | return at24_write(at24, buf, offset, count); | 436 | memcpy(&offset, data, sizeof(offset)); |
437 | buf = (const char *)data + sizeof(offset); | ||
438 | len = count - sizeof(offset); | ||
439 | |||
440 | err = at24_write(at24, buf, offset, len); | ||
441 | if (err) | ||
442 | return err; | ||
443 | return 0; | ||
449 | } | 444 | } |
450 | 445 | ||
446 | static const struct regmap_bus at24_regmap_bus = { | ||
447 | .read = at24_regmap_read, | ||
448 | .write = at24_regmap_write, | ||
449 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
450 | }; | ||
451 | |||
451 | /*-------------------------------------------------------------------------*/ | 452 | /*-------------------------------------------------------------------------*/ |
452 | 453 | ||
453 | #ifdef CONFIG_OF | 454 | #ifdef CONFIG_OF |
@@ -481,6 +482,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
481 | struct at24_data *at24; | 482 | struct at24_data *at24; |
482 | int err; | 483 | int err; |
483 | unsigned i, num_addresses; | 484 | unsigned i, num_addresses; |
485 | struct regmap *regmap; | ||
484 | 486 | ||
485 | if (client->dev.platform_data) { | 487 | if (client->dev.platform_data) { |
486 | chip = *(struct at24_platform_data *)client->dev.platform_data; | 488 | chip = *(struct at24_platform_data *)client->dev.platform_data; |
@@ -573,29 +575,12 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
573 | at24->chip = chip; | 575 | at24->chip = chip; |
574 | at24->num_addresses = num_addresses; | 576 | at24->num_addresses = num_addresses; |
575 | 577 | ||
576 | /* | ||
577 | * Export the EEPROM bytes through sysfs, since that's convenient. | ||
578 | * By default, only root should see the data (maybe passwords etc) | ||
579 | */ | ||
580 | sysfs_bin_attr_init(&at24->bin); | ||
581 | at24->bin.attr.name = "eeprom"; | ||
582 | at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR; | ||
583 | at24->bin.read = at24_bin_read; | ||
584 | at24->bin.size = chip.byte_len; | ||
585 | |||
586 | at24->macc.read = at24_macc_read; | ||
587 | |||
588 | writable = !(chip.flags & AT24_FLAG_READONLY); | 578 | writable = !(chip.flags & AT24_FLAG_READONLY); |
589 | if (writable) { | 579 | if (writable) { |
590 | if (!use_smbus || use_smbus_write) { | 580 | if (!use_smbus || use_smbus_write) { |
591 | 581 | ||
592 | unsigned write_max = chip.page_size; | 582 | unsigned write_max = chip.page_size; |
593 | 583 | ||
594 | at24->macc.write = at24_macc_write; | ||
595 | |||
596 | at24->bin.write = at24_bin_write; | ||
597 | at24->bin.attr.mode |= S_IWUSR; | ||
598 | |||
599 | if (write_max > io_limit) | 584 | if (write_max > io_limit) |
600 | write_max = io_limit; | 585 | write_max = io_limit; |
601 | if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX) | 586 | if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX) |
@@ -627,14 +612,38 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
627 | } | 612 | } |
628 | } | 613 | } |
629 | 614 | ||
630 | err = sysfs_create_bin_file(&client->dev.kobj, &at24->bin); | 615 | at24->regmap_config.reg_bits = 32; |
631 | if (err) | 616 | at24->regmap_config.val_bits = 8; |
617 | at24->regmap_config.reg_stride = 1; | ||
618 | at24->regmap_config.max_register = chip.byte_len - 1; | ||
619 | |||
620 | regmap = devm_regmap_init(&client->dev, &at24_regmap_bus, at24, | ||
621 | &at24->regmap_config); | ||
622 | if (IS_ERR(regmap)) { | ||
623 | dev_err(&client->dev, "regmap init failed\n"); | ||
624 | err = PTR_ERR(regmap); | ||
625 | goto err_clients; | ||
626 | } | ||
627 | |||
628 | at24->nvmem_config.name = dev_name(&client->dev); | ||
629 | at24->nvmem_config.dev = &client->dev; | ||
630 | at24->nvmem_config.read_only = !writable; | ||
631 | at24->nvmem_config.root_only = true; | ||
632 | at24->nvmem_config.owner = THIS_MODULE; | ||
633 | at24->nvmem_config.compat = true; | ||
634 | at24->nvmem_config.base_dev = &client->dev; | ||
635 | |||
636 | at24->nvmem = nvmem_register(&at24->nvmem_config); | ||
637 | |||
638 | if (IS_ERR(at24->nvmem)) { | ||
639 | err = PTR_ERR(at24->nvmem); | ||
632 | goto err_clients; | 640 | goto err_clients; |
641 | } | ||
633 | 642 | ||
634 | i2c_set_clientdata(client, at24); | 643 | i2c_set_clientdata(client, at24); |
635 | 644 | ||
636 | dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n", | 645 | dev_info(&client->dev, "%u byte %s EEPROM, %s, %u bytes/write\n", |
637 | at24->bin.size, client->name, | 646 | chip.byte_len, client->name, |
638 | writable ? "writable" : "read-only", at24->write_max); | 647 | writable ? "writable" : "read-only", at24->write_max); |
639 | if (use_smbus == I2C_SMBUS_WORD_DATA || | 648 | if (use_smbus == I2C_SMBUS_WORD_DATA || |
640 | use_smbus == I2C_SMBUS_BYTE_DATA) { | 649 | use_smbus == I2C_SMBUS_BYTE_DATA) { |
@@ -645,7 +654,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
645 | 654 | ||
646 | /* export data to kernel code */ | 655 | /* export data to kernel code */ |
647 | if (chip.setup) | 656 | if (chip.setup) |
648 | chip.setup(&at24->macc, chip.context); | 657 | chip.setup(at24->nvmem, chip.context); |
649 | 658 | ||
650 | return 0; | 659 | return 0; |
651 | 660 | ||
@@ -663,7 +672,8 @@ static int at24_remove(struct i2c_client *client) | |||
663 | int i; | 672 | int i; |
664 | 673 | ||
665 | at24 = i2c_get_clientdata(client); | 674 | at24 = i2c_get_clientdata(client); |
666 | sysfs_remove_bin_file(&client->dev.kobj, &at24->bin); | 675 | |
676 | nvmem_unregister(at24->nvmem); | ||
667 | 677 | ||
668 | for (i = 1; i < at24->num_addresses; i++) | 678 | for (i = 1; i < at24->num_addresses; i++) |
669 | i2c_unregister_device(at24->client[i]); | 679 | i2c_unregister_device(at24->client[i]); |
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index f850ef556bcc..fa36a6e37084 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | 18 | ||
19 | #include <linux/nvmem-provider.h> | ||
20 | #include <linux/regmap.h> | ||
19 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
20 | #include <linux/spi/eeprom.h> | 22 | #include <linux/spi/eeprom.h> |
21 | #include <linux/property.h> | 23 | #include <linux/property.h> |
@@ -29,11 +31,12 @@ | |||
29 | 31 | ||
30 | struct at25_data { | 32 | struct at25_data { |
31 | struct spi_device *spi; | 33 | struct spi_device *spi; |
32 | struct memory_accessor mem; | ||
33 | struct mutex lock; | 34 | struct mutex lock; |
34 | struct spi_eeprom chip; | 35 | struct spi_eeprom chip; |
35 | struct bin_attribute bin; | ||
36 | unsigned addrlen; | 36 | unsigned addrlen; |
37 | struct regmap_config regmap_config; | ||
38 | struct nvmem_config nvmem_config; | ||
39 | struct nvmem_device *nvmem; | ||
37 | }; | 40 | }; |
38 | 41 | ||
39 | #define AT25_WREN 0x06 /* latch the write enable */ | 42 | #define AT25_WREN 0x06 /* latch the write enable */ |
@@ -77,10 +80,10 @@ at25_ee_read( | |||
77 | struct spi_message m; | 80 | struct spi_message m; |
78 | u8 instr; | 81 | u8 instr; |
79 | 82 | ||
80 | if (unlikely(offset >= at25->bin.size)) | 83 | if (unlikely(offset >= at25->chip.byte_len)) |
81 | return 0; | 84 | return 0; |
82 | if ((offset + count) > at25->bin.size) | 85 | if ((offset + count) > at25->chip.byte_len) |
83 | count = at25->bin.size - offset; | 86 | count = at25->chip.byte_len - offset; |
84 | if (unlikely(!count)) | 87 | if (unlikely(!count)) |
85 | return count; | 88 | return count; |
86 | 89 | ||
@@ -131,21 +134,19 @@ at25_ee_read( | |||
131 | return status ? status : count; | 134 | return status ? status : count; |
132 | } | 135 | } |
133 | 136 | ||
134 | static ssize_t | 137 | static int at25_regmap_read(void *context, const void *reg, size_t reg_size, |
135 | at25_bin_read(struct file *filp, struct kobject *kobj, | 138 | void *val, size_t val_size) |
136 | struct bin_attribute *bin_attr, | ||
137 | char *buf, loff_t off, size_t count) | ||
138 | { | 139 | { |
139 | struct device *dev; | 140 | struct at25_data *at25 = context; |
140 | struct at25_data *at25; | 141 | off_t offset = *(u32 *)reg; |
142 | int err; | ||
141 | 143 | ||
142 | dev = container_of(kobj, struct device, kobj); | 144 | err = at25_ee_read(at25, val, offset, val_size); |
143 | at25 = dev_get_drvdata(dev); | 145 | if (err) |
144 | 146 | return err; | |
145 | return at25_ee_read(at25, buf, off, count); | 147 | return 0; |
146 | } | 148 | } |
147 | 149 | ||
148 | |||
149 | static ssize_t | 150 | static ssize_t |
150 | at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, | 151 | at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, |
151 | size_t count) | 152 | size_t count) |
@@ -155,10 +156,10 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, | |||
155 | unsigned buf_size; | 156 | unsigned buf_size; |
156 | u8 *bounce; | 157 | u8 *bounce; |
157 | 158 | ||
158 | if (unlikely(off >= at25->bin.size)) | 159 | if (unlikely(off >= at25->chip.byte_len)) |
159 | return -EFBIG; | 160 | return -EFBIG; |
160 | if ((off + count) > at25->bin.size) | 161 | if ((off + count) > at25->chip.byte_len) |
161 | count = at25->bin.size - off; | 162 | count = at25->chip.byte_len - off; |
162 | if (unlikely(!count)) | 163 | if (unlikely(!count)) |
163 | return count; | 164 | return count; |
164 | 165 | ||
@@ -265,39 +266,29 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, | |||
265 | return written ? written : status; | 266 | return written ? written : status; |
266 | } | 267 | } |
267 | 268 | ||
268 | static ssize_t | 269 | static int at25_regmap_write(void *context, const void *data, size_t count) |
269 | at25_bin_write(struct file *filp, struct kobject *kobj, | ||
270 | struct bin_attribute *bin_attr, | ||
271 | char *buf, loff_t off, size_t count) | ||
272 | { | 270 | { |
273 | struct device *dev; | 271 | struct at25_data *at25 = context; |
274 | struct at25_data *at25; | 272 | const char *buf; |
275 | 273 | u32 offset; | |
276 | dev = container_of(kobj, struct device, kobj); | 274 | size_t len; |
277 | at25 = dev_get_drvdata(dev); | 275 | int err; |
278 | |||
279 | return at25_ee_write(at25, buf, off, count); | ||
280 | } | ||
281 | 276 | ||
282 | /*-------------------------------------------------------------------------*/ | 277 | memcpy(&offset, data, sizeof(offset)); |
283 | 278 | buf = (const char *)data + sizeof(offset); | |
284 | /* Let in-kernel code access the eeprom data. */ | 279 | len = count - sizeof(offset); |
285 | |||
286 | static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf, | ||
287 | off_t offset, size_t count) | ||
288 | { | ||
289 | struct at25_data *at25 = container_of(mem, struct at25_data, mem); | ||
290 | 280 | ||
291 | return at25_ee_read(at25, buf, offset, count); | 281 | err = at25_ee_write(at25, buf, offset, len); |
282 | if (err) | ||
283 | return err; | ||
284 | return 0; | ||
292 | } | 285 | } |
293 | 286 | ||
294 | static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf, | 287 | static const struct regmap_bus at25_regmap_bus = { |
295 | off_t offset, size_t count) | 288 | .read = at25_regmap_read, |
296 | { | 289 | .write = at25_regmap_write, |
297 | struct at25_data *at25 = container_of(mem, struct at25_data, mem); | 290 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, |
298 | 291 | }; | |
299 | return at25_ee_write(at25, buf, offset, count); | ||
300 | } | ||
301 | 292 | ||
302 | /*-------------------------------------------------------------------------*/ | 293 | /*-------------------------------------------------------------------------*/ |
303 | 294 | ||
@@ -358,6 +349,7 @@ static int at25_probe(struct spi_device *spi) | |||
358 | { | 349 | { |
359 | struct at25_data *at25 = NULL; | 350 | struct at25_data *at25 = NULL; |
360 | struct spi_eeprom chip; | 351 | struct spi_eeprom chip; |
352 | struct regmap *regmap; | ||
361 | int err; | 353 | int err; |
362 | int sr; | 354 | int sr; |
363 | int addrlen; | 355 | int addrlen; |
@@ -402,40 +394,35 @@ static int at25_probe(struct spi_device *spi) | |||
402 | spi_set_drvdata(spi, at25); | 394 | spi_set_drvdata(spi, at25); |
403 | at25->addrlen = addrlen; | 395 | at25->addrlen = addrlen; |
404 | 396 | ||
405 | /* Export the EEPROM bytes through sysfs, since that's convenient. | 397 | at25->regmap_config.reg_bits = 32; |
406 | * And maybe to other kernel code; it might hold a board's Ethernet | 398 | at25->regmap_config.val_bits = 8; |
407 | * address, or board-specific calibration data generated on the | 399 | at25->regmap_config.reg_stride = 1; |
408 | * manufacturing floor. | 400 | at25->regmap_config.max_register = chip.byte_len - 1; |
409 | * | ||
410 | * Default to root-only access to the data; EEPROMs often hold data | ||
411 | * that's sensitive for read and/or write, like ethernet addresses, | ||
412 | * security codes, board-specific manufacturing calibrations, etc. | ||
413 | */ | ||
414 | sysfs_bin_attr_init(&at25->bin); | ||
415 | at25->bin.attr.name = "eeprom"; | ||
416 | at25->bin.attr.mode = S_IRUSR; | ||
417 | at25->bin.read = at25_bin_read; | ||
418 | at25->mem.read = at25_mem_read; | ||
419 | |||
420 | at25->bin.size = at25->chip.byte_len; | ||
421 | if (!(chip.flags & EE_READONLY)) { | ||
422 | at25->bin.write = at25_bin_write; | ||
423 | at25->bin.attr.mode |= S_IWUSR; | ||
424 | at25->mem.write = at25_mem_write; | ||
425 | } | ||
426 | 401 | ||
427 | err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin); | 402 | regmap = devm_regmap_init(&spi->dev, &at25_regmap_bus, at25, |
428 | if (err) | 403 | &at25->regmap_config); |
429 | return err; | 404 | if (IS_ERR(regmap)) { |
430 | 405 | dev_err(&spi->dev, "regmap init failed\n"); | |
431 | if (chip.setup) | 406 | return PTR_ERR(regmap); |
432 | chip.setup(&at25->mem, chip.context); | 407 | } |
433 | 408 | ||
434 | dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n", | 409 | at25->nvmem_config.name = dev_name(&spi->dev); |
435 | (at25->bin.size < 1024) | 410 | at25->nvmem_config.dev = &spi->dev; |
436 | ? at25->bin.size | 411 | at25->nvmem_config.read_only = chip.flags & EE_READONLY; |
437 | : (at25->bin.size / 1024), | 412 | at25->nvmem_config.root_only = true; |
438 | (at25->bin.size < 1024) ? "Byte" : "KByte", | 413 | at25->nvmem_config.owner = THIS_MODULE; |
414 | at25->nvmem_config.compat = true; | ||
415 | at25->nvmem_config.base_dev = &spi->dev; | ||
416 | |||
417 | at25->nvmem = nvmem_register(&at25->nvmem_config); | ||
418 | if (IS_ERR(at25->nvmem)) | ||
419 | return PTR_ERR(at25->nvmem); | ||
420 | |||
421 | dev_info(&spi->dev, "%d %s %s eeprom%s, pagesize %u\n", | ||
422 | (chip.byte_len < 1024) | ||
423 | ? chip.byte_len | ||
424 | : (chip.byte_len / 1024), | ||
425 | (chip.byte_len < 1024) ? "Byte" : "KByte", | ||
439 | at25->chip.name, | 426 | at25->chip.name, |
440 | (chip.flags & EE_READONLY) ? " (readonly)" : "", | 427 | (chip.flags & EE_READONLY) ? " (readonly)" : "", |
441 | at25->chip.page_size); | 428 | at25->chip.page_size); |
@@ -447,7 +434,8 @@ static int at25_remove(struct spi_device *spi) | |||
447 | struct at25_data *at25; | 434 | struct at25_data *at25; |
448 | 435 | ||
449 | at25 = spi_get_drvdata(spi); | 436 | at25 = spi_get_drvdata(spi); |
450 | sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin); | 437 | nvmem_unregister(at25->nvmem); |
438 | |||
451 | return 0; | 439 | return 0; |
452 | } | 440 | } |
453 | 441 | ||
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 7342fd637031..3d1d55157e5f 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c | |||
@@ -84,7 +84,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, | |||
84 | struct bin_attribute *bin_attr, | 84 | struct bin_attribute *bin_attr, |
85 | char *buf, loff_t off, size_t count) | 85 | char *buf, loff_t off, size_t count) |
86 | { | 86 | { |
87 | struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj)); | 87 | struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj)); |
88 | struct eeprom_data *data = i2c_get_clientdata(client); | 88 | struct eeprom_data *data = i2c_get_clientdata(client); |
89 | u8 slice; | 89 | u8 slice; |
90 | 90 | ||
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index ff63f05edc76..426fe2fd5238 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c | |||
@@ -10,12 +10,17 @@ | |||
10 | 10 | ||
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/gpio/consumer.h> | ||
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/of.h> | ||
18 | #include <linux/of_device.h> | ||
19 | #include <linux/of_gpio.h> | ||
16 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
17 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
18 | #include <linux/sysfs.h> | 22 | #include <linux/nvmem-provider.h> |
23 | #include <linux/regmap.h> | ||
19 | #include <linux/eeprom_93xx46.h> | 24 | #include <linux/eeprom_93xx46.h> |
20 | 25 | ||
21 | #define OP_START 0x4 | 26 | #define OP_START 0x4 |
@@ -25,73 +30,111 @@ | |||
25 | #define ADDR_ERAL 0x20 | 30 | #define ADDR_ERAL 0x20 |
26 | #define ADDR_EWEN 0x30 | 31 | #define ADDR_EWEN 0x30 |
27 | 32 | ||
33 | struct eeprom_93xx46_devtype_data { | ||
34 | unsigned int quirks; | ||
35 | }; | ||
36 | |||
37 | static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = { | ||
38 | .quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ | | ||
39 | EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH, | ||
40 | }; | ||
41 | |||
28 | struct eeprom_93xx46_dev { | 42 | struct eeprom_93xx46_dev { |
29 | struct spi_device *spi; | 43 | struct spi_device *spi; |
30 | struct eeprom_93xx46_platform_data *pdata; | 44 | struct eeprom_93xx46_platform_data *pdata; |
31 | struct bin_attribute bin; | ||
32 | struct mutex lock; | 45 | struct mutex lock; |
46 | struct regmap_config regmap_config; | ||
47 | struct nvmem_config nvmem_config; | ||
48 | struct nvmem_device *nvmem; | ||
33 | int addrlen; | 49 | int addrlen; |
50 | int size; | ||
34 | }; | 51 | }; |
35 | 52 | ||
53 | static inline bool has_quirk_single_word_read(struct eeprom_93xx46_dev *edev) | ||
54 | { | ||
55 | return edev->pdata->quirks & EEPROM_93XX46_QUIRK_SINGLE_WORD_READ; | ||
56 | } | ||
57 | |||
58 | static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev) | ||
59 | { | ||
60 | return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH; | ||
61 | } | ||
62 | |||
36 | static ssize_t | 63 | static ssize_t |
37 | eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj, | 64 | eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf, |
38 | struct bin_attribute *bin_attr, | 65 | unsigned off, size_t count) |
39 | char *buf, loff_t off, size_t count) | ||
40 | { | 66 | { |
41 | struct eeprom_93xx46_dev *edev; | 67 | ssize_t ret = 0; |
42 | struct device *dev; | ||
43 | struct spi_message m; | ||
44 | struct spi_transfer t[2]; | ||
45 | int bits, ret; | ||
46 | u16 cmd_addr; | ||
47 | 68 | ||
48 | dev = container_of(kobj, struct device, kobj); | 69 | if (unlikely(off >= edev->size)) |
49 | edev = dev_get_drvdata(dev); | 70 | return 0; |
71 | if ((off + count) > edev->size) | ||
72 | count = edev->size - off; | ||
73 | if (unlikely(!count)) | ||
74 | return count; | ||
50 | 75 | ||
51 | cmd_addr = OP_READ << edev->addrlen; | 76 | mutex_lock(&edev->lock); |
52 | 77 | ||
53 | if (edev->addrlen == 7) { | 78 | if (edev->pdata->prepare) |
54 | cmd_addr |= off & 0x7f; | 79 | edev->pdata->prepare(edev); |
55 | bits = 10; | ||
56 | } else { | ||
57 | cmd_addr |= off & 0x3f; | ||
58 | bits = 9; | ||
59 | } | ||
60 | 80 | ||
61 | dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n", | 81 | while (count) { |
62 | cmd_addr, edev->spi->max_speed_hz); | 82 | struct spi_message m; |
83 | struct spi_transfer t[2] = { { 0 } }; | ||
84 | u16 cmd_addr = OP_READ << edev->addrlen; | ||
85 | size_t nbytes = count; | ||
86 | int bits; | ||
87 | int err; | ||
88 | |||
89 | if (edev->addrlen == 7) { | ||
90 | cmd_addr |= off & 0x7f; | ||
91 | bits = 10; | ||
92 | if (has_quirk_single_word_read(edev)) | ||
93 | nbytes = 1; | ||
94 | } else { | ||
95 | cmd_addr |= (off >> 1) & 0x3f; | ||
96 | bits = 9; | ||
97 | if (has_quirk_single_word_read(edev)) | ||
98 | nbytes = 2; | ||
99 | } | ||
63 | 100 | ||
64 | spi_message_init(&m); | 101 | dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n", |
65 | memset(t, 0, sizeof(t)); | 102 | cmd_addr, edev->spi->max_speed_hz); |
66 | 103 | ||
67 | t[0].tx_buf = (char *)&cmd_addr; | 104 | spi_message_init(&m); |
68 | t[0].len = 2; | ||
69 | t[0].bits_per_word = bits; | ||
70 | spi_message_add_tail(&t[0], &m); | ||
71 | 105 | ||
72 | t[1].rx_buf = buf; | 106 | t[0].tx_buf = (char *)&cmd_addr; |
73 | t[1].len = count; | 107 | t[0].len = 2; |
74 | t[1].bits_per_word = 8; | 108 | t[0].bits_per_word = bits; |
75 | spi_message_add_tail(&t[1], &m); | 109 | spi_message_add_tail(&t[0], &m); |
76 | 110 | ||
77 | mutex_lock(&edev->lock); | 111 | t[1].rx_buf = buf; |
112 | t[1].len = count; | ||
113 | t[1].bits_per_word = 8; | ||
114 | spi_message_add_tail(&t[1], &m); | ||
78 | 115 | ||
79 | if (edev->pdata->prepare) | 116 | err = spi_sync(edev->spi, &m); |
80 | edev->pdata->prepare(edev); | 117 | /* have to wait at least Tcsl ns */ |
118 | ndelay(250); | ||
81 | 119 | ||
82 | ret = spi_sync(edev->spi, &m); | 120 | if (err) { |
83 | /* have to wait at least Tcsl ns */ | 121 | dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n", |
84 | ndelay(250); | 122 | nbytes, (int)off, err); |
85 | if (ret) { | 123 | ret = err; |
86 | dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n", | 124 | break; |
87 | count, (int)off, ret); | 125 | } |
126 | |||
127 | buf += nbytes; | ||
128 | off += nbytes; | ||
129 | count -= nbytes; | ||
130 | ret += nbytes; | ||
88 | } | 131 | } |
89 | 132 | ||
90 | if (edev->pdata->finish) | 133 | if (edev->pdata->finish) |
91 | edev->pdata->finish(edev); | 134 | edev->pdata->finish(edev); |
92 | 135 | ||
93 | mutex_unlock(&edev->lock); | 136 | mutex_unlock(&edev->lock); |
94 | return ret ? : count; | 137 | return ret; |
95 | } | 138 | } |
96 | 139 | ||
97 | static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) | 140 | static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) |
@@ -110,7 +153,13 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) | |||
110 | bits = 9; | 153 | bits = 9; |
111 | } | 154 | } |
112 | 155 | ||
113 | dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr); | 156 | if (has_quirk_instruction_length(edev)) { |
157 | cmd_addr <<= 2; | ||
158 | bits += 2; | ||
159 | } | ||
160 | |||
161 | dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n", | ||
162 | is_on ? "en" : "ds", cmd_addr, bits); | ||
114 | 163 | ||
115 | spi_message_init(&m); | 164 | spi_message_init(&m); |
116 | memset(&t, 0, sizeof(t)); | 165 | memset(&t, 0, sizeof(t)); |
@@ -155,7 +204,7 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev, | |||
155 | bits = 10; | 204 | bits = 10; |
156 | data_len = 1; | 205 | data_len = 1; |
157 | } else { | 206 | } else { |
158 | cmd_addr |= off & 0x3f; | 207 | cmd_addr |= (off >> 1) & 0x3f; |
159 | bits = 9; | 208 | bits = 9; |
160 | data_len = 2; | 209 | data_len = 2; |
161 | } | 210 | } |
@@ -182,16 +231,17 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev, | |||
182 | } | 231 | } |
183 | 232 | ||
184 | static ssize_t | 233 | static ssize_t |
185 | eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj, | 234 | eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf, |
186 | struct bin_attribute *bin_attr, | 235 | loff_t off, size_t count) |
187 | char *buf, loff_t off, size_t count) | ||
188 | { | 236 | { |
189 | struct eeprom_93xx46_dev *edev; | ||
190 | struct device *dev; | ||
191 | int i, ret, step = 1; | 237 | int i, ret, step = 1; |
192 | 238 | ||
193 | dev = container_of(kobj, struct device, kobj); | 239 | if (unlikely(off >= edev->size)) |
194 | edev = dev_get_drvdata(dev); | 240 | return -EFBIG; |
241 | if ((off + count) > edev->size) | ||
242 | count = edev->size - off; | ||
243 | if (unlikely(!count)) | ||
244 | return count; | ||
195 | 245 | ||
196 | /* only write even number of bytes on 16-bit devices */ | 246 | /* only write even number of bytes on 16-bit devices */ |
197 | if (edev->addrlen == 6) { | 247 | if (edev->addrlen == 6) { |
@@ -228,6 +278,49 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj, | |||
228 | return ret ? : count; | 278 | return ret ? : count; |
229 | } | 279 | } |
230 | 280 | ||
281 | /* | ||
282 | * Provide a regmap interface, which is registered with the NVMEM | ||
283 | * framework | ||
284 | */ | ||
285 | static int eeprom_93xx46_regmap_read(void *context, const void *reg, | ||
286 | size_t reg_size, void *val, | ||
287 | size_t val_size) | ||
288 | { | ||
289 | struct eeprom_93xx46_dev *eeprom_93xx46 = context; | ||
290 | off_t offset = *(u32 *)reg; | ||
291 | int err; | ||
292 | |||
293 | err = eeprom_93xx46_read(eeprom_93xx46, val, offset, val_size); | ||
294 | if (err) | ||
295 | return err; | ||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | static int eeprom_93xx46_regmap_write(void *context, const void *data, | ||
300 | size_t count) | ||
301 | { | ||
302 | struct eeprom_93xx46_dev *eeprom_93xx46 = context; | ||
303 | const char *buf; | ||
304 | u32 offset; | ||
305 | size_t len; | ||
306 | int err; | ||
307 | |||
308 | memcpy(&offset, data, sizeof(offset)); | ||
309 | buf = (const char *)data + sizeof(offset); | ||
310 | len = count - sizeof(offset); | ||
311 | |||
312 | err = eeprom_93xx46_write(eeprom_93xx46, buf, offset, len); | ||
313 | if (err) | ||
314 | return err; | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static const struct regmap_bus eeprom_93xx46_regmap_bus = { | ||
319 | .read = eeprom_93xx46_regmap_read, | ||
320 | .write = eeprom_93xx46_regmap_write, | ||
321 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
322 | }; | ||
323 | |||
231 | static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) | 324 | static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) |
232 | { | 325 | { |
233 | struct eeprom_93xx46_platform_data *pd = edev->pdata; | 326 | struct eeprom_93xx46_platform_data *pd = edev->pdata; |
@@ -245,6 +338,13 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) | |||
245 | bits = 9; | 338 | bits = 9; |
246 | } | 339 | } |
247 | 340 | ||
341 | if (has_quirk_instruction_length(edev)) { | ||
342 | cmd_addr <<= 2; | ||
343 | bits += 2; | ||
344 | } | ||
345 | |||
346 | dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits); | ||
347 | |||
248 | spi_message_init(&m); | 348 | spi_message_init(&m); |
249 | memset(&t, 0, sizeof(t)); | 349 | memset(&t, 0, sizeof(t)); |
250 | 350 | ||
@@ -294,12 +394,101 @@ static ssize_t eeprom_93xx46_store_erase(struct device *dev, | |||
294 | } | 394 | } |
295 | static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase); | 395 | static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase); |
296 | 396 | ||
397 | static void select_assert(void *context) | ||
398 | { | ||
399 | struct eeprom_93xx46_dev *edev = context; | ||
400 | |||
401 | gpiod_set_value_cansleep(edev->pdata->select, 1); | ||
402 | } | ||
403 | |||
404 | static void select_deassert(void *context) | ||
405 | { | ||
406 | struct eeprom_93xx46_dev *edev = context; | ||
407 | |||
408 | gpiod_set_value_cansleep(edev->pdata->select, 0); | ||
409 | } | ||
410 | |||
411 | static const struct of_device_id eeprom_93xx46_of_table[] = { | ||
412 | { .compatible = "eeprom-93xx46", }, | ||
413 | { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, }, | ||
414 | {} | ||
415 | }; | ||
416 | MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table); | ||
417 | |||
418 | static int eeprom_93xx46_probe_dt(struct spi_device *spi) | ||
419 | { | ||
420 | const struct of_device_id *of_id = | ||
421 | of_match_device(eeprom_93xx46_of_table, &spi->dev); | ||
422 | struct device_node *np = spi->dev.of_node; | ||
423 | struct eeprom_93xx46_platform_data *pd; | ||
424 | u32 tmp; | ||
425 | int gpio; | ||
426 | enum of_gpio_flags of_flags; | ||
427 | int ret; | ||
428 | |||
429 | pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL); | ||
430 | if (!pd) | ||
431 | return -ENOMEM; | ||
432 | |||
433 | ret = of_property_read_u32(np, "data-size", &tmp); | ||
434 | if (ret < 0) { | ||
435 | dev_err(&spi->dev, "data-size property not found\n"); | ||
436 | return ret; | ||
437 | } | ||
438 | |||
439 | if (tmp == 8) { | ||
440 | pd->flags |= EE_ADDR8; | ||
441 | } else if (tmp == 16) { | ||
442 | pd->flags |= EE_ADDR16; | ||
443 | } else { | ||
444 | dev_err(&spi->dev, "invalid data-size (%d)\n", tmp); | ||
445 | return -EINVAL; | ||
446 | } | ||
447 | |||
448 | if (of_property_read_bool(np, "read-only")) | ||
449 | pd->flags |= EE_READONLY; | ||
450 | |||
451 | gpio = of_get_named_gpio_flags(np, "select-gpios", 0, &of_flags); | ||
452 | if (gpio_is_valid(gpio)) { | ||
453 | unsigned long flags = | ||
454 | of_flags == OF_GPIO_ACTIVE_LOW ? GPIOF_ACTIVE_LOW : 0; | ||
455 | |||
456 | ret = devm_gpio_request_one(&spi->dev, gpio, flags, | ||
457 | "eeprom_93xx46_select"); | ||
458 | if (ret) | ||
459 | return ret; | ||
460 | |||
461 | pd->select = gpio_to_desc(gpio); | ||
462 | pd->prepare = select_assert; | ||
463 | pd->finish = select_deassert; | ||
464 | |||
465 | gpiod_direction_output(pd->select, 0); | ||
466 | } | ||
467 | |||
468 | if (of_id->data) { | ||
469 | const struct eeprom_93xx46_devtype_data *data = of_id->data; | ||
470 | |||
471 | pd->quirks = data->quirks; | ||
472 | } | ||
473 | |||
474 | spi->dev.platform_data = pd; | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | |||
297 | static int eeprom_93xx46_probe(struct spi_device *spi) | 479 | static int eeprom_93xx46_probe(struct spi_device *spi) |
298 | { | 480 | { |
299 | struct eeprom_93xx46_platform_data *pd; | 481 | struct eeprom_93xx46_platform_data *pd; |
300 | struct eeprom_93xx46_dev *edev; | 482 | struct eeprom_93xx46_dev *edev; |
483 | struct regmap *regmap; | ||
301 | int err; | 484 | int err; |
302 | 485 | ||
486 | if (spi->dev.of_node) { | ||
487 | err = eeprom_93xx46_probe_dt(spi); | ||
488 | if (err < 0) | ||
489 | return err; | ||
490 | } | ||
491 | |||
303 | pd = spi->dev.platform_data; | 492 | pd = spi->dev.platform_data; |
304 | if (!pd) { | 493 | if (!pd) { |
305 | dev_err(&spi->dev, "missing platform data\n"); | 494 | dev_err(&spi->dev, "missing platform data\n"); |
@@ -325,19 +514,34 @@ static int eeprom_93xx46_probe(struct spi_device *spi) | |||
325 | edev->spi = spi_dev_get(spi); | 514 | edev->spi = spi_dev_get(spi); |
326 | edev->pdata = pd; | 515 | edev->pdata = pd; |
327 | 516 | ||
328 | sysfs_bin_attr_init(&edev->bin); | 517 | edev->size = 128; |
329 | edev->bin.attr.name = "eeprom"; | 518 | |
330 | edev->bin.attr.mode = S_IRUSR; | 519 | edev->regmap_config.reg_bits = 32; |
331 | edev->bin.read = eeprom_93xx46_bin_read; | 520 | edev->regmap_config.val_bits = 8; |
332 | edev->bin.size = 128; | 521 | edev->regmap_config.reg_stride = 1; |
333 | if (!(pd->flags & EE_READONLY)) { | 522 | edev->regmap_config.max_register = edev->size - 1; |
334 | edev->bin.write = eeprom_93xx46_bin_write; | 523 | |
335 | edev->bin.attr.mode |= S_IWUSR; | 524 | regmap = devm_regmap_init(&spi->dev, &eeprom_93xx46_regmap_bus, edev, |
525 | &edev->regmap_config); | ||
526 | if (IS_ERR(regmap)) { | ||
527 | dev_err(&spi->dev, "regmap init failed\n"); | ||
528 | err = PTR_ERR(regmap); | ||
529 | goto fail; | ||
336 | } | 530 | } |
337 | 531 | ||
338 | err = sysfs_create_bin_file(&spi->dev.kobj, &edev->bin); | 532 | edev->nvmem_config.name = dev_name(&spi->dev); |
339 | if (err) | 533 | edev->nvmem_config.dev = &spi->dev; |
534 | edev->nvmem_config.read_only = pd->flags & EE_READONLY; | ||
535 | edev->nvmem_config.root_only = true; | ||
536 | edev->nvmem_config.owner = THIS_MODULE; | ||
537 | edev->nvmem_config.compat = true; | ||
538 | edev->nvmem_config.base_dev = &spi->dev; | ||
539 | |||
540 | edev->nvmem = nvmem_register(&edev->nvmem_config); | ||
541 | if (IS_ERR(edev->nvmem)) { | ||
542 | err = PTR_ERR(edev->nvmem); | ||
340 | goto fail; | 543 | goto fail; |
544 | } | ||
341 | 545 | ||
342 | dev_info(&spi->dev, "%d-bit eeprom %s\n", | 546 | dev_info(&spi->dev, "%d-bit eeprom %s\n", |
343 | (pd->flags & EE_ADDR8) ? 8 : 16, | 547 | (pd->flags & EE_ADDR8) ? 8 : 16, |
@@ -359,10 +563,11 @@ static int eeprom_93xx46_remove(struct spi_device *spi) | |||
359 | { | 563 | { |
360 | struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi); | 564 | struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi); |
361 | 565 | ||
566 | nvmem_unregister(edev->nvmem); | ||
567 | |||
362 | if (!(edev->pdata->flags & EE_READONLY)) | 568 | if (!(edev->pdata->flags & EE_READONLY)) |
363 | device_remove_file(&spi->dev, &dev_attr_erase); | 569 | device_remove_file(&spi->dev, &dev_attr_erase); |
364 | 570 | ||
365 | sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin); | ||
366 | kfree(edev); | 571 | kfree(edev); |
367 | return 0; | 572 | return 0; |
368 | } | 573 | } |
@@ -370,6 +575,7 @@ static int eeprom_93xx46_remove(struct spi_device *spi) | |||
370 | static struct spi_driver eeprom_93xx46_driver = { | 575 | static struct spi_driver eeprom_93xx46_driver = { |
371 | .driver = { | 576 | .driver = { |
372 | .name = "93xx46", | 577 | .name = "93xx46", |
578 | .of_match_table = of_match_ptr(eeprom_93xx46_of_table), | ||
373 | }, | 579 | }, |
374 | .probe = eeprom_93xx46_probe, | 580 | .probe = eeprom_93xx46_probe, |
375 | .remove = eeprom_93xx46_remove, | 581 | .remove = eeprom_93xx46_remove, |
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c index 6ab31eff0536..c24c9b7c1dd3 100644 --- a/drivers/misc/genwqe/card_sysfs.c +++ b/drivers/misc/genwqe/card_sysfs.c | |||
@@ -278,7 +278,7 @@ static umode_t genwqe_is_visible(struct kobject *kobj, | |||
278 | struct attribute *attr, int n) | 278 | struct attribute *attr, int n) |
279 | { | 279 | { |
280 | unsigned int j; | 280 | unsigned int j; |
281 | struct device *dev = container_of(kobj, struct device, kobj); | 281 | struct device *dev = kobj_to_dev(kobj); |
282 | struct genwqe_dev *cd = dev_get_drvdata(dev); | 282 | struct genwqe_dev *cd = dev_get_drvdata(dev); |
283 | umode_t mode = attr->mode; | 283 | umode_t mode = attr->mode; |
284 | 284 | ||
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h index 5bd127727d8e..9fea49d2e15b 100644 --- a/drivers/misc/ibmasm/ibmasm.h +++ b/drivers/misc/ibmasm/ibmasm.h | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/kref.h> | 34 | #include <linux/kref.h> |
35 | #include <linux/device.h> | 35 | #include <linux/device.h> |
36 | #include <linux/input.h> | 36 | #include <linux/input.h> |
37 | #include <linux/time64.h> | ||
37 | 38 | ||
38 | /* Driver identification */ | 39 | /* Driver identification */ |
39 | #define DRIVER_NAME "ibmasm" | 40 | #define DRIVER_NAME "ibmasm" |
@@ -53,9 +54,11 @@ extern int ibmasm_debug; | |||
53 | 54 | ||
54 | static inline char *get_timestamp(char *buf) | 55 | static inline char *get_timestamp(char *buf) |
55 | { | 56 | { |
56 | struct timeval now; | 57 | struct timespec64 now; |
57 | do_gettimeofday(&now); | 58 | |
58 | sprintf(buf, "%lu.%lu", now.tv_sec, now.tv_usec); | 59 | ktime_get_real_ts64(&now); |
60 | sprintf(buf, "%llu.%.08lu", (long long)now.tv_sec, | ||
61 | now.tv_nsec / NSEC_PER_USEC); | ||
59 | return buf; | 62 | return buf; |
60 | } | 63 | } |
61 | 64 | ||
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index 0c3bb7e3ee80..14b7d539fed6 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c | |||
@@ -209,7 +209,7 @@ static int lis3lv02d_i2c_remove(struct i2c_client *client) | |||
209 | #ifdef CONFIG_PM_SLEEP | 209 | #ifdef CONFIG_PM_SLEEP |
210 | static int lis3lv02d_i2c_suspend(struct device *dev) | 210 | static int lis3lv02d_i2c_suspend(struct device *dev) |
211 | { | 211 | { |
212 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 212 | struct i2c_client *client = to_i2c_client(dev); |
213 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 213 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
214 | 214 | ||
215 | if (!lis3->pdata || !lis3->pdata->wakeup_flags) | 215 | if (!lis3->pdata || !lis3->pdata->wakeup_flags) |
@@ -219,7 +219,7 @@ static int lis3lv02d_i2c_suspend(struct device *dev) | |||
219 | 219 | ||
220 | static int lis3lv02d_i2c_resume(struct device *dev) | 220 | static int lis3lv02d_i2c_resume(struct device *dev) |
221 | { | 221 | { |
222 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 222 | struct i2c_client *client = to_i2c_client(dev); |
223 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 223 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
224 | 224 | ||
225 | /* | 225 | /* |
@@ -238,7 +238,7 @@ static int lis3lv02d_i2c_resume(struct device *dev) | |||
238 | #ifdef CONFIG_PM | 238 | #ifdef CONFIG_PM |
239 | static int lis3_i2c_runtime_suspend(struct device *dev) | 239 | static int lis3_i2c_runtime_suspend(struct device *dev) |
240 | { | 240 | { |
241 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 241 | struct i2c_client *client = to_i2c_client(dev); |
242 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 242 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
243 | 243 | ||
244 | lis3lv02d_poweroff(lis3); | 244 | lis3lv02d_poweroff(lis3); |
@@ -247,7 +247,7 @@ static int lis3_i2c_runtime_suspend(struct device *dev) | |||
247 | 247 | ||
248 | static int lis3_i2c_runtime_resume(struct device *dev) | 248 | static int lis3_i2c_runtime_resume(struct device *dev) |
249 | { | 249 | { |
250 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 250 | struct i2c_client *client = to_i2c_client(dev); |
251 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 251 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
252 | 252 | ||
253 | lis3lv02d_poweron(lis3); | 253 | lis3lv02d_poweron(lis3); |
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index 2a6eaf1122b4..5f1a36b8fbb0 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c | |||
@@ -92,6 +92,9 @@ enum ctype { | |||
92 | CT_UNALIGNED_LOAD_STORE_WRITE, | 92 | CT_UNALIGNED_LOAD_STORE_WRITE, |
93 | CT_OVERWRITE_ALLOCATION, | 93 | CT_OVERWRITE_ALLOCATION, |
94 | CT_WRITE_AFTER_FREE, | 94 | CT_WRITE_AFTER_FREE, |
95 | CT_READ_AFTER_FREE, | ||
96 | CT_WRITE_BUDDY_AFTER_FREE, | ||
97 | CT_READ_BUDDY_AFTER_FREE, | ||
95 | CT_SOFTLOCKUP, | 98 | CT_SOFTLOCKUP, |
96 | CT_HARDLOCKUP, | 99 | CT_HARDLOCKUP, |
97 | CT_SPINLOCKUP, | 100 | CT_SPINLOCKUP, |
@@ -105,6 +108,7 @@ enum ctype { | |||
105 | CT_WRITE_RO, | 108 | CT_WRITE_RO, |
106 | CT_WRITE_RO_AFTER_INIT, | 109 | CT_WRITE_RO_AFTER_INIT, |
107 | CT_WRITE_KERN, | 110 | CT_WRITE_KERN, |
111 | CT_WRAP_ATOMIC | ||
108 | }; | 112 | }; |
109 | 113 | ||
110 | static char* cp_name[] = { | 114 | static char* cp_name[] = { |
@@ -130,6 +134,9 @@ static char* cp_type[] = { | |||
130 | "UNALIGNED_LOAD_STORE_WRITE", | 134 | "UNALIGNED_LOAD_STORE_WRITE", |
131 | "OVERWRITE_ALLOCATION", | 135 | "OVERWRITE_ALLOCATION", |
132 | "WRITE_AFTER_FREE", | 136 | "WRITE_AFTER_FREE", |
137 | "READ_AFTER_FREE", | ||
138 | "WRITE_BUDDY_AFTER_FREE", | ||
139 | "READ_BUDDY_AFTER_FREE", | ||
133 | "SOFTLOCKUP", | 140 | "SOFTLOCKUP", |
134 | "HARDLOCKUP", | 141 | "HARDLOCKUP", |
135 | "SPINLOCKUP", | 142 | "SPINLOCKUP", |
@@ -143,6 +150,7 @@ static char* cp_type[] = { | |||
143 | "WRITE_RO", | 150 | "WRITE_RO", |
144 | "WRITE_RO_AFTER_INIT", | 151 | "WRITE_RO_AFTER_INIT", |
145 | "WRITE_KERN", | 152 | "WRITE_KERN", |
153 | "WRAP_ATOMIC" | ||
146 | }; | 154 | }; |
147 | 155 | ||
148 | static struct jprobe lkdtm; | 156 | static struct jprobe lkdtm; |
@@ -338,7 +346,7 @@ static noinline void corrupt_stack(void) | |||
338 | memset((void *)data, 0, 64); | 346 | memset((void *)data, 0, 64); |
339 | } | 347 | } |
340 | 348 | ||
341 | static void execute_location(void *dst) | 349 | static void noinline execute_location(void *dst) |
342 | { | 350 | { |
343 | void (*func)(void) = dst; | 351 | void (*func)(void) = dst; |
344 | 352 | ||
@@ -412,12 +420,109 @@ static void lkdtm_do_action(enum ctype which) | |||
412 | break; | 420 | break; |
413 | } | 421 | } |
414 | case CT_WRITE_AFTER_FREE: { | 422 | case CT_WRITE_AFTER_FREE: { |
423 | int *base, *again; | ||
415 | size_t len = 1024; | 424 | size_t len = 1024; |
416 | u32 *data = kmalloc(len, GFP_KERNEL); | 425 | /* |
426 | * The slub allocator uses the first word to store the free | ||
427 | * pointer in some configurations. Use the middle of the | ||
428 | * allocation to avoid running into the freelist | ||
429 | */ | ||
430 | size_t offset = (len / sizeof(*base)) / 2; | ||
431 | |||
432 | base = kmalloc(len, GFP_KERNEL); | ||
433 | pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]); | ||
434 | pr_info("Attempting bad write to freed memory at %p\n", | ||
435 | &base[offset]); | ||
436 | kfree(base); | ||
437 | base[offset] = 0x0abcdef0; | ||
438 | /* Attempt to notice the overwrite. */ | ||
439 | again = kmalloc(len, GFP_KERNEL); | ||
440 | kfree(again); | ||
441 | if (again != base) | ||
442 | pr_info("Hmm, didn't get the same memory range.\n"); | ||
417 | 443 | ||
418 | kfree(data); | 444 | break; |
445 | } | ||
446 | case CT_READ_AFTER_FREE: { | ||
447 | int *base, *val, saw; | ||
448 | size_t len = 1024; | ||
449 | /* | ||
450 | * The slub allocator uses the first word to store the free | ||
451 | * pointer in some configurations. Use the middle of the | ||
452 | * allocation to avoid running into the freelist | ||
453 | */ | ||
454 | size_t offset = (len / sizeof(*base)) / 2; | ||
455 | |||
456 | base = kmalloc(len, GFP_KERNEL); | ||
457 | if (!base) | ||
458 | break; | ||
459 | |||
460 | val = kmalloc(len, GFP_KERNEL); | ||
461 | if (!val) | ||
462 | break; | ||
463 | |||
464 | *val = 0x12345678; | ||
465 | base[offset] = *val; | ||
466 | pr_info("Value in memory before free: %x\n", base[offset]); | ||
467 | |||
468 | kfree(base); | ||
469 | |||
470 | pr_info("Attempting bad read from freed memory\n"); | ||
471 | saw = base[offset]; | ||
472 | if (saw != *val) { | ||
473 | /* Good! Poisoning happened, so declare a win. */ | ||
474 | pr_info("Memory correctly poisoned (%x)\n", saw); | ||
475 | BUG(); | ||
476 | } | ||
477 | pr_info("Memory was not poisoned\n"); | ||
478 | |||
479 | kfree(val); | ||
480 | break; | ||
481 | } | ||
482 | case CT_WRITE_BUDDY_AFTER_FREE: { | ||
483 | unsigned long p = __get_free_page(GFP_KERNEL); | ||
484 | if (!p) | ||
485 | break; | ||
486 | pr_info("Writing to the buddy page before free\n"); | ||
487 | memset((void *)p, 0x3, PAGE_SIZE); | ||
488 | free_page(p); | ||
419 | schedule(); | 489 | schedule(); |
420 | memset(data, 0x78, len); | 490 | pr_info("Attempting bad write to the buddy page after free\n"); |
491 | memset((void *)p, 0x78, PAGE_SIZE); | ||
492 | /* Attempt to notice the overwrite. */ | ||
493 | p = __get_free_page(GFP_KERNEL); | ||
494 | free_page(p); | ||
495 | schedule(); | ||
496 | |||
497 | break; | ||
498 | } | ||
499 | case CT_READ_BUDDY_AFTER_FREE: { | ||
500 | unsigned long p = __get_free_page(GFP_KERNEL); | ||
501 | int saw, *val = kmalloc(1024, GFP_KERNEL); | ||
502 | int *base; | ||
503 | |||
504 | if (!p) | ||
505 | break; | ||
506 | |||
507 | if (!val) | ||
508 | break; | ||
509 | |||
510 | base = (int *)p; | ||
511 | |||
512 | *val = 0x12345678; | ||
513 | base[0] = *val; | ||
514 | pr_info("Value in memory before free: %x\n", base[0]); | ||
515 | free_page(p); | ||
516 | pr_info("Attempting to read from freed memory\n"); | ||
517 | saw = base[0]; | ||
518 | if (saw != *val) { | ||
519 | /* Good! Poisoning happened, so declare a win. */ | ||
520 | pr_info("Memory correctly poisoned (%x)\n", saw); | ||
521 | BUG(); | ||
522 | } | ||
523 | pr_info("Buddy page was not poisoned\n"); | ||
524 | |||
525 | kfree(val); | ||
421 | break; | 526 | break; |
422 | } | 527 | } |
423 | case CT_SOFTLOCKUP: | 528 | case CT_SOFTLOCKUP: |
@@ -548,6 +653,17 @@ static void lkdtm_do_action(enum ctype which) | |||
548 | do_overwritten(); | 653 | do_overwritten(); |
549 | break; | 654 | break; |
550 | } | 655 | } |
656 | case CT_WRAP_ATOMIC: { | ||
657 | atomic_t under = ATOMIC_INIT(INT_MIN); | ||
658 | atomic_t over = ATOMIC_INIT(INT_MAX); | ||
659 | |||
660 | pr_info("attempting atomic underflow\n"); | ||
661 | atomic_dec(&under); | ||
662 | pr_info("attempting atomic overflow\n"); | ||
663 | atomic_inc(&over); | ||
664 | |||
665 | return; | ||
666 | } | ||
551 | case CT_NONE: | 667 | case CT_NONE: |
552 | default: | 668 | default: |
553 | break; | 669 | break; |
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index d23384dde73b..c49e1d2269af 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config INTEL_MEI | 1 | config INTEL_MEI |
2 | tristate "Intel Management Engine Interface" | 2 | tristate "Intel Management Engine Interface" |
3 | depends on X86 && PCI && WATCHDOG_CORE | 3 | depends on X86 && PCI |
4 | help | 4 | help |
5 | The Intel Management Engine (Intel ME) provides Manageability, | 5 | The Intel Management Engine (Intel ME) provides Manageability, |
6 | Security and Media services for system containing Intel chipsets. | 6 | Security and Media services for system containing Intel chipsets. |
@@ -12,7 +12,7 @@ config INTEL_MEI | |||
12 | config INTEL_MEI_ME | 12 | config INTEL_MEI_ME |
13 | tristate "ME Enabled Intel Chipsets" | 13 | tristate "ME Enabled Intel Chipsets" |
14 | select INTEL_MEI | 14 | select INTEL_MEI |
15 | depends on X86 && PCI && WATCHDOG_CORE | 15 | depends on X86 && PCI |
16 | help | 16 | help |
17 | MEI support for ME Enabled Intel chipsets. | 17 | MEI support for ME Enabled Intel chipsets. |
18 | 18 | ||
@@ -37,7 +37,7 @@ config INTEL_MEI_ME | |||
37 | config INTEL_MEI_TXE | 37 | config INTEL_MEI_TXE |
38 | tristate "Intel Trusted Execution Environment with ME Interface" | 38 | tristate "Intel Trusted Execution Environment with ME Interface" |
39 | select INTEL_MEI | 39 | select INTEL_MEI |
40 | depends on X86 && PCI && WATCHDOG_CORE | 40 | depends on X86 && PCI |
41 | help | 41 | help |
42 | MEI Support for Trusted Execution Environment device on Intel SoCs | 42 | MEI Support for Trusted Execution Environment device on Intel SoCs |
43 | 43 | ||
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 01447ca21c26..59e6b0aede34 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile | |||
@@ -9,7 +9,6 @@ mei-objs += interrupt.o | |||
9 | mei-objs += client.o | 9 | mei-objs += client.o |
10 | mei-objs += main.o | 10 | mei-objs += main.o |
11 | mei-objs += amthif.o | 11 | mei-objs += amthif.o |
12 | mei-objs += wd.o | ||
13 | mei-objs += bus.o | 12 | mei-objs += bus.o |
14 | mei-objs += bus-fixup.o | 13 | mei-objs += bus-fixup.o |
15 | mei-$(CONFIG_DEBUG_FS) += debugfs.o | 14 | mei-$(CONFIG_DEBUG_FS) += debugfs.o |
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index cd0403f09267..194360a5f782 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c | |||
@@ -50,7 +50,6 @@ void mei_amthif_reset_params(struct mei_device *dev) | |||
50 | dev->iamthif_current_cb = NULL; | 50 | dev->iamthif_current_cb = NULL; |
51 | dev->iamthif_canceled = false; | 51 | dev->iamthif_canceled = false; |
52 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 52 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
53 | dev->iamthif_timer = 0; | ||
54 | dev->iamthif_stall_timer = 0; | 53 | dev->iamthif_stall_timer = 0; |
55 | dev->iamthif_open_count = 0; | 54 | dev->iamthif_open_count = 0; |
56 | } | 55 | } |
@@ -68,11 +67,14 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl) | |||
68 | struct mei_cl *cl = &dev->iamthif_cl; | 67 | struct mei_cl *cl = &dev->iamthif_cl; |
69 | int ret; | 68 | int ret; |
70 | 69 | ||
70 | if (mei_cl_is_connected(cl)) | ||
71 | return 0; | ||
72 | |||
71 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 73 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
72 | 74 | ||
73 | mei_cl_init(cl, dev); | 75 | mei_cl_init(cl, dev); |
74 | 76 | ||
75 | ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); | 77 | ret = mei_cl_link(cl); |
76 | if (ret < 0) { | 78 | if (ret < 0) { |
77 | dev_err(dev->dev, "amthif: failed cl_link %d\n", ret); | 79 | dev_err(dev->dev, "amthif: failed cl_link %d\n", ret); |
78 | return ret; | 80 | return ret; |
@@ -80,32 +82,10 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl) | |||
80 | 82 | ||
81 | ret = mei_cl_connect(cl, me_cl, NULL); | 83 | ret = mei_cl_connect(cl, me_cl, NULL); |
82 | 84 | ||
83 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | ||
84 | |||
85 | return ret; | 85 | return ret; |
86 | } | 86 | } |
87 | 87 | ||
88 | /** | 88 | /** |
89 | * mei_amthif_find_read_list_entry - finds a amthilist entry for current file | ||
90 | * | ||
91 | * @dev: the device structure | ||
92 | * @file: pointer to file object | ||
93 | * | ||
94 | * Return: returned a list entry on success, NULL on failure. | ||
95 | */ | ||
96 | struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, | ||
97 | struct file *file) | ||
98 | { | ||
99 | struct mei_cl_cb *cb; | ||
100 | |||
101 | list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) | ||
102 | if (cb->file_object == file) | ||
103 | return cb; | ||
104 | return NULL; | ||
105 | } | ||
106 | |||
107 | |||
108 | /** | ||
109 | * mei_amthif_read - read data from AMTHIF client | 89 | * mei_amthif_read - read data from AMTHIF client |
110 | * | 90 | * |
111 | * @dev: the device structure | 91 | * @dev: the device structure |
@@ -126,18 +106,11 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
126 | { | 106 | { |
127 | struct mei_cl *cl = file->private_data; | 107 | struct mei_cl *cl = file->private_data; |
128 | struct mei_cl_cb *cb; | 108 | struct mei_cl_cb *cb; |
129 | unsigned long timeout; | ||
130 | int rets; | 109 | int rets; |
131 | int wait_ret; | 110 | int wait_ret; |
132 | 111 | ||
133 | /* Only possible if we are in timeout */ | ||
134 | if (!cl) { | ||
135 | dev_err(dev->dev, "bad file ext.\n"); | ||
136 | return -ETIME; | ||
137 | } | ||
138 | |||
139 | dev_dbg(dev->dev, "checking amthif data\n"); | 112 | dev_dbg(dev->dev, "checking amthif data\n"); |
140 | cb = mei_amthif_find_read_list_entry(dev, file); | 113 | cb = mei_cl_read_cb(cl, file); |
141 | 114 | ||
142 | /* Check for if we can block or not*/ | 115 | /* Check for if we can block or not*/ |
143 | if (cb == NULL && file->f_flags & O_NONBLOCK) | 116 | if (cb == NULL && file->f_flags & O_NONBLOCK) |
@@ -149,8 +122,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
149 | /* unlock the Mutex */ | 122 | /* unlock the Mutex */ |
150 | mutex_unlock(&dev->device_lock); | 123 | mutex_unlock(&dev->device_lock); |
151 | 124 | ||
152 | wait_ret = wait_event_interruptible(dev->iamthif_cl.wait, | 125 | wait_ret = wait_event_interruptible(cl->rx_wait, |
153 | (cb = mei_amthif_find_read_list_entry(dev, file))); | 126 | !list_empty(&cl->rd_completed) || |
127 | !mei_cl_is_connected(cl)); | ||
154 | 128 | ||
155 | /* Locking again the Mutex */ | 129 | /* Locking again the Mutex */ |
156 | mutex_lock(&dev->device_lock); | 130 | mutex_lock(&dev->device_lock); |
@@ -158,7 +132,12 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
158 | if (wait_ret) | 132 | if (wait_ret) |
159 | return -ERESTARTSYS; | 133 | return -ERESTARTSYS; |
160 | 134 | ||
161 | dev_dbg(dev->dev, "woke up from sleep\n"); | 135 | if (!mei_cl_is_connected(cl)) { |
136 | rets = -EBUSY; | ||
137 | goto out; | ||
138 | } | ||
139 | |||
140 | cb = mei_cl_read_cb(cl, file); | ||
162 | } | 141 | } |
163 | 142 | ||
164 | if (cb->status) { | 143 | if (cb->status) { |
@@ -168,24 +147,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
168 | } | 147 | } |
169 | 148 | ||
170 | dev_dbg(dev->dev, "Got amthif data\n"); | 149 | dev_dbg(dev->dev, "Got amthif data\n"); |
171 | dev->iamthif_timer = 0; | ||
172 | |||
173 | timeout = cb->read_time + | ||
174 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); | ||
175 | dev_dbg(dev->dev, "amthif timeout = %lud\n", | ||
176 | timeout); | ||
177 | |||
178 | if (time_after(jiffies, timeout)) { | ||
179 | dev_dbg(dev->dev, "amthif Time out\n"); | ||
180 | /* 15 sec for the message has expired */ | ||
181 | list_del_init(&cb->list); | ||
182 | rets = -ETIME; | ||
183 | goto free; | ||
184 | } | ||
185 | /* if the whole message will fit remove it from the list */ | 150 | /* if the whole message will fit remove it from the list */ |
186 | if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset)) | 151 | if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset)) |
187 | list_del_init(&cb->list); | 152 | list_del_init(&cb->list); |
188 | else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { | 153 | else if (cb->buf_idx <= *offset) { |
189 | /* end of the message has been reached */ | 154 | /* end of the message has been reached */ |
190 | list_del_init(&cb->list); | 155 | list_del_init(&cb->list); |
191 | rets = 0; | 156 | rets = 0; |
@@ -195,9 +160,8 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
195 | * remove message from deletion list | 160 | * remove message from deletion list |
196 | */ | 161 | */ |
197 | 162 | ||
198 | dev_dbg(dev->dev, "amthif cb->buf size - %d\n", | 163 | dev_dbg(dev->dev, "amthif cb->buf.size - %zu cb->buf_idx - %zu\n", |
199 | cb->buf.size); | 164 | cb->buf.size, cb->buf_idx); |
200 | dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); | ||
201 | 165 | ||
202 | /* length is being truncated to PAGE_SIZE, however, | 166 | /* length is being truncated to PAGE_SIZE, however, |
203 | * the buf_idx may point beyond */ | 167 | * the buf_idx may point beyond */ |
@@ -229,7 +193,7 @@ out: | |||
229 | * | 193 | * |
230 | * Return: 0 on success, <0 on failure. | 194 | * Return: 0 on success, <0 on failure. |
231 | */ | 195 | */ |
232 | static int mei_amthif_read_start(struct mei_cl *cl, struct file *file) | 196 | static int mei_amthif_read_start(struct mei_cl *cl, const struct file *file) |
233 | { | 197 | { |
234 | struct mei_device *dev = cl->dev; | 198 | struct mei_device *dev = cl->dev; |
235 | struct mei_cl_cb *cb; | 199 | struct mei_cl_cb *cb; |
@@ -248,7 +212,7 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file) | |||
248 | list_add_tail(&cb->list, &dev->ctrl_wr_list.list); | 212 | list_add_tail(&cb->list, &dev->ctrl_wr_list.list); |
249 | 213 | ||
250 | dev->iamthif_state = MEI_IAMTHIF_READING; | 214 | dev->iamthif_state = MEI_IAMTHIF_READING; |
251 | dev->iamthif_file_object = cb->file_object; | 215 | dev->iamthif_fp = cb->fp; |
252 | dev->iamthif_current_cb = cb; | 216 | dev->iamthif_current_cb = cb; |
253 | 217 | ||
254 | return 0; | 218 | return 0; |
@@ -277,7 +241,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb) | |||
277 | 241 | ||
278 | dev->iamthif_state = MEI_IAMTHIF_WRITING; | 242 | dev->iamthif_state = MEI_IAMTHIF_WRITING; |
279 | dev->iamthif_current_cb = cb; | 243 | dev->iamthif_current_cb = cb; |
280 | dev->iamthif_file_object = cb->file_object; | 244 | dev->iamthif_fp = cb->fp; |
281 | dev->iamthif_canceled = false; | 245 | dev->iamthif_canceled = false; |
282 | 246 | ||
283 | ret = mei_cl_write(cl, cb, false); | 247 | ret = mei_cl_write(cl, cb, false); |
@@ -285,7 +249,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb) | |||
285 | return ret; | 249 | return ret; |
286 | 250 | ||
287 | if (cb->completed) | 251 | if (cb->completed) |
288 | cb->status = mei_amthif_read_start(cl, cb->file_object); | 252 | cb->status = mei_amthif_read_start(cl, cb->fp); |
289 | 253 | ||
290 | return 0; | 254 | return 0; |
291 | } | 255 | } |
@@ -304,8 +268,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev) | |||
304 | 268 | ||
305 | dev->iamthif_canceled = false; | 269 | dev->iamthif_canceled = false; |
306 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 270 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
307 | dev->iamthif_timer = 0; | 271 | dev->iamthif_fp = NULL; |
308 | dev->iamthif_file_object = NULL; | ||
309 | 272 | ||
310 | dev_dbg(dev->dev, "complete amthif cmd_list cb.\n"); | 273 | dev_dbg(dev->dev, "complete amthif cmd_list cb.\n"); |
311 | 274 | ||
@@ -329,17 +292,17 @@ int mei_amthif_run_next_cmd(struct mei_device *dev) | |||
329 | int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb) | 292 | int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb) |
330 | { | 293 | { |
331 | 294 | ||
332 | struct mei_device *dev; | 295 | struct mei_device *dev = cl->dev; |
333 | |||
334 | if (WARN_ON(!cl || !cl->dev)) | ||
335 | return -ENODEV; | ||
336 | 296 | ||
337 | if (WARN_ON(!cb)) | 297 | list_add_tail(&cb->list, &dev->amthif_cmd_list.list); |
338 | return -EINVAL; | ||
339 | 298 | ||
340 | dev = cl->dev; | 299 | /* |
300 | * The previous request is still in processing, queue this one. | ||
301 | */ | ||
302 | if (dev->iamthif_state > MEI_IAMTHIF_IDLE && | ||
303 | dev->iamthif_state < MEI_IAMTHIF_READ_COMPLETE) | ||
304 | return 0; | ||
341 | 305 | ||
342 | list_add_tail(&cb->list, &dev->amthif_cmd_list.list); | ||
343 | return mei_amthif_run_next_cmd(dev); | 306 | return mei_amthif_run_next_cmd(dev); |
344 | } | 307 | } |
345 | 308 | ||
@@ -360,10 +323,10 @@ unsigned int mei_amthif_poll(struct mei_device *dev, | |||
360 | { | 323 | { |
361 | unsigned int mask = 0; | 324 | unsigned int mask = 0; |
362 | 325 | ||
363 | poll_wait(file, &dev->iamthif_cl.wait, wait); | 326 | poll_wait(file, &dev->iamthif_cl.rx_wait, wait); |
364 | 327 | ||
365 | if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE && | 328 | if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE && |
366 | dev->iamthif_file_object == file) { | 329 | dev->iamthif_fp == file) { |
367 | 330 | ||
368 | mask |= POLLIN | POLLRDNORM; | 331 | mask |= POLLIN | POLLRDNORM; |
369 | mei_amthif_run_next_cmd(dev); | 332 | mei_amthif_run_next_cmd(dev); |
@@ -393,7 +356,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
393 | return ret; | 356 | return ret; |
394 | 357 | ||
395 | if (cb->completed) | 358 | if (cb->completed) |
396 | cb->status = mei_amthif_read_start(cl, cb->file_object); | 359 | cb->status = mei_amthif_read_start(cl, cb->fp); |
397 | 360 | ||
398 | return 0; | 361 | return 0; |
399 | } | 362 | } |
@@ -437,11 +400,12 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl, | |||
437 | /** | 400 | /** |
438 | * mei_amthif_complete - complete amthif callback. | 401 | * mei_amthif_complete - complete amthif callback. |
439 | * | 402 | * |
440 | * @dev: the device structure. | 403 | * @cl: host client |
441 | * @cb: callback block. | 404 | * @cb: callback block. |
442 | */ | 405 | */ |
443 | void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) | 406 | void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb) |
444 | { | 407 | { |
408 | struct mei_device *dev = cl->dev; | ||
445 | 409 | ||
446 | if (cb->fop_type == MEI_FOP_WRITE) { | 410 | if (cb->fop_type == MEI_FOP_WRITE) { |
447 | if (!cb->status) { | 411 | if (!cb->status) { |
@@ -453,25 +417,22 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) | |||
453 | * in case of error enqueue the write cb to complete read list | 417 | * in case of error enqueue the write cb to complete read list |
454 | * so it can be propagated to the reader | 418 | * so it can be propagated to the reader |
455 | */ | 419 | */ |
456 | list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); | 420 | list_add_tail(&cb->list, &cl->rd_completed); |
457 | wake_up_interruptible(&dev->iamthif_cl.wait); | 421 | wake_up_interruptible(&cl->rx_wait); |
458 | return; | 422 | return; |
459 | } | 423 | } |
460 | 424 | ||
461 | if (!dev->iamthif_canceled) { | 425 | if (!dev->iamthif_canceled) { |
462 | dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE; | 426 | dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE; |
463 | dev->iamthif_stall_timer = 0; | 427 | dev->iamthif_stall_timer = 0; |
464 | list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); | 428 | list_add_tail(&cb->list, &cl->rd_completed); |
465 | dev_dbg(dev->dev, "amthif read completed\n"); | 429 | dev_dbg(dev->dev, "amthif read completed\n"); |
466 | dev->iamthif_timer = jiffies; | ||
467 | dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n", | ||
468 | dev->iamthif_timer); | ||
469 | } else { | 430 | } else { |
470 | mei_amthif_run_next_cmd(dev); | 431 | mei_amthif_run_next_cmd(dev); |
471 | } | 432 | } |
472 | 433 | ||
473 | dev_dbg(dev->dev, "completing amthif call back.\n"); | 434 | dev_dbg(dev->dev, "completing amthif call back.\n"); |
474 | wake_up_interruptible(&dev->iamthif_cl.wait); | 435 | wake_up_interruptible(&cl->rx_wait); |
475 | } | 436 | } |
476 | 437 | ||
477 | /** | 438 | /** |
@@ -497,7 +458,7 @@ static bool mei_clear_list(struct mei_device *dev, | |||
497 | /* list all list member */ | 458 | /* list all list member */ |
498 | list_for_each_entry_safe(cb, next, mei_cb_list, list) { | 459 | list_for_each_entry_safe(cb, next, mei_cb_list, list) { |
499 | /* check if list member associated with a file */ | 460 | /* check if list member associated with a file */ |
500 | if (file == cb->file_object) { | 461 | if (file == cb->fp) { |
501 | /* check if cb equal to current iamthif cb */ | 462 | /* check if cb equal to current iamthif cb */ |
502 | if (dev->iamthif_current_cb == cb) { | 463 | if (dev->iamthif_current_cb == cb) { |
503 | dev->iamthif_current_cb = NULL; | 464 | dev->iamthif_current_cb = NULL; |
@@ -523,13 +484,14 @@ static bool mei_clear_list(struct mei_device *dev, | |||
523 | * | 484 | * |
524 | * Return: true if callback removed from the list, false otherwise | 485 | * Return: true if callback removed from the list, false otherwise |
525 | */ | 486 | */ |
526 | static bool mei_clear_lists(struct mei_device *dev, struct file *file) | 487 | static bool mei_clear_lists(struct mei_device *dev, const struct file *file) |
527 | { | 488 | { |
528 | bool removed = false; | 489 | bool removed = false; |
490 | struct mei_cl *cl = &dev->iamthif_cl; | ||
529 | 491 | ||
530 | /* remove callbacks associated with a file */ | 492 | /* remove callbacks associated with a file */ |
531 | mei_clear_list(dev, file, &dev->amthif_cmd_list.list); | 493 | mei_clear_list(dev, file, &dev->amthif_cmd_list.list); |
532 | if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list)) | 494 | if (mei_clear_list(dev, file, &cl->rd_completed)) |
533 | removed = true; | 495 | removed = true; |
534 | 496 | ||
535 | mei_clear_list(dev, file, &dev->ctrl_rd_list.list); | 497 | mei_clear_list(dev, file, &dev->ctrl_rd_list.list); |
@@ -546,7 +508,7 @@ static bool mei_clear_lists(struct mei_device *dev, struct file *file) | |||
546 | /* check if iamthif_current_cb not NULL */ | 508 | /* check if iamthif_current_cb not NULL */ |
547 | if (dev->iamthif_current_cb && !removed) { | 509 | if (dev->iamthif_current_cb && !removed) { |
548 | /* check file and iamthif current cb association */ | 510 | /* check file and iamthif current cb association */ |
549 | if (dev->iamthif_current_cb->file_object == file) { | 511 | if (dev->iamthif_current_cb->fp == file) { |
550 | /* remove cb */ | 512 | /* remove cb */ |
551 | mei_io_cb_free(dev->iamthif_current_cb); | 513 | mei_io_cb_free(dev->iamthif_current_cb); |
552 | dev->iamthif_current_cb = NULL; | 514 | dev->iamthif_current_cb = NULL; |
@@ -569,7 +531,7 @@ int mei_amthif_release(struct mei_device *dev, struct file *file) | |||
569 | if (dev->iamthif_open_count > 0) | 531 | if (dev->iamthif_open_count > 0) |
570 | dev->iamthif_open_count--; | 532 | dev->iamthif_open_count--; |
571 | 533 | ||
572 | if (dev->iamthif_file_object == file && | 534 | if (dev->iamthif_fp == file && |
573 | dev->iamthif_state != MEI_IAMTHIF_IDLE) { | 535 | dev->iamthif_state != MEI_IAMTHIF_IDLE) { |
574 | 536 | ||
575 | dev_dbg(dev->dev, "amthif canceled iamthif state %d\n", | 537 | dev_dbg(dev->dev, "amthif canceled iamthif state %d\n", |
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 020de5919c21..e9e6ea3ab73c 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c | |||
@@ -35,6 +35,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO; | |||
35 | #define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \ | 35 | #define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \ |
36 | 0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) | 36 | 0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) |
37 | 37 | ||
38 | #define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \ | ||
39 | 0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB) | ||
40 | |||
38 | #define MEI_UUID_ANY NULL_UUID_LE | 41 | #define MEI_UUID_ANY NULL_UUID_LE |
39 | 42 | ||
40 | /** | 43 | /** |
@@ -48,8 +51,7 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO; | |||
48 | */ | 51 | */ |
49 | static void number_of_connections(struct mei_cl_device *cldev) | 52 | static void number_of_connections(struct mei_cl_device *cldev) |
50 | { | 53 | { |
51 | dev_dbg(&cldev->dev, "running hook %s on %pUl\n", | 54 | dev_dbg(&cldev->dev, "running hook %s\n", __func__); |
52 | __func__, mei_me_cl_uuid(cldev->me_cl)); | ||
53 | 55 | ||
54 | if (cldev->me_cl->props.max_number_of_connections > 1) | 56 | if (cldev->me_cl->props.max_number_of_connections > 1) |
55 | cldev->do_match = 0; | 57 | cldev->do_match = 0; |
@@ -62,11 +64,36 @@ static void number_of_connections(struct mei_cl_device *cldev) | |||
62 | */ | 64 | */ |
63 | static void blacklist(struct mei_cl_device *cldev) | 65 | static void blacklist(struct mei_cl_device *cldev) |
64 | { | 66 | { |
65 | dev_dbg(&cldev->dev, "running hook %s on %pUl\n", | 67 | dev_dbg(&cldev->dev, "running hook %s\n", __func__); |
66 | __func__, mei_me_cl_uuid(cldev->me_cl)); | 68 | |
67 | cldev->do_match = 0; | 69 | cldev->do_match = 0; |
68 | } | 70 | } |
69 | 71 | ||
72 | /** | ||
73 | * mei_wd - wd client on the bus, change protocol version | ||
74 | * as the API has changed. | ||
75 | * | ||
76 | * @cldev: me clients device | ||
77 | */ | ||
78 | #if IS_ENABLED(CONFIG_INTEL_MEI_ME) | ||
79 | #include <linux/pci.h> | ||
80 | #include "hw-me-regs.h" | ||
81 | static void mei_wd(struct mei_cl_device *cldev) | ||
82 | { | ||
83 | struct pci_dev *pdev = to_pci_dev(cldev->dev.parent); | ||
84 | |||
85 | dev_dbg(&cldev->dev, "running hook %s\n", __func__); | ||
86 | if (pdev->device == MEI_DEV_ID_WPT_LP || | ||
87 | pdev->device == MEI_DEV_ID_SPT || | ||
88 | pdev->device == MEI_DEV_ID_SPT_H) | ||
89 | cldev->me_cl->props.protocol_version = 0x2; | ||
90 | |||
91 | cldev->do_match = 1; | ||
92 | } | ||
93 | #else | ||
94 | static inline void mei_wd(struct mei_cl_device *cldev) {} | ||
95 | #endif /* CONFIG_INTEL_MEI_ME */ | ||
96 | |||
70 | struct mei_nfc_cmd { | 97 | struct mei_nfc_cmd { |
71 | u8 command; | 98 | u8 command; |
72 | u8 status; | 99 | u8 status; |
@@ -208,12 +235,11 @@ static void mei_nfc(struct mei_cl_device *cldev) | |||
208 | 235 | ||
209 | bus = cldev->bus; | 236 | bus = cldev->bus; |
210 | 237 | ||
211 | dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n", | 238 | dev_dbg(&cldev->dev, "running hook %s\n", __func__); |
212 | __func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match); | ||
213 | 239 | ||
214 | mutex_lock(&bus->device_lock); | 240 | mutex_lock(&bus->device_lock); |
215 | /* we need to connect to INFO GUID */ | 241 | /* we need to connect to INFO GUID */ |
216 | cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY); | 242 | cl = mei_cl_alloc_linked(bus); |
217 | if (IS_ERR(cl)) { | 243 | if (IS_ERR(cl)) { |
218 | ret = PTR_ERR(cl); | 244 | ret = PTR_ERR(cl); |
219 | cl = NULL; | 245 | cl = NULL; |
@@ -282,6 +308,7 @@ static struct mei_fixup { | |||
282 | MEI_FIXUP(MEI_UUID_ANY, number_of_connections), | 308 | MEI_FIXUP(MEI_UUID_ANY, number_of_connections), |
283 | MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist), | 309 | MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist), |
284 | MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), | 310 | MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), |
311 | MEI_FIXUP(MEI_UUID_WD, mei_wd), | ||
285 | }; | 312 | }; |
286 | 313 | ||
287 | /** | 314 | /** |
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 0b05aa938799..5d5996e39a67 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
@@ -44,7 +44,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | |||
44 | bool blocking) | 44 | bool blocking) |
45 | { | 45 | { |
46 | struct mei_device *bus; | 46 | struct mei_device *bus; |
47 | struct mei_cl_cb *cb = NULL; | 47 | struct mei_cl_cb *cb; |
48 | ssize_t rets; | 48 | ssize_t rets; |
49 | 49 | ||
50 | if (WARN_ON(!cl || !cl->dev)) | 50 | if (WARN_ON(!cl || !cl->dev)) |
@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | |||
53 | bus = cl->dev; | 53 | bus = cl->dev; |
54 | 54 | ||
55 | mutex_lock(&bus->device_lock); | 55 | mutex_lock(&bus->device_lock); |
56 | if (bus->dev_state != MEI_DEV_ENABLED) { | ||
57 | rets = -ENODEV; | ||
58 | goto out; | ||
59 | } | ||
60 | |||
56 | if (!mei_cl_is_connected(cl)) { | 61 | if (!mei_cl_is_connected(cl)) { |
57 | rets = -ENODEV; | 62 | rets = -ENODEV; |
58 | goto out; | 63 | goto out; |
@@ -81,8 +86,6 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | |||
81 | 86 | ||
82 | out: | 87 | out: |
83 | mutex_unlock(&bus->device_lock); | 88 | mutex_unlock(&bus->device_lock); |
84 | if (rets < 0) | ||
85 | mei_io_cb_free(cb); | ||
86 | 89 | ||
87 | return rets; | 90 | return rets; |
88 | } | 91 | } |
@@ -109,6 +112,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) | |||
109 | bus = cl->dev; | 112 | bus = cl->dev; |
110 | 113 | ||
111 | mutex_lock(&bus->device_lock); | 114 | mutex_lock(&bus->device_lock); |
115 | if (bus->dev_state != MEI_DEV_ENABLED) { | ||
116 | rets = -ENODEV; | ||
117 | goto out; | ||
118 | } | ||
112 | 119 | ||
113 | cb = mei_cl_read_cb(cl, NULL); | 120 | cb = mei_cl_read_cb(cl, NULL); |
114 | if (cb) | 121 | if (cb) |
@@ -230,45 +237,55 @@ static void mei_cl_bus_event_work(struct work_struct *work) | |||
230 | * mei_cl_bus_notify_event - schedule notify cb on bus client | 237 | * mei_cl_bus_notify_event - schedule notify cb on bus client |
231 | * | 238 | * |
232 | * @cl: host client | 239 | * @cl: host client |
240 | * | ||
241 | * Return: true if event was scheduled | ||
242 | * false if the client is not waiting for event | ||
233 | */ | 243 | */ |
234 | void mei_cl_bus_notify_event(struct mei_cl *cl) | 244 | bool mei_cl_bus_notify_event(struct mei_cl *cl) |
235 | { | 245 | { |
236 | struct mei_cl_device *cldev = cl->cldev; | 246 | struct mei_cl_device *cldev = cl->cldev; |
237 | 247 | ||
238 | if (!cldev || !cldev->event_cb) | 248 | if (!cldev || !cldev->event_cb) |
239 | return; | 249 | return false; |
240 | 250 | ||
241 | if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF))) | 251 | if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF))) |
242 | return; | 252 | return false; |
243 | 253 | ||
244 | if (!cl->notify_ev) | 254 | if (!cl->notify_ev) |
245 | return; | 255 | return false; |
246 | 256 | ||
247 | set_bit(MEI_CL_EVENT_NOTIF, &cldev->events); | 257 | set_bit(MEI_CL_EVENT_NOTIF, &cldev->events); |
248 | 258 | ||
249 | schedule_work(&cldev->event_work); | 259 | schedule_work(&cldev->event_work); |
250 | 260 | ||
251 | cl->notify_ev = false; | 261 | cl->notify_ev = false; |
262 | |||
263 | return true; | ||
252 | } | 264 | } |
253 | 265 | ||
254 | /** | 266 | /** |
255 | * mei_cl_bus_rx_event - schedule rx evenet | 267 | * mei_cl_bus_rx_event - schedule rx event |
256 | * | 268 | * |
257 | * @cl: host client | 269 | * @cl: host client |
270 | * | ||
271 | * Return: true if event was scheduled | ||
272 | * false if the client is not waiting for event | ||
258 | */ | 273 | */ |
259 | void mei_cl_bus_rx_event(struct mei_cl *cl) | 274 | bool mei_cl_bus_rx_event(struct mei_cl *cl) |
260 | { | 275 | { |
261 | struct mei_cl_device *cldev = cl->cldev; | 276 | struct mei_cl_device *cldev = cl->cldev; |
262 | 277 | ||
263 | if (!cldev || !cldev->event_cb) | 278 | if (!cldev || !cldev->event_cb) |
264 | return; | 279 | return false; |
265 | 280 | ||
266 | if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX))) | 281 | if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX))) |
267 | return; | 282 | return false; |
268 | 283 | ||
269 | set_bit(MEI_CL_EVENT_RX, &cldev->events); | 284 | set_bit(MEI_CL_EVENT_RX, &cldev->events); |
270 | 285 | ||
271 | schedule_work(&cldev->event_work); | 286 | schedule_work(&cldev->event_work); |
287 | |||
288 | return true; | ||
272 | } | 289 | } |
273 | 290 | ||
274 | /** | 291 | /** |
@@ -398,7 +415,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev) | |||
398 | 415 | ||
399 | if (!cl) { | 416 | if (!cl) { |
400 | mutex_lock(&bus->device_lock); | 417 | mutex_lock(&bus->device_lock); |
401 | cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY); | 418 | cl = mei_cl_alloc_linked(bus); |
402 | mutex_unlock(&bus->device_lock); | 419 | mutex_unlock(&bus->device_lock); |
403 | if (IS_ERR(cl)) | 420 | if (IS_ERR(cl)) |
404 | return PTR_ERR(cl); | 421 | return PTR_ERR(cl); |
@@ -958,6 +975,22 @@ void mei_cl_bus_rescan(struct mei_device *bus) | |||
958 | dev_dbg(bus->dev, "rescan end"); | 975 | dev_dbg(bus->dev, "rescan end"); |
959 | } | 976 | } |
960 | 977 | ||
978 | void mei_cl_bus_rescan_work(struct work_struct *work) | ||
979 | { | ||
980 | struct mei_device *bus = | ||
981 | container_of(work, struct mei_device, bus_rescan_work); | ||
982 | struct mei_me_client *me_cl; | ||
983 | |||
984 | mutex_lock(&bus->device_lock); | ||
985 | me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid); | ||
986 | if (me_cl) | ||
987 | mei_amthif_host_init(bus, me_cl); | ||
988 | mei_me_cl_put(me_cl); | ||
989 | mutex_unlock(&bus->device_lock); | ||
990 | |||
991 | mei_cl_bus_rescan(bus); | ||
992 | } | ||
993 | |||
961 | int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, | 994 | int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, |
962 | struct module *owner) | 995 | struct module *owner) |
963 | { | 996 | { |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index a6c87c713193..bab17e4197b6 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
@@ -359,7 +359,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb) | |||
359 | * Return: mei_cl_cb pointer or NULL; | 359 | * Return: mei_cl_cb pointer or NULL; |
360 | */ | 360 | */ |
361 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, | 361 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, |
362 | struct file *fp) | 362 | const struct file *fp) |
363 | { | 363 | { |
364 | struct mei_cl_cb *cb; | 364 | struct mei_cl_cb *cb; |
365 | 365 | ||
@@ -368,7 +368,7 @@ struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, | |||
368 | return NULL; | 368 | return NULL; |
369 | 369 | ||
370 | INIT_LIST_HEAD(&cb->list); | 370 | INIT_LIST_HEAD(&cb->list); |
371 | cb->file_object = fp; | 371 | cb->fp = fp; |
372 | cb->cl = cl; | 372 | cb->cl = cl; |
373 | cb->buf_idx = 0; | 373 | cb->buf_idx = 0; |
374 | cb->fop_type = type; | 374 | cb->fop_type = type; |
@@ -455,7 +455,8 @@ int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length) | |||
455 | * Return: cb on success and NULL on failure | 455 | * Return: cb on success and NULL on failure |
456 | */ | 456 | */ |
457 | struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, | 457 | struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, |
458 | enum mei_cb_file_ops type, struct file *fp) | 458 | enum mei_cb_file_ops type, |
459 | const struct file *fp) | ||
459 | { | 460 | { |
460 | struct mei_cl_cb *cb; | 461 | struct mei_cl_cb *cb; |
461 | 462 | ||
@@ -485,7 +486,7 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp) | |||
485 | struct mei_cl_cb *cb; | 486 | struct mei_cl_cb *cb; |
486 | 487 | ||
487 | list_for_each_entry(cb, &cl->rd_completed, list) | 488 | list_for_each_entry(cb, &cl->rd_completed, list) |
488 | if (!fp || fp == cb->file_object) | 489 | if (!fp || fp == cb->fp) |
489 | return cb; | 490 | return cb; |
490 | 491 | ||
491 | return NULL; | 492 | return NULL; |
@@ -503,12 +504,12 @@ void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp) | |||
503 | struct mei_cl_cb *cb, *next; | 504 | struct mei_cl_cb *cb, *next; |
504 | 505 | ||
505 | list_for_each_entry_safe(cb, next, &cl->rd_completed, list) | 506 | list_for_each_entry_safe(cb, next, &cl->rd_completed, list) |
506 | if (!fp || fp == cb->file_object) | 507 | if (!fp || fp == cb->fp) |
507 | mei_io_cb_free(cb); | 508 | mei_io_cb_free(cb); |
508 | 509 | ||
509 | 510 | ||
510 | list_for_each_entry_safe(cb, next, &cl->rd_pending, list) | 511 | list_for_each_entry_safe(cb, next, &cl->rd_pending, list) |
511 | if (!fp || fp == cb->file_object) | 512 | if (!fp || fp == cb->fp) |
512 | mei_io_cb_free(cb); | 513 | mei_io_cb_free(cb); |
513 | } | 514 | } |
514 | 515 | ||
@@ -535,7 +536,6 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) | |||
535 | mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); | 536 | mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); |
536 | mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); | 537 | mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); |
537 | mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); | 538 | mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); |
538 | mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); | ||
539 | 539 | ||
540 | mei_cl_read_cb_flush(cl, fp); | 540 | mei_cl_read_cb_flush(cl, fp); |
541 | 541 | ||
@@ -587,27 +587,23 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev) | |||
587 | * mei_cl_link - allocate host id in the host map | 587 | * mei_cl_link - allocate host id in the host map |
588 | * | 588 | * |
589 | * @cl: host client | 589 | * @cl: host client |
590 | * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one | ||
591 | * | 590 | * |
592 | * Return: 0 on success | 591 | * Return: 0 on success |
593 | * -EINVAL on incorrect values | 592 | * -EINVAL on incorrect values |
594 | * -EMFILE if open count exceeded. | 593 | * -EMFILE if open count exceeded. |
595 | */ | 594 | */ |
596 | int mei_cl_link(struct mei_cl *cl, int id) | 595 | int mei_cl_link(struct mei_cl *cl) |
597 | { | 596 | { |
598 | struct mei_device *dev; | 597 | struct mei_device *dev; |
599 | long open_handle_count; | 598 | long open_handle_count; |
599 | int id; | ||
600 | 600 | ||
601 | if (WARN_ON(!cl || !cl->dev)) | 601 | if (WARN_ON(!cl || !cl->dev)) |
602 | return -EINVAL; | 602 | return -EINVAL; |
603 | 603 | ||
604 | dev = cl->dev; | 604 | dev = cl->dev; |
605 | 605 | ||
606 | /* If Id is not assigned get one*/ | 606 | id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); |
607 | if (id == MEI_HOST_CLIENT_ID_ANY) | ||
608 | id = find_first_zero_bit(dev->host_clients_map, | ||
609 | MEI_CLIENTS_MAX); | ||
610 | |||
611 | if (id >= MEI_CLIENTS_MAX) { | 607 | if (id >= MEI_CLIENTS_MAX) { |
612 | dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); | 608 | dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); |
613 | return -EMFILE; | 609 | return -EMFILE; |
@@ -648,7 +644,7 @@ int mei_cl_unlink(struct mei_cl *cl) | |||
648 | if (!cl) | 644 | if (!cl) |
649 | return 0; | 645 | return 0; |
650 | 646 | ||
651 | /* wd and amthif might not be initialized */ | 647 | /* amthif might not be initialized */ |
652 | if (!cl->dev) | 648 | if (!cl->dev) |
653 | return 0; | 649 | return 0; |
654 | 650 | ||
@@ -670,31 +666,12 @@ int mei_cl_unlink(struct mei_cl *cl) | |||
670 | return 0; | 666 | return 0; |
671 | } | 667 | } |
672 | 668 | ||
673 | 669 | void mei_host_client_init(struct mei_device *dev) | |
674 | void mei_host_client_init(struct work_struct *work) | ||
675 | { | 670 | { |
676 | struct mei_device *dev = | ||
677 | container_of(work, struct mei_device, init_work); | ||
678 | struct mei_me_client *me_cl; | ||
679 | |||
680 | mutex_lock(&dev->device_lock); | ||
681 | |||
682 | |||
683 | me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid); | ||
684 | if (me_cl) | ||
685 | mei_amthif_host_init(dev, me_cl); | ||
686 | mei_me_cl_put(me_cl); | ||
687 | |||
688 | me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid); | ||
689 | if (me_cl) | ||
690 | mei_wd_host_init(dev, me_cl); | ||
691 | mei_me_cl_put(me_cl); | ||
692 | |||
693 | dev->dev_state = MEI_DEV_ENABLED; | 671 | dev->dev_state = MEI_DEV_ENABLED; |
694 | dev->reset_count = 0; | 672 | dev->reset_count = 0; |
695 | mutex_unlock(&dev->device_lock); | ||
696 | 673 | ||
697 | mei_cl_bus_rescan(dev); | 674 | schedule_work(&dev->bus_rescan_work); |
698 | 675 | ||
699 | pm_runtime_mark_last_busy(dev->dev); | 676 | pm_runtime_mark_last_busy(dev->dev); |
700 | dev_dbg(dev->dev, "rpm: autosuspend\n"); | 677 | dev_dbg(dev->dev, "rpm: autosuspend\n"); |
@@ -726,6 +703,33 @@ bool mei_hbuf_acquire(struct mei_device *dev) | |||
726 | } | 703 | } |
727 | 704 | ||
728 | /** | 705 | /** |
706 | * mei_cl_wake_all - wake up readers, writers and event waiters so | ||
707 | * they can be interrupted | ||
708 | * | ||
709 | * @cl: host client | ||
710 | */ | ||
711 | static void mei_cl_wake_all(struct mei_cl *cl) | ||
712 | { | ||
713 | struct mei_device *dev = cl->dev; | ||
714 | |||
715 | /* synchronized under device mutex */ | ||
716 | if (waitqueue_active(&cl->rx_wait)) { | ||
717 | cl_dbg(dev, cl, "Waking up reading client!\n"); | ||
718 | wake_up_interruptible(&cl->rx_wait); | ||
719 | } | ||
720 | /* synchronized under device mutex */ | ||
721 | if (waitqueue_active(&cl->tx_wait)) { | ||
722 | cl_dbg(dev, cl, "Waking up writing client!\n"); | ||
723 | wake_up_interruptible(&cl->tx_wait); | ||
724 | } | ||
725 | /* synchronized under device mutex */ | ||
726 | if (waitqueue_active(&cl->ev_wait)) { | ||
727 | cl_dbg(dev, cl, "Waking up waiting for event clients!\n"); | ||
728 | wake_up_interruptible(&cl->ev_wait); | ||
729 | } | ||
730 | } | ||
731 | |||
732 | /** | ||
729 | * mei_cl_set_disconnected - set disconnected state and clear | 733 | * mei_cl_set_disconnected - set disconnected state and clear |
730 | * associated states and resources | 734 | * associated states and resources |
731 | * | 735 | * |
@@ -740,8 +744,11 @@ void mei_cl_set_disconnected(struct mei_cl *cl) | |||
740 | return; | 744 | return; |
741 | 745 | ||
742 | cl->state = MEI_FILE_DISCONNECTED; | 746 | cl->state = MEI_FILE_DISCONNECTED; |
747 | mei_io_list_free(&dev->write_list, cl); | ||
748 | mei_io_list_free(&dev->write_waiting_list, cl); | ||
743 | mei_io_list_flush(&dev->ctrl_rd_list, cl); | 749 | mei_io_list_flush(&dev->ctrl_rd_list, cl); |
744 | mei_io_list_flush(&dev->ctrl_wr_list, cl); | 750 | mei_io_list_flush(&dev->ctrl_wr_list, cl); |
751 | mei_cl_wake_all(cl); | ||
745 | cl->mei_flow_ctrl_creds = 0; | 752 | cl->mei_flow_ctrl_creds = 0; |
746 | cl->timer_count = 0; | 753 | cl->timer_count = 0; |
747 | 754 | ||
@@ -1034,7 +1041,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
1034 | * Return: 0 on success, <0 on failure. | 1041 | * Return: 0 on success, <0 on failure. |
1035 | */ | 1042 | */ |
1036 | int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, | 1043 | int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, |
1037 | struct file *file) | 1044 | const struct file *file) |
1038 | { | 1045 | { |
1039 | struct mei_device *dev; | 1046 | struct mei_device *dev; |
1040 | struct mei_cl_cb *cb; | 1047 | struct mei_cl_cb *cb; |
@@ -1119,11 +1126,10 @@ nortpm: | |||
1119 | * mei_cl_alloc_linked - allocate and link host client | 1126 | * mei_cl_alloc_linked - allocate and link host client |
1120 | * | 1127 | * |
1121 | * @dev: the device structure | 1128 | * @dev: the device structure |
1122 | * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one | ||
1123 | * | 1129 | * |
1124 | * Return: cl on success ERR_PTR on failure | 1130 | * Return: cl on success ERR_PTR on failure |
1125 | */ | 1131 | */ |
1126 | struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id) | 1132 | struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev) |
1127 | { | 1133 | { |
1128 | struct mei_cl *cl; | 1134 | struct mei_cl *cl; |
1129 | int ret; | 1135 | int ret; |
@@ -1134,7 +1140,7 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id) | |||
1134 | goto err; | 1140 | goto err; |
1135 | } | 1141 | } |
1136 | 1142 | ||
1137 | ret = mei_cl_link(cl, id); | 1143 | ret = mei_cl_link(cl); |
1138 | if (ret) | 1144 | if (ret) |
1139 | goto err; | 1145 | goto err; |
1140 | 1146 | ||
@@ -1149,11 +1155,12 @@ err: | |||
1149 | /** | 1155 | /** |
1150 | * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. | 1156 | * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. |
1151 | * | 1157 | * |
1152 | * @cl: private data of the file object | 1158 | * @cl: host client |
1159 | * @fp: the file pointer associated with the host client | ||
1153 | * | 1160 | * |
1154 | * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise. | 1161 | * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise. |
1155 | */ | 1162 | */ |
1156 | int mei_cl_flow_ctrl_creds(struct mei_cl *cl) | 1163 | static int mei_cl_flow_ctrl_creds(struct mei_cl *cl, const struct file *fp) |
1157 | { | 1164 | { |
1158 | int rets; | 1165 | int rets; |
1159 | 1166 | ||
@@ -1164,7 +1171,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl) | |||
1164 | return 1; | 1171 | return 1; |
1165 | 1172 | ||
1166 | if (mei_cl_is_fixed_address(cl)) { | 1173 | if (mei_cl_is_fixed_address(cl)) { |
1167 | rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL); | 1174 | rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp); |
1168 | if (rets && rets != -EBUSY) | 1175 | if (rets && rets != -EBUSY) |
1169 | return rets; | 1176 | return rets; |
1170 | return 1; | 1177 | return 1; |
@@ -1186,7 +1193,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl) | |||
1186 | * 0 on success | 1193 | * 0 on success |
1187 | * -EINVAL when ctrl credits are <= 0 | 1194 | * -EINVAL when ctrl credits are <= 0 |
1188 | */ | 1195 | */ |
1189 | int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) | 1196 | static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) |
1190 | { | 1197 | { |
1191 | if (WARN_ON(!cl || !cl->me_cl)) | 1198 | if (WARN_ON(!cl || !cl->me_cl)) |
1192 | return -EINVAL; | 1199 | return -EINVAL; |
@@ -1283,7 +1290,8 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
1283 | * | 1290 | * |
1284 | * Return: 0 on success and error otherwise. | 1291 | * Return: 0 on success and error otherwise. |
1285 | */ | 1292 | */ |
1286 | int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request) | 1293 | int mei_cl_notify_request(struct mei_cl *cl, |
1294 | const struct file *file, u8 request) | ||
1287 | { | 1295 | { |
1288 | struct mei_device *dev; | 1296 | struct mei_device *dev; |
1289 | struct mei_cl_cb *cb; | 1297 | struct mei_cl_cb *cb; |
@@ -1368,12 +1376,12 @@ void mei_cl_notify(struct mei_cl *cl) | |||
1368 | 1376 | ||
1369 | cl_dbg(dev, cl, "notify event"); | 1377 | cl_dbg(dev, cl, "notify event"); |
1370 | cl->notify_ev = true; | 1378 | cl->notify_ev = true; |
1371 | wake_up_interruptible_all(&cl->ev_wait); | 1379 | if (!mei_cl_bus_notify_event(cl)) |
1380 | wake_up_interruptible(&cl->ev_wait); | ||
1372 | 1381 | ||
1373 | if (cl->ev_async) | 1382 | if (cl->ev_async) |
1374 | kill_fasync(&cl->ev_async, SIGIO, POLL_PRI); | 1383 | kill_fasync(&cl->ev_async, SIGIO, POLL_PRI); |
1375 | 1384 | ||
1376 | mei_cl_bus_notify_event(cl); | ||
1377 | } | 1385 | } |
1378 | 1386 | ||
1379 | /** | 1387 | /** |
@@ -1422,6 +1430,25 @@ out: | |||
1422 | } | 1430 | } |
1423 | 1431 | ||
1424 | /** | 1432 | /** |
1433 | * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control | ||
1434 | * for given host client | ||
1435 | * | ||
1436 | * @cl: host client | ||
1437 | * | ||
1438 | * Return: true, if found at least one cb. | ||
1439 | */ | ||
1440 | static bool mei_cl_is_read_fc_cb(struct mei_cl *cl) | ||
1441 | { | ||
1442 | struct mei_device *dev = cl->dev; | ||
1443 | struct mei_cl_cb *cb; | ||
1444 | |||
1445 | list_for_each_entry(cb, &dev->ctrl_wr_list.list, list) | ||
1446 | if (cb->fop_type == MEI_FOP_READ && cb->cl == cl) | ||
1447 | return true; | ||
1448 | return false; | ||
1449 | } | ||
1450 | |||
1451 | /** | ||
1425 | * mei_cl_read_start - the start read client message function. | 1452 | * mei_cl_read_start - the start read client message function. |
1426 | * | 1453 | * |
1427 | * @cl: host client | 1454 | * @cl: host client |
@@ -1430,7 +1457,7 @@ out: | |||
1430 | * | 1457 | * |
1431 | * Return: 0 on success, <0 on failure. | 1458 | * Return: 0 on success, <0 on failure. |
1432 | */ | 1459 | */ |
1433 | int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp) | 1460 | int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) |
1434 | { | 1461 | { |
1435 | struct mei_device *dev; | 1462 | struct mei_device *dev; |
1436 | struct mei_cl_cb *cb; | 1463 | struct mei_cl_cb *cb; |
@@ -1445,7 +1472,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp) | |||
1445 | return -ENODEV; | 1472 | return -ENODEV; |
1446 | 1473 | ||
1447 | /* HW currently supports only one pending read */ | 1474 | /* HW currently supports only one pending read */ |
1448 | if (!list_empty(&cl->rd_pending)) | 1475 | if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl)) |
1449 | return -EBUSY; | 1476 | return -EBUSY; |
1450 | 1477 | ||
1451 | if (!mei_me_cl_is_active(cl->me_cl)) { | 1478 | if (!mei_me_cl_is_active(cl->me_cl)) { |
@@ -1524,7 +1551,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
1524 | 1551 | ||
1525 | first_chunk = cb->buf_idx == 0; | 1552 | first_chunk = cb->buf_idx == 0; |
1526 | 1553 | ||
1527 | rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1; | 1554 | rets = first_chunk ? mei_cl_flow_ctrl_creds(cl, cb->fp) : 1; |
1528 | if (rets < 0) | 1555 | if (rets < 0) |
1529 | return rets; | 1556 | return rets; |
1530 | 1557 | ||
@@ -1556,7 +1583,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
1556 | return 0; | 1583 | return 0; |
1557 | } | 1584 | } |
1558 | 1585 | ||
1559 | cl_dbg(dev, cl, "buf: size = %d idx = %lu\n", | 1586 | cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n", |
1560 | cb->buf.size, cb->buf_idx); | 1587 | cb->buf.size, cb->buf_idx); |
1561 | 1588 | ||
1562 | rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); | 1589 | rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); |
@@ -1618,7 +1645,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) | |||
1618 | if (rets < 0 && rets != -EINPROGRESS) { | 1645 | if (rets < 0 && rets != -EINPROGRESS) { |
1619 | pm_runtime_put_noidle(dev->dev); | 1646 | pm_runtime_put_noidle(dev->dev); |
1620 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | 1647 | cl_err(dev, cl, "rpm: get failed %d\n", rets); |
1621 | return rets; | 1648 | goto free; |
1622 | } | 1649 | } |
1623 | 1650 | ||
1624 | cb->buf_idx = 0; | 1651 | cb->buf_idx = 0; |
@@ -1630,7 +1657,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) | |||
1630 | mei_hdr.msg_complete = 0; | 1657 | mei_hdr.msg_complete = 0; |
1631 | mei_hdr.internal = cb->internal; | 1658 | mei_hdr.internal = cb->internal; |
1632 | 1659 | ||
1633 | rets = mei_cl_flow_ctrl_creds(cl); | 1660 | rets = mei_cl_flow_ctrl_creds(cl, cb->fp); |
1634 | if (rets < 0) | 1661 | if (rets < 0) |
1635 | goto err; | 1662 | goto err; |
1636 | 1663 | ||
@@ -1677,7 +1704,8 @@ out: | |||
1677 | 1704 | ||
1678 | mutex_unlock(&dev->device_lock); | 1705 | mutex_unlock(&dev->device_lock); |
1679 | rets = wait_event_interruptible(cl->tx_wait, | 1706 | rets = wait_event_interruptible(cl->tx_wait, |
1680 | cl->writing_state == MEI_WRITE_COMPLETE); | 1707 | cl->writing_state == MEI_WRITE_COMPLETE || |
1708 | (!mei_cl_is_connected(cl))); | ||
1681 | mutex_lock(&dev->device_lock); | 1709 | mutex_lock(&dev->device_lock); |
1682 | /* wait_event_interruptible returns -ERESTARTSYS */ | 1710 | /* wait_event_interruptible returns -ERESTARTSYS */ |
1683 | if (rets) { | 1711 | if (rets) { |
@@ -1685,6 +1713,10 @@ out: | |||
1685 | rets = -EINTR; | 1713 | rets = -EINTR; |
1686 | goto err; | 1714 | goto err; |
1687 | } | 1715 | } |
1716 | if (cl->writing_state != MEI_WRITE_COMPLETE) { | ||
1717 | rets = -EFAULT; | ||
1718 | goto err; | ||
1719 | } | ||
1688 | } | 1720 | } |
1689 | 1721 | ||
1690 | rets = size; | 1722 | rets = size; |
@@ -1692,6 +1724,8 @@ err: | |||
1692 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | 1724 | cl_dbg(dev, cl, "rpm: autosuspend\n"); |
1693 | pm_runtime_mark_last_busy(dev->dev); | 1725 | pm_runtime_mark_last_busy(dev->dev); |
1694 | pm_runtime_put_autosuspend(dev->dev); | 1726 | pm_runtime_put_autosuspend(dev->dev); |
1727 | free: | ||
1728 | mei_io_cb_free(cb); | ||
1695 | 1729 | ||
1696 | return rets; | 1730 | return rets; |
1697 | } | 1731 | } |
@@ -1721,10 +1755,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) | |||
1721 | 1755 | ||
1722 | case MEI_FOP_READ: | 1756 | case MEI_FOP_READ: |
1723 | list_add_tail(&cb->list, &cl->rd_completed); | 1757 | list_add_tail(&cb->list, &cl->rd_completed); |
1724 | if (waitqueue_active(&cl->rx_wait)) | 1758 | if (!mei_cl_bus_rx_event(cl)) |
1725 | wake_up_interruptible_all(&cl->rx_wait); | 1759 | wake_up_interruptible(&cl->rx_wait); |
1726 | else | ||
1727 | mei_cl_bus_rx_event(cl); | ||
1728 | break; | 1760 | break; |
1729 | 1761 | ||
1730 | case MEI_FOP_CONNECT: | 1762 | case MEI_FOP_CONNECT: |
@@ -1753,44 +1785,3 @@ void mei_cl_all_disconnect(struct mei_device *dev) | |||
1753 | list_for_each_entry(cl, &dev->file_list, link) | 1785 | list_for_each_entry(cl, &dev->file_list, link) |
1754 | mei_cl_set_disconnected(cl); | 1786 | mei_cl_set_disconnected(cl); |
1755 | } | 1787 | } |
1756 | |||
1757 | |||
1758 | /** | ||
1759 | * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted | ||
1760 | * | ||
1761 | * @dev: mei device | ||
1762 | */ | ||
1763 | void mei_cl_all_wakeup(struct mei_device *dev) | ||
1764 | { | ||
1765 | struct mei_cl *cl; | ||
1766 | |||
1767 | list_for_each_entry(cl, &dev->file_list, link) { | ||
1768 | if (waitqueue_active(&cl->rx_wait)) { | ||
1769 | cl_dbg(dev, cl, "Waking up reading client!\n"); | ||
1770 | wake_up_interruptible(&cl->rx_wait); | ||
1771 | } | ||
1772 | if (waitqueue_active(&cl->tx_wait)) { | ||
1773 | cl_dbg(dev, cl, "Waking up writing client!\n"); | ||
1774 | wake_up_interruptible(&cl->tx_wait); | ||
1775 | } | ||
1776 | |||
1777 | /* synchronized under device mutex */ | ||
1778 | if (waitqueue_active(&cl->ev_wait)) { | ||
1779 | cl_dbg(dev, cl, "Waking up waiting for event clients!\n"); | ||
1780 | wake_up_interruptible(&cl->ev_wait); | ||
1781 | } | ||
1782 | } | ||
1783 | } | ||
1784 | |||
1785 | /** | ||
1786 | * mei_cl_all_write_clear - clear all pending writes | ||
1787 | * | ||
1788 | * @dev: mei device | ||
1789 | */ | ||
1790 | void mei_cl_all_write_clear(struct mei_device *dev) | ||
1791 | { | ||
1792 | mei_io_list_free(&dev->write_list, NULL); | ||
1793 | mei_io_list_free(&dev->write_waiting_list, NULL); | ||
1794 | } | ||
1795 | |||
1796 | |||
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 04e1aa39243f..0d7a3a1fef78 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h | |||
@@ -18,7 +18,6 @@ | |||
18 | #define _MEI_CLIENT_H_ | 18 | #define _MEI_CLIENT_H_ |
19 | 19 | ||
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/watchdog.h> | ||
22 | #include <linux/poll.h> | 21 | #include <linux/poll.h> |
23 | #include <linux/mei.h> | 22 | #include <linux/mei.h> |
24 | 23 | ||
@@ -84,7 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl) | |||
84 | * MEI IO Functions | 83 | * MEI IO Functions |
85 | */ | 84 | */ |
86 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, | 85 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, |
87 | struct file *fp); | 86 | const struct file *fp); |
88 | void mei_io_cb_free(struct mei_cl_cb *priv_cb); | 87 | void mei_io_cb_free(struct mei_cl_cb *priv_cb); |
89 | int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length); | 88 | int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length); |
90 | 89 | ||
@@ -108,21 +107,19 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev); | |||
108 | void mei_cl_init(struct mei_cl *cl, struct mei_device *dev); | 107 | void mei_cl_init(struct mei_cl *cl, struct mei_device *dev); |
109 | 108 | ||
110 | 109 | ||
111 | int mei_cl_link(struct mei_cl *cl, int id); | 110 | int mei_cl_link(struct mei_cl *cl); |
112 | int mei_cl_unlink(struct mei_cl *cl); | 111 | int mei_cl_unlink(struct mei_cl *cl); |
113 | 112 | ||
114 | struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id); | 113 | struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev); |
115 | 114 | ||
116 | struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, | 115 | struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, |
117 | const struct file *fp); | 116 | const struct file *fp); |
118 | void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp); | 117 | void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp); |
119 | struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, | 118 | struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, |
120 | enum mei_cb_file_ops type, struct file *fp); | 119 | enum mei_cb_file_ops type, |
120 | const struct file *fp); | ||
121 | int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp); | 121 | int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp); |
122 | 122 | ||
123 | int mei_cl_flow_ctrl_creds(struct mei_cl *cl); | ||
124 | |||
125 | int mei_cl_flow_ctrl_reduce(struct mei_cl *cl); | ||
126 | /* | 123 | /* |
127 | * MEI input output function prototype | 124 | * MEI input output function prototype |
128 | */ | 125 | */ |
@@ -217,10 +214,10 @@ void mei_cl_set_disconnected(struct mei_cl *cl); | |||
217 | int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, | 214 | int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, |
218 | struct mei_cl_cb *cmpl_list); | 215 | struct mei_cl_cb *cmpl_list); |
219 | int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, | 216 | int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, |
220 | struct file *file); | 217 | const struct file *file); |
221 | int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, | 218 | int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, |
222 | struct mei_cl_cb *cmpl_list); | 219 | struct mei_cl_cb *cmpl_list); |
223 | int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp); | 220 | int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp); |
224 | int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr, | 221 | int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr, |
225 | struct mei_cl_cb *cmpl_list); | 222 | struct mei_cl_cb *cmpl_list); |
226 | int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); | 223 | int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); |
@@ -229,19 +226,18 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
229 | 226 | ||
230 | void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); | 227 | void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); |
231 | 228 | ||
232 | void mei_host_client_init(struct work_struct *work); | 229 | void mei_host_client_init(struct mei_device *dev); |
233 | 230 | ||
234 | u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop); | 231 | u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop); |
235 | enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request); | 232 | enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request); |
236 | int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request); | 233 | int mei_cl_notify_request(struct mei_cl *cl, |
234 | const struct file *file, u8 request); | ||
237 | int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, | 235 | int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, |
238 | struct mei_cl_cb *cmpl_list); | 236 | struct mei_cl_cb *cmpl_list); |
239 | int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev); | 237 | int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev); |
240 | void mei_cl_notify(struct mei_cl *cl); | 238 | void mei_cl_notify(struct mei_cl *cl); |
241 | 239 | ||
242 | void mei_cl_all_disconnect(struct mei_device *dev); | 240 | void mei_cl_all_disconnect(struct mei_device *dev); |
243 | void mei_cl_all_wakeup(struct mei_device *dev); | ||
244 | void mei_cl_all_write_clear(struct mei_device *dev); | ||
245 | 241 | ||
246 | #define MEI_CL_FMT "cl:host=%02d me=%02d " | 242 | #define MEI_CL_FMT "cl:host=%02d me=%02d " |
247 | #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl) | 243 | #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl) |
@@ -249,6 +245,9 @@ void mei_cl_all_write_clear(struct mei_device *dev); | |||
249 | #define cl_dbg(dev, cl, format, arg...) \ | 245 | #define cl_dbg(dev, cl, format, arg...) \ |
250 | dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) | 246 | dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) |
251 | 247 | ||
248 | #define cl_warn(dev, cl, format, arg...) \ | ||
249 | dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) | ||
250 | |||
252 | #define cl_err(dev, cl, format, arg...) \ | 251 | #define cl_err(dev, cl, format, arg...) \ |
253 | dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) | 252 | dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) |
254 | 253 | ||
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index a138d8a27ab5..c6c051b52f55 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c | |||
@@ -50,6 +50,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, | |||
50 | } | 50 | } |
51 | 51 | ||
52 | pos += scnprintf(buf + pos, bufsz - pos, HDR); | 52 | pos += scnprintf(buf + pos, bufsz - pos, HDR); |
53 | #undef HDR | ||
53 | 54 | ||
54 | /* if the driver is not enabled the list won't be consistent */ | 55 | /* if the driver is not enabled the list won't be consistent */ |
55 | if (dev->dev_state != MEI_DEV_ENABLED) | 56 | if (dev->dev_state != MEI_DEV_ENABLED) |
@@ -90,23 +91,37 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf, | |||
90 | { | 91 | { |
91 | struct mei_device *dev = fp->private_data; | 92 | struct mei_device *dev = fp->private_data; |
92 | struct mei_cl *cl; | 93 | struct mei_cl *cl; |
93 | const size_t bufsz = 1024; | 94 | size_t bufsz = 1; |
94 | char *buf; | 95 | char *buf; |
95 | int i = 0; | 96 | int i = 0; |
96 | int pos = 0; | 97 | int pos = 0; |
97 | int ret; | 98 | int ret; |
98 | 99 | ||
100 | #define HDR " |me|host|state|rd|wr|\n" | ||
101 | |||
99 | if (!dev) | 102 | if (!dev) |
100 | return -ENODEV; | 103 | return -ENODEV; |
101 | 104 | ||
105 | mutex_lock(&dev->device_lock); | ||
106 | |||
107 | /* | ||
108 | * if the driver is not enabled the list won't be consistent, | ||
109 | * we output empty table | ||
110 | */ | ||
111 | if (dev->dev_state == MEI_DEV_ENABLED) | ||
112 | list_for_each_entry(cl, &dev->file_list, link) | ||
113 | bufsz++; | ||
114 | |||
115 | bufsz *= sizeof(HDR) + 1; | ||
116 | |||
102 | buf = kzalloc(bufsz, GFP_KERNEL); | 117 | buf = kzalloc(bufsz, GFP_KERNEL); |
103 | if (!buf) | 118 | if (!buf) { |
119 | mutex_unlock(&dev->device_lock); | ||
104 | return -ENOMEM; | 120 | return -ENOMEM; |
121 | } | ||
105 | 122 | ||
106 | pos += scnprintf(buf + pos, bufsz - pos, | 123 | pos += scnprintf(buf + pos, bufsz - pos, HDR); |
107 | " |me|host|state|rd|wr|\n"); | 124 | #undef HDR |
108 | |||
109 | mutex_lock(&dev->device_lock); | ||
110 | 125 | ||
111 | /* if the driver is not enabled the list won't be consistent */ | 126 | /* if the driver is not enabled the list won't be consistent */ |
112 | if (dev->dev_state != MEI_DEV_ENABLED) | 127 | if (dev->dev_state != MEI_DEV_ENABLED) |
@@ -115,7 +130,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf, | |||
115 | list_for_each_entry(cl, &dev->file_list, link) { | 130 | list_for_each_entry(cl, &dev->file_list, link) { |
116 | 131 | ||
117 | pos += scnprintf(buf + pos, bufsz - pos, | 132 | pos += scnprintf(buf + pos, bufsz - pos, |
118 | "%2d|%2d|%4d|%5d|%2d|%2d|\n", | 133 | "%3d|%2d|%4d|%5d|%2d|%2d|\n", |
119 | i, mei_cl_me_id(cl), cl->host_client_id, cl->state, | 134 | i, mei_cl_me_id(cl), cl->host_client_id, cl->state, |
120 | !list_empty(&cl->rd_completed), cl->writing_state); | 135 | !list_empty(&cl->rd_completed), cl->writing_state); |
121 | i++; | 136 | i++; |
@@ -150,16 +165,21 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, | |||
150 | pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n", | 165 | pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n", |
151 | mei_hbm_state_str(dev->hbm_state)); | 166 | mei_hbm_state_str(dev->hbm_state)); |
152 | 167 | ||
153 | if (dev->hbm_state == MEI_HBM_STARTED) { | 168 | if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS && |
169 | dev->hbm_state <= MEI_HBM_STARTED) { | ||
154 | pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n"); | 170 | pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n"); |
155 | pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n", | 171 | pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n", |
156 | dev->hbm_f_pg_supported); | 172 | dev->hbm_f_pg_supported); |
157 | pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n", | 173 | pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n", |
158 | dev->hbm_f_dc_supported); | 174 | dev->hbm_f_dc_supported); |
175 | pos += scnprintf(buf + pos, bufsz - pos, "\tIE: %01d\n", | ||
176 | dev->hbm_f_ie_supported); | ||
159 | pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n", | 177 | pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n", |
160 | dev->hbm_f_dot_supported); | 178 | dev->hbm_f_dot_supported); |
161 | pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n", | 179 | pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n", |
162 | dev->hbm_f_ev_supported); | 180 | dev->hbm_f_ev_supported); |
181 | pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n", | ||
182 | dev->hbm_f_fa_supported); | ||
163 | } | 183 | } |
164 | 184 | ||
165 | pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", | 185 | pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", |
@@ -175,6 +195,30 @@ static const struct file_operations mei_dbgfs_fops_devstate = { | |||
175 | .llseek = generic_file_llseek, | 195 | .llseek = generic_file_llseek, |
176 | }; | 196 | }; |
177 | 197 | ||
198 | static ssize_t mei_dbgfs_write_allow_fa(struct file *file, | ||
199 | const char __user *user_buf, | ||
200 | size_t count, loff_t *ppos) | ||
201 | { | ||
202 | struct mei_device *dev; | ||
203 | int ret; | ||
204 | |||
205 | dev = container_of(file->private_data, | ||
206 | struct mei_device, allow_fixed_address); | ||
207 | |||
208 | ret = debugfs_write_file_bool(file, user_buf, count, ppos); | ||
209 | if (ret < 0) | ||
210 | return ret; | ||
211 | dev->override_fixed_address = true; | ||
212 | return ret; | ||
213 | } | ||
214 | |||
215 | static const struct file_operations mei_dbgfs_fops_allow_fa = { | ||
216 | .open = simple_open, | ||
217 | .read = debugfs_read_file_bool, | ||
218 | .write = mei_dbgfs_write_allow_fa, | ||
219 | .llseek = generic_file_llseek, | ||
220 | }; | ||
221 | |||
178 | /** | 222 | /** |
179 | * mei_dbgfs_deregister - Remove the debugfs files and directories | 223 | * mei_dbgfs_deregister - Remove the debugfs files and directories |
180 | * | 224 | * |
@@ -224,8 +268,9 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name) | |||
224 | dev_err(dev->dev, "devstate: registration failed\n"); | 268 | dev_err(dev->dev, "devstate: registration failed\n"); |
225 | goto err; | 269 | goto err; |
226 | } | 270 | } |
227 | f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir, | 271 | f = debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir, |
228 | &dev->allow_fixed_address); | 272 | &dev->allow_fixed_address, |
273 | &mei_dbgfs_fops_allow_fa); | ||
229 | if (!f) { | 274 | if (!f) { |
230 | dev_err(dev->dev, "allow_fixed_address: registration failed\n"); | 275 | dev_err(dev->dev, "allow_fixed_address: registration failed\n"); |
231 | goto err; | 276 | goto err; |
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index e7b7aad0999b..5e305d2605f3 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
@@ -301,7 +301,10 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev) | |||
301 | enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data; | 301 | enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data; |
302 | memset(enum_req, 0, len); | 302 | memset(enum_req, 0, len); |
303 | enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; | 303 | enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; |
304 | enum_req->allow_add = dev->hbm_f_dc_supported; | 304 | enum_req->flags |= dev->hbm_f_dc_supported ? |
305 | MEI_HBM_ENUM_F_ALLOW_ADD : 0; | ||
306 | enum_req->flags |= dev->hbm_f_ie_supported ? | ||
307 | MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0; | ||
305 | 308 | ||
306 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); | 309 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
307 | if (ret) { | 310 | if (ret) { |
@@ -401,6 +404,9 @@ static int mei_hbm_fw_add_cl_req(struct mei_device *dev, | |||
401 | if (ret) | 404 | if (ret) |
402 | status = !MEI_HBMS_SUCCESS; | 405 | status = !MEI_HBMS_SUCCESS; |
403 | 406 | ||
407 | if (dev->dev_state == MEI_DEV_ENABLED) | ||
408 | schedule_work(&dev->bus_rescan_work); | ||
409 | |||
404 | return mei_hbm_add_cl_resp(dev, req->me_addr, status); | 410 | return mei_hbm_add_cl_resp(dev, req->me_addr, status); |
405 | } | 411 | } |
406 | 412 | ||
@@ -543,7 +549,7 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
543 | /* We got all client properties */ | 549 | /* We got all client properties */ |
544 | if (next_client_index == MEI_CLIENTS_MAX) { | 550 | if (next_client_index == MEI_CLIENTS_MAX) { |
545 | dev->hbm_state = MEI_HBM_STARTED; | 551 | dev->hbm_state = MEI_HBM_STARTED; |
546 | schedule_work(&dev->init_work); | 552 | mei_host_client_init(dev); |
547 | 553 | ||
548 | return 0; | 554 | return 0; |
549 | } | 555 | } |
@@ -789,8 +795,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl, | |||
789 | cl->state = MEI_FILE_CONNECTED; | 795 | cl->state = MEI_FILE_CONNECTED; |
790 | else { | 796 | else { |
791 | cl->state = MEI_FILE_DISCONNECT_REPLY; | 797 | cl->state = MEI_FILE_DISCONNECT_REPLY; |
792 | if (rs->status == MEI_CL_CONN_NOT_FOUND) | 798 | if (rs->status == MEI_CL_CONN_NOT_FOUND) { |
793 | mei_me_cl_del(dev, cl->me_cl); | 799 | mei_me_cl_del(dev, cl->me_cl); |
800 | if (dev->dev_state == MEI_DEV_ENABLED) | ||
801 | schedule_work(&dev->bus_rescan_work); | ||
802 | } | ||
794 | } | 803 | } |
795 | cl->status = mei_cl_conn_status_to_errno(rs->status); | 804 | cl->status = mei_cl_conn_status_to_errno(rs->status); |
796 | } | 805 | } |
@@ -866,7 +875,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev, | |||
866 | 875 | ||
867 | cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req); | 876 | cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req); |
868 | if (cl) { | 877 | if (cl) { |
869 | cl_dbg(dev, cl, "fw disconnect request received\n"); | 878 | cl_warn(dev, cl, "fw disconnect request received\n"); |
870 | cl->state = MEI_FILE_DISCONNECTING; | 879 | cl->state = MEI_FILE_DISCONNECTING; |
871 | cl->timer_count = 0; | 880 | cl->timer_count = 0; |
872 | 881 | ||
@@ -972,6 +981,9 @@ static void mei_hbm_config_features(struct mei_device *dev) | |||
972 | if (dev->version.major_version >= HBM_MAJOR_VERSION_DC) | 981 | if (dev->version.major_version >= HBM_MAJOR_VERSION_DC) |
973 | dev->hbm_f_dc_supported = 1; | 982 | dev->hbm_f_dc_supported = 1; |
974 | 983 | ||
984 | if (dev->version.major_version >= HBM_MAJOR_VERSION_IE) | ||
985 | dev->hbm_f_ie_supported = 1; | ||
986 | |||
975 | /* disconnect on connect timeout instead of link reset */ | 987 | /* disconnect on connect timeout instead of link reset */ |
976 | if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT) | 988 | if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT) |
977 | dev->hbm_f_dot_supported = 1; | 989 | dev->hbm_f_dot_supported = 1; |
@@ -979,6 +991,10 @@ static void mei_hbm_config_features(struct mei_device *dev) | |||
979 | /* Notification Event Support */ | 991 | /* Notification Event Support */ |
980 | if (dev->version.major_version >= HBM_MAJOR_VERSION_EV) | 992 | if (dev->version.major_version >= HBM_MAJOR_VERSION_EV) |
981 | dev->hbm_f_ev_supported = 1; | 993 | dev->hbm_f_ev_supported = 1; |
994 | |||
995 | /* Fixed Address Client Support */ | ||
996 | if (dev->version.major_version >= HBM_MAJOR_VERSION_FA) | ||
997 | dev->hbm_f_fa_supported = 1; | ||
982 | } | 998 | } |
983 | 999 | ||
984 | /** | 1000 | /** |
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index a8a68acd3267..0dcb854b4bfc 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
@@ -121,6 +121,10 @@ | |||
121 | #define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */ | 121 | #define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */ |
122 | #define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */ | 122 | #define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */ |
123 | #define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */ | 123 | #define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */ |
124 | |||
125 | #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ | ||
126 | #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ | ||
127 | |||
124 | /* | 128 | /* |
125 | * MEI HW Section | 129 | * MEI HW Section |
126 | */ | 130 | */ |
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 25b1997a62cb..e2fb44cc5c37 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c | |||
@@ -189,8 +189,11 @@ static int mei_me_fw_status(struct mei_device *dev, | |||
189 | 189 | ||
190 | fw_status->count = fw_src->count; | 190 | fw_status->count = fw_src->count; |
191 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { | 191 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { |
192 | ret = pci_read_config_dword(pdev, | 192 | ret = pci_read_config_dword(pdev, fw_src->status[i], |
193 | fw_src->status[i], &fw_status->status[i]); | 193 | &fw_status->status[i]); |
194 | trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X", | ||
195 | fw_src->status[i], | ||
196 | fw_status->status[i]); | ||
194 | if (ret) | 197 | if (ret) |
195 | return ret; | 198 | return ret; |
196 | } | 199 | } |
@@ -215,6 +218,7 @@ static void mei_me_hw_config(struct mei_device *dev) | |||
215 | 218 | ||
216 | reg = 0; | 219 | reg = 0; |
217 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); | 220 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); |
221 | trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); | ||
218 | hw->d0i3_supported = | 222 | hw->d0i3_supported = |
219 | ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK); | 223 | ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK); |
220 | 224 | ||
@@ -1248,6 +1252,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev) | |||
1248 | u32 reg; | 1252 | u32 reg; |
1249 | 1253 | ||
1250 | pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); | 1254 | pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); |
1255 | trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg); | ||
1251 | /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ | 1256 | /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ |
1252 | return (reg & 0x600) == 0x200; | 1257 | return (reg & 0x600) == 0x200; |
1253 | } | 1258 | } |
@@ -1260,6 +1265,7 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) | |||
1260 | u32 reg; | 1265 | u32 reg; |
1261 | /* Read ME FW Status check for SPS Firmware */ | 1266 | /* Read ME FW Status check for SPS Firmware */ |
1262 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); | 1267 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); |
1268 | trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); | ||
1263 | /* if bits [19:16] = 15, running SPS Firmware */ | 1269 | /* if bits [19:16] = 15, running SPS Firmware */ |
1264 | return (reg & 0xf0000) == 0xf0000; | 1270 | return (reg & 0xf0000) == 0xf0000; |
1265 | } | 1271 | } |
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index bae680c648ff..4a6c1b85f11e 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c | |||
@@ -28,6 +28,9 @@ | |||
28 | #include "client.h" | 28 | #include "client.h" |
29 | #include "hbm.h" | 29 | #include "hbm.h" |
30 | 30 | ||
31 | #include "mei-trace.h" | ||
32 | |||
33 | |||
31 | /** | 34 | /** |
32 | * mei_txe_reg_read - Reads 32bit data from the txe device | 35 | * mei_txe_reg_read - Reads 32bit data from the txe device |
33 | * | 36 | * |
@@ -640,8 +643,11 @@ static int mei_txe_fw_status(struct mei_device *dev, | |||
640 | 643 | ||
641 | fw_status->count = fw_src->count; | 644 | fw_status->count = fw_src->count; |
642 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { | 645 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { |
643 | ret = pci_read_config_dword(pdev, | 646 | ret = pci_read_config_dword(pdev, fw_src->status[i], |
644 | fw_src->status[i], &fw_status->status[i]); | 647 | &fw_status->status[i]); |
648 | trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X", | ||
649 | fw_src->status[i], | ||
650 | fw_status->status[i]); | ||
645 | if (ret) | 651 | if (ret) |
646 | return ret; | 652 | return ret; |
647 | } | 653 | } |
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 4cebde85924f..9daf3f9aed25 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h | |||
@@ -29,7 +29,6 @@ | |||
29 | #define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ | 29 | #define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ |
30 | 30 | ||
31 | #define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ | 31 | #define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ |
32 | #define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ | ||
33 | 32 | ||
34 | #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ | 33 | #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ |
35 | #define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ | 34 | #define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ |
@@ -54,6 +53,12 @@ | |||
54 | #define HBM_MAJOR_VERSION_DC 2 | 53 | #define HBM_MAJOR_VERSION_DC 2 |
55 | 54 | ||
56 | /* | 55 | /* |
56 | * MEI version with immediate reply to enum request support | ||
57 | */ | ||
58 | #define HBM_MINOR_VERSION_IE 0 | ||
59 | #define HBM_MAJOR_VERSION_IE 2 | ||
60 | |||
61 | /* | ||
57 | * MEI version with disconnect on connection timeout support | 62 | * MEI version with disconnect on connection timeout support |
58 | */ | 63 | */ |
59 | #define HBM_MINOR_VERSION_DOT 0 | 64 | #define HBM_MINOR_VERSION_DOT 0 |
@@ -65,6 +70,12 @@ | |||
65 | #define HBM_MINOR_VERSION_EV 0 | 70 | #define HBM_MINOR_VERSION_EV 0 |
66 | #define HBM_MAJOR_VERSION_EV 2 | 71 | #define HBM_MAJOR_VERSION_EV 2 |
67 | 72 | ||
73 | /* | ||
74 | * MEI version with fixed address client support | ||
75 | */ | ||
76 | #define HBM_MINOR_VERSION_FA 0 | ||
77 | #define HBM_MAJOR_VERSION_FA 2 | ||
78 | |||
68 | /* Host bus message command opcode */ | 79 | /* Host bus message command opcode */ |
69 | #define MEI_HBM_CMD_OP_MSK 0x7f | 80 | #define MEI_HBM_CMD_OP_MSK 0x7f |
70 | /* Host bus message command RESPONSE */ | 81 | /* Host bus message command RESPONSE */ |
@@ -241,15 +252,26 @@ struct hbm_me_stop_request { | |||
241 | } __packed; | 252 | } __packed; |
242 | 253 | ||
243 | /** | 254 | /** |
244 | * struct hbm_host_enum_request - enumeration request from host to fw | 255 | * enum hbm_host_enum_flags - enumeration request flags (HBM version >= 2.0) |
245 | * | 256 | * |
246 | * @hbm_cmd: bus message command header | 257 | * @MEI_HBM_ENUM_F_ALLOW_ADD: allow dynamic clients add |
247 | * @allow_add: allow dynamic clients add HBM version >= 2.0 | 258 | * @MEI_HBM_ENUM_F_IMMEDIATE_ENUM: allow FW to send answer immediately |
259 | */ | ||
260 | enum hbm_host_enum_flags { | ||
261 | MEI_HBM_ENUM_F_ALLOW_ADD = BIT(0), | ||
262 | MEI_HBM_ENUM_F_IMMEDIATE_ENUM = BIT(1), | ||
263 | }; | ||
264 | |||
265 | /** | ||
266 | * struct hbm_host_enum_request - enumeration request from host to fw | ||
267 | * | ||
268 | * @hbm_cmd : bus message command header | ||
269 | * @flags : request flags | ||
248 | * @reserved: reserved | 270 | * @reserved: reserved |
249 | */ | 271 | */ |
250 | struct hbm_host_enum_request { | 272 | struct hbm_host_enum_request { |
251 | u8 hbm_cmd; | 273 | u8 hbm_cmd; |
252 | u8 allow_add; | 274 | u8 flags; |
253 | u8 reserved[2]; | 275 | u8 reserved[2]; |
254 | } __packed; | 276 | } __packed; |
255 | 277 | ||
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 3edafc8d3ad4..f7c8dfdb6a12 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
@@ -91,8 +91,8 @@ EXPORT_SYMBOL_GPL(mei_fw_status2str); | |||
91 | */ | 91 | */ |
92 | void mei_cancel_work(struct mei_device *dev) | 92 | void mei_cancel_work(struct mei_device *dev) |
93 | { | 93 | { |
94 | cancel_work_sync(&dev->init_work); | ||
95 | cancel_work_sync(&dev->reset_work); | 94 | cancel_work_sync(&dev->reset_work); |
95 | cancel_work_sync(&dev->bus_rescan_work); | ||
96 | 96 | ||
97 | cancel_delayed_work(&dev->timer_work); | 97 | cancel_delayed_work(&dev->timer_work); |
98 | } | 98 | } |
@@ -148,16 +148,10 @@ int mei_reset(struct mei_device *dev) | |||
148 | state != MEI_DEV_POWER_UP) { | 148 | state != MEI_DEV_POWER_UP) { |
149 | 149 | ||
150 | /* remove all waiting requests */ | 150 | /* remove all waiting requests */ |
151 | mei_cl_all_write_clear(dev); | ||
152 | |||
153 | mei_cl_all_disconnect(dev); | 151 | mei_cl_all_disconnect(dev); |
154 | 152 | ||
155 | /* wake up all readers and writers so they can be interrupted */ | ||
156 | mei_cl_all_wakeup(dev); | ||
157 | |||
158 | /* remove entry if already in list */ | 153 | /* remove entry if already in list */ |
159 | dev_dbg(dev->dev, "remove iamthif and wd from the file list.\n"); | 154 | dev_dbg(dev->dev, "remove iamthif from the file list.\n"); |
160 | mei_cl_unlink(&dev->wd_cl); | ||
161 | mei_cl_unlink(&dev->iamthif_cl); | 155 | mei_cl_unlink(&dev->iamthif_cl); |
162 | mei_amthif_reset_params(dev); | 156 | mei_amthif_reset_params(dev); |
163 | } | 157 | } |
@@ -165,7 +159,6 @@ int mei_reset(struct mei_device *dev) | |||
165 | mei_hbm_reset(dev); | 159 | mei_hbm_reset(dev); |
166 | 160 | ||
167 | dev->rd_msg_hdr = 0; | 161 | dev->rd_msg_hdr = 0; |
168 | dev->wd_pending = false; | ||
169 | 162 | ||
170 | if (ret) { | 163 | if (ret) { |
171 | dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); | 164 | dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); |
@@ -335,16 +328,12 @@ void mei_stop(struct mei_device *dev) | |||
335 | 328 | ||
336 | mutex_lock(&dev->device_lock); | 329 | mutex_lock(&dev->device_lock); |
337 | 330 | ||
338 | mei_wd_stop(dev); | ||
339 | |||
340 | dev->dev_state = MEI_DEV_POWER_DOWN; | 331 | dev->dev_state = MEI_DEV_POWER_DOWN; |
341 | mei_reset(dev); | 332 | mei_reset(dev); |
342 | /* move device to disabled state unconditionally */ | 333 | /* move device to disabled state unconditionally */ |
343 | dev->dev_state = MEI_DEV_DISABLED; | 334 | dev->dev_state = MEI_DEV_DISABLED; |
344 | 335 | ||
345 | mutex_unlock(&dev->device_lock); | 336 | mutex_unlock(&dev->device_lock); |
346 | |||
347 | mei_watchdog_unregister(dev); | ||
348 | } | 337 | } |
349 | EXPORT_SYMBOL_GPL(mei_stop); | 338 | EXPORT_SYMBOL_GPL(mei_stop); |
350 | 339 | ||
@@ -394,7 +383,6 @@ void mei_device_init(struct mei_device *dev, | |||
394 | init_waitqueue_head(&dev->wait_hw_ready); | 383 | init_waitqueue_head(&dev->wait_hw_ready); |
395 | init_waitqueue_head(&dev->wait_pg); | 384 | init_waitqueue_head(&dev->wait_pg); |
396 | init_waitqueue_head(&dev->wait_hbm_start); | 385 | init_waitqueue_head(&dev->wait_hbm_start); |
397 | init_waitqueue_head(&dev->wait_stop_wd); | ||
398 | dev->dev_state = MEI_DEV_INITIALIZING; | 386 | dev->dev_state = MEI_DEV_INITIALIZING; |
399 | dev->reset_count = 0; | 387 | dev->reset_count = 0; |
400 | 388 | ||
@@ -404,13 +392,11 @@ void mei_device_init(struct mei_device *dev, | |||
404 | mei_io_list_init(&dev->ctrl_rd_list); | 392 | mei_io_list_init(&dev->ctrl_rd_list); |
405 | 393 | ||
406 | INIT_DELAYED_WORK(&dev->timer_work, mei_timer); | 394 | INIT_DELAYED_WORK(&dev->timer_work, mei_timer); |
407 | INIT_WORK(&dev->init_work, mei_host_client_init); | ||
408 | INIT_WORK(&dev->reset_work, mei_reset_work); | 395 | INIT_WORK(&dev->reset_work, mei_reset_work); |
396 | INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work); | ||
409 | 397 | ||
410 | INIT_LIST_HEAD(&dev->wd_cl.link); | ||
411 | INIT_LIST_HEAD(&dev->iamthif_cl.link); | 398 | INIT_LIST_HEAD(&dev->iamthif_cl.link); |
412 | mei_io_list_init(&dev->amthif_cmd_list); | 399 | mei_io_list_init(&dev->amthif_cmd_list); |
413 | mei_io_list_init(&dev->amthif_rd_complete_list); | ||
414 | 400 | ||
415 | bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); | 401 | bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); |
416 | dev->open_handle_count = 0; | 402 | dev->open_handle_count = 0; |
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 64b568a0268d..1e5cb1f704f8 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c | |||
@@ -48,7 +48,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list) | |||
48 | 48 | ||
49 | dev_dbg(dev->dev, "completing call back.\n"); | 49 | dev_dbg(dev->dev, "completing call back.\n"); |
50 | if (cl == &dev->iamthif_cl) | 50 | if (cl == &dev->iamthif_cl) |
51 | mei_amthif_complete(dev, cb); | 51 | mei_amthif_complete(cl, cb); |
52 | else | 52 | else |
53 | mei_cl_complete(cl, cb); | 53 | mei_cl_complete(cl, cb); |
54 | } | 54 | } |
@@ -104,6 +104,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl, | |||
104 | struct mei_device *dev = cl->dev; | 104 | struct mei_device *dev = cl->dev; |
105 | struct mei_cl_cb *cb; | 105 | struct mei_cl_cb *cb; |
106 | unsigned char *buffer = NULL; | 106 | unsigned char *buffer = NULL; |
107 | size_t buf_sz; | ||
107 | 108 | ||
108 | cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); | 109 | cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); |
109 | if (!cb) { | 110 | if (!cb) { |
@@ -124,11 +125,21 @@ int mei_cl_irq_read_msg(struct mei_cl *cl, | |||
124 | goto out; | 125 | goto out; |
125 | } | 126 | } |
126 | 127 | ||
127 | if (cb->buf.size < mei_hdr->length + cb->buf_idx) { | 128 | buf_sz = mei_hdr->length + cb->buf_idx; |
128 | cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n", | 129 | /* catch for integer overflow */ |
130 | if (buf_sz < cb->buf_idx) { | ||
131 | cl_err(dev, cl, "message is too big len %d idx %zu\n", | ||
132 | mei_hdr->length, cb->buf_idx); | ||
133 | |||
134 | list_move_tail(&cb->list, &complete_list->list); | ||
135 | cb->status = -EMSGSIZE; | ||
136 | goto out; | ||
137 | } | ||
138 | |||
139 | if (cb->buf.size < buf_sz) { | ||
140 | cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n", | ||
129 | cb->buf.size, mei_hdr->length, cb->buf_idx); | 141 | cb->buf.size, mei_hdr->length, cb->buf_idx); |
130 | buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx, | 142 | buffer = krealloc(cb->buf.data, buf_sz, GFP_KERNEL); |
131 | GFP_KERNEL); | ||
132 | 143 | ||
133 | if (!buffer) { | 144 | if (!buffer) { |
134 | cb->status = -ENOMEM; | 145 | cb->status = -ENOMEM; |
@@ -136,7 +147,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl, | |||
136 | goto out; | 147 | goto out; |
137 | } | 148 | } |
138 | cb->buf.data = buffer; | 149 | cb->buf.data = buffer; |
139 | cb->buf.size = mei_hdr->length + cb->buf_idx; | 150 | cb->buf.size = buf_sz; |
140 | } | 151 | } |
141 | 152 | ||
142 | buffer = cb->buf.data + cb->buf_idx; | 153 | buffer = cb->buf.data + cb->buf_idx; |
@@ -145,8 +156,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl, | |||
145 | cb->buf_idx += mei_hdr->length; | 156 | cb->buf_idx += mei_hdr->length; |
146 | 157 | ||
147 | if (mei_hdr->msg_complete) { | 158 | if (mei_hdr->msg_complete) { |
148 | cb->read_time = jiffies; | 159 | cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); |
149 | cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx); | ||
150 | list_move_tail(&cb->list, &complete_list->list); | 160 | list_move_tail(&cb->list, &complete_list->list); |
151 | } else { | 161 | } else { |
152 | pm_runtime_mark_last_busy(dev->dev); | 162 | pm_runtime_mark_last_busy(dev->dev); |
@@ -229,6 +239,16 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
229 | return 0; | 239 | return 0; |
230 | } | 240 | } |
231 | 241 | ||
242 | static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr) | ||
243 | { | ||
244 | return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0; | ||
245 | } | ||
246 | |||
247 | static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr) | ||
248 | { | ||
249 | return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0; | ||
250 | } | ||
251 | |||
232 | /** | 252 | /** |
233 | * mei_irq_read_handler - bottom half read routine after ISR to | 253 | * mei_irq_read_handler - bottom half read routine after ISR to |
234 | * handle the read processing. | 254 | * handle the read processing. |
@@ -270,7 +290,7 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
270 | } | 290 | } |
271 | 291 | ||
272 | /* HBM message */ | 292 | /* HBM message */ |
273 | if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { | 293 | if (hdr_is_hbm(mei_hdr)) { |
274 | ret = mei_hbm_dispatch(dev, mei_hdr); | 294 | ret = mei_hbm_dispatch(dev, mei_hdr); |
275 | if (ret) { | 295 | if (ret) { |
276 | dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n", | 296 | dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n", |
@@ -290,6 +310,14 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
290 | 310 | ||
291 | /* if no recipient cl was found we assume corrupted header */ | 311 | /* if no recipient cl was found we assume corrupted header */ |
292 | if (&cl->link == &dev->file_list) { | 312 | if (&cl->link == &dev->file_list) { |
313 | /* A message for not connected fixed address clients | ||
314 | * should be silently discarded | ||
315 | */ | ||
316 | if (hdr_is_fixed(mei_hdr)) { | ||
317 | mei_irq_discard_msg(dev, mei_hdr); | ||
318 | ret = 0; | ||
319 | goto reset_slots; | ||
320 | } | ||
293 | dev_err(dev->dev, "no destination client found 0x%08X\n", | 321 | dev_err(dev->dev, "no destination client found 0x%08X\n", |
294 | dev->rd_msg_hdr); | 322 | dev->rd_msg_hdr); |
295 | ret = -EBADMSG; | 323 | ret = -EBADMSG; |
@@ -360,21 +388,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) | |||
360 | list_move_tail(&cb->list, &cmpl_list->list); | 388 | list_move_tail(&cb->list, &cmpl_list->list); |
361 | } | 389 | } |
362 | 390 | ||
363 | if (dev->wd_state == MEI_WD_STOPPING) { | ||
364 | dev->wd_state = MEI_WD_IDLE; | ||
365 | wake_up(&dev->wait_stop_wd); | ||
366 | } | ||
367 | |||
368 | if (mei_cl_is_connected(&dev->wd_cl)) { | ||
369 | if (dev->wd_pending && | ||
370 | mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { | ||
371 | ret = mei_wd_send(dev); | ||
372 | if (ret) | ||
373 | return ret; | ||
374 | dev->wd_pending = false; | ||
375 | } | ||
376 | } | ||
377 | |||
378 | /* complete control write list CB */ | 391 | /* complete control write list CB */ |
379 | dev_dbg(dev->dev, "complete control write list cb.\n"); | 392 | dev_dbg(dev->dev, "complete control write list cb.\n"); |
380 | list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { | 393 | list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { |
@@ -462,7 +475,6 @@ static void mei_connect_timeout(struct mei_cl *cl) | |||
462 | */ | 475 | */ |
463 | void mei_timer(struct work_struct *work) | 476 | void mei_timer(struct work_struct *work) |
464 | { | 477 | { |
465 | unsigned long timeout; | ||
466 | struct mei_cl *cl; | 478 | struct mei_cl *cl; |
467 | 479 | ||
468 | struct mei_device *dev = container_of(work, | 480 | struct mei_device *dev = container_of(work, |
@@ -508,45 +520,15 @@ void mei_timer(struct work_struct *work) | |||
508 | mei_reset(dev); | 520 | mei_reset(dev); |
509 | dev->iamthif_canceled = false; | 521 | dev->iamthif_canceled = false; |
510 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 522 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
511 | dev->iamthif_timer = 0; | ||
512 | 523 | ||
513 | mei_io_cb_free(dev->iamthif_current_cb); | 524 | mei_io_cb_free(dev->iamthif_current_cb); |
514 | dev->iamthif_current_cb = NULL; | 525 | dev->iamthif_current_cb = NULL; |
515 | 526 | ||
516 | dev->iamthif_file_object = NULL; | 527 | dev->iamthif_fp = NULL; |
517 | mei_amthif_run_next_cmd(dev); | 528 | mei_amthif_run_next_cmd(dev); |
518 | } | 529 | } |
519 | } | 530 | } |
520 | 531 | ||
521 | if (dev->iamthif_timer) { | ||
522 | |||
523 | timeout = dev->iamthif_timer + | ||
524 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); | ||
525 | |||
526 | dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n", | ||
527 | dev->iamthif_timer); | ||
528 | dev_dbg(dev->dev, "timeout = %ld\n", timeout); | ||
529 | dev_dbg(dev->dev, "jiffies = %ld\n", jiffies); | ||
530 | if (time_after(jiffies, timeout)) { | ||
531 | /* | ||
532 | * User didn't read the AMTHI data on time (15sec) | ||
533 | * freeing AMTHI for other requests | ||
534 | */ | ||
535 | |||
536 | dev_dbg(dev->dev, "freeing AMTHI for other requests\n"); | ||
537 | |||
538 | mei_io_list_flush(&dev->amthif_rd_complete_list, | ||
539 | &dev->iamthif_cl); | ||
540 | mei_io_cb_free(dev->iamthif_current_cb); | ||
541 | dev->iamthif_current_cb = NULL; | ||
542 | |||
543 | dev->iamthif_file_object->private_data = NULL; | ||
544 | dev->iamthif_file_object = NULL; | ||
545 | dev->iamthif_timer = 0; | ||
546 | mei_amthif_run_next_cmd(dev); | ||
547 | |||
548 | } | ||
549 | } | ||
550 | out: | 532 | out: |
551 | if (dev->dev_state != MEI_DEV_DISABLED) | 533 | if (dev->dev_state != MEI_DEV_DISABLED) |
552 | schedule_delayed_work(&dev->timer_work, 2 * HZ); | 534 | schedule_delayed_work(&dev->timer_work, 2 * HZ); |
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 80f9afcb1382..52635b063873 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c | |||
@@ -65,7 +65,7 @@ static int mei_open(struct inode *inode, struct file *file) | |||
65 | goto err_unlock; | 65 | goto err_unlock; |
66 | } | 66 | } |
67 | 67 | ||
68 | cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY); | 68 | cl = mei_cl_alloc_linked(dev); |
69 | if (IS_ERR(cl)) { | 69 | if (IS_ERR(cl)) { |
70 | err = PTR_ERR(cl); | 70 | err = PTR_ERR(cl); |
71 | goto err_unlock; | 71 | goto err_unlock; |
@@ -159,27 +159,22 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, | |||
159 | goto out; | 159 | goto out; |
160 | } | 160 | } |
161 | 161 | ||
162 | if (ubuf == NULL) { | ||
163 | rets = -EMSGSIZE; | ||
164 | goto out; | ||
165 | } | ||
166 | |||
162 | if (cl == &dev->iamthif_cl) { | 167 | if (cl == &dev->iamthif_cl) { |
163 | rets = mei_amthif_read(dev, file, ubuf, length, offset); | 168 | rets = mei_amthif_read(dev, file, ubuf, length, offset); |
164 | goto out; | 169 | goto out; |
165 | } | 170 | } |
166 | 171 | ||
167 | cb = mei_cl_read_cb(cl, file); | 172 | cb = mei_cl_read_cb(cl, file); |
168 | if (cb) { | 173 | if (cb) |
169 | /* read what left */ | 174 | goto copy_buffer; |
170 | if (cb->buf_idx > *offset) | 175 | |
171 | goto copy_buffer; | 176 | if (*offset > 0) |
172 | /* offset is beyond buf_idx we have no more data return 0 */ | ||
173 | if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { | ||
174 | rets = 0; | ||
175 | goto free; | ||
176 | } | ||
177 | /* Offset needs to be cleaned for contiguous reads*/ | ||
178 | if (cb->buf_idx == 0 && *offset > 0) | ||
179 | *offset = 0; | ||
180 | } else if (*offset > 0) { | ||
181 | *offset = 0; | 177 | *offset = 0; |
182 | } | ||
183 | 178 | ||
184 | err = mei_cl_read_start(cl, length, file); | 179 | err = mei_cl_read_start(cl, length, file); |
185 | if (err && err != -EBUSY) { | 180 | if (err && err != -EBUSY) { |
@@ -214,11 +209,6 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, | |||
214 | 209 | ||
215 | cb = mei_cl_read_cb(cl, file); | 210 | cb = mei_cl_read_cb(cl, file); |
216 | if (!cb) { | 211 | if (!cb) { |
217 | if (mei_cl_is_fixed_address(cl) && dev->allow_fixed_address) { | ||
218 | cb = mei_cl_read_cb(cl, NULL); | ||
219 | if (cb) | ||
220 | goto copy_buffer; | ||
221 | } | ||
222 | rets = 0; | 212 | rets = 0; |
223 | goto out; | 213 | goto out; |
224 | } | 214 | } |
@@ -231,10 +221,10 @@ copy_buffer: | |||
231 | goto free; | 221 | goto free; |
232 | } | 222 | } |
233 | 223 | ||
234 | cl_dbg(dev, cl, "buf.size = %d buf.idx = %ld\n", | 224 | cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n", |
235 | cb->buf.size, cb->buf_idx); | 225 | cb->buf.size, cb->buf_idx, *offset); |
236 | if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) { | 226 | if (*offset >= cb->buf_idx) { |
237 | rets = -EMSGSIZE; | 227 | rets = 0; |
238 | goto free; | 228 | goto free; |
239 | } | 229 | } |
240 | 230 | ||
@@ -250,11 +240,13 @@ copy_buffer: | |||
250 | 240 | ||
251 | rets = length; | 241 | rets = length; |
252 | *offset += length; | 242 | *offset += length; |
253 | if ((unsigned long)*offset < cb->buf_idx) | 243 | /* not all data was read, keep the cb */ |
244 | if (*offset < cb->buf_idx) | ||
254 | goto out; | 245 | goto out; |
255 | 246 | ||
256 | free: | 247 | free: |
257 | mei_io_cb_free(cb); | 248 | mei_io_cb_free(cb); |
249 | *offset = 0; | ||
258 | 250 | ||
259 | out: | 251 | out: |
260 | cl_dbg(dev, cl, "end mei read rets = %d\n", rets); | 252 | cl_dbg(dev, cl, "end mei read rets = %d\n", rets); |
@@ -275,9 +267,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, | |||
275 | size_t length, loff_t *offset) | 267 | size_t length, loff_t *offset) |
276 | { | 268 | { |
277 | struct mei_cl *cl = file->private_data; | 269 | struct mei_cl *cl = file->private_data; |
278 | struct mei_cl_cb *write_cb = NULL; | 270 | struct mei_cl_cb *cb; |
279 | struct mei_device *dev; | 271 | struct mei_device *dev; |
280 | unsigned long timeout = 0; | ||
281 | int rets; | 272 | int rets; |
282 | 273 | ||
283 | if (WARN_ON(!cl || !cl->dev)) | 274 | if (WARN_ON(!cl || !cl->dev)) |
@@ -313,52 +304,31 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, | |||
313 | goto out; | 304 | goto out; |
314 | } | 305 | } |
315 | 306 | ||
316 | if (cl == &dev->iamthif_cl) { | ||
317 | write_cb = mei_amthif_find_read_list_entry(dev, file); | ||
318 | |||
319 | if (write_cb) { | ||
320 | timeout = write_cb->read_time + | ||
321 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); | ||
322 | |||
323 | if (time_after(jiffies, timeout)) { | ||
324 | *offset = 0; | ||
325 | mei_io_cb_free(write_cb); | ||
326 | write_cb = NULL; | ||
327 | } | ||
328 | } | ||
329 | } | ||
330 | |||
331 | *offset = 0; | 307 | *offset = 0; |
332 | write_cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); | 308 | cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); |
333 | if (!write_cb) { | 309 | if (!cb) { |
334 | rets = -ENOMEM; | 310 | rets = -ENOMEM; |
335 | goto out; | 311 | goto out; |
336 | } | 312 | } |
337 | 313 | ||
338 | rets = copy_from_user(write_cb->buf.data, ubuf, length); | 314 | rets = copy_from_user(cb->buf.data, ubuf, length); |
339 | if (rets) { | 315 | if (rets) { |
340 | dev_dbg(dev->dev, "failed to copy data from userland\n"); | 316 | dev_dbg(dev->dev, "failed to copy data from userland\n"); |
341 | rets = -EFAULT; | 317 | rets = -EFAULT; |
318 | mei_io_cb_free(cb); | ||
342 | goto out; | 319 | goto out; |
343 | } | 320 | } |
344 | 321 | ||
345 | if (cl == &dev->iamthif_cl) { | 322 | if (cl == &dev->iamthif_cl) { |
346 | rets = mei_amthif_write(cl, write_cb); | 323 | rets = mei_amthif_write(cl, cb); |
347 | 324 | if (!rets) | |
348 | if (rets) { | 325 | rets = length; |
349 | dev_err(dev->dev, | 326 | goto out; |
350 | "amthif write failed with status = %d\n", rets); | ||
351 | goto out; | ||
352 | } | ||
353 | mutex_unlock(&dev->device_lock); | ||
354 | return length; | ||
355 | } | 327 | } |
356 | 328 | ||
357 | rets = mei_cl_write(cl, write_cb, false); | 329 | rets = mei_cl_write(cl, cb, false); |
358 | out: | 330 | out: |
359 | mutex_unlock(&dev->device_lock); | 331 | mutex_unlock(&dev->device_lock); |
360 | if (rets < 0) | ||
361 | mei_io_cb_free(write_cb); | ||
362 | return rets; | 332 | return rets; |
363 | } | 333 | } |
364 | 334 | ||
@@ -393,12 +363,22 @@ static int mei_ioctl_connect_client(struct file *file, | |||
393 | 363 | ||
394 | /* find ME client we're trying to connect to */ | 364 | /* find ME client we're trying to connect to */ |
395 | me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid); | 365 | me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid); |
396 | if (!me_cl || | 366 | if (!me_cl) { |
397 | (me_cl->props.fixed_address && !dev->allow_fixed_address)) { | ||
398 | dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", | 367 | dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", |
399 | &data->in_client_uuid); | 368 | &data->in_client_uuid); |
400 | mei_me_cl_put(me_cl); | 369 | rets = -ENOTTY; |
401 | return -ENOTTY; | 370 | goto end; |
371 | } | ||
372 | |||
373 | if (me_cl->props.fixed_address) { | ||
374 | bool forbidden = dev->override_fixed_address ? | ||
375 | !dev->allow_fixed_address : !dev->hbm_f_fa_supported; | ||
376 | if (forbidden) { | ||
377 | dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n", | ||
378 | &data->in_client_uuid); | ||
379 | rets = -ENOTTY; | ||
380 | goto end; | ||
381 | } | ||
402 | } | 382 | } |
403 | 383 | ||
404 | dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", | 384 | dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", |
@@ -454,7 +434,7 @@ end: | |||
454 | * | 434 | * |
455 | * Return: 0 on success , <0 on error | 435 | * Return: 0 on success , <0 on error |
456 | */ | 436 | */ |
457 | static int mei_ioctl_client_notify_request(struct file *file, u32 request) | 437 | static int mei_ioctl_client_notify_request(const struct file *file, u32 request) |
458 | { | 438 | { |
459 | struct mei_cl *cl = file->private_data; | 439 | struct mei_cl *cl = file->private_data; |
460 | 440 | ||
@@ -473,7 +453,7 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request) | |||
473 | * | 453 | * |
474 | * Return: 0 on success , <0 on error | 454 | * Return: 0 on success , <0 on error |
475 | */ | 455 | */ |
476 | static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get) | 456 | static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get) |
477 | { | 457 | { |
478 | struct mei_cl *cl = file->private_data; | 458 | struct mei_cl *cl = file->private_data; |
479 | bool notify_ev; | 459 | bool notify_ev; |
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c index 388efb519138..e19e6acb191b 100644 --- a/drivers/misc/mei/mei-trace.c +++ b/drivers/misc/mei/mei-trace.c | |||
@@ -22,4 +22,6 @@ | |||
22 | 22 | ||
23 | EXPORT_TRACEPOINT_SYMBOL(mei_reg_read); | 23 | EXPORT_TRACEPOINT_SYMBOL(mei_reg_read); |
24 | EXPORT_TRACEPOINT_SYMBOL(mei_reg_write); | 24 | EXPORT_TRACEPOINT_SYMBOL(mei_reg_write); |
25 | EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read); | ||
26 | EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write); | ||
25 | #endif /* __CHECKER__ */ | 27 | #endif /* __CHECKER__ */ |
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h index 47e1bc6551d4..7d2d5d4a1624 100644 --- a/drivers/misc/mei/mei-trace.h +++ b/drivers/misc/mei/mei-trace.h | |||
@@ -60,7 +60,45 @@ TRACE_EVENT(mei_reg_write, | |||
60 | __entry->offs = offs; | 60 | __entry->offs = offs; |
61 | __entry->val = val; | 61 | __entry->val = val; |
62 | ), | 62 | ), |
63 | TP_printk("[%s] write %s[%#x] = %#x)", | 63 | TP_printk("[%s] write %s[%#x] = %#x", |
64 | __get_str(dev), __entry->reg, __entry->offs, __entry->val) | ||
65 | ); | ||
66 | |||
67 | TRACE_EVENT(mei_pci_cfg_read, | ||
68 | TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val), | ||
69 | TP_ARGS(dev, reg, offs, val), | ||
70 | TP_STRUCT__entry( | ||
71 | __string(dev, dev_name(dev)) | ||
72 | __field(const char *, reg) | ||
73 | __field(u32, offs) | ||
74 | __field(u32, val) | ||
75 | ), | ||
76 | TP_fast_assign( | ||
77 | __assign_str(dev, dev_name(dev)) | ||
78 | __entry->reg = reg; | ||
79 | __entry->offs = offs; | ||
80 | __entry->val = val; | ||
81 | ), | ||
82 | TP_printk("[%s] pci cfg read %s:[%#x] = %#x", | ||
83 | __get_str(dev), __entry->reg, __entry->offs, __entry->val) | ||
84 | ); | ||
85 | |||
86 | TRACE_EVENT(mei_pci_cfg_write, | ||
87 | TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val), | ||
88 | TP_ARGS(dev, reg, offs, val), | ||
89 | TP_STRUCT__entry( | ||
90 | __string(dev, dev_name(dev)) | ||
91 | __field(const char *, reg) | ||
92 | __field(u32, offs) | ||
93 | __field(u32, val) | ||
94 | ), | ||
95 | TP_fast_assign( | ||
96 | __assign_str(dev, dev_name(dev)) | ||
97 | __entry->reg = reg; | ||
98 | __entry->offs = offs; | ||
99 | __entry->val = val; | ||
100 | ), | ||
101 | TP_printk("[%s] pci cfg write %s[%#x] = %#x", | ||
64 | __get_str(dev), __entry->reg, __entry->offs, __entry->val) | 102 | __get_str(dev), __entry->reg, __entry->offs, __entry->val) |
65 | ); | 103 | ); |
66 | 104 | ||
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 4250555d5e72..db78e6d99456 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h | |||
@@ -18,7 +18,7 @@ | |||
18 | #define _MEI_DEV_H_ | 18 | #define _MEI_DEV_H_ |
19 | 19 | ||
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/watchdog.h> | 21 | #include <linux/cdev.h> |
22 | #include <linux/poll.h> | 22 | #include <linux/poll.h> |
23 | #include <linux/mei.h> | 23 | #include <linux/mei.h> |
24 | #include <linux/mei_cl_bus.h> | 24 | #include <linux/mei_cl_bus.h> |
@@ -26,33 +26,13 @@ | |||
26 | #include "hw.h" | 26 | #include "hw.h" |
27 | #include "hbm.h" | 27 | #include "hbm.h" |
28 | 28 | ||
29 | /* | ||
30 | * watch dog definition | ||
31 | */ | ||
32 | #define MEI_WD_HDR_SIZE 4 | ||
33 | #define MEI_WD_STOP_MSG_SIZE MEI_WD_HDR_SIZE | ||
34 | #define MEI_WD_START_MSG_SIZE (MEI_WD_HDR_SIZE + 16) | ||
35 | |||
36 | #define MEI_WD_DEFAULT_TIMEOUT 120 /* seconds */ | ||
37 | #define MEI_WD_MIN_TIMEOUT 120 /* seconds */ | ||
38 | #define MEI_WD_MAX_TIMEOUT 65535 /* seconds */ | ||
39 | |||
40 | #define MEI_WD_STOP_TIMEOUT 10 /* msecs */ | ||
41 | |||
42 | #define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0) | ||
43 | |||
44 | #define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32)) | ||
45 | |||
46 | 29 | ||
47 | /* | 30 | /* |
48 | * AMTHI Client UUID | 31 | * AMTHI Client UUID |
49 | */ | 32 | */ |
50 | extern const uuid_le mei_amthif_guid; | 33 | extern const uuid_le mei_amthif_guid; |
51 | 34 | ||
52 | /* | 35 | #define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32)) |
53 | * Watchdog Client UUID | ||
54 | */ | ||
55 | extern const uuid_le mei_wd_guid; | ||
56 | 36 | ||
57 | /* | 37 | /* |
58 | * Number of Maximum MEI Clients | 38 | * Number of Maximum MEI Clients |
@@ -73,15 +53,6 @@ extern const uuid_le mei_wd_guid; | |||
73 | */ | 53 | */ |
74 | #define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) | 54 | #define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) |
75 | 55 | ||
76 | /* | ||
77 | * Internal Clients Number | ||
78 | */ | ||
79 | #define MEI_HOST_CLIENT_ID_ANY (-1) | ||
80 | #define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */ | ||
81 | #define MEI_WD_HOST_CLIENT_ID 1 | ||
82 | #define MEI_IAMTHIF_HOST_CLIENT_ID 2 | ||
83 | |||
84 | |||
85 | /* File state */ | 56 | /* File state */ |
86 | enum file_state { | 57 | enum file_state { |
87 | MEI_FILE_INITIALIZING = 0, | 58 | MEI_FILE_INITIALIZING = 0, |
@@ -123,12 +94,6 @@ enum mei_file_transaction_states { | |||
123 | MEI_READ_COMPLETE | 94 | MEI_READ_COMPLETE |
124 | }; | 95 | }; |
125 | 96 | ||
126 | enum mei_wd_states { | ||
127 | MEI_WD_IDLE, | ||
128 | MEI_WD_RUNNING, | ||
129 | MEI_WD_STOPPING, | ||
130 | }; | ||
131 | |||
132 | /** | 97 | /** |
133 | * enum mei_cb_file_ops - file operation associated with the callback | 98 | * enum mei_cb_file_ops - file operation associated with the callback |
134 | * @MEI_FOP_READ: read | 99 | * @MEI_FOP_READ: read |
@@ -153,7 +118,7 @@ enum mei_cb_file_ops { | |||
153 | * Intel MEI message data struct | 118 | * Intel MEI message data struct |
154 | */ | 119 | */ |
155 | struct mei_msg_data { | 120 | struct mei_msg_data { |
156 | u32 size; | 121 | size_t size; |
157 | unsigned char *data; | 122 | unsigned char *data; |
158 | }; | 123 | }; |
159 | 124 | ||
@@ -206,8 +171,7 @@ struct mei_cl; | |||
206 | * @fop_type: file operation type | 171 | * @fop_type: file operation type |
207 | * @buf: buffer for data associated with the callback | 172 | * @buf: buffer for data associated with the callback |
208 | * @buf_idx: last read index | 173 | * @buf_idx: last read index |
209 | * @read_time: last read operation time stamp (iamthif) | 174 | * @fp: pointer to file structure |
210 | * @file_object: pointer to file structure | ||
211 | * @status: io status of the cb | 175 | * @status: io status of the cb |
212 | * @internal: communication between driver and FW flag | 176 | * @internal: communication between driver and FW flag |
213 | * @completed: the transfer or reception has completed | 177 | * @completed: the transfer or reception has completed |
@@ -217,9 +181,8 @@ struct mei_cl_cb { | |||
217 | struct mei_cl *cl; | 181 | struct mei_cl *cl; |
218 | enum mei_cb_file_ops fop_type; | 182 | enum mei_cb_file_ops fop_type; |
219 | struct mei_msg_data buf; | 183 | struct mei_msg_data buf; |
220 | unsigned long buf_idx; | 184 | size_t buf_idx; |
221 | unsigned long read_time; | 185 | const struct file *fp; |
222 | struct file *file_object; | ||
223 | int status; | 186 | int status; |
224 | u32 internal:1; | 187 | u32 internal:1; |
225 | u32 completed:1; | 188 | u32 completed:1; |
@@ -341,12 +304,13 @@ struct mei_hw_ops { | |||
341 | 304 | ||
342 | /* MEI bus API*/ | 305 | /* MEI bus API*/ |
343 | void mei_cl_bus_rescan(struct mei_device *bus); | 306 | void mei_cl_bus_rescan(struct mei_device *bus); |
307 | void mei_cl_bus_rescan_work(struct work_struct *work); | ||
344 | void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); | 308 | void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); |
345 | ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | 309 | ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, |
346 | bool blocking); | 310 | bool blocking); |
347 | ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); | 311 | ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); |
348 | void mei_cl_bus_rx_event(struct mei_cl *cl); | 312 | bool mei_cl_bus_rx_event(struct mei_cl *cl); |
349 | void mei_cl_bus_notify_event(struct mei_cl *cl); | 313 | bool mei_cl_bus_notify_event(struct mei_cl *cl); |
350 | void mei_cl_bus_remove_devices(struct mei_device *bus); | 314 | void mei_cl_bus_remove_devices(struct mei_device *bus); |
351 | int mei_cl_bus_init(void); | 315 | int mei_cl_bus_init(void); |
352 | void mei_cl_bus_exit(void); | 316 | void mei_cl_bus_exit(void); |
@@ -404,7 +368,6 @@ const char *mei_pg_state_str(enum mei_pg_state state); | |||
404 | * @wait_hw_ready : wait queue for receive HW ready message form FW | 368 | * @wait_hw_ready : wait queue for receive HW ready message form FW |
405 | * @wait_pg : wait queue for receive PG message from FW | 369 | * @wait_pg : wait queue for receive PG message from FW |
406 | * @wait_hbm_start : wait queue for receive HBM start message from FW | 370 | * @wait_hbm_start : wait queue for receive HBM start message from FW |
407 | * @wait_stop_wd : wait queue for receive WD stop message from FW | ||
408 | * | 371 | * |
409 | * @reset_count : number of consecutive resets | 372 | * @reset_count : number of consecutive resets |
410 | * @dev_state : device state | 373 | * @dev_state : device state |
@@ -426,6 +389,8 @@ const char *mei_pg_state_str(enum mei_pg_state state); | |||
426 | * @hbm_f_dc_supported : hbm feature dynamic clients | 389 | * @hbm_f_dc_supported : hbm feature dynamic clients |
427 | * @hbm_f_dot_supported : hbm feature disconnect on timeout | 390 | * @hbm_f_dot_supported : hbm feature disconnect on timeout |
428 | * @hbm_f_ev_supported : hbm feature event notification | 391 | * @hbm_f_ev_supported : hbm feature event notification |
392 | * @hbm_f_fa_supported : hbm feature fixed address client | ||
393 | * @hbm_f_ie_supported : hbm feature immediate reply to enum request | ||
429 | * | 394 | * |
430 | * @me_clients_rwsem: rw lock over me_clients list | 395 | * @me_clients_rwsem: rw lock over me_clients list |
431 | * @me_clients : list of FW clients | 396 | * @me_clients : list of FW clients |
@@ -434,26 +399,19 @@ const char *mei_pg_state_str(enum mei_pg_state state); | |||
434 | * @me_client_index : last FW client index in enumeration | 399 | * @me_client_index : last FW client index in enumeration |
435 | * | 400 | * |
436 | * @allow_fixed_address: allow user space to connect a fixed client | 401 | * @allow_fixed_address: allow user space to connect a fixed client |
437 | * | 402 | * @override_fixed_address: force allow fixed address behavior |
438 | * @wd_cl : watchdog client | ||
439 | * @wd_state : watchdog client state | ||
440 | * @wd_pending : watchdog command is pending | ||
441 | * @wd_timeout : watchdog expiration timeout | ||
442 | * @wd_data : watchdog message buffer | ||
443 | * | 403 | * |
444 | * @amthif_cmd_list : amthif list for cmd waiting | 404 | * @amthif_cmd_list : amthif list for cmd waiting |
445 | * @amthif_rd_complete_list : amthif list for reading completed cmd data | 405 | * @iamthif_fp : file for current amthif operation |
446 | * @iamthif_file_object : file for current amthif operation | ||
447 | * @iamthif_cl : amthif host client | 406 | * @iamthif_cl : amthif host client |
448 | * @iamthif_current_cb : amthif current operation callback | 407 | * @iamthif_current_cb : amthif current operation callback |
449 | * @iamthif_open_count : number of opened amthif connections | 408 | * @iamthif_open_count : number of opened amthif connections |
450 | * @iamthif_timer : time stamp of current amthif command completion | ||
451 | * @iamthif_stall_timer : timer to detect amthif hang | 409 | * @iamthif_stall_timer : timer to detect amthif hang |
452 | * @iamthif_state : amthif processor state | 410 | * @iamthif_state : amthif processor state |
453 | * @iamthif_canceled : current amthif command is canceled | 411 | * @iamthif_canceled : current amthif command is canceled |
454 | * | 412 | * |
455 | * @init_work : work item for the device init | ||
456 | * @reset_work : work item for the device reset | 413 | * @reset_work : work item for the device reset |
414 | * @bus_rescan_work : work item for the bus rescan | ||
457 | * | 415 | * |
458 | * @device_list : mei client bus list | 416 | * @device_list : mei client bus list |
459 | * @cl_bus_lock : client bus list lock | 417 | * @cl_bus_lock : client bus list lock |
@@ -486,7 +444,6 @@ struct mei_device { | |||
486 | wait_queue_head_t wait_hw_ready; | 444 | wait_queue_head_t wait_hw_ready; |
487 | wait_queue_head_t wait_pg; | 445 | wait_queue_head_t wait_pg; |
488 | wait_queue_head_t wait_hbm_start; | 446 | wait_queue_head_t wait_hbm_start; |
489 | wait_queue_head_t wait_stop_wd; | ||
490 | 447 | ||
491 | /* | 448 | /* |
492 | * mei device states | 449 | * mei device states |
@@ -522,6 +479,8 @@ struct mei_device { | |||
522 | unsigned int hbm_f_dc_supported:1; | 479 | unsigned int hbm_f_dc_supported:1; |
523 | unsigned int hbm_f_dot_supported:1; | 480 | unsigned int hbm_f_dot_supported:1; |
524 | unsigned int hbm_f_ev_supported:1; | 481 | unsigned int hbm_f_ev_supported:1; |
482 | unsigned int hbm_f_fa_supported:1; | ||
483 | unsigned int hbm_f_ie_supported:1; | ||
525 | 484 | ||
526 | struct rw_semaphore me_clients_rwsem; | 485 | struct rw_semaphore me_clients_rwsem; |
527 | struct list_head me_clients; | 486 | struct list_head me_clients; |
@@ -530,29 +489,21 @@ struct mei_device { | |||
530 | unsigned long me_client_index; | 489 | unsigned long me_client_index; |
531 | 490 | ||
532 | bool allow_fixed_address; | 491 | bool allow_fixed_address; |
533 | 492 | bool override_fixed_address; | |
534 | struct mei_cl wd_cl; | ||
535 | enum mei_wd_states wd_state; | ||
536 | bool wd_pending; | ||
537 | u16 wd_timeout; | ||
538 | unsigned char wd_data[MEI_WD_START_MSG_SIZE]; | ||
539 | |||
540 | 493 | ||
541 | /* amthif list for cmd waiting */ | 494 | /* amthif list for cmd waiting */ |
542 | struct mei_cl_cb amthif_cmd_list; | 495 | struct mei_cl_cb amthif_cmd_list; |
543 | /* driver managed amthif list for reading completed amthif cmd data */ | 496 | /* driver managed amthif list for reading completed amthif cmd data */ |
544 | struct mei_cl_cb amthif_rd_complete_list; | 497 | const struct file *iamthif_fp; |
545 | struct file *iamthif_file_object; | ||
546 | struct mei_cl iamthif_cl; | 498 | struct mei_cl iamthif_cl; |
547 | struct mei_cl_cb *iamthif_current_cb; | 499 | struct mei_cl_cb *iamthif_current_cb; |
548 | long iamthif_open_count; | 500 | long iamthif_open_count; |
549 | unsigned long iamthif_timer; | ||
550 | u32 iamthif_stall_timer; | 501 | u32 iamthif_stall_timer; |
551 | enum iamthif_states iamthif_state; | 502 | enum iamthif_states iamthif_state; |
552 | bool iamthif_canceled; | 503 | bool iamthif_canceled; |
553 | 504 | ||
554 | struct work_struct init_work; | ||
555 | struct work_struct reset_work; | 505 | struct work_struct reset_work; |
506 | struct work_struct bus_rescan_work; | ||
556 | 507 | ||
557 | /* List of bus devices */ | 508 | /* List of bus devices */ |
558 | struct list_head device_list; | 509 | struct list_head device_list; |
@@ -635,47 +586,18 @@ unsigned int mei_amthif_poll(struct mei_device *dev, | |||
635 | 586 | ||
636 | int mei_amthif_release(struct mei_device *dev, struct file *file); | 587 | int mei_amthif_release(struct mei_device *dev, struct file *file); |
637 | 588 | ||
638 | struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, | ||
639 | struct file *file); | ||
640 | |||
641 | int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb); | 589 | int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb); |
642 | int mei_amthif_run_next_cmd(struct mei_device *dev); | 590 | int mei_amthif_run_next_cmd(struct mei_device *dev); |
643 | int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | 591 | int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, |
644 | struct mei_cl_cb *cmpl_list); | 592 | struct mei_cl_cb *cmpl_list); |
645 | 593 | ||
646 | void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb); | 594 | void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb); |
647 | int mei_amthif_irq_read_msg(struct mei_cl *cl, | 595 | int mei_amthif_irq_read_msg(struct mei_cl *cl, |
648 | struct mei_msg_hdr *mei_hdr, | 596 | struct mei_msg_hdr *mei_hdr, |
649 | struct mei_cl_cb *complete_list); | 597 | struct mei_cl_cb *complete_list); |
650 | int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); | 598 | int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); |
651 | 599 | ||
652 | /* | 600 | /* |
653 | * NFC functions | ||
654 | */ | ||
655 | int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl); | ||
656 | void mei_nfc_host_exit(struct mei_device *dev); | ||
657 | |||
658 | /* | ||
659 | * NFC Client UUID | ||
660 | */ | ||
661 | extern const uuid_le mei_nfc_guid; | ||
662 | |||
663 | int mei_wd_send(struct mei_device *dev); | ||
664 | int mei_wd_stop(struct mei_device *dev); | ||
665 | int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl); | ||
666 | /* | ||
667 | * mei_watchdog_register - Registering watchdog interface | ||
668 | * once we got connection to the WD Client | ||
669 | * @dev: mei device | ||
670 | */ | ||
671 | int mei_watchdog_register(struct mei_device *dev); | ||
672 | /* | ||
673 | * mei_watchdog_unregister - Unregistering watchdog interface | ||
674 | * @dev: mei device | ||
675 | */ | ||
676 | void mei_watchdog_unregister(struct mei_device *dev); | ||
677 | |||
678 | /* | ||
679 | * Register Access Function | 601 | * Register Access Function |
680 | */ | 602 | */ |
681 | 603 | ||
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 75fc9c688df8..64e64da6da44 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -88,6 +88,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
88 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)}, | 88 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)}, |
89 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)}, | 89 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)}, |
90 | 90 | ||
91 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, | ||
92 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)}, | ||
93 | |||
91 | /* required last entry */ | 94 | /* required last entry */ |
92 | {0, } | 95 | {0, } |
93 | }; | 96 | }; |
@@ -210,7 +213,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
210 | 213 | ||
211 | err = mei_register(dev, &pdev->dev); | 214 | err = mei_register(dev, &pdev->dev); |
212 | if (err) | 215 | if (err) |
213 | goto release_irq; | 216 | goto stop; |
214 | 217 | ||
215 | pci_set_drvdata(pdev, dev); | 218 | pci_set_drvdata(pdev, dev); |
216 | 219 | ||
@@ -231,6 +234,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
231 | 234 | ||
232 | return 0; | 235 | return 0; |
233 | 236 | ||
237 | stop: | ||
238 | mei_stop(dev); | ||
234 | release_irq: | 239 | release_irq: |
235 | mei_cancel_work(dev); | 240 | mei_cancel_work(dev); |
236 | mei_disable_interrupts(dev); | 241 | mei_disable_interrupts(dev); |
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index 71f8a7475717..30cc30683c07 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c | |||
@@ -154,7 +154,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
154 | 154 | ||
155 | err = mei_register(dev, &pdev->dev); | 155 | err = mei_register(dev, &pdev->dev); |
156 | if (err) | 156 | if (err) |
157 | goto release_irq; | 157 | goto stop; |
158 | 158 | ||
159 | pci_set_drvdata(pdev, dev); | 159 | pci_set_drvdata(pdev, dev); |
160 | 160 | ||
@@ -170,6 +170,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
170 | 170 | ||
171 | return 0; | 171 | return 0; |
172 | 172 | ||
173 | stop: | ||
174 | mei_stop(dev); | ||
173 | release_irq: | 175 | release_irq: |
174 | 176 | ||
175 | mei_cancel_work(dev); | 177 | mei_cancel_work(dev); |
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c deleted file mode 100644 index b346638833b0..000000000000 --- a/drivers/misc/mei/wd.c +++ /dev/null | |||
@@ -1,391 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Intel Management Engine Interface (Intel MEI) Linux driver | ||
4 | * Copyright (c) 2003-2012, Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/moduleparam.h> | ||
19 | #include <linux/device.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/watchdog.h> | ||
22 | |||
23 | #include <linux/mei.h> | ||
24 | |||
25 | #include "mei_dev.h" | ||
26 | #include "hbm.h" | ||
27 | #include "client.h" | ||
28 | |||
29 | static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 }; | ||
30 | static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 }; | ||
31 | |||
32 | /* | ||
33 | * AMT Watchdog Device | ||
34 | */ | ||
35 | #define INTEL_AMT_WATCHDOG_ID "INTCAMT" | ||
36 | |||
37 | /* UUIDs for AMT F/W clients */ | ||
38 | const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89, | ||
39 | 0x9D, 0xA9, 0x15, 0x14, 0xCB, | ||
40 | 0x32, 0xAB); | ||
41 | |||
42 | static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout) | ||
43 | { | ||
44 | dev_dbg(dev->dev, "wd: set timeout=%d.\n", timeout); | ||
45 | memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE); | ||
46 | memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16)); | ||
47 | } | ||
48 | |||
49 | /** | ||
50 | * mei_wd_host_init - connect to the watchdog client | ||
51 | * | ||
52 | * @dev: the device structure | ||
53 | * @me_cl: me client | ||
54 | * | ||
55 | * Return: -ENOTTY if wd client cannot be found | ||
56 | * -EIO if write has failed | ||
57 | * 0 on success | ||
58 | */ | ||
59 | int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl) | ||
60 | { | ||
61 | struct mei_cl *cl = &dev->wd_cl; | ||
62 | int ret; | ||
63 | |||
64 | mei_cl_init(cl, dev); | ||
65 | |||
66 | dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT; | ||
67 | dev->wd_state = MEI_WD_IDLE; | ||
68 | |||
69 | ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID); | ||
70 | if (ret < 0) { | ||
71 | dev_info(dev->dev, "wd: failed link client\n"); | ||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | ret = mei_cl_connect(cl, me_cl, NULL); | ||
76 | if (ret) { | ||
77 | dev_err(dev->dev, "wd: failed to connect = %d\n", ret); | ||
78 | mei_cl_unlink(cl); | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | ret = mei_watchdog_register(dev); | ||
83 | if (ret) { | ||
84 | mei_cl_disconnect(cl); | ||
85 | mei_cl_unlink(cl); | ||
86 | } | ||
87 | return ret; | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * mei_wd_send - sends watch dog message to fw. | ||
92 | * | ||
93 | * @dev: the device structure | ||
94 | * | ||
95 | * Return: 0 if success, | ||
96 | * -EIO when message send fails | ||
97 | * -EINVAL when invalid message is to be sent | ||
98 | * -ENODEV on flow control failure | ||
99 | */ | ||
100 | int mei_wd_send(struct mei_device *dev) | ||
101 | { | ||
102 | struct mei_cl *cl = &dev->wd_cl; | ||
103 | struct mei_msg_hdr hdr; | ||
104 | int ret; | ||
105 | |||
106 | hdr.host_addr = cl->host_client_id; | ||
107 | hdr.me_addr = mei_cl_me_id(cl); | ||
108 | hdr.msg_complete = 1; | ||
109 | hdr.reserved = 0; | ||
110 | hdr.internal = 0; | ||
111 | |||
112 | if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) | ||
113 | hdr.length = MEI_WD_START_MSG_SIZE; | ||
114 | else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) | ||
115 | hdr.length = MEI_WD_STOP_MSG_SIZE; | ||
116 | else { | ||
117 | dev_err(dev->dev, "wd: invalid message is to be sent, aborting\n"); | ||
118 | return -EINVAL; | ||
119 | } | ||
120 | |||
121 | ret = mei_write_message(dev, &hdr, dev->wd_data); | ||
122 | if (ret) { | ||
123 | dev_err(dev->dev, "wd: write message failed\n"); | ||
124 | return ret; | ||
125 | } | ||
126 | |||
127 | ret = mei_cl_flow_ctrl_reduce(cl); | ||
128 | if (ret) { | ||
129 | dev_err(dev->dev, "wd: flow_ctrl_reduce failed.\n"); | ||
130 | return ret; | ||
131 | } | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | /** | ||
137 | * mei_wd_stop - sends watchdog stop message to fw. | ||
138 | * | ||
139 | * @dev: the device structure | ||
140 | * | ||
141 | * Return: 0 if success | ||
142 | * on error: | ||
143 | * -EIO when message send fails | ||
144 | * -EINVAL when invalid message is to be sent | ||
145 | * -ETIME on message timeout | ||
146 | */ | ||
147 | int mei_wd_stop(struct mei_device *dev) | ||
148 | { | ||
149 | struct mei_cl *cl = &dev->wd_cl; | ||
150 | int ret; | ||
151 | |||
152 | if (!mei_cl_is_connected(cl) || | ||
153 | dev->wd_state != MEI_WD_RUNNING) | ||
154 | return 0; | ||
155 | |||
156 | memcpy(dev->wd_data, mei_stop_wd_params, MEI_WD_STOP_MSG_SIZE); | ||
157 | |||
158 | dev->wd_state = MEI_WD_STOPPING; | ||
159 | |||
160 | ret = mei_cl_flow_ctrl_creds(cl); | ||
161 | if (ret < 0) | ||
162 | goto err; | ||
163 | |||
164 | if (ret && mei_hbuf_acquire(dev)) { | ||
165 | ret = mei_wd_send(dev); | ||
166 | if (ret) | ||
167 | goto err; | ||
168 | dev->wd_pending = false; | ||
169 | } else { | ||
170 | dev->wd_pending = true; | ||
171 | } | ||
172 | |||
173 | mutex_unlock(&dev->device_lock); | ||
174 | |||
175 | ret = wait_event_timeout(dev->wait_stop_wd, | ||
176 | dev->wd_state == MEI_WD_IDLE, | ||
177 | msecs_to_jiffies(MEI_WD_STOP_TIMEOUT)); | ||
178 | mutex_lock(&dev->device_lock); | ||
179 | if (dev->wd_state != MEI_WD_IDLE) { | ||
180 | /* timeout */ | ||
181 | ret = -ETIME; | ||
182 | dev_warn(dev->dev, "wd: stop failed to complete ret=%d\n", ret); | ||
183 | goto err; | ||
184 | } | ||
185 | dev_dbg(dev->dev, "wd: stop completed after %u msec\n", | ||
186 | MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret)); | ||
187 | return 0; | ||
188 | err: | ||
189 | return ret; | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * mei_wd_ops_start - wd start command from the watchdog core. | ||
194 | * | ||
195 | * @wd_dev: watchdog device struct | ||
196 | * | ||
197 | * Return: 0 if success, negative errno code for failure | ||
198 | */ | ||
199 | static int mei_wd_ops_start(struct watchdog_device *wd_dev) | ||
200 | { | ||
201 | struct mei_device *dev; | ||
202 | struct mei_cl *cl; | ||
203 | int err = -ENODEV; | ||
204 | |||
205 | dev = watchdog_get_drvdata(wd_dev); | ||
206 | if (!dev) | ||
207 | return -ENODEV; | ||
208 | |||
209 | cl = &dev->wd_cl; | ||
210 | |||
211 | mutex_lock(&dev->device_lock); | ||
212 | |||
213 | if (dev->dev_state != MEI_DEV_ENABLED) { | ||
214 | dev_dbg(dev->dev, "wd: dev_state != MEI_DEV_ENABLED dev_state = %s\n", | ||
215 | mei_dev_state_str(dev->dev_state)); | ||
216 | goto end_unlock; | ||
217 | } | ||
218 | |||
219 | if (!mei_cl_is_connected(cl)) { | ||
220 | cl_dbg(dev, cl, "MEI Driver is not connected to Watchdog Client\n"); | ||
221 | goto end_unlock; | ||
222 | } | ||
223 | |||
224 | mei_wd_set_start_timeout(dev, dev->wd_timeout); | ||
225 | |||
226 | err = 0; | ||
227 | end_unlock: | ||
228 | mutex_unlock(&dev->device_lock); | ||
229 | return err; | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * mei_wd_ops_stop - wd stop command from the watchdog core. | ||
234 | * | ||
235 | * @wd_dev: watchdog device struct | ||
236 | * | ||
237 | * Return: 0 if success, negative errno code for failure | ||
238 | */ | ||
239 | static int mei_wd_ops_stop(struct watchdog_device *wd_dev) | ||
240 | { | ||
241 | struct mei_device *dev; | ||
242 | |||
243 | dev = watchdog_get_drvdata(wd_dev); | ||
244 | if (!dev) | ||
245 | return -ENODEV; | ||
246 | |||
247 | mutex_lock(&dev->device_lock); | ||
248 | mei_wd_stop(dev); | ||
249 | mutex_unlock(&dev->device_lock); | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * mei_wd_ops_ping - wd ping command from the watchdog core. | ||
256 | * | ||
257 | * @wd_dev: watchdog device struct | ||
258 | * | ||
259 | * Return: 0 if success, negative errno code for failure | ||
260 | */ | ||
261 | static int mei_wd_ops_ping(struct watchdog_device *wd_dev) | ||
262 | { | ||
263 | struct mei_device *dev; | ||
264 | struct mei_cl *cl; | ||
265 | int ret; | ||
266 | |||
267 | dev = watchdog_get_drvdata(wd_dev); | ||
268 | if (!dev) | ||
269 | return -ENODEV; | ||
270 | |||
271 | cl = &dev->wd_cl; | ||
272 | |||
273 | mutex_lock(&dev->device_lock); | ||
274 | |||
275 | if (!mei_cl_is_connected(cl)) { | ||
276 | cl_err(dev, cl, "wd: not connected.\n"); | ||
277 | ret = -ENODEV; | ||
278 | goto end; | ||
279 | } | ||
280 | |||
281 | dev->wd_state = MEI_WD_RUNNING; | ||
282 | |||
283 | ret = mei_cl_flow_ctrl_creds(cl); | ||
284 | if (ret < 0) | ||
285 | goto end; | ||
286 | |||
287 | /* Check if we can send the ping to HW*/ | ||
288 | if (ret && mei_hbuf_acquire(dev)) { | ||
289 | dev_dbg(dev->dev, "wd: sending ping\n"); | ||
290 | |||
291 | ret = mei_wd_send(dev); | ||
292 | if (ret) | ||
293 | goto end; | ||
294 | dev->wd_pending = false; | ||
295 | } else { | ||
296 | dev->wd_pending = true; | ||
297 | } | ||
298 | |||
299 | end: | ||
300 | mutex_unlock(&dev->device_lock); | ||
301 | return ret; | ||
302 | } | ||
303 | |||
304 | /** | ||
305 | * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core. | ||
306 | * | ||
307 | * @wd_dev: watchdog device struct | ||
308 | * @timeout: timeout value to set | ||
309 | * | ||
310 | * Return: 0 if success, negative errno code for failure | ||
311 | */ | ||
312 | static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, | ||
313 | unsigned int timeout) | ||
314 | { | ||
315 | struct mei_device *dev; | ||
316 | |||
317 | dev = watchdog_get_drvdata(wd_dev); | ||
318 | if (!dev) | ||
319 | return -ENODEV; | ||
320 | |||
321 | /* Check Timeout value */ | ||
322 | if (timeout < MEI_WD_MIN_TIMEOUT || timeout > MEI_WD_MAX_TIMEOUT) | ||
323 | return -EINVAL; | ||
324 | |||
325 | mutex_lock(&dev->device_lock); | ||
326 | |||
327 | dev->wd_timeout = timeout; | ||
328 | wd_dev->timeout = timeout; | ||
329 | mei_wd_set_start_timeout(dev, dev->wd_timeout); | ||
330 | |||
331 | mutex_unlock(&dev->device_lock); | ||
332 | |||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | /* | ||
337 | * Watchdog Device structs | ||
338 | */ | ||
339 | static const struct watchdog_ops wd_ops = { | ||
340 | .owner = THIS_MODULE, | ||
341 | .start = mei_wd_ops_start, | ||
342 | .stop = mei_wd_ops_stop, | ||
343 | .ping = mei_wd_ops_ping, | ||
344 | .set_timeout = mei_wd_ops_set_timeout, | ||
345 | }; | ||
346 | static const struct watchdog_info wd_info = { | ||
347 | .identity = INTEL_AMT_WATCHDOG_ID, | ||
348 | .options = WDIOF_KEEPALIVEPING | | ||
349 | WDIOF_SETTIMEOUT | | ||
350 | WDIOF_ALARMONLY, | ||
351 | }; | ||
352 | |||
353 | static struct watchdog_device amt_wd_dev = { | ||
354 | .info = &wd_info, | ||
355 | .ops = &wd_ops, | ||
356 | .timeout = MEI_WD_DEFAULT_TIMEOUT, | ||
357 | .min_timeout = MEI_WD_MIN_TIMEOUT, | ||
358 | .max_timeout = MEI_WD_MAX_TIMEOUT, | ||
359 | }; | ||
360 | |||
361 | |||
362 | int mei_watchdog_register(struct mei_device *dev) | ||
363 | { | ||
364 | |||
365 | int ret; | ||
366 | |||
367 | amt_wd_dev.parent = dev->dev; | ||
368 | /* unlock to perserve correct locking order */ | ||
369 | mutex_unlock(&dev->device_lock); | ||
370 | ret = watchdog_register_device(&amt_wd_dev); | ||
371 | mutex_lock(&dev->device_lock); | ||
372 | if (ret) { | ||
373 | dev_err(dev->dev, "wd: unable to register watchdog device = %d.\n", | ||
374 | ret); | ||
375 | return ret; | ||
376 | } | ||
377 | |||
378 | dev_dbg(dev->dev, "wd: successfully register watchdog interface.\n"); | ||
379 | watchdog_set_drvdata(&amt_wd_dev, dev); | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | void mei_watchdog_unregister(struct mei_device *dev) | ||
384 | { | ||
385 | if (watchdog_get_drvdata(&amt_wd_dev) == NULL) | ||
386 | return; | ||
387 | |||
388 | watchdog_set_drvdata(&amt_wd_dev, NULL); | ||
389 | watchdog_unregister_device(&amt_wd_dev); | ||
390 | } | ||
391 | |||
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig index 40677df7f996..2e4f3ba75c8e 100644 --- a/drivers/misc/mic/Kconfig +++ b/drivers/misc/mic/Kconfig | |||
@@ -32,12 +32,29 @@ config SCIF_BUS | |||
32 | OS and tools for MIC to use with this driver are available from | 32 | OS and tools for MIC to use with this driver are available from |
33 | <http://software.intel.com/en-us/mic-developer>. | 33 | <http://software.intel.com/en-us/mic-developer>. |
34 | 34 | ||
35 | comment "VOP Bus Driver" | ||
36 | |||
37 | config VOP_BUS | ||
38 | tristate "VOP Bus Driver" | ||
39 | depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS | ||
40 | help | ||
41 | This option is selected by any driver which registers a | ||
42 | device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST | ||
43 | and CONFIG_INTEL_MIC_CARD. | ||
44 | |||
45 | If you are building a host/card kernel with an Intel MIC device | ||
46 | then say M (recommended) or Y, else say N. If unsure say N. | ||
47 | |||
48 | More information about the Intel MIC family as well as the Linux | ||
49 | OS and tools for MIC to use with this driver are available from | ||
50 | <http://software.intel.com/en-us/mic-developer>. | ||
51 | |||
35 | comment "Intel MIC Host Driver" | 52 | comment "Intel MIC Host Driver" |
36 | 53 | ||
37 | config INTEL_MIC_HOST | 54 | config INTEL_MIC_HOST |
38 | tristate "Intel MIC Host Driver" | 55 | tristate "Intel MIC Host Driver" |
39 | depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM | 56 | depends on 64BIT && PCI && X86 |
40 | select VHOST_RING | 57 | depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS |
41 | help | 58 | help |
42 | This enables Host Driver support for the Intel Many Integrated | 59 | This enables Host Driver support for the Intel Many Integrated |
43 | Core (MIC) family of PCIe form factor coprocessor devices that | 60 | Core (MIC) family of PCIe form factor coprocessor devices that |
@@ -56,7 +73,8 @@ comment "Intel MIC Card Driver" | |||
56 | 73 | ||
57 | config INTEL_MIC_CARD | 74 | config INTEL_MIC_CARD |
58 | tristate "Intel MIC Card Driver" | 75 | tristate "Intel MIC Card Driver" |
59 | depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM | 76 | depends on 64BIT && X86 |
77 | depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS | ||
60 | select VIRTIO | 78 | select VIRTIO |
61 | help | 79 | help |
62 | This enables card driver support for the Intel Many Integrated | 80 | This enables card driver support for the Intel Many Integrated |
@@ -107,3 +125,23 @@ config MIC_COSM | |||
107 | More information about the Intel MIC family as well as the Linux | 125 | More information about the Intel MIC family as well as the Linux |
108 | OS and tools for MIC to use with this driver are available from | 126 | OS and tools for MIC to use with this driver are available from |
109 | <http://software.intel.com/en-us/mic-developer>. | 127 | <http://software.intel.com/en-us/mic-developer>. |
128 | |||
129 | comment "VOP Driver" | ||
130 | |||
131 | config VOP | ||
132 | tristate "VOP Driver" | ||
133 | depends on 64BIT && PCI && X86 && VOP_BUS | ||
134 | select VHOST_RING | ||
135 | help | ||
136 | This enables VOP (Virtio over PCIe) Driver support for the Intel | ||
137 | Many Integrated Core (MIC) family of PCIe form factor coprocessor | ||
138 | devices. The VOP driver allows virtio drivers, e.g. net, console | ||
139 | and block drivers, on the card connect to user space virtio | ||
140 | devices on the host. | ||
141 | |||
142 | If you are building a host kernel with an Intel MIC device then | ||
143 | say M (recommended) or Y, else say N. If unsure say N. | ||
144 | |||
145 | More information about the Intel MIC family as well as the Linux | ||
146 | OS and tools for MIC to use with this driver are available from | ||
147 | <http://software.intel.com/en-us/mic-developer>. | ||
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile index e288a1106738..f2b1323ff96c 100644 --- a/drivers/misc/mic/Makefile +++ b/drivers/misc/mic/Makefile | |||
@@ -8,3 +8,4 @@ obj-y += bus/ | |||
8 | obj-$(CONFIG_SCIF) += scif/ | 8 | obj-$(CONFIG_SCIF) += scif/ |
9 | obj-$(CONFIG_MIC_COSM) += cosm/ | 9 | obj-$(CONFIG_MIC_COSM) += cosm/ |
10 | obj-$(CONFIG_MIC_COSM) += cosm_client/ | 10 | obj-$(CONFIG_MIC_COSM) += cosm_client/ |
11 | obj-$(CONFIG_VOP) += vop/ | ||
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile index 761842b0d0bb..8758a7daa52c 100644 --- a/drivers/misc/mic/bus/Makefile +++ b/drivers/misc/mic/bus/Makefile | |||
@@ -5,3 +5,4 @@ | |||
5 | obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o | 5 | obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o |
6 | obj-$(CONFIG_SCIF_BUS) += scif_bus.o | 6 | obj-$(CONFIG_SCIF_BUS) += scif_bus.o |
7 | obj-$(CONFIG_MIC_COSM) += cosm_bus.o | 7 | obj-$(CONFIG_MIC_COSM) += cosm_bus.o |
8 | obj-$(CONFIG_VOP_BUS) += vop_bus.o | ||
diff --git a/drivers/misc/mic/bus/cosm_bus.h b/drivers/misc/mic/bus/cosm_bus.h index f7c57f266916..8b6341855dc3 100644 --- a/drivers/misc/mic/bus/cosm_bus.h +++ b/drivers/misc/mic/bus/cosm_bus.h | |||
@@ -30,6 +30,7 @@ | |||
30 | * @attr_group: Pointer to list of sysfs attribute groups. | 30 | * @attr_group: Pointer to list of sysfs attribute groups. |
31 | * @sdev: Device for sysfs entries. | 31 | * @sdev: Device for sysfs entries. |
32 | * @state: MIC state. | 32 | * @state: MIC state. |
33 | * @prev_state: MIC state previous to MIC_RESETTING | ||
33 | * @shutdown_status: MIC status reported by card for shutdown/crashes. | 34 | * @shutdown_status: MIC status reported by card for shutdown/crashes. |
34 | * @shutdown_status_int: Internal shutdown status maintained by the driver | 35 | * @shutdown_status_int: Internal shutdown status maintained by the driver |
35 | * @cosm_mutex: Mutex for synchronizing access to data structures. | 36 | * @cosm_mutex: Mutex for synchronizing access to data structures. |
@@ -55,6 +56,7 @@ struct cosm_device { | |||
55 | const struct attribute_group **attr_group; | 56 | const struct attribute_group **attr_group; |
56 | struct device *sdev; | 57 | struct device *sdev; |
57 | u8 state; | 58 | u8 state; |
59 | u8 prev_state; | ||
58 | u8 shutdown_status; | 60 | u8 shutdown_status; |
59 | u8 shutdown_status_int; | 61 | u8 shutdown_status_int; |
60 | struct mutex cosm_mutex; | 62 | struct mutex cosm_mutex; |
diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c new file mode 100644 index 000000000000..303da222f5b6 --- /dev/null +++ b/drivers/misc/mic/bus/vop_bus.c | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2016 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Intel Virtio Over PCIe (VOP) Bus driver. | ||
19 | */ | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/idr.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | |||
25 | #include "vop_bus.h" | ||
26 | |||
27 | static ssize_t device_show(struct device *d, | ||
28 | struct device_attribute *attr, char *buf) | ||
29 | { | ||
30 | struct vop_device *dev = dev_to_vop(d); | ||
31 | |||
32 | return sprintf(buf, "0x%04x\n", dev->id.device); | ||
33 | } | ||
34 | static DEVICE_ATTR_RO(device); | ||
35 | |||
36 | static ssize_t vendor_show(struct device *d, | ||
37 | struct device_attribute *attr, char *buf) | ||
38 | { | ||
39 | struct vop_device *dev = dev_to_vop(d); | ||
40 | |||
41 | return sprintf(buf, "0x%04x\n", dev->id.vendor); | ||
42 | } | ||
43 | static DEVICE_ATTR_RO(vendor); | ||
44 | |||
45 | static ssize_t modalias_show(struct device *d, | ||
46 | struct device_attribute *attr, char *buf) | ||
47 | { | ||
48 | struct vop_device *dev = dev_to_vop(d); | ||
49 | |||
50 | return sprintf(buf, "vop:d%08Xv%08X\n", | ||
51 | dev->id.device, dev->id.vendor); | ||
52 | } | ||
53 | static DEVICE_ATTR_RO(modalias); | ||
54 | |||
55 | static struct attribute *vop_dev_attrs[] = { | ||
56 | &dev_attr_device.attr, | ||
57 | &dev_attr_vendor.attr, | ||
58 | &dev_attr_modalias.attr, | ||
59 | NULL, | ||
60 | }; | ||
61 | ATTRIBUTE_GROUPS(vop_dev); | ||
62 | |||
63 | static inline int vop_id_match(const struct vop_device *dev, | ||
64 | const struct vop_device_id *id) | ||
65 | { | ||
66 | if (id->device != dev->id.device && id->device != VOP_DEV_ANY_ID) | ||
67 | return 0; | ||
68 | |||
69 | return id->vendor == VOP_DEV_ANY_ID || id->vendor == dev->id.vendor; | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * This looks through all the IDs a driver claims to support. If any of them | ||
74 | * match, we return 1 and the kernel will call vop_dev_probe(). | ||
75 | */ | ||
76 | static int vop_dev_match(struct device *dv, struct device_driver *dr) | ||
77 | { | ||
78 | unsigned int i; | ||
79 | struct vop_device *dev = dev_to_vop(dv); | ||
80 | const struct vop_device_id *ids; | ||
81 | |||
82 | ids = drv_to_vop(dr)->id_table; | ||
83 | for (i = 0; ids[i].device; i++) | ||
84 | if (vop_id_match(dev, &ids[i])) | ||
85 | return 1; | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | static int vop_uevent(struct device *dv, struct kobj_uevent_env *env) | ||
90 | { | ||
91 | struct vop_device *dev = dev_to_vop(dv); | ||
92 | |||
93 | return add_uevent_var(env, "MODALIAS=vop:d%08Xv%08X", | ||
94 | dev->id.device, dev->id.vendor); | ||
95 | } | ||
96 | |||
97 | static int vop_dev_probe(struct device *d) | ||
98 | { | ||
99 | struct vop_device *dev = dev_to_vop(d); | ||
100 | struct vop_driver *drv = drv_to_vop(dev->dev.driver); | ||
101 | |||
102 | return drv->probe(dev); | ||
103 | } | ||
104 | |||
105 | static int vop_dev_remove(struct device *d) | ||
106 | { | ||
107 | struct vop_device *dev = dev_to_vop(d); | ||
108 | struct vop_driver *drv = drv_to_vop(dev->dev.driver); | ||
109 | |||
110 | drv->remove(dev); | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static struct bus_type vop_bus = { | ||
115 | .name = "vop_bus", | ||
116 | .match = vop_dev_match, | ||
117 | .dev_groups = vop_dev_groups, | ||
118 | .uevent = vop_uevent, | ||
119 | .probe = vop_dev_probe, | ||
120 | .remove = vop_dev_remove, | ||
121 | }; | ||
122 | |||
123 | int vop_register_driver(struct vop_driver *driver) | ||
124 | { | ||
125 | driver->driver.bus = &vop_bus; | ||
126 | return driver_register(&driver->driver); | ||
127 | } | ||
128 | EXPORT_SYMBOL_GPL(vop_register_driver); | ||
129 | |||
130 | void vop_unregister_driver(struct vop_driver *driver) | ||
131 | { | ||
132 | driver_unregister(&driver->driver); | ||
133 | } | ||
134 | EXPORT_SYMBOL_GPL(vop_unregister_driver); | ||
135 | |||
136 | static void vop_release_dev(struct device *d) | ||
137 | { | ||
138 | put_device(d); | ||
139 | } | ||
140 | |||
141 | struct vop_device * | ||
142 | vop_register_device(struct device *pdev, int id, | ||
143 | const struct dma_map_ops *dma_ops, | ||
144 | struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper, | ||
145 | struct dma_chan *chan) | ||
146 | { | ||
147 | int ret; | ||
148 | struct vop_device *vdev; | ||
149 | |||
150 | vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); | ||
151 | if (!vdev) | ||
152 | return ERR_PTR(-ENOMEM); | ||
153 | |||
154 | vdev->dev.parent = pdev; | ||
155 | vdev->id.device = id; | ||
156 | vdev->id.vendor = VOP_DEV_ANY_ID; | ||
157 | vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops; | ||
158 | vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask; | ||
159 | dma_set_mask(&vdev->dev, DMA_BIT_MASK(64)); | ||
160 | vdev->dev.release = vop_release_dev; | ||
161 | vdev->hw_ops = hw_ops; | ||
162 | vdev->dev.bus = &vop_bus; | ||
163 | vdev->dnode = dnode; | ||
164 | vdev->aper = aper; | ||
165 | vdev->dma_ch = chan; | ||
166 | vdev->index = dnode - 1; | ||
167 | dev_set_name(&vdev->dev, "vop-dev%u", vdev->index); | ||
168 | /* | ||
169 | * device_register() causes the bus infrastructure to look for a | ||
170 | * matching driver. | ||
171 | */ | ||
172 | ret = device_register(&vdev->dev); | ||
173 | if (ret) | ||
174 | goto free_vdev; | ||
175 | return vdev; | ||
176 | free_vdev: | ||
177 | kfree(vdev); | ||
178 | return ERR_PTR(ret); | ||
179 | } | ||
180 | EXPORT_SYMBOL_GPL(vop_register_device); | ||
181 | |||
182 | void vop_unregister_device(struct vop_device *dev) | ||
183 | { | ||
184 | device_unregister(&dev->dev); | ||
185 | } | ||
186 | EXPORT_SYMBOL_GPL(vop_unregister_device); | ||
187 | |||
188 | static int __init vop_init(void) | ||
189 | { | ||
190 | return bus_register(&vop_bus); | ||
191 | } | ||
192 | |||
193 | static void __exit vop_exit(void) | ||
194 | { | ||
195 | bus_unregister(&vop_bus); | ||
196 | } | ||
197 | |||
198 | core_initcall(vop_init); | ||
199 | module_exit(vop_exit); | ||
200 | |||
201 | MODULE_AUTHOR("Intel Corporation"); | ||
202 | MODULE_DESCRIPTION("Intel(R) VOP Bus driver"); | ||
203 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h new file mode 100644 index 000000000000..fff7a865d721 --- /dev/null +++ b/drivers/misc/mic/bus/vop_bus.h | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2016 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Intel Virtio over PCIe Bus driver. | ||
19 | */ | ||
20 | #ifndef _VOP_BUS_H_ | ||
21 | #define _VOP_BUS_H_ | ||
22 | /* | ||
23 | * Everything a vop driver needs to work with any particular vop | ||
24 | * implementation. | ||
25 | */ | ||
26 | #include <linux/dmaengine.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | |||
29 | #include "../common/mic_dev.h" | ||
30 | |||
31 | struct vop_device_id { | ||
32 | u32 device; | ||
33 | u32 vendor; | ||
34 | }; | ||
35 | |||
36 | #define VOP_DEV_TRNSP 1 | ||
37 | #define VOP_DEV_ANY_ID 0xffffffff | ||
38 | /* | ||
39 | * Size of the internal buffer used during DMA's as an intermediate buffer | ||
40 | * for copy to/from user. Must be an integral number of pages. | ||
41 | */ | ||
42 | #define VOP_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL) | ||
43 | |||
44 | /** | ||
45 | * vop_device - representation of a device using vop | ||
46 | * @hw_ops: the hardware ops supported by this device. | ||
47 | * @id: the device type identification (used to match it with a driver). | ||
48 | * @dev: underlying device. | ||
49 | * @dnode - The destination node which this device will communicate with. | ||
50 | * @aper: Aperture memory window | ||
51 | * @dma_ch - DMA channel | ||
52 | * @index: unique position on the vop bus | ||
53 | */ | ||
54 | struct vop_device { | ||
55 | struct vop_hw_ops *hw_ops; | ||
56 | struct vop_device_id id; | ||
57 | struct device dev; | ||
58 | u8 dnode; | ||
59 | struct mic_mw *aper; | ||
60 | struct dma_chan *dma_ch; | ||
61 | int index; | ||
62 | }; | ||
63 | |||
64 | /** | ||
65 | * vop_driver - operations for a vop I/O driver | ||
66 | * @driver: underlying device driver (populate name and owner). | ||
67 | * @id_table: the ids serviced by this driver. | ||
68 | * @probe: the function to call when a device is found. Returns 0 or -errno. | ||
69 | * @remove: the function to call when a device is removed. | ||
70 | */ | ||
71 | struct vop_driver { | ||
72 | struct device_driver driver; | ||
73 | const struct vop_device_id *id_table; | ||
74 | int (*probe)(struct vop_device *dev); | ||
75 | void (*remove)(struct vop_device *dev); | ||
76 | }; | ||
77 | |||
78 | /** | ||
79 | * vop_hw_ops - Hardware operations for accessing a VOP device on the VOP bus. | ||
80 | * | ||
81 | * @next_db: Obtain the next available doorbell. | ||
82 | * @request_irq: Request an interrupt on a particular doorbell. | ||
83 | * @free_irq: Free an interrupt requested previously. | ||
84 | * @ack_interrupt: acknowledge an interrupt in the ISR. | ||
85 | * @get_remote_dp: Get access to the virtio device page used by the remote | ||
86 | * node to add/remove/configure virtio devices. | ||
87 | * @get_dp: Get access to the virtio device page used by the self | ||
88 | * node to add/remove/configure virtio devices. | ||
89 | * @send_intr: Send an interrupt to the peer node on a specified doorbell. | ||
90 | * @ioremap: Map a buffer with the specified DMA address and length. | ||
91 | * @iounmap: Unmap a buffer previously mapped. | ||
92 | * @dma_filter: The DMA filter function to use for obtaining access to | ||
93 | * a DMA channel on the peer node. | ||
94 | */ | ||
95 | struct vop_hw_ops { | ||
96 | int (*next_db)(struct vop_device *vpdev); | ||
97 | struct mic_irq *(*request_irq)(struct vop_device *vpdev, | ||
98 | irqreturn_t (*func)(int irq, void *data), | ||
99 | const char *name, void *data, | ||
100 | int intr_src); | ||
101 | void (*free_irq)(struct vop_device *vpdev, | ||
102 | struct mic_irq *cookie, void *data); | ||
103 | void (*ack_interrupt)(struct vop_device *vpdev, int num); | ||
104 | void __iomem * (*get_remote_dp)(struct vop_device *vpdev); | ||
105 | void * (*get_dp)(struct vop_device *vpdev); | ||
106 | void (*send_intr)(struct vop_device *vpdev, int db); | ||
107 | void __iomem * (*ioremap)(struct vop_device *vpdev, | ||
108 | dma_addr_t pa, size_t len); | ||
109 | void (*iounmap)(struct vop_device *vpdev, void __iomem *va); | ||
110 | }; | ||
111 | |||
112 | struct vop_device * | ||
113 | vop_register_device(struct device *pdev, int id, | ||
114 | const struct dma_map_ops *dma_ops, | ||
115 | struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper, | ||
116 | struct dma_chan *chan); | ||
117 | void vop_unregister_device(struct vop_device *dev); | ||
118 | int vop_register_driver(struct vop_driver *drv); | ||
119 | void vop_unregister_driver(struct vop_driver *drv); | ||
120 | |||
121 | /* | ||
122 | * module_vop_driver() - Helper macro for drivers that don't do | ||
123 | * anything special in module init/exit. This eliminates a lot of | ||
124 | * boilerplate. Each module may only use this macro once, and | ||
125 | * calling it replaces module_init() and module_exit() | ||
126 | */ | ||
127 | #define module_vop_driver(__vop_driver) \ | ||
128 | module_driver(__vop_driver, vop_register_driver, \ | ||
129 | vop_unregister_driver) | ||
130 | |||
131 | static inline struct vop_device *dev_to_vop(struct device *dev) | ||
132 | { | ||
133 | return container_of(dev, struct vop_device, dev); | ||
134 | } | ||
135 | |||
136 | static inline struct vop_driver *drv_to_vop(struct device_driver *drv) | ||
137 | { | ||
138 | return container_of(drv, struct vop_driver, driver); | ||
139 | } | ||
140 | #endif /* _VOP_BUS_H */ | ||
diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile index 69d58bef92ce..6e9675e12a09 100644 --- a/drivers/misc/mic/card/Makefile +++ b/drivers/misc/mic/card/Makefile | |||
@@ -8,4 +8,3 @@ obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o | |||
8 | mic_card-y += mic_x100.o | 8 | mic_card-y += mic_x100.o |
9 | mic_card-y += mic_device.o | 9 | mic_card-y += mic_device.o |
10 | mic_card-y += mic_debugfs.o | 10 | mic_card-y += mic_debugfs.o |
11 | mic_card-y += mic_virtio.o | ||
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c index d0edaf7e0cd5..e749af48f736 100644 --- a/drivers/misc/mic/card/mic_device.c +++ b/drivers/misc/mic/card/mic_device.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/mic_common.h> | 34 | #include <linux/mic_common.h> |
35 | #include "../common/mic_dev.h" | 35 | #include "../common/mic_dev.h" |
36 | #include "mic_device.h" | 36 | #include "mic_device.h" |
37 | #include "mic_virtio.h" | ||
38 | 37 | ||
39 | static struct mic_driver *g_drv; | 38 | static struct mic_driver *g_drv; |
40 | 39 | ||
@@ -250,12 +249,82 @@ static struct scif_hw_ops scif_hw_ops = { | |||
250 | .iounmap = ___mic_iounmap, | 249 | .iounmap = ___mic_iounmap, |
251 | }; | 250 | }; |
252 | 251 | ||
252 | static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev) | ||
253 | { | ||
254 | return dev_get_drvdata(vpdev->dev.parent); | ||
255 | } | ||
256 | |||
257 | static struct mic_irq * | ||
258 | __mic_request_irq(struct vop_device *vpdev, | ||
259 | irqreturn_t (*func)(int irq, void *data), | ||
260 | const char *name, void *data, int intr_src) | ||
261 | { | ||
262 | return mic_request_card_irq(func, NULL, name, data, intr_src); | ||
263 | } | ||
264 | |||
265 | static void __mic_free_irq(struct vop_device *vpdev, | ||
266 | struct mic_irq *cookie, void *data) | ||
267 | { | ||
268 | return mic_free_card_irq(cookie, data); | ||
269 | } | ||
270 | |||
271 | static void __mic_ack_interrupt(struct vop_device *vpdev, int num) | ||
272 | { | ||
273 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
274 | |||
275 | mic_ack_interrupt(&mdrv->mdev); | ||
276 | } | ||
277 | |||
278 | static int __mic_next_db(struct vop_device *vpdev) | ||
279 | { | ||
280 | return mic_next_card_db(); | ||
281 | } | ||
282 | |||
283 | static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev) | ||
284 | { | ||
285 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
286 | |||
287 | return mdrv->dp; | ||
288 | } | ||
289 | |||
290 | static void __mic_send_intr(struct vop_device *vpdev, int db) | ||
291 | { | ||
292 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
293 | |||
294 | mic_send_intr(&mdrv->mdev, db); | ||
295 | } | ||
296 | |||
297 | static void __iomem *__mic_ioremap(struct vop_device *vpdev, | ||
298 | dma_addr_t pa, size_t len) | ||
299 | { | ||
300 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
301 | |||
302 | return mic_card_map(&mdrv->mdev, pa, len); | ||
303 | } | ||
304 | |||
305 | static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va) | ||
306 | { | ||
307 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
308 | |||
309 | mic_card_unmap(&mdrv->mdev, va); | ||
310 | } | ||
311 | |||
312 | static struct vop_hw_ops vop_hw_ops = { | ||
313 | .request_irq = __mic_request_irq, | ||
314 | .free_irq = __mic_free_irq, | ||
315 | .ack_interrupt = __mic_ack_interrupt, | ||
316 | .next_db = __mic_next_db, | ||
317 | .get_remote_dp = __mic_get_remote_dp, | ||
318 | .send_intr = __mic_send_intr, | ||
319 | .ioremap = __mic_ioremap, | ||
320 | .iounmap = __mic_iounmap, | ||
321 | }; | ||
322 | |||
253 | static int mic_request_dma_chans(struct mic_driver *mdrv) | 323 | static int mic_request_dma_chans(struct mic_driver *mdrv) |
254 | { | 324 | { |
255 | dma_cap_mask_t mask; | 325 | dma_cap_mask_t mask; |
256 | struct dma_chan *chan; | 326 | struct dma_chan *chan; |
257 | 327 | ||
258 | request_module("mic_x100_dma"); | ||
259 | dma_cap_zero(mask); | 328 | dma_cap_zero(mask); |
260 | dma_cap_set(DMA_MEMCPY, mask); | 329 | dma_cap_set(DMA_MEMCPY, mask); |
261 | 330 | ||
@@ -309,9 +378,13 @@ int __init mic_driver_init(struct mic_driver *mdrv) | |||
309 | rc = -ENODEV; | 378 | rc = -ENODEV; |
310 | goto irq_uninit; | 379 | goto irq_uninit; |
311 | } | 380 | } |
312 | rc = mic_devices_init(mdrv); | 381 | mdrv->vpdev = vop_register_device(mdrv->dev, VOP_DEV_TRNSP, |
313 | if (rc) | 382 | NULL, &vop_hw_ops, 0, |
383 | NULL, mdrv->dma_ch[0]); | ||
384 | if (IS_ERR(mdrv->vpdev)) { | ||
385 | rc = PTR_ERR(mdrv->vpdev); | ||
314 | goto dma_free; | 386 | goto dma_free; |
387 | } | ||
315 | bootparam = mdrv->dp; | 388 | bootparam = mdrv->dp; |
316 | node_id = ioread8(&bootparam->node_id); | 389 | node_id = ioread8(&bootparam->node_id); |
317 | mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV, | 390 | mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV, |
@@ -321,13 +394,13 @@ int __init mic_driver_init(struct mic_driver *mdrv) | |||
321 | mdrv->num_dma_ch, true); | 394 | mdrv->num_dma_ch, true); |
322 | if (IS_ERR(mdrv->scdev)) { | 395 | if (IS_ERR(mdrv->scdev)) { |
323 | rc = PTR_ERR(mdrv->scdev); | 396 | rc = PTR_ERR(mdrv->scdev); |
324 | goto device_uninit; | 397 | goto vop_remove; |
325 | } | 398 | } |
326 | mic_create_card_debug_dir(mdrv); | 399 | mic_create_card_debug_dir(mdrv); |
327 | done: | 400 | done: |
328 | return rc; | 401 | return rc; |
329 | device_uninit: | 402 | vop_remove: |
330 | mic_devices_uninit(mdrv); | 403 | vop_unregister_device(mdrv->vpdev); |
331 | dma_free: | 404 | dma_free: |
332 | mic_free_dma_chans(mdrv); | 405 | mic_free_dma_chans(mdrv); |
333 | irq_uninit: | 406 | irq_uninit: |
@@ -348,7 +421,7 @@ void mic_driver_uninit(struct mic_driver *mdrv) | |||
348 | { | 421 | { |
349 | mic_delete_card_debug_dir(mdrv); | 422 | mic_delete_card_debug_dir(mdrv); |
350 | scif_unregister_device(mdrv->scdev); | 423 | scif_unregister_device(mdrv->scdev); |
351 | mic_devices_uninit(mdrv); | 424 | vop_unregister_device(mdrv->vpdev); |
352 | mic_free_dma_chans(mdrv); | 425 | mic_free_dma_chans(mdrv); |
353 | mic_uninit_irq(); | 426 | mic_uninit_irq(); |
354 | mic_dp_uninit(); | 427 | mic_dp_uninit(); |
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h index 1dbf83c41289..333dbed972f6 100644 --- a/drivers/misc/mic/card/mic_device.h +++ b/drivers/misc/mic/card/mic_device.h | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
33 | #include <linux/mic_bus.h> | 33 | #include <linux/mic_bus.h> |
34 | #include "../bus/scif_bus.h" | 34 | #include "../bus/scif_bus.h" |
35 | #include "../bus/vop_bus.h" | ||
35 | 36 | ||
36 | /** | 37 | /** |
37 | * struct mic_intr_info - Contains h/w specific interrupt sources info | 38 | * struct mic_intr_info - Contains h/w specific interrupt sources info |
@@ -76,6 +77,7 @@ struct mic_device { | |||
76 | * @dma_ch - Array of DMA channels | 77 | * @dma_ch - Array of DMA channels |
77 | * @num_dma_ch - Number of DMA channels available | 78 | * @num_dma_ch - Number of DMA channels available |
78 | * @scdev: SCIF device on the SCIF virtual bus. | 79 | * @scdev: SCIF device on the SCIF virtual bus. |
80 | * @vpdev: Virtio over PCIe device on the VOP virtual bus. | ||
79 | */ | 81 | */ |
80 | struct mic_driver { | 82 | struct mic_driver { |
81 | char name[20]; | 83 | char name[20]; |
@@ -90,6 +92,7 @@ struct mic_driver { | |||
90 | struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN]; | 92 | struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN]; |
91 | int num_dma_ch; | 93 | int num_dma_ch; |
92 | struct scif_hw_dev *scdev; | 94 | struct scif_hw_dev *scdev; |
95 | struct vop_device *vpdev; | ||
93 | }; | 96 | }; |
94 | 97 | ||
95 | /** | 98 | /** |
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c deleted file mode 100644 index f6ed57d3125c..000000000000 --- a/drivers/misc/mic/card/mic_virtio.c +++ /dev/null | |||
@@ -1,634 +0,0 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2013 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Disclaimer: The codes contained in these modules may be specific to | ||
19 | * the Intel Software Development Platform codenamed: Knights Ferry, and | ||
20 | * the Intel product codenamed: Knights Corner, and are not backward | ||
21 | * compatible with other Intel products. Additionally, Intel will NOT | ||
22 | * support the codes or instruction set in future products. | ||
23 | * | ||
24 | * Adapted from: | ||
25 | * | ||
26 | * virtio for kvm on s390 | ||
27 | * | ||
28 | * Copyright IBM Corp. 2008 | ||
29 | * | ||
30 | * This program is free software; you can redistribute it and/or modify | ||
31 | * it under the terms of the GNU General Public License (version 2 only) | ||
32 | * as published by the Free Software Foundation. | ||
33 | * | ||
34 | * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> | ||
35 | * | ||
36 | * Intel MIC Card driver. | ||
37 | * | ||
38 | */ | ||
39 | #include <linux/delay.h> | ||
40 | #include <linux/slab.h> | ||
41 | #include <linux/virtio_config.h> | ||
42 | |||
43 | #include "../common/mic_dev.h" | ||
44 | #include "mic_virtio.h" | ||
45 | |||
46 | #define VIRTIO_SUBCODE_64 0x0D00 | ||
47 | |||
48 | #define MIC_MAX_VRINGS 4 | ||
49 | struct mic_vdev { | ||
50 | struct virtio_device vdev; | ||
51 | struct mic_device_desc __iomem *desc; | ||
52 | struct mic_device_ctrl __iomem *dc; | ||
53 | struct mic_device *mdev; | ||
54 | void __iomem *vr[MIC_MAX_VRINGS]; | ||
55 | int used_size[MIC_MAX_VRINGS]; | ||
56 | struct completion reset_done; | ||
57 | struct mic_irq *virtio_cookie; | ||
58 | int c2h_vdev_db; | ||
59 | }; | ||
60 | |||
61 | static struct mic_irq *virtio_config_cookie; | ||
62 | #define to_micvdev(vd) container_of(vd, struct mic_vdev, vdev) | ||
63 | |||
64 | /* Helper API to obtain the parent of the virtio device */ | ||
65 | static inline struct device *mic_dev(struct mic_vdev *mvdev) | ||
66 | { | ||
67 | return mvdev->vdev.dev.parent; | ||
68 | } | ||
69 | |||
70 | /* This gets the device's feature bits. */ | ||
71 | static u64 mic_get_features(struct virtio_device *vdev) | ||
72 | { | ||
73 | unsigned int i, bits; | ||
74 | u32 features = 0; | ||
75 | struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; | ||
76 | u8 __iomem *in_features = mic_vq_features(desc); | ||
77 | int feature_len = ioread8(&desc->feature_len); | ||
78 | |||
79 | bits = min_t(unsigned, feature_len, sizeof(features)) * 8; | ||
80 | for (i = 0; i < bits; i++) | ||
81 | if (ioread8(&in_features[i / 8]) & (BIT(i % 8))) | ||
82 | features |= BIT(i); | ||
83 | |||
84 | return features; | ||
85 | } | ||
86 | |||
87 | static int mic_finalize_features(struct virtio_device *vdev) | ||
88 | { | ||
89 | unsigned int i, bits; | ||
90 | struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; | ||
91 | u8 feature_len = ioread8(&desc->feature_len); | ||
92 | /* Second half of bitmap is features we accept. */ | ||
93 | u8 __iomem *out_features = | ||
94 | mic_vq_features(desc) + feature_len; | ||
95 | |||
96 | /* Give virtio_ring a chance to accept features. */ | ||
97 | vring_transport_features(vdev); | ||
98 | |||
99 | /* Make sure we don't have any features > 32 bits! */ | ||
100 | BUG_ON((u32)vdev->features != vdev->features); | ||
101 | |||
102 | memset_io(out_features, 0, feature_len); | ||
103 | bits = min_t(unsigned, feature_len, | ||
104 | sizeof(vdev->features)) * 8; | ||
105 | for (i = 0; i < bits; i++) { | ||
106 | if (__virtio_test_bit(vdev, i)) | ||
107 | iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)), | ||
108 | &out_features[i / 8]); | ||
109 | } | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * Reading and writing elements in config space | ||
116 | */ | ||
117 | static void mic_get(struct virtio_device *vdev, unsigned int offset, | ||
118 | void *buf, unsigned len) | ||
119 | { | ||
120 | struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; | ||
121 | |||
122 | if (offset + len > ioread8(&desc->config_len)) | ||
123 | return; | ||
124 | memcpy_fromio(buf, mic_vq_configspace(desc) + offset, len); | ||
125 | } | ||
126 | |||
127 | static void mic_set(struct virtio_device *vdev, unsigned int offset, | ||
128 | const void *buf, unsigned len) | ||
129 | { | ||
130 | struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; | ||
131 | |||
132 | if (offset + len > ioread8(&desc->config_len)) | ||
133 | return; | ||
134 | memcpy_toio(mic_vq_configspace(desc) + offset, buf, len); | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * The operations to get and set the status word just access the status | ||
139 | * field of the device descriptor. set_status also interrupts the host | ||
140 | * to tell about status changes. | ||
141 | */ | ||
142 | static u8 mic_get_status(struct virtio_device *vdev) | ||
143 | { | ||
144 | return ioread8(&to_micvdev(vdev)->desc->status); | ||
145 | } | ||
146 | |||
147 | static void mic_set_status(struct virtio_device *vdev, u8 status) | ||
148 | { | ||
149 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
150 | if (!status) | ||
151 | return; | ||
152 | iowrite8(status, &mvdev->desc->status); | ||
153 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | ||
154 | } | ||
155 | |||
156 | /* Inform host on a virtio device reset and wait for ack from host */ | ||
157 | static void mic_reset_inform_host(struct virtio_device *vdev) | ||
158 | { | ||
159 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
160 | struct mic_device_ctrl __iomem *dc = mvdev->dc; | ||
161 | int retry; | ||
162 | |||
163 | iowrite8(0, &dc->host_ack); | ||
164 | iowrite8(1, &dc->vdev_reset); | ||
165 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | ||
166 | |||
167 | /* Wait till host completes all card accesses and acks the reset */ | ||
168 | for (retry = 100; retry--;) { | ||
169 | if (ioread8(&dc->host_ack)) | ||
170 | break; | ||
171 | msleep(100); | ||
172 | }; | ||
173 | |||
174 | dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry); | ||
175 | |||
176 | /* Reset status to 0 in case we timed out */ | ||
177 | iowrite8(0, &mvdev->desc->status); | ||
178 | } | ||
179 | |||
180 | static void mic_reset(struct virtio_device *vdev) | ||
181 | { | ||
182 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
183 | |||
184 | dev_dbg(mic_dev(mvdev), "%s: virtio id %d\n", | ||
185 | __func__, vdev->id.device); | ||
186 | |||
187 | mic_reset_inform_host(vdev); | ||
188 | complete_all(&mvdev->reset_done); | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * The virtio_ring code calls this API when it wants to notify the Host. | ||
193 | */ | ||
194 | static bool mic_notify(struct virtqueue *vq) | ||
195 | { | ||
196 | struct mic_vdev *mvdev = vq->priv; | ||
197 | |||
198 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | ||
199 | return true; | ||
200 | } | ||
201 | |||
202 | static void mic_del_vq(struct virtqueue *vq, int n) | ||
203 | { | ||
204 | struct mic_vdev *mvdev = to_micvdev(vq->vdev); | ||
205 | struct vring *vr = (struct vring *)(vq + 1); | ||
206 | |||
207 | free_pages((unsigned long) vr->used, get_order(mvdev->used_size[n])); | ||
208 | vring_del_virtqueue(vq); | ||
209 | mic_card_unmap(mvdev->mdev, mvdev->vr[n]); | ||
210 | mvdev->vr[n] = NULL; | ||
211 | } | ||
212 | |||
213 | static void mic_del_vqs(struct virtio_device *vdev) | ||
214 | { | ||
215 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
216 | struct virtqueue *vq, *n; | ||
217 | int idx = 0; | ||
218 | |||
219 | dev_dbg(mic_dev(mvdev), "%s\n", __func__); | ||
220 | |||
221 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) | ||
222 | mic_del_vq(vq, idx++); | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * This routine will assign vring's allocated in host/io memory. Code in | ||
227 | * virtio_ring.c however continues to access this io memory as if it were local | ||
228 | * memory without io accessors. | ||
229 | */ | ||
230 | static struct virtqueue *mic_find_vq(struct virtio_device *vdev, | ||
231 | unsigned index, | ||
232 | void (*callback)(struct virtqueue *vq), | ||
233 | const char *name) | ||
234 | { | ||
235 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
236 | struct mic_vqconfig __iomem *vqconfig; | ||
237 | struct mic_vqconfig config; | ||
238 | struct virtqueue *vq; | ||
239 | void __iomem *va; | ||
240 | struct _mic_vring_info __iomem *info; | ||
241 | void *used; | ||
242 | int vr_size, _vr_size, err, magic; | ||
243 | struct vring *vr; | ||
244 | u8 type = ioread8(&mvdev->desc->type); | ||
245 | |||
246 | if (index >= ioread8(&mvdev->desc->num_vq)) | ||
247 | return ERR_PTR(-ENOENT); | ||
248 | |||
249 | if (!name) | ||
250 | return ERR_PTR(-ENOENT); | ||
251 | |||
252 | /* First assign the vring's allocated in host memory */ | ||
253 | vqconfig = mic_vq_config(mvdev->desc) + index; | ||
254 | memcpy_fromio(&config, vqconfig, sizeof(config)); | ||
255 | _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); | ||
256 | vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); | ||
257 | va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size); | ||
258 | if (!va) | ||
259 | return ERR_PTR(-ENOMEM); | ||
260 | mvdev->vr[index] = va; | ||
261 | memset_io(va, 0x0, _vr_size); | ||
262 | vq = vring_new_virtqueue(index, le16_to_cpu(config.num), | ||
263 | MIC_VIRTIO_RING_ALIGN, vdev, false, | ||
264 | (void __force *)va, mic_notify, callback, | ||
265 | name); | ||
266 | if (!vq) { | ||
267 | err = -ENOMEM; | ||
268 | goto unmap; | ||
269 | } | ||
270 | info = va + _vr_size; | ||
271 | magic = ioread32(&info->magic); | ||
272 | |||
273 | if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) { | ||
274 | err = -EIO; | ||
275 | goto unmap; | ||
276 | } | ||
277 | |||
278 | /* Allocate and reassign used ring now */ | ||
279 | mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + | ||
280 | sizeof(struct vring_used_elem) * | ||
281 | le16_to_cpu(config.num)); | ||
282 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
283 | get_order(mvdev->used_size[index])); | ||
284 | if (!used) { | ||
285 | err = -ENOMEM; | ||
286 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
287 | __func__, __LINE__, err); | ||
288 | goto del_vq; | ||
289 | } | ||
290 | iowrite64(virt_to_phys(used), &vqconfig->used_address); | ||
291 | |||
292 | /* | ||
293 | * To reassign the used ring here we are directly accessing | ||
294 | * struct vring_virtqueue which is a private data structure | ||
295 | * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in | ||
296 | * vring_new_virtqueue() would ensure that | ||
297 | * (&vq->vring == (struct vring *) (&vq->vq + 1)); | ||
298 | */ | ||
299 | vr = (struct vring *)(vq + 1); | ||
300 | vr->used = used; | ||
301 | |||
302 | vq->priv = mvdev; | ||
303 | return vq; | ||
304 | del_vq: | ||
305 | vring_del_virtqueue(vq); | ||
306 | unmap: | ||
307 | mic_card_unmap(mvdev->mdev, mvdev->vr[index]); | ||
308 | return ERR_PTR(err); | ||
309 | } | ||
310 | |||
311 | static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, | ||
312 | struct virtqueue *vqs[], | ||
313 | vq_callback_t *callbacks[], | ||
314 | const char * const names[]) | ||
315 | { | ||
316 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
317 | struct mic_device_ctrl __iomem *dc = mvdev->dc; | ||
318 | int i, err, retry; | ||
319 | |||
320 | /* We must have this many virtqueues. */ | ||
321 | if (nvqs > ioread8(&mvdev->desc->num_vq)) | ||
322 | return -ENOENT; | ||
323 | |||
324 | for (i = 0; i < nvqs; ++i) { | ||
325 | dev_dbg(mic_dev(mvdev), "%s: %d: %s\n", | ||
326 | __func__, i, names[i]); | ||
327 | vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]); | ||
328 | if (IS_ERR(vqs[i])) { | ||
329 | err = PTR_ERR(vqs[i]); | ||
330 | goto error; | ||
331 | } | ||
332 | } | ||
333 | |||
334 | iowrite8(1, &dc->used_address_updated); | ||
335 | /* | ||
336 | * Send an interrupt to the host to inform it that used | ||
337 | * rings have been re-assigned. | ||
338 | */ | ||
339 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | ||
340 | for (retry = 100; retry--;) { | ||
341 | if (!ioread8(&dc->used_address_updated)) | ||
342 | break; | ||
343 | msleep(100); | ||
344 | }; | ||
345 | |||
346 | dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry); | ||
347 | if (!retry) { | ||
348 | err = -ENODEV; | ||
349 | goto error; | ||
350 | } | ||
351 | |||
352 | return 0; | ||
353 | error: | ||
354 | mic_del_vqs(vdev); | ||
355 | return err; | ||
356 | } | ||
357 | |||
358 | /* | ||
359 | * The config ops structure as defined by virtio config | ||
360 | */ | ||
361 | static struct virtio_config_ops mic_vq_config_ops = { | ||
362 | .get_features = mic_get_features, | ||
363 | .finalize_features = mic_finalize_features, | ||
364 | .get = mic_get, | ||
365 | .set = mic_set, | ||
366 | .get_status = mic_get_status, | ||
367 | .set_status = mic_set_status, | ||
368 | .reset = mic_reset, | ||
369 | .find_vqs = mic_find_vqs, | ||
370 | .del_vqs = mic_del_vqs, | ||
371 | }; | ||
372 | |||
373 | static irqreturn_t | ||
374 | mic_virtio_intr_handler(int irq, void *data) | ||
375 | { | ||
376 | struct mic_vdev *mvdev = data; | ||
377 | struct virtqueue *vq; | ||
378 | |||
379 | mic_ack_interrupt(mvdev->mdev); | ||
380 | list_for_each_entry(vq, &mvdev->vdev.vqs, list) | ||
381 | vring_interrupt(0, vq); | ||
382 | |||
383 | return IRQ_HANDLED; | ||
384 | } | ||
385 | |||
386 | static void mic_virtio_release_dev(struct device *_d) | ||
387 | { | ||
388 | /* | ||
389 | * No need for a release method similar to virtio PCI. | ||
390 | * Provide an empty one to avoid getting a warning from core. | ||
391 | */ | ||
392 | } | ||
393 | |||
394 | /* | ||
395 | * adds a new device and register it with virtio | ||
396 | * appropriate drivers are loaded by the device model | ||
397 | */ | ||
398 | static int mic_add_device(struct mic_device_desc __iomem *d, | ||
399 | unsigned int offset, struct mic_driver *mdrv) | ||
400 | { | ||
401 | struct mic_vdev *mvdev; | ||
402 | int ret; | ||
403 | int virtio_db; | ||
404 | u8 type = ioread8(&d->type); | ||
405 | |||
406 | mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL); | ||
407 | if (!mvdev) { | ||
408 | dev_err(mdrv->dev, "Cannot allocate mic dev %u type %u\n", | ||
409 | offset, type); | ||
410 | return -ENOMEM; | ||
411 | } | ||
412 | |||
413 | mvdev->mdev = &mdrv->mdev; | ||
414 | mvdev->vdev.dev.parent = mdrv->dev; | ||
415 | mvdev->vdev.dev.release = mic_virtio_release_dev; | ||
416 | mvdev->vdev.id.device = type; | ||
417 | mvdev->vdev.config = &mic_vq_config_ops; | ||
418 | mvdev->desc = d; | ||
419 | mvdev->dc = (void __iomem *)d + mic_aligned_desc_size(d); | ||
420 | init_completion(&mvdev->reset_done); | ||
421 | |||
422 | virtio_db = mic_next_card_db(); | ||
423 | mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler, | ||
424 | NULL, "virtio intr", mvdev, virtio_db); | ||
425 | if (IS_ERR(mvdev->virtio_cookie)) { | ||
426 | ret = PTR_ERR(mvdev->virtio_cookie); | ||
427 | goto kfree; | ||
428 | } | ||
429 | iowrite8((u8)virtio_db, &mvdev->dc->h2c_vdev_db); | ||
430 | mvdev->c2h_vdev_db = ioread8(&mvdev->dc->c2h_vdev_db); | ||
431 | |||
432 | ret = register_virtio_device(&mvdev->vdev); | ||
433 | if (ret) { | ||
434 | dev_err(mic_dev(mvdev), | ||
435 | "Failed to register mic device %u type %u\n", | ||
436 | offset, type); | ||
437 | goto free_irq; | ||
438 | } | ||
439 | iowrite64((u64)mvdev, &mvdev->dc->vdev); | ||
440 | dev_dbg(mic_dev(mvdev), "%s: registered mic device %u type %u mvdev %p\n", | ||
441 | __func__, offset, type, mvdev); | ||
442 | |||
443 | return 0; | ||
444 | |||
445 | free_irq: | ||
446 | mic_free_card_irq(mvdev->virtio_cookie, mvdev); | ||
447 | kfree: | ||
448 | kfree(mvdev); | ||
449 | return ret; | ||
450 | } | ||
451 | |||
452 | /* | ||
453 | * match for a mic device with a specific desc pointer | ||
454 | */ | ||
455 | static int mic_match_desc(struct device *dev, void *data) | ||
456 | { | ||
457 | struct virtio_device *vdev = dev_to_virtio(dev); | ||
458 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
459 | |||
460 | return mvdev->desc == (void __iomem *)data; | ||
461 | } | ||
462 | |||
463 | static void mic_handle_config_change(struct mic_device_desc __iomem *d, | ||
464 | unsigned int offset, struct mic_driver *mdrv) | ||
465 | { | ||
466 | struct mic_device_ctrl __iomem *dc | ||
467 | = (void __iomem *)d + mic_aligned_desc_size(d); | ||
468 | struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev); | ||
469 | |||
470 | if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED) | ||
471 | return; | ||
472 | |||
473 | dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__); | ||
474 | virtio_config_changed(&mvdev->vdev); | ||
475 | iowrite8(1, &dc->guest_ack); | ||
476 | } | ||
477 | |||
478 | /* | ||
479 | * removes a virtio device if a hot remove event has been | ||
480 | * requested by the host. | ||
481 | */ | ||
482 | static int mic_remove_device(struct mic_device_desc __iomem *d, | ||
483 | unsigned int offset, struct mic_driver *mdrv) | ||
484 | { | ||
485 | struct mic_device_ctrl __iomem *dc | ||
486 | = (void __iomem *)d + mic_aligned_desc_size(d); | ||
487 | struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev); | ||
488 | u8 status; | ||
489 | int ret = -1; | ||
490 | |||
491 | if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { | ||
492 | dev_dbg(mdrv->dev, | ||
493 | "%s %d config_change %d type %d mvdev %p\n", | ||
494 | __func__, __LINE__, | ||
495 | ioread8(&dc->config_change), ioread8(&d->type), mvdev); | ||
496 | |||
497 | status = ioread8(&d->status); | ||
498 | reinit_completion(&mvdev->reset_done); | ||
499 | unregister_virtio_device(&mvdev->vdev); | ||
500 | mic_free_card_irq(mvdev->virtio_cookie, mvdev); | ||
501 | if (status & VIRTIO_CONFIG_S_DRIVER_OK) | ||
502 | wait_for_completion(&mvdev->reset_done); | ||
503 | kfree(mvdev); | ||
504 | iowrite8(1, &dc->guest_ack); | ||
505 | dev_dbg(mdrv->dev, "%s %d guest_ack %d\n", | ||
506 | __func__, __LINE__, ioread8(&dc->guest_ack)); | ||
507 | ret = 0; | ||
508 | } | ||
509 | |||
510 | return ret; | ||
511 | } | ||
512 | |||
513 | #define REMOVE_DEVICES true | ||
514 | |||
515 | static void mic_scan_devices(struct mic_driver *mdrv, bool remove) | ||
516 | { | ||
517 | s8 type; | ||
518 | unsigned int i; | ||
519 | struct mic_device_desc __iomem *d; | ||
520 | struct mic_device_ctrl __iomem *dc; | ||
521 | struct device *dev; | ||
522 | int ret; | ||
523 | |||
524 | for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE; | ||
525 | i += mic_total_desc_size(d)) { | ||
526 | d = mdrv->dp + i; | ||
527 | dc = (void __iomem *)d + mic_aligned_desc_size(d); | ||
528 | /* | ||
529 | * This read barrier is paired with the corresponding write | ||
530 | * barrier on the host which is inserted before adding or | ||
531 | * removing a virtio device descriptor, by updating the type. | ||
532 | */ | ||
533 | rmb(); | ||
534 | type = ioread8(&d->type); | ||
535 | |||
536 | /* end of list */ | ||
537 | if (type == 0) | ||
538 | break; | ||
539 | |||
540 | if (type == -1) | ||
541 | continue; | ||
542 | |||
543 | /* device already exists */ | ||
544 | dev = device_find_child(mdrv->dev, (void __force *)d, | ||
545 | mic_match_desc); | ||
546 | if (dev) { | ||
547 | if (remove) | ||
548 | iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, | ||
549 | &dc->config_change); | ||
550 | put_device(dev); | ||
551 | mic_handle_config_change(d, i, mdrv); | ||
552 | ret = mic_remove_device(d, i, mdrv); | ||
553 | if (!ret && !remove) | ||
554 | iowrite8(-1, &d->type); | ||
555 | if (remove) { | ||
556 | iowrite8(0, &dc->config_change); | ||
557 | iowrite8(0, &dc->guest_ack); | ||
558 | } | ||
559 | continue; | ||
560 | } | ||
561 | |||
562 | /* new device */ | ||
563 | dev_dbg(mdrv->dev, "%s %d Adding new virtio device %p\n", | ||
564 | __func__, __LINE__, d); | ||
565 | if (!remove) | ||
566 | mic_add_device(d, i, mdrv); | ||
567 | } | ||
568 | } | ||
569 | |||
570 | /* | ||
571 | * mic_hotplug_device tries to find changes in the device page. | ||
572 | */ | ||
573 | static void mic_hotplug_devices(struct work_struct *work) | ||
574 | { | ||
575 | struct mic_driver *mdrv = container_of(work, | ||
576 | struct mic_driver, hotplug_work); | ||
577 | |||
578 | mic_scan_devices(mdrv, !REMOVE_DEVICES); | ||
579 | } | ||
580 | |||
581 | /* | ||
582 | * Interrupt handler for hot plug/config changes etc. | ||
583 | */ | ||
584 | static irqreturn_t | ||
585 | mic_extint_handler(int irq, void *data) | ||
586 | { | ||
587 | struct mic_driver *mdrv = (struct mic_driver *)data; | ||
588 | |||
589 | dev_dbg(mdrv->dev, "%s %d hotplug work\n", | ||
590 | __func__, __LINE__); | ||
591 | mic_ack_interrupt(&mdrv->mdev); | ||
592 | schedule_work(&mdrv->hotplug_work); | ||
593 | return IRQ_HANDLED; | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | * Init function for virtio | ||
598 | */ | ||
599 | int mic_devices_init(struct mic_driver *mdrv) | ||
600 | { | ||
601 | int rc; | ||
602 | struct mic_bootparam __iomem *bootparam; | ||
603 | int config_db; | ||
604 | |||
605 | INIT_WORK(&mdrv->hotplug_work, mic_hotplug_devices); | ||
606 | mic_scan_devices(mdrv, !REMOVE_DEVICES); | ||
607 | |||
608 | config_db = mic_next_card_db(); | ||
609 | virtio_config_cookie = mic_request_card_irq(mic_extint_handler, NULL, | ||
610 | "virtio_config_intr", mdrv, | ||
611 | config_db); | ||
612 | if (IS_ERR(virtio_config_cookie)) { | ||
613 | rc = PTR_ERR(virtio_config_cookie); | ||
614 | goto exit; | ||
615 | } | ||
616 | |||
617 | bootparam = mdrv->dp; | ||
618 | iowrite8(config_db, &bootparam->h2c_config_db); | ||
619 | return 0; | ||
620 | exit: | ||
621 | return rc; | ||
622 | } | ||
623 | |||
624 | /* | ||
625 | * Uninit function for virtio | ||
626 | */ | ||
627 | void mic_devices_uninit(struct mic_driver *mdrv) | ||
628 | { | ||
629 | struct mic_bootparam __iomem *bootparam = mdrv->dp; | ||
630 | iowrite8(-1, &bootparam->h2c_config_db); | ||
631 | mic_free_card_irq(virtio_config_cookie, mdrv); | ||
632 | flush_work(&mdrv->hotplug_work); | ||
633 | mic_scan_devices(mdrv, REMOVE_DEVICES); | ||
634 | } | ||
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h deleted file mode 100644 index d0407ba53bb7..000000000000 --- a/drivers/misc/mic/card/mic_virtio.h +++ /dev/null | |||
@@ -1,76 +0,0 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2013 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Disclaimer: The codes contained in these modules may be specific to | ||
19 | * the Intel Software Development Platform codenamed: Knights Ferry, and | ||
20 | * the Intel product codenamed: Knights Corner, and are not backward | ||
21 | * compatible with other Intel products. Additionally, Intel will NOT | ||
22 | * support the codes or instruction set in future products. | ||
23 | * | ||
24 | * Intel MIC Card driver. | ||
25 | * | ||
26 | */ | ||
27 | #ifndef __MIC_CARD_VIRTIO_H | ||
28 | #define __MIC_CARD_VIRTIO_H | ||
29 | |||
30 | #include <linux/mic_common.h> | ||
31 | #include "mic_device.h" | ||
32 | |||
33 | /* | ||
34 | * 64 bit I/O access | ||
35 | */ | ||
36 | #ifndef ioread64 | ||
37 | #define ioread64 readq | ||
38 | #endif | ||
39 | #ifndef iowrite64 | ||
40 | #define iowrite64 writeq | ||
41 | #endif | ||
42 | |||
43 | static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) | ||
44 | { | ||
45 | return sizeof(*desc) | ||
46 | + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig) | ||
47 | + ioread8(&desc->feature_len) * 2 | ||
48 | + ioread8(&desc->config_len); | ||
49 | } | ||
50 | |||
51 | static inline struct mic_vqconfig __iomem * | ||
52 | mic_vq_config(struct mic_device_desc __iomem *desc) | ||
53 | { | ||
54 | return (struct mic_vqconfig __iomem *)(desc + 1); | ||
55 | } | ||
56 | |||
57 | static inline __u8 __iomem * | ||
58 | mic_vq_features(struct mic_device_desc __iomem *desc) | ||
59 | { | ||
60 | return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq)); | ||
61 | } | ||
62 | |||
63 | static inline __u8 __iomem * | ||
64 | mic_vq_configspace(struct mic_device_desc __iomem *desc) | ||
65 | { | ||
66 | return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2; | ||
67 | } | ||
68 | static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) | ||
69 | { | ||
70 | return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); | ||
71 | } | ||
72 | |||
73 | int mic_devices_init(struct mic_driver *mdrv); | ||
74 | void mic_devices_uninit(struct mic_driver *mdrv); | ||
75 | |||
76 | #endif | ||
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c index b2958ce2368c..b9f0710ffa6b 100644 --- a/drivers/misc/mic/card/mic_x100.c +++ b/drivers/misc/mic/card/mic_x100.c | |||
@@ -326,6 +326,7 @@ static int __init mic_init(void) | |||
326 | goto done; | 326 | goto done; |
327 | } | 327 | } |
328 | 328 | ||
329 | request_module("mic_x100_dma"); | ||
329 | mic_init_card_debugfs(); | 330 | mic_init_card_debugfs(); |
330 | ret = platform_device_register(&mic_platform_dev); | 331 | ret = platform_device_register(&mic_platform_dev); |
331 | if (ret) { | 332 | if (ret) { |
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c index 4b4b356c797d..7005cb1e01d2 100644 --- a/drivers/misc/mic/cosm/cosm_main.c +++ b/drivers/misc/mic/cosm/cosm_main.c | |||
@@ -153,8 +153,10 @@ void cosm_stop(struct cosm_device *cdev, bool force) | |||
153 | * stop(..) calls device_unregister and will crash the system if | 153 | * stop(..) calls device_unregister and will crash the system if |
154 | * called multiple times. | 154 | * called multiple times. |
155 | */ | 155 | */ |
156 | bool call_hw_ops = cdev->state != MIC_RESET_FAILED && | 156 | u8 state = cdev->state == MIC_RESETTING ? |
157 | cdev->state != MIC_READY; | 157 | cdev->prev_state : cdev->state; |
158 | bool call_hw_ops = state != MIC_RESET_FAILED && | ||
159 | state != MIC_READY; | ||
158 | 160 | ||
159 | if (cdev->state != MIC_RESETTING) | 161 | if (cdev->state != MIC_RESETTING) |
160 | cosm_set_state(cdev, MIC_RESETTING); | 162 | cosm_set_state(cdev, MIC_RESETTING); |
@@ -195,8 +197,11 @@ int cosm_reset(struct cosm_device *cdev) | |||
195 | 197 | ||
196 | mutex_lock(&cdev->cosm_mutex); | 198 | mutex_lock(&cdev->cosm_mutex); |
197 | if (cdev->state != MIC_READY) { | 199 | if (cdev->state != MIC_READY) { |
198 | cosm_set_state(cdev, MIC_RESETTING); | 200 | if (cdev->state != MIC_RESETTING) { |
199 | schedule_work(&cdev->reset_trigger_work); | 201 | cdev->prev_state = cdev->state; |
202 | cosm_set_state(cdev, MIC_RESETTING); | ||
203 | schedule_work(&cdev->reset_trigger_work); | ||
204 | } | ||
200 | } else { | 205 | } else { |
201 | dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__); | 206 | dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__); |
202 | rc = -EINVAL; | 207 | rc = -EINVAL; |
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile index 004d3db0f990..f3b502333ded 100644 --- a/drivers/misc/mic/host/Makefile +++ b/drivers/misc/mic/host/Makefile | |||
@@ -9,5 +9,3 @@ mic_host-objs += mic_smpt.o | |||
9 | mic_host-objs += mic_intr.o | 9 | mic_host-objs += mic_intr.o |
10 | mic_host-objs += mic_boot.o | 10 | mic_host-objs += mic_boot.o |
11 | mic_host-objs += mic_debugfs.o | 11 | mic_host-objs += mic_debugfs.o |
12 | mic_host-objs += mic_fops.o | ||
13 | mic_host-objs += mic_virtio.o | ||
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index 7845564dff64..8c91c9950b54 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c | |||
@@ -25,10 +25,117 @@ | |||
25 | #include <linux/mic_common.h> | 25 | #include <linux/mic_common.h> |
26 | #include <linux/mic_bus.h> | 26 | #include <linux/mic_bus.h> |
27 | #include "../bus/scif_bus.h" | 27 | #include "../bus/scif_bus.h" |
28 | #include "../bus/vop_bus.h" | ||
28 | #include "../common/mic_dev.h" | 29 | #include "../common/mic_dev.h" |
29 | #include "mic_device.h" | 30 | #include "mic_device.h" |
30 | #include "mic_smpt.h" | 31 | #include "mic_smpt.h" |
31 | #include "mic_virtio.h" | 32 | |
33 | static inline struct mic_device *vpdev_to_mdev(struct device *dev) | ||
34 | { | ||
35 | return dev_get_drvdata(dev->parent); | ||
36 | } | ||
37 | |||
38 | static dma_addr_t | ||
39 | _mic_dma_map_page(struct device *dev, struct page *page, | ||
40 | unsigned long offset, size_t size, | ||
41 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
42 | { | ||
43 | void *va = phys_to_virt(page_to_phys(page)) + offset; | ||
44 | struct mic_device *mdev = vpdev_to_mdev(dev); | ||
45 | |||
46 | return mic_map_single(mdev, va, size); | ||
47 | } | ||
48 | |||
49 | static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | ||
50 | size_t size, enum dma_data_direction dir, | ||
51 | struct dma_attrs *attrs) | ||
52 | { | ||
53 | struct mic_device *mdev = vpdev_to_mdev(dev); | ||
54 | |||
55 | mic_unmap_single(mdev, dma_addr, size); | ||
56 | } | ||
57 | |||
58 | static const struct dma_map_ops _mic_dma_ops = { | ||
59 | .map_page = _mic_dma_map_page, | ||
60 | .unmap_page = _mic_dma_unmap_page, | ||
61 | }; | ||
62 | |||
63 | static struct mic_irq * | ||
64 | __mic_request_irq(struct vop_device *vpdev, | ||
65 | irqreturn_t (*func)(int irq, void *data), | ||
66 | const char *name, void *data, int intr_src) | ||
67 | { | ||
68 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
69 | |||
70 | return mic_request_threaded_irq(mdev, func, NULL, name, data, | ||
71 | intr_src, MIC_INTR_DB); | ||
72 | } | ||
73 | |||
74 | static void __mic_free_irq(struct vop_device *vpdev, | ||
75 | struct mic_irq *cookie, void *data) | ||
76 | { | ||
77 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
78 | |||
79 | return mic_free_irq(mdev, cookie, data); | ||
80 | } | ||
81 | |||
82 | static void __mic_ack_interrupt(struct vop_device *vpdev, int num) | ||
83 | { | ||
84 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
85 | |||
86 | mdev->ops->intr_workarounds(mdev); | ||
87 | } | ||
88 | |||
89 | static int __mic_next_db(struct vop_device *vpdev) | ||
90 | { | ||
91 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
92 | |||
93 | return mic_next_db(mdev); | ||
94 | } | ||
95 | |||
96 | static void *__mic_get_dp(struct vop_device *vpdev) | ||
97 | { | ||
98 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
99 | |||
100 | return mdev->dp; | ||
101 | } | ||
102 | |||
103 | static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev) | ||
104 | { | ||
105 | return NULL; | ||
106 | } | ||
107 | |||
108 | static void __mic_send_intr(struct vop_device *vpdev, int db) | ||
109 | { | ||
110 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
111 | |||
112 | mdev->ops->send_intr(mdev, db); | ||
113 | } | ||
114 | |||
115 | static void __iomem *__mic_ioremap(struct vop_device *vpdev, | ||
116 | dma_addr_t pa, size_t len) | ||
117 | { | ||
118 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
119 | |||
120 | return mdev->aper.va + pa; | ||
121 | } | ||
122 | |||
123 | static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va) | ||
124 | { | ||
125 | /* nothing to do */ | ||
126 | } | ||
127 | |||
128 | static struct vop_hw_ops vop_hw_ops = { | ||
129 | .request_irq = __mic_request_irq, | ||
130 | .free_irq = __mic_free_irq, | ||
131 | .ack_interrupt = __mic_ack_interrupt, | ||
132 | .next_db = __mic_next_db, | ||
133 | .get_dp = __mic_get_dp, | ||
134 | .get_remote_dp = __mic_get_remote_dp, | ||
135 | .send_intr = __mic_send_intr, | ||
136 | .ioremap = __mic_ioremap, | ||
137 | .iounmap = __mic_iounmap, | ||
138 | }; | ||
32 | 139 | ||
33 | static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) | 140 | static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) |
34 | { | 141 | { |
@@ -315,7 +422,6 @@ static int mic_request_dma_chans(struct mic_device *mdev) | |||
315 | dma_cap_mask_t mask; | 422 | dma_cap_mask_t mask; |
316 | struct dma_chan *chan; | 423 | struct dma_chan *chan; |
317 | 424 | ||
318 | request_module("mic_x100_dma"); | ||
319 | dma_cap_zero(mask); | 425 | dma_cap_zero(mask); |
320 | dma_cap_set(DMA_MEMCPY, mask); | 426 | dma_cap_set(DMA_MEMCPY, mask); |
321 | 427 | ||
@@ -387,9 +493,18 @@ static int _mic_start(struct cosm_device *cdev, int id) | |||
387 | goto dma_free; | 493 | goto dma_free; |
388 | } | 494 | } |
389 | 495 | ||
496 | mdev->vpdev = vop_register_device(&mdev->pdev->dev, | ||
497 | VOP_DEV_TRNSP, &_mic_dma_ops, | ||
498 | &vop_hw_ops, id + 1, &mdev->aper, | ||
499 | mdev->dma_ch[0]); | ||
500 | if (IS_ERR(mdev->vpdev)) { | ||
501 | rc = PTR_ERR(mdev->vpdev); | ||
502 | goto scif_remove; | ||
503 | } | ||
504 | |||
390 | rc = mdev->ops->load_mic_fw(mdev, NULL); | 505 | rc = mdev->ops->load_mic_fw(mdev, NULL); |
391 | if (rc) | 506 | if (rc) |
392 | goto scif_remove; | 507 | goto vop_remove; |
393 | mic_smpt_restore(mdev); | 508 | mic_smpt_restore(mdev); |
394 | mic_intr_restore(mdev); | 509 | mic_intr_restore(mdev); |
395 | mdev->intr_ops->enable_interrupts(mdev); | 510 | mdev->intr_ops->enable_interrupts(mdev); |
@@ -397,6 +512,8 @@ static int _mic_start(struct cosm_device *cdev, int id) | |||
397 | mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32); | 512 | mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32); |
398 | mdev->ops->send_firmware_intr(mdev); | 513 | mdev->ops->send_firmware_intr(mdev); |
399 | goto unlock_ret; | 514 | goto unlock_ret; |
515 | vop_remove: | ||
516 | vop_unregister_device(mdev->vpdev); | ||
400 | scif_remove: | 517 | scif_remove: |
401 | scif_unregister_device(mdev->scdev); | 518 | scif_unregister_device(mdev->scdev); |
402 | dma_free: | 519 | dma_free: |
@@ -423,7 +540,7 @@ static void _mic_stop(struct cosm_device *cdev, bool force) | |||
423 | * will be the first to be registered and the last to be | 540 | * will be the first to be registered and the last to be |
424 | * unregistered. | 541 | * unregistered. |
425 | */ | 542 | */ |
426 | mic_virtio_reset_devices(mdev); | 543 | vop_unregister_device(mdev->vpdev); |
427 | scif_unregister_device(mdev->scdev); | 544 | scif_unregister_device(mdev->scdev); |
428 | mic_free_dma_chans(mdev); | 545 | mic_free_dma_chans(mdev); |
429 | mbus_unregister_device(mdev->dma_mbdev); | 546 | mbus_unregister_device(mdev->dma_mbdev); |
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c index 10581600777a..0a9daba8bb5d 100644 --- a/drivers/misc/mic/host/mic_debugfs.c +++ b/drivers/misc/mic/host/mic_debugfs.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include "../common/mic_dev.h" | 26 | #include "../common/mic_dev.h" |
27 | #include "mic_device.h" | 27 | #include "mic_device.h" |
28 | #include "mic_smpt.h" | 28 | #include "mic_smpt.h" |
29 | #include "mic_virtio.h" | ||
30 | 29 | ||
31 | /* Debugfs parent dir */ | 30 | /* Debugfs parent dir */ |
32 | static struct dentry *mic_dbg; | 31 | static struct dentry *mic_dbg; |
@@ -100,190 +99,6 @@ static const struct file_operations post_code_ops = { | |||
100 | .release = mic_post_code_debug_release | 99 | .release = mic_post_code_debug_release |
101 | }; | 100 | }; |
102 | 101 | ||
103 | static int mic_dp_show(struct seq_file *s, void *pos) | ||
104 | { | ||
105 | struct mic_device *mdev = s->private; | ||
106 | struct mic_device_desc *d; | ||
107 | struct mic_device_ctrl *dc; | ||
108 | struct mic_vqconfig *vqconfig; | ||
109 | __u32 *features; | ||
110 | __u8 *config; | ||
111 | struct mic_bootparam *bootparam = mdev->dp; | ||
112 | int i, j; | ||
113 | |||
114 | seq_printf(s, "Bootparam: magic 0x%x\n", | ||
115 | bootparam->magic); | ||
116 | seq_printf(s, "Bootparam: h2c_config_db %d\n", | ||
117 | bootparam->h2c_config_db); | ||
118 | seq_printf(s, "Bootparam: node_id %d\n", | ||
119 | bootparam->node_id); | ||
120 | seq_printf(s, "Bootparam: c2h_scif_db %d\n", | ||
121 | bootparam->c2h_scif_db); | ||
122 | seq_printf(s, "Bootparam: h2c_scif_db %d\n", | ||
123 | bootparam->h2c_scif_db); | ||
124 | seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n", | ||
125 | bootparam->scif_host_dma_addr); | ||
126 | seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n", | ||
127 | bootparam->scif_card_dma_addr); | ||
128 | |||
129 | |||
130 | for (i = sizeof(*bootparam); i < MIC_DP_SIZE; | ||
131 | i += mic_total_desc_size(d)) { | ||
132 | d = mdev->dp + i; | ||
133 | dc = (void *)d + mic_aligned_desc_size(d); | ||
134 | |||
135 | /* end of list */ | ||
136 | if (d->type == 0) | ||
137 | break; | ||
138 | |||
139 | if (d->type == -1) | ||
140 | continue; | ||
141 | |||
142 | seq_printf(s, "Type %d ", d->type); | ||
143 | seq_printf(s, "Num VQ %d ", d->num_vq); | ||
144 | seq_printf(s, "Feature Len %d\n", d->feature_len); | ||
145 | seq_printf(s, "Config Len %d ", d->config_len); | ||
146 | seq_printf(s, "Shutdown Status %d\n", d->status); | ||
147 | |||
148 | for (j = 0; j < d->num_vq; j++) { | ||
149 | vqconfig = mic_vq_config(d) + j; | ||
150 | seq_printf(s, "vqconfig[%d]: ", j); | ||
151 | seq_printf(s, "address 0x%llx ", vqconfig->address); | ||
152 | seq_printf(s, "num %d ", vqconfig->num); | ||
153 | seq_printf(s, "used address 0x%llx\n", | ||
154 | vqconfig->used_address); | ||
155 | } | ||
156 | |||
157 | features = (__u32 *)mic_vq_features(d); | ||
158 | seq_printf(s, "Features: Host 0x%x ", features[0]); | ||
159 | seq_printf(s, "Guest 0x%x\n", features[1]); | ||
160 | |||
161 | config = mic_vq_configspace(d); | ||
162 | for (j = 0; j < d->config_len; j++) | ||
163 | seq_printf(s, "config[%d]=%d\n", j, config[j]); | ||
164 | |||
165 | seq_puts(s, "Device control:\n"); | ||
166 | seq_printf(s, "Config Change %d ", dc->config_change); | ||
167 | seq_printf(s, "Vdev reset %d\n", dc->vdev_reset); | ||
168 | seq_printf(s, "Guest Ack %d ", dc->guest_ack); | ||
169 | seq_printf(s, "Host ack %d\n", dc->host_ack); | ||
170 | seq_printf(s, "Used address updated %d ", | ||
171 | dc->used_address_updated); | ||
172 | seq_printf(s, "Vdev 0x%llx\n", dc->vdev); | ||
173 | seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db); | ||
174 | seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db); | ||
175 | } | ||
176 | |||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | static int mic_dp_debug_open(struct inode *inode, struct file *file) | ||
181 | { | ||
182 | return single_open(file, mic_dp_show, inode->i_private); | ||
183 | } | ||
184 | |||
185 | static int mic_dp_debug_release(struct inode *inode, struct file *file) | ||
186 | { | ||
187 | return single_release(inode, file); | ||
188 | } | ||
189 | |||
190 | static const struct file_operations dp_ops = { | ||
191 | .owner = THIS_MODULE, | ||
192 | .open = mic_dp_debug_open, | ||
193 | .read = seq_read, | ||
194 | .llseek = seq_lseek, | ||
195 | .release = mic_dp_debug_release | ||
196 | }; | ||
197 | |||
198 | static int mic_vdev_info_show(struct seq_file *s, void *unused) | ||
199 | { | ||
200 | struct mic_device *mdev = s->private; | ||
201 | struct list_head *pos, *tmp; | ||
202 | struct mic_vdev *mvdev; | ||
203 | int i, j; | ||
204 | |||
205 | mutex_lock(&mdev->mic_mutex); | ||
206 | list_for_each_safe(pos, tmp, &mdev->vdev_list) { | ||
207 | mvdev = list_entry(pos, struct mic_vdev, list); | ||
208 | seq_printf(s, "VDEV type %d state %s in %ld out %ld\n", | ||
209 | mvdev->virtio_id, | ||
210 | mic_vdevup(mvdev) ? "UP" : "DOWN", | ||
211 | mvdev->in_bytes, | ||
212 | mvdev->out_bytes); | ||
213 | for (i = 0; i < MIC_MAX_VRINGS; i++) { | ||
214 | struct vring_desc *desc; | ||
215 | struct vring_avail *avail; | ||
216 | struct vring_used *used; | ||
217 | struct mic_vringh *mvr = &mvdev->mvr[i]; | ||
218 | struct vringh *vrh = &mvr->vrh; | ||
219 | int num = vrh->vring.num; | ||
220 | if (!num) | ||
221 | continue; | ||
222 | desc = vrh->vring.desc; | ||
223 | seq_printf(s, "vring i %d avail_idx %d", | ||
224 | i, mvr->vring.info->avail_idx & (num - 1)); | ||
225 | seq_printf(s, " vring i %d avail_idx %d\n", | ||
226 | i, mvr->vring.info->avail_idx); | ||
227 | seq_printf(s, "vrh i %d weak_barriers %d", | ||
228 | i, vrh->weak_barriers); | ||
229 | seq_printf(s, " last_avail_idx %d last_used_idx %d", | ||
230 | vrh->last_avail_idx, vrh->last_used_idx); | ||
231 | seq_printf(s, " completed %d\n", vrh->completed); | ||
232 | for (j = 0; j < num; j++) { | ||
233 | seq_printf(s, "desc[%d] addr 0x%llx len %d", | ||
234 | j, desc->addr, desc->len); | ||
235 | seq_printf(s, " flags 0x%x next %d\n", | ||
236 | desc->flags, desc->next); | ||
237 | desc++; | ||
238 | } | ||
239 | avail = vrh->vring.avail; | ||
240 | seq_printf(s, "avail flags 0x%x idx %d\n", | ||
241 | vringh16_to_cpu(vrh, avail->flags), | ||
242 | vringh16_to_cpu(vrh, avail->idx) & (num - 1)); | ||
243 | seq_printf(s, "avail flags 0x%x idx %d\n", | ||
244 | vringh16_to_cpu(vrh, avail->flags), | ||
245 | vringh16_to_cpu(vrh, avail->idx)); | ||
246 | for (j = 0; j < num; j++) | ||
247 | seq_printf(s, "avail ring[%d] %d\n", | ||
248 | j, avail->ring[j]); | ||
249 | used = vrh->vring.used; | ||
250 | seq_printf(s, "used flags 0x%x idx %d\n", | ||
251 | vringh16_to_cpu(vrh, used->flags), | ||
252 | vringh16_to_cpu(vrh, used->idx) & (num - 1)); | ||
253 | seq_printf(s, "used flags 0x%x idx %d\n", | ||
254 | vringh16_to_cpu(vrh, used->flags), | ||
255 | vringh16_to_cpu(vrh, used->idx)); | ||
256 | for (j = 0; j < num; j++) | ||
257 | seq_printf(s, "used ring[%d] id %d len %d\n", | ||
258 | j, vringh32_to_cpu(vrh, | ||
259 | used->ring[j].id), | ||
260 | vringh32_to_cpu(vrh, | ||
261 | used->ring[j].len)); | ||
262 | } | ||
263 | } | ||
264 | mutex_unlock(&mdev->mic_mutex); | ||
265 | |||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | static int mic_vdev_info_debug_open(struct inode *inode, struct file *file) | ||
270 | { | ||
271 | return single_open(file, mic_vdev_info_show, inode->i_private); | ||
272 | } | ||
273 | |||
274 | static int mic_vdev_info_debug_release(struct inode *inode, struct file *file) | ||
275 | { | ||
276 | return single_release(inode, file); | ||
277 | } | ||
278 | |||
279 | static const struct file_operations vdev_info_ops = { | ||
280 | .owner = THIS_MODULE, | ||
281 | .open = mic_vdev_info_debug_open, | ||
282 | .read = seq_read, | ||
283 | .llseek = seq_lseek, | ||
284 | .release = mic_vdev_info_debug_release | ||
285 | }; | ||
286 | |||
287 | static int mic_msi_irq_info_show(struct seq_file *s, void *pos) | 102 | static int mic_msi_irq_info_show(struct seq_file *s, void *pos) |
288 | { | 103 | { |
289 | struct mic_device *mdev = s->private; | 104 | struct mic_device *mdev = s->private; |
@@ -367,11 +182,6 @@ void mic_create_debug_dir(struct mic_device *mdev) | |||
367 | debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev, | 182 | debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev, |
368 | &post_code_ops); | 183 | &post_code_ops); |
369 | 184 | ||
370 | debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops); | ||
371 | |||
372 | debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev, | ||
373 | &vdev_info_ops); | ||
374 | |||
375 | debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev, | 185 | debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev, |
376 | &msi_irq_info_ops); | 186 | &msi_irq_info_ops); |
377 | } | 187 | } |
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h index 461184a12fbb..52b12b22f4ae 100644 --- a/drivers/misc/mic/host/mic_device.h +++ b/drivers/misc/mic/host/mic_device.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/miscdevice.h> | 29 | #include <linux/miscdevice.h> |
30 | #include <linux/mic_bus.h> | 30 | #include <linux/mic_bus.h> |
31 | #include "../bus/scif_bus.h" | 31 | #include "../bus/scif_bus.h" |
32 | #include "../bus/vop_bus.h" | ||
32 | #include "../bus/cosm_bus.h" | 33 | #include "../bus/cosm_bus.h" |
33 | #include "mic_intr.h" | 34 | #include "mic_intr.h" |
34 | 35 | ||
@@ -64,13 +65,11 @@ extern struct cosm_hw_ops cosm_hw_ops; | |||
64 | * @bootaddr: MIC boot address. | 65 | * @bootaddr: MIC boot address. |
65 | * @dp: virtio device page | 66 | * @dp: virtio device page |
66 | * @dp_dma_addr: virtio device page DMA address. | 67 | * @dp_dma_addr: virtio device page DMA address. |
67 | * @name: name for the misc char device | ||
68 | * @miscdev: registered misc char device | ||
69 | * @vdev_list: list of virtio devices. | ||
70 | * @dma_mbdev: MIC BUS DMA device. | 68 | * @dma_mbdev: MIC BUS DMA device. |
71 | * @dma_ch - Array of DMA channels | 69 | * @dma_ch - Array of DMA channels |
72 | * @num_dma_ch - Number of DMA channels available | 70 | * @num_dma_ch - Number of DMA channels available |
73 | * @scdev: SCIF device on the SCIF virtual bus. | 71 | * @scdev: SCIF device on the SCIF virtual bus. |
72 | * @vpdev: Virtio over PCIe device on the VOP virtual bus. | ||
74 | * @cosm_dev: COSM device | 73 | * @cosm_dev: COSM device |
75 | */ | 74 | */ |
76 | struct mic_device { | 75 | struct mic_device { |
@@ -91,13 +90,11 @@ struct mic_device { | |||
91 | u32 bootaddr; | 90 | u32 bootaddr; |
92 | void *dp; | 91 | void *dp; |
93 | dma_addr_t dp_dma_addr; | 92 | dma_addr_t dp_dma_addr; |
94 | char name[16]; | ||
95 | struct miscdevice miscdev; | ||
96 | struct list_head vdev_list; | ||
97 | struct mbus_device *dma_mbdev; | 93 | struct mbus_device *dma_mbdev; |
98 | struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN]; | 94 | struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN]; |
99 | int num_dma_ch; | 95 | int num_dma_ch; |
100 | struct scif_hw_dev *scdev; | 96 | struct scif_hw_dev *scdev; |
97 | struct vop_device *vpdev; | ||
101 | struct cosm_device *cosm_dev; | 98 | struct cosm_device *cosm_dev; |
102 | }; | 99 | }; |
103 | 100 | ||
diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c deleted file mode 100644 index 8cc1d90cd949..000000000000 --- a/drivers/misc/mic/host/mic_fops.c +++ /dev/null | |||
@@ -1,222 +0,0 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2013 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Intel MIC Host driver. | ||
19 | * | ||
20 | */ | ||
21 | #include <linux/poll.h> | ||
22 | #include <linux/pci.h> | ||
23 | |||
24 | #include <linux/mic_common.h> | ||
25 | #include "../common/mic_dev.h" | ||
26 | #include "mic_device.h" | ||
27 | #include "mic_fops.h" | ||
28 | #include "mic_virtio.h" | ||
29 | |||
30 | int mic_open(struct inode *inode, struct file *f) | ||
31 | { | ||
32 | struct mic_vdev *mvdev; | ||
33 | struct mic_device *mdev = container_of(f->private_data, | ||
34 | struct mic_device, miscdev); | ||
35 | |||
36 | mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL); | ||
37 | if (!mvdev) | ||
38 | return -ENOMEM; | ||
39 | |||
40 | init_waitqueue_head(&mvdev->waitq); | ||
41 | INIT_LIST_HEAD(&mvdev->list); | ||
42 | mvdev->mdev = mdev; | ||
43 | mvdev->virtio_id = -1; | ||
44 | |||
45 | f->private_data = mvdev; | ||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | int mic_release(struct inode *inode, struct file *f) | ||
50 | { | ||
51 | struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; | ||
52 | |||
53 | if (-1 != mvdev->virtio_id) | ||
54 | mic_virtio_del_device(mvdev); | ||
55 | f->private_data = NULL; | ||
56 | kfree(mvdev); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | long mic_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | ||
61 | { | ||
62 | struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; | ||
63 | void __user *argp = (void __user *)arg; | ||
64 | int ret; | ||
65 | |||
66 | switch (cmd) { | ||
67 | case MIC_VIRTIO_ADD_DEVICE: | ||
68 | { | ||
69 | ret = mic_virtio_add_device(mvdev, argp); | ||
70 | if (ret < 0) { | ||
71 | dev_err(mic_dev(mvdev), | ||
72 | "%s %d errno ret %d\n", | ||
73 | __func__, __LINE__, ret); | ||
74 | return ret; | ||
75 | } | ||
76 | break; | ||
77 | } | ||
78 | case MIC_VIRTIO_COPY_DESC: | ||
79 | { | ||
80 | struct mic_copy_desc copy; | ||
81 | |||
82 | ret = mic_vdev_inited(mvdev); | ||
83 | if (ret) | ||
84 | return ret; | ||
85 | |||
86 | if (copy_from_user(©, argp, sizeof(copy))) | ||
87 | return -EFAULT; | ||
88 | |||
89 | dev_dbg(mic_dev(mvdev), | ||
90 | "%s %d === iovcnt 0x%x vr_idx 0x%x update_used %d\n", | ||
91 | __func__, __LINE__, copy.iovcnt, copy.vr_idx, | ||
92 | copy.update_used); | ||
93 | |||
94 | ret = mic_virtio_copy_desc(mvdev, ©); | ||
95 | if (ret < 0) { | ||
96 | dev_err(mic_dev(mvdev), | ||
97 | "%s %d errno ret %d\n", | ||
98 | __func__, __LINE__, ret); | ||
99 | return ret; | ||
100 | } | ||
101 | if (copy_to_user( | ||
102 | &((struct mic_copy_desc __user *)argp)->out_len, | ||
103 | ©.out_len, sizeof(copy.out_len))) { | ||
104 | dev_err(mic_dev(mvdev), "%s %d errno ret %d\n", | ||
105 | __func__, __LINE__, -EFAULT); | ||
106 | return -EFAULT; | ||
107 | } | ||
108 | break; | ||
109 | } | ||
110 | case MIC_VIRTIO_CONFIG_CHANGE: | ||
111 | { | ||
112 | ret = mic_vdev_inited(mvdev); | ||
113 | if (ret) | ||
114 | return ret; | ||
115 | |||
116 | ret = mic_virtio_config_change(mvdev, argp); | ||
117 | if (ret < 0) { | ||
118 | dev_err(mic_dev(mvdev), | ||
119 | "%s %d errno ret %d\n", | ||
120 | __func__, __LINE__, ret); | ||
121 | return ret; | ||
122 | } | ||
123 | break; | ||
124 | } | ||
125 | default: | ||
126 | return -ENOIOCTLCMD; | ||
127 | }; | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and | ||
133 | * not when previously enqueued buffers may be available. This means that | ||
134 | * in the card->host (TX) path, when userspace is unblocked by poll it | ||
135 | * must drain all available descriptors or it can stall. | ||
136 | */ | ||
137 | unsigned int mic_poll(struct file *f, poll_table *wait) | ||
138 | { | ||
139 | struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; | ||
140 | int mask = 0; | ||
141 | |||
142 | poll_wait(f, &mvdev->waitq, wait); | ||
143 | |||
144 | if (mic_vdev_inited(mvdev)) { | ||
145 | mask = POLLERR; | ||
146 | } else if (mvdev->poll_wake) { | ||
147 | mvdev->poll_wake = 0; | ||
148 | mask = POLLIN | POLLOUT; | ||
149 | } | ||
150 | |||
151 | return mask; | ||
152 | } | ||
153 | |||
154 | static inline int | ||
155 | mic_query_offset(struct mic_vdev *mvdev, unsigned long offset, | ||
156 | unsigned long *size, unsigned long *pa) | ||
157 | { | ||
158 | struct mic_device *mdev = mvdev->mdev; | ||
159 | unsigned long start = MIC_DP_SIZE; | ||
160 | int i; | ||
161 | |||
162 | /* | ||
163 | * MMAP interface is as follows: | ||
164 | * offset region | ||
165 | * 0x0 virtio device_page | ||
166 | * 0x1000 first vring | ||
167 | * 0x1000 + size of 1st vring second vring | ||
168 | * .... | ||
169 | */ | ||
170 | if (!offset) { | ||
171 | *pa = virt_to_phys(mdev->dp); | ||
172 | *size = MIC_DP_SIZE; | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | for (i = 0; i < mvdev->dd->num_vq; i++) { | ||
177 | struct mic_vringh *mvr = &mvdev->mvr[i]; | ||
178 | if (offset == start) { | ||
179 | *pa = virt_to_phys(mvr->vring.va); | ||
180 | *size = mvr->vring.len; | ||
181 | return 0; | ||
182 | } | ||
183 | start += mvr->vring.len; | ||
184 | } | ||
185 | return -1; | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | * Maps the device page and virtio rings to user space for readonly access. | ||
190 | */ | ||
191 | int | ||
192 | mic_mmap(struct file *f, struct vm_area_struct *vma) | ||
193 | { | ||
194 | struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; | ||
195 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | ||
196 | unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; | ||
197 | int i, err; | ||
198 | |||
199 | err = mic_vdev_inited(mvdev); | ||
200 | if (err) | ||
201 | return err; | ||
202 | |||
203 | if (vma->vm_flags & VM_WRITE) | ||
204 | return -EACCES; | ||
205 | |||
206 | while (size_rem) { | ||
207 | i = mic_query_offset(mvdev, offset, &size, &pa); | ||
208 | if (i < 0) | ||
209 | return -EINVAL; | ||
210 | err = remap_pfn_range(vma, vma->vm_start + offset, | ||
211 | pa >> PAGE_SHIFT, size, vma->vm_page_prot); | ||
212 | if (err) | ||
213 | return err; | ||
214 | dev_dbg(mic_dev(mvdev), | ||
215 | "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n", | ||
216 | __func__, __LINE__, mvdev->virtio_id, size, offset, | ||
217 | pa, vma->vm_start + offset); | ||
218 | size_rem -= size; | ||
219 | offset += size; | ||
220 | } | ||
221 | return 0; | ||
222 | } | ||
diff --git a/drivers/misc/mic/host/mic_fops.h b/drivers/misc/mic/host/mic_fops.h deleted file mode 100644 index dc3893dff667..000000000000 --- a/drivers/misc/mic/host/mic_fops.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2013 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Intel MIC Host driver. | ||
19 | * | ||
20 | */ | ||
21 | #ifndef _MIC_FOPS_H_ | ||
22 | #define _MIC_FOPS_H_ | ||
23 | |||
24 | int mic_open(struct inode *inode, struct file *filp); | ||
25 | int mic_release(struct inode *inode, struct file *filp); | ||
26 | ssize_t mic_read(struct file *filp, char __user *buf, | ||
27 | size_t count, loff_t *pos); | ||
28 | long mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); | ||
29 | int mic_mmap(struct file *f, struct vm_area_struct *vma); | ||
30 | unsigned int mic_poll(struct file *f, poll_table *wait); | ||
31 | |||
32 | #endif | ||
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c index 153894e7ed5b..035be3e9ceba 100644 --- a/drivers/misc/mic/host/mic_main.c +++ b/drivers/misc/mic/host/mic_main.c | |||
@@ -27,8 +27,6 @@ | |||
27 | #include "mic_device.h" | 27 | #include "mic_device.h" |
28 | #include "mic_x100.h" | 28 | #include "mic_x100.h" |
29 | #include "mic_smpt.h" | 29 | #include "mic_smpt.h" |
30 | #include "mic_fops.h" | ||
31 | #include "mic_virtio.h" | ||
32 | 30 | ||
33 | static const char mic_driver_name[] = "mic"; | 31 | static const char mic_driver_name[] = "mic"; |
34 | 32 | ||
@@ -57,17 +55,6 @@ MODULE_DEVICE_TABLE(pci, mic_pci_tbl); | |||
57 | 55 | ||
58 | /* ID allocator for MIC devices */ | 56 | /* ID allocator for MIC devices */ |
59 | static struct ida g_mic_ida; | 57 | static struct ida g_mic_ida; |
60 | /* Base device node number for MIC devices */ | ||
61 | static dev_t g_mic_devno; | ||
62 | |||
63 | static const struct file_operations mic_fops = { | ||
64 | .open = mic_open, | ||
65 | .release = mic_release, | ||
66 | .unlocked_ioctl = mic_ioctl, | ||
67 | .poll = mic_poll, | ||
68 | .mmap = mic_mmap, | ||
69 | .owner = THIS_MODULE, | ||
70 | }; | ||
71 | 58 | ||
72 | /* Initialize the device page */ | 59 | /* Initialize the device page */ |
73 | static int mic_dp_init(struct mic_device *mdev) | 60 | static int mic_dp_init(struct mic_device *mdev) |
@@ -169,7 +156,6 @@ mic_device_init(struct mic_device *mdev, struct pci_dev *pdev) | |||
169 | mic_ops_init(mdev); | 156 | mic_ops_init(mdev); |
170 | mutex_init(&mdev->mic_mutex); | 157 | mutex_init(&mdev->mic_mutex); |
171 | mdev->irq_info.next_avail_src = 0; | 158 | mdev->irq_info.next_avail_src = 0; |
172 | INIT_LIST_HEAD(&mdev->vdev_list); | ||
173 | } | 159 | } |
174 | 160 | ||
175 | /** | 161 | /** |
@@ -259,30 +245,15 @@ static int mic_probe(struct pci_dev *pdev, | |||
259 | goto smpt_uninit; | 245 | goto smpt_uninit; |
260 | } | 246 | } |
261 | mic_bootparam_init(mdev); | 247 | mic_bootparam_init(mdev); |
262 | |||
263 | mic_create_debug_dir(mdev); | 248 | mic_create_debug_dir(mdev); |
264 | 249 | ||
265 | mdev->miscdev.minor = MISC_DYNAMIC_MINOR; | ||
266 | snprintf(mdev->name, sizeof(mdev->name), "mic%d", mdev->id); | ||
267 | mdev->miscdev.name = mdev->name; | ||
268 | mdev->miscdev.fops = &mic_fops; | ||
269 | mdev->miscdev.parent = &mdev->pdev->dev; | ||
270 | rc = misc_register(&mdev->miscdev); | ||
271 | if (rc) { | ||
272 | dev_err(&pdev->dev, "misc_register err id %d rc %d\n", | ||
273 | mdev->id, rc); | ||
274 | goto cleanup_debug_dir; | ||
275 | } | ||
276 | |||
277 | mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops); | 250 | mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops); |
278 | if (IS_ERR(mdev->cosm_dev)) { | 251 | if (IS_ERR(mdev->cosm_dev)) { |
279 | rc = PTR_ERR(mdev->cosm_dev); | 252 | rc = PTR_ERR(mdev->cosm_dev); |
280 | dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc); | 253 | dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc); |
281 | goto misc_dereg; | 254 | goto cleanup_debug_dir; |
282 | } | 255 | } |
283 | return 0; | 256 | return 0; |
284 | misc_dereg: | ||
285 | misc_deregister(&mdev->miscdev); | ||
286 | cleanup_debug_dir: | 257 | cleanup_debug_dir: |
287 | mic_delete_debug_dir(mdev); | 258 | mic_delete_debug_dir(mdev); |
288 | mic_dp_uninit(mdev); | 259 | mic_dp_uninit(mdev); |
@@ -323,7 +294,6 @@ static void mic_remove(struct pci_dev *pdev) | |||
323 | return; | 294 | return; |
324 | 295 | ||
325 | cosm_unregister_device(mdev->cosm_dev); | 296 | cosm_unregister_device(mdev->cosm_dev); |
326 | misc_deregister(&mdev->miscdev); | ||
327 | mic_delete_debug_dir(mdev); | 297 | mic_delete_debug_dir(mdev); |
328 | mic_dp_uninit(mdev); | 298 | mic_dp_uninit(mdev); |
329 | mic_smpt_uninit(mdev); | 299 | mic_smpt_uninit(mdev); |
@@ -347,26 +317,18 @@ static int __init mic_init(void) | |||
347 | { | 317 | { |
348 | int ret; | 318 | int ret; |
349 | 319 | ||
350 | ret = alloc_chrdev_region(&g_mic_devno, 0, | 320 | request_module("mic_x100_dma"); |
351 | MIC_MAX_NUM_DEVS, mic_driver_name); | ||
352 | if (ret) { | ||
353 | pr_err("alloc_chrdev_region failed ret %d\n", ret); | ||
354 | goto error; | ||
355 | } | ||
356 | |||
357 | mic_init_debugfs(); | 321 | mic_init_debugfs(); |
358 | ida_init(&g_mic_ida); | 322 | ida_init(&g_mic_ida); |
359 | ret = pci_register_driver(&mic_driver); | 323 | ret = pci_register_driver(&mic_driver); |
360 | if (ret) { | 324 | if (ret) { |
361 | pr_err("pci_register_driver failed ret %d\n", ret); | 325 | pr_err("pci_register_driver failed ret %d\n", ret); |
362 | goto cleanup_chrdev; | 326 | goto cleanup_debugfs; |
363 | } | 327 | } |
364 | return ret; | 328 | return 0; |
365 | cleanup_chrdev: | 329 | cleanup_debugfs: |
366 | ida_destroy(&g_mic_ida); | 330 | ida_destroy(&g_mic_ida); |
367 | mic_exit_debugfs(); | 331 | mic_exit_debugfs(); |
368 | unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS); | ||
369 | error: | ||
370 | return ret; | 332 | return ret; |
371 | } | 333 | } |
372 | 334 | ||
@@ -375,7 +337,6 @@ static void __exit mic_exit(void) | |||
375 | pci_unregister_driver(&mic_driver); | 337 | pci_unregister_driver(&mic_driver); |
376 | ida_destroy(&g_mic_ida); | 338 | ida_destroy(&g_mic_ida); |
377 | mic_exit_debugfs(); | 339 | mic_exit_debugfs(); |
378 | unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS); | ||
379 | } | 340 | } |
380 | 341 | ||
381 | module_init(mic_init); | 342 | module_init(mic_init); |
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c deleted file mode 100644 index 58b107a24a8b..000000000000 --- a/drivers/misc/mic/host/mic_virtio.c +++ /dev/null | |||
@@ -1,811 +0,0 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2013 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Intel MIC Host driver. | ||
19 | * | ||
20 | */ | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/dmaengine.h> | ||
25 | #include <linux/mic_common.h> | ||
26 | #include "../common/mic_dev.h" | ||
27 | #include "mic_device.h" | ||
28 | #include "mic_smpt.h" | ||
29 | #include "mic_virtio.h" | ||
30 | |||
31 | /* | ||
32 | * Size of the internal buffer used during DMA's as an intermediate buffer | ||
33 | * for copy to/from user. | ||
34 | */ | ||
35 | #define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL) | ||
36 | |||
37 | static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst, | ||
38 | dma_addr_t src, size_t len) | ||
39 | { | ||
40 | int err = 0; | ||
41 | struct dma_async_tx_descriptor *tx; | ||
42 | struct dma_chan *mic_ch = mdev->dma_ch[0]; | ||
43 | |||
44 | if (!mic_ch) { | ||
45 | err = -EBUSY; | ||
46 | goto error; | ||
47 | } | ||
48 | |||
49 | tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len, | ||
50 | DMA_PREP_FENCE); | ||
51 | if (!tx) { | ||
52 | err = -ENOMEM; | ||
53 | goto error; | ||
54 | } else { | ||
55 | dma_cookie_t cookie = tx->tx_submit(tx); | ||
56 | |||
57 | err = dma_submit_error(cookie); | ||
58 | if (err) | ||
59 | goto error; | ||
60 | err = dma_sync_wait(mic_ch, cookie); | ||
61 | } | ||
62 | error: | ||
63 | if (err) | ||
64 | dev_err(&mdev->pdev->dev, "%s %d err %d\n", | ||
65 | __func__, __LINE__, err); | ||
66 | return err; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Initiates the copies across the PCIe bus from card memory to a user | ||
71 | * space buffer. When transfers are done using DMA, source/destination | ||
72 | * addresses and transfer length must follow the alignment requirements of | ||
73 | * the MIC DMA engine. | ||
74 | */ | ||
75 | static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf, | ||
76 | size_t len, u64 daddr, size_t dlen, | ||
77 | int vr_idx) | ||
78 | { | ||
79 | struct mic_device *mdev = mvdev->mdev; | ||
80 | void __iomem *dbuf = mdev->aper.va + daddr; | ||
81 | struct mic_vringh *mvr = &mvdev->mvr[vr_idx]; | ||
82 | size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align; | ||
83 | size_t dma_offset; | ||
84 | size_t partlen; | ||
85 | int err; | ||
86 | |||
87 | dma_offset = daddr - round_down(daddr, dma_alignment); | ||
88 | daddr -= dma_offset; | ||
89 | len += dma_offset; | ||
90 | |||
91 | while (len) { | ||
92 | partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE); | ||
93 | |||
94 | err = mic_sync_dma(mdev, mvr->buf_da, daddr, | ||
95 | ALIGN(partlen, dma_alignment)); | ||
96 | if (err) | ||
97 | goto err; | ||
98 | |||
99 | if (copy_to_user(ubuf, mvr->buf + dma_offset, | ||
100 | partlen - dma_offset)) { | ||
101 | err = -EFAULT; | ||
102 | goto err; | ||
103 | } | ||
104 | daddr += partlen; | ||
105 | ubuf += partlen; | ||
106 | dbuf += partlen; | ||
107 | mvdev->in_bytes_dma += partlen; | ||
108 | mvdev->in_bytes += partlen; | ||
109 | len -= partlen; | ||
110 | dma_offset = 0; | ||
111 | } | ||
112 | return 0; | ||
113 | err: | ||
114 | dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err); | ||
115 | return err; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * Initiates copies across the PCIe bus from a user space buffer to card | ||
120 | * memory. When transfers are done using DMA, source/destination addresses | ||
121 | * and transfer length must follow the alignment requirements of the MIC | ||
122 | * DMA engine. | ||
123 | */ | ||
124 | static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf, | ||
125 | size_t len, u64 daddr, size_t dlen, | ||
126 | int vr_idx) | ||
127 | { | ||
128 | struct mic_device *mdev = mvdev->mdev; | ||
129 | void __iomem *dbuf = mdev->aper.va + daddr; | ||
130 | struct mic_vringh *mvr = &mvdev->mvr[vr_idx]; | ||
131 | size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align; | ||
132 | size_t partlen; | ||
133 | int err; | ||
134 | |||
135 | if (daddr & (dma_alignment - 1)) { | ||
136 | mvdev->tx_dst_unaligned += len; | ||
137 | goto memcpy; | ||
138 | } else if (ALIGN(len, dma_alignment) > dlen) { | ||
139 | mvdev->tx_len_unaligned += len; | ||
140 | goto memcpy; | ||
141 | } | ||
142 | |||
143 | while (len) { | ||
144 | partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE); | ||
145 | |||
146 | if (copy_from_user(mvr->buf, ubuf, partlen)) { | ||
147 | err = -EFAULT; | ||
148 | goto err; | ||
149 | } | ||
150 | err = mic_sync_dma(mdev, daddr, mvr->buf_da, | ||
151 | ALIGN(partlen, dma_alignment)); | ||
152 | if (err) | ||
153 | goto err; | ||
154 | daddr += partlen; | ||
155 | ubuf += partlen; | ||
156 | dbuf += partlen; | ||
157 | mvdev->out_bytes_dma += partlen; | ||
158 | mvdev->out_bytes += partlen; | ||
159 | len -= partlen; | ||
160 | } | ||
161 | memcpy: | ||
162 | /* | ||
163 | * We are copying to IO below and should ideally use something | ||
164 | * like copy_from_user_toio(..) if it existed. | ||
165 | */ | ||
166 | if (copy_from_user((void __force *)dbuf, ubuf, len)) { | ||
167 | err = -EFAULT; | ||
168 | goto err; | ||
169 | } | ||
170 | mvdev->out_bytes += len; | ||
171 | return 0; | ||
172 | err: | ||
173 | dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err); | ||
174 | return err; | ||
175 | } | ||
176 | |||
177 | #define MIC_VRINGH_READ true | ||
178 | |||
179 | /* The function to call to notify the card about added buffers */ | ||
180 | static void mic_notify(struct vringh *vrh) | ||
181 | { | ||
182 | struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh); | ||
183 | struct mic_vdev *mvdev = mvrh->mvdev; | ||
184 | s8 db = mvdev->dc->h2c_vdev_db; | ||
185 | |||
186 | if (db != -1) | ||
187 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); | ||
188 | } | ||
189 | |||
190 | /* Determine the total number of bytes consumed in a VRINGH KIOV */ | ||
191 | static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov) | ||
192 | { | ||
193 | int i; | ||
194 | u32 total = iov->consumed; | ||
195 | |||
196 | for (i = 0; i < iov->i; i++) | ||
197 | total += iov->iov[i].iov_len; | ||
198 | return total; | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * Traverse the VRINGH KIOV and issue the APIs to trigger the copies. | ||
203 | * This API is heavily based on the vringh_iov_xfer(..) implementation | ||
204 | * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..) | ||
205 | * and vringh_iov_push_kern(..) directly is because there is no | ||
206 | * way to override the VRINGH xfer(..) routines as of v3.10. | ||
207 | */ | ||
208 | static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov, | ||
209 | void __user *ubuf, size_t len, bool read, int vr_idx, | ||
210 | size_t *out_len) | ||
211 | { | ||
212 | int ret = 0; | ||
213 | size_t partlen, tot_len = 0; | ||
214 | |||
215 | while (len && iov->i < iov->used) { | ||
216 | partlen = min(iov->iov[iov->i].iov_len, len); | ||
217 | if (read) | ||
218 | ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen, | ||
219 | (u64)iov->iov[iov->i].iov_base, | ||
220 | iov->iov[iov->i].iov_len, | ||
221 | vr_idx); | ||
222 | else | ||
223 | ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen, | ||
224 | (u64)iov->iov[iov->i].iov_base, | ||
225 | iov->iov[iov->i].iov_len, | ||
226 | vr_idx); | ||
227 | if (ret) { | ||
228 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
229 | __func__, __LINE__, ret); | ||
230 | break; | ||
231 | } | ||
232 | len -= partlen; | ||
233 | ubuf += partlen; | ||
234 | tot_len += partlen; | ||
235 | iov->consumed += partlen; | ||
236 | iov->iov[iov->i].iov_len -= partlen; | ||
237 | iov->iov[iov->i].iov_base += partlen; | ||
238 | if (!iov->iov[iov->i].iov_len) { | ||
239 | /* Fix up old iov element then increment. */ | ||
240 | iov->iov[iov->i].iov_len = iov->consumed; | ||
241 | iov->iov[iov->i].iov_base -= iov->consumed; | ||
242 | |||
243 | iov->consumed = 0; | ||
244 | iov->i++; | ||
245 | } | ||
246 | } | ||
247 | *out_len = tot_len; | ||
248 | return ret; | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * Use the standard VRINGH infrastructure in the kernel to fetch new | ||
253 | * descriptors, initiate the copies and update the used ring. | ||
254 | */ | ||
255 | static int _mic_virtio_copy(struct mic_vdev *mvdev, | ||
256 | struct mic_copy_desc *copy) | ||
257 | { | ||
258 | int ret = 0; | ||
259 | u32 iovcnt = copy->iovcnt; | ||
260 | struct iovec iov; | ||
261 | struct iovec __user *u_iov = copy->iov; | ||
262 | void __user *ubuf = NULL; | ||
263 | struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; | ||
264 | struct vringh_kiov *riov = &mvr->riov; | ||
265 | struct vringh_kiov *wiov = &mvr->wiov; | ||
266 | struct vringh *vrh = &mvr->vrh; | ||
267 | u16 *head = &mvr->head; | ||
268 | struct mic_vring *vr = &mvr->vring; | ||
269 | size_t len = 0, out_len; | ||
270 | |||
271 | copy->out_len = 0; | ||
272 | /* Fetch a new IOVEC if all previous elements have been processed */ | ||
273 | if (riov->i == riov->used && wiov->i == wiov->used) { | ||
274 | ret = vringh_getdesc_kern(vrh, riov, wiov, | ||
275 | head, GFP_KERNEL); | ||
276 | /* Check if there are available descriptors */ | ||
277 | if (ret <= 0) | ||
278 | return ret; | ||
279 | } | ||
280 | while (iovcnt) { | ||
281 | if (!len) { | ||
282 | /* Copy over a new iovec from user space. */ | ||
283 | ret = copy_from_user(&iov, u_iov, sizeof(*u_iov)); | ||
284 | if (ret) { | ||
285 | ret = -EINVAL; | ||
286 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
287 | __func__, __LINE__, ret); | ||
288 | break; | ||
289 | } | ||
290 | len = iov.iov_len; | ||
291 | ubuf = iov.iov_base; | ||
292 | } | ||
293 | /* Issue all the read descriptors first */ | ||
294 | ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ, | ||
295 | copy->vr_idx, &out_len); | ||
296 | if (ret) { | ||
297 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
298 | __func__, __LINE__, ret); | ||
299 | break; | ||
300 | } | ||
301 | len -= out_len; | ||
302 | ubuf += out_len; | ||
303 | copy->out_len += out_len; | ||
304 | /* Issue the write descriptors next */ | ||
305 | ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ, | ||
306 | copy->vr_idx, &out_len); | ||
307 | if (ret) { | ||
308 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
309 | __func__, __LINE__, ret); | ||
310 | break; | ||
311 | } | ||
312 | len -= out_len; | ||
313 | ubuf += out_len; | ||
314 | copy->out_len += out_len; | ||
315 | if (!len) { | ||
316 | /* One user space iovec is now completed */ | ||
317 | iovcnt--; | ||
318 | u_iov++; | ||
319 | } | ||
320 | /* Exit loop if all elements in KIOVs have been processed. */ | ||
321 | if (riov->i == riov->used && wiov->i == wiov->used) | ||
322 | break; | ||
323 | } | ||
324 | /* | ||
325 | * Update the used ring if a descriptor was available and some data was | ||
326 | * copied in/out and the user asked for a used ring update. | ||
327 | */ | ||
328 | if (*head != USHRT_MAX && copy->out_len && copy->update_used) { | ||
329 | u32 total = 0; | ||
330 | |||
331 | /* Determine the total data consumed */ | ||
332 | total += mic_vringh_iov_consumed(riov); | ||
333 | total += mic_vringh_iov_consumed(wiov); | ||
334 | vringh_complete_kern(vrh, *head, total); | ||
335 | *head = USHRT_MAX; | ||
336 | if (vringh_need_notify_kern(vrh) > 0) | ||
337 | vringh_notify(vrh); | ||
338 | vringh_kiov_cleanup(riov); | ||
339 | vringh_kiov_cleanup(wiov); | ||
340 | /* Update avail idx for user space */ | ||
341 | vr->info->avail_idx = vrh->last_avail_idx; | ||
342 | } | ||
343 | return ret; | ||
344 | } | ||
345 | |||
346 | static inline int mic_verify_copy_args(struct mic_vdev *mvdev, | ||
347 | struct mic_copy_desc *copy) | ||
348 | { | ||
349 | if (copy->vr_idx >= mvdev->dd->num_vq) { | ||
350 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
351 | __func__, __LINE__, -EINVAL); | ||
352 | return -EINVAL; | ||
353 | } | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | /* Copy a specified number of virtio descriptors in a chain */ | ||
358 | int mic_virtio_copy_desc(struct mic_vdev *mvdev, | ||
359 | struct mic_copy_desc *copy) | ||
360 | { | ||
361 | int err; | ||
362 | struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; | ||
363 | |||
364 | err = mic_verify_copy_args(mvdev, copy); | ||
365 | if (err) | ||
366 | return err; | ||
367 | |||
368 | mutex_lock(&mvr->vr_mutex); | ||
369 | if (!mic_vdevup(mvdev)) { | ||
370 | err = -ENODEV; | ||
371 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
372 | __func__, __LINE__, err); | ||
373 | goto err; | ||
374 | } | ||
375 | err = _mic_virtio_copy(mvdev, copy); | ||
376 | if (err) { | ||
377 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
378 | __func__, __LINE__, err); | ||
379 | } | ||
380 | err: | ||
381 | mutex_unlock(&mvr->vr_mutex); | ||
382 | return err; | ||
383 | } | ||
384 | |||
385 | static void mic_virtio_init_post(struct mic_vdev *mvdev) | ||
386 | { | ||
387 | struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd); | ||
388 | int i; | ||
389 | |||
390 | for (i = 0; i < mvdev->dd->num_vq; i++) { | ||
391 | if (!le64_to_cpu(vqconfig[i].used_address)) { | ||
392 | dev_warn(mic_dev(mvdev), "used_address zero??\n"); | ||
393 | continue; | ||
394 | } | ||
395 | mvdev->mvr[i].vrh.vring.used = | ||
396 | (void __force *)mvdev->mdev->aper.va + | ||
397 | le64_to_cpu(vqconfig[i].used_address); | ||
398 | } | ||
399 | |||
400 | mvdev->dc->used_address_updated = 0; | ||
401 | |||
402 | dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n", | ||
403 | __func__, mvdev->virtio_id); | ||
404 | } | ||
405 | |||
406 | static inline void mic_virtio_device_reset(struct mic_vdev *mvdev) | ||
407 | { | ||
408 | int i; | ||
409 | |||
410 | dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n", | ||
411 | __func__, mvdev->dd->status, mvdev->virtio_id); | ||
412 | |||
413 | for (i = 0; i < mvdev->dd->num_vq; i++) | ||
414 | /* | ||
415 | * Avoid lockdep false positive. The + 1 is for the mic | ||
416 | * mutex which is held in the reset devices code path. | ||
417 | */ | ||
418 | mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); | ||
419 | |||
420 | /* 0 status means "reset" */ | ||
421 | mvdev->dd->status = 0; | ||
422 | mvdev->dc->vdev_reset = 0; | ||
423 | mvdev->dc->host_ack = 1; | ||
424 | |||
425 | for (i = 0; i < mvdev->dd->num_vq; i++) { | ||
426 | struct vringh *vrh = &mvdev->mvr[i].vrh; | ||
427 | mvdev->mvr[i].vring.info->avail_idx = 0; | ||
428 | vrh->completed = 0; | ||
429 | vrh->last_avail_idx = 0; | ||
430 | vrh->last_used_idx = 0; | ||
431 | } | ||
432 | |||
433 | for (i = 0; i < mvdev->dd->num_vq; i++) | ||
434 | mutex_unlock(&mvdev->mvr[i].vr_mutex); | ||
435 | } | ||
436 | |||
437 | void mic_virtio_reset_devices(struct mic_device *mdev) | ||
438 | { | ||
439 | struct list_head *pos, *tmp; | ||
440 | struct mic_vdev *mvdev; | ||
441 | |||
442 | dev_dbg(&mdev->pdev->dev, "%s\n", __func__); | ||
443 | |||
444 | list_for_each_safe(pos, tmp, &mdev->vdev_list) { | ||
445 | mvdev = list_entry(pos, struct mic_vdev, list); | ||
446 | mic_virtio_device_reset(mvdev); | ||
447 | mvdev->poll_wake = 1; | ||
448 | wake_up(&mvdev->waitq); | ||
449 | } | ||
450 | } | ||
451 | |||
452 | void mic_bh_handler(struct work_struct *work) | ||
453 | { | ||
454 | struct mic_vdev *mvdev = container_of(work, struct mic_vdev, | ||
455 | virtio_bh_work); | ||
456 | |||
457 | if (mvdev->dc->used_address_updated) | ||
458 | mic_virtio_init_post(mvdev); | ||
459 | |||
460 | if (mvdev->dc->vdev_reset) | ||
461 | mic_virtio_device_reset(mvdev); | ||
462 | |||
463 | mvdev->poll_wake = 1; | ||
464 | wake_up(&mvdev->waitq); | ||
465 | } | ||
466 | |||
467 | static irqreturn_t mic_virtio_intr_handler(int irq, void *data) | ||
468 | { | ||
469 | struct mic_vdev *mvdev = data; | ||
470 | struct mic_device *mdev = mvdev->mdev; | ||
471 | |||
472 | mdev->ops->intr_workarounds(mdev); | ||
473 | schedule_work(&mvdev->virtio_bh_work); | ||
474 | return IRQ_HANDLED; | ||
475 | } | ||
476 | |||
477 | int mic_virtio_config_change(struct mic_vdev *mvdev, | ||
478 | void __user *argp) | ||
479 | { | ||
480 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | ||
481 | int ret = 0, retry, i; | ||
482 | struct mic_bootparam *bootparam = mvdev->mdev->dp; | ||
483 | s8 db = bootparam->h2c_config_db; | ||
484 | |||
485 | mutex_lock(&mvdev->mdev->mic_mutex); | ||
486 | for (i = 0; i < mvdev->dd->num_vq; i++) | ||
487 | mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); | ||
488 | |||
489 | if (db == -1 || mvdev->dd->type == -1) { | ||
490 | ret = -EIO; | ||
491 | goto exit; | ||
492 | } | ||
493 | |||
494 | if (copy_from_user(mic_vq_configspace(mvdev->dd), | ||
495 | argp, mvdev->dd->config_len)) { | ||
496 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
497 | __func__, __LINE__, -EFAULT); | ||
498 | ret = -EFAULT; | ||
499 | goto exit; | ||
500 | } | ||
501 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; | ||
502 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); | ||
503 | |||
504 | for (retry = 100; retry--;) { | ||
505 | ret = wait_event_timeout(wake, | ||
506 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | ||
507 | if (ret) | ||
508 | break; | ||
509 | } | ||
510 | |||
511 | dev_dbg(mic_dev(mvdev), | ||
512 | "%s %d retry: %d\n", __func__, __LINE__, retry); | ||
513 | mvdev->dc->config_change = 0; | ||
514 | mvdev->dc->guest_ack = 0; | ||
515 | exit: | ||
516 | for (i = 0; i < mvdev->dd->num_vq; i++) | ||
517 | mutex_unlock(&mvdev->mvr[i].vr_mutex); | ||
518 | mutex_unlock(&mvdev->mdev->mic_mutex); | ||
519 | return ret; | ||
520 | } | ||
521 | |||
522 | static int mic_copy_dp_entry(struct mic_vdev *mvdev, | ||
523 | void __user *argp, | ||
524 | __u8 *type, | ||
525 | struct mic_device_desc **devpage) | ||
526 | { | ||
527 | struct mic_device *mdev = mvdev->mdev; | ||
528 | struct mic_device_desc dd, *dd_config, *devp; | ||
529 | struct mic_vqconfig *vqconfig; | ||
530 | int ret = 0, i; | ||
531 | bool slot_found = false; | ||
532 | |||
533 | if (copy_from_user(&dd, argp, sizeof(dd))) { | ||
534 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
535 | __func__, __LINE__, -EFAULT); | ||
536 | return -EFAULT; | ||
537 | } | ||
538 | |||
539 | if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE || | ||
540 | dd.num_vq > MIC_MAX_VRINGS) { | ||
541 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
542 | __func__, __LINE__, -EINVAL); | ||
543 | return -EINVAL; | ||
544 | } | ||
545 | |||
546 | dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL); | ||
547 | if (dd_config == NULL) { | ||
548 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
549 | __func__, __LINE__, -ENOMEM); | ||
550 | return -ENOMEM; | ||
551 | } | ||
552 | if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) { | ||
553 | ret = -EFAULT; | ||
554 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
555 | __func__, __LINE__, ret); | ||
556 | goto exit; | ||
557 | } | ||
558 | |||
559 | vqconfig = mic_vq_config(dd_config); | ||
560 | for (i = 0; i < dd.num_vq; i++) { | ||
561 | if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) { | ||
562 | ret = -EINVAL; | ||
563 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
564 | __func__, __LINE__, ret); | ||
565 | goto exit; | ||
566 | } | ||
567 | } | ||
568 | |||
569 | /* Find the first free device page entry */ | ||
570 | for (i = sizeof(struct mic_bootparam); | ||
571 | i < MIC_DP_SIZE - mic_total_desc_size(dd_config); | ||
572 | i += mic_total_desc_size(devp)) { | ||
573 | devp = mdev->dp + i; | ||
574 | if (devp->type == 0 || devp->type == -1) { | ||
575 | slot_found = true; | ||
576 | break; | ||
577 | } | ||
578 | } | ||
579 | if (!slot_found) { | ||
580 | ret = -EINVAL; | ||
581 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
582 | __func__, __LINE__, ret); | ||
583 | goto exit; | ||
584 | } | ||
585 | /* | ||
586 | * Save off the type before doing the memcpy. Type will be set in the | ||
587 | * end after completing all initialization for the new device. | ||
588 | */ | ||
589 | *type = dd_config->type; | ||
590 | dd_config->type = 0; | ||
591 | memcpy(devp, dd_config, mic_desc_size(dd_config)); | ||
592 | |||
593 | *devpage = devp; | ||
594 | exit: | ||
595 | kfree(dd_config); | ||
596 | return ret; | ||
597 | } | ||
598 | |||
599 | static void mic_init_device_ctrl(struct mic_vdev *mvdev, | ||
600 | struct mic_device_desc *devpage) | ||
601 | { | ||
602 | struct mic_device_ctrl *dc; | ||
603 | |||
604 | dc = (void *)devpage + mic_aligned_desc_size(devpage); | ||
605 | |||
606 | dc->config_change = 0; | ||
607 | dc->guest_ack = 0; | ||
608 | dc->vdev_reset = 0; | ||
609 | dc->host_ack = 0; | ||
610 | dc->used_address_updated = 0; | ||
611 | dc->c2h_vdev_db = -1; | ||
612 | dc->h2c_vdev_db = -1; | ||
613 | mvdev->dc = dc; | ||
614 | } | ||
615 | |||
616 | int mic_virtio_add_device(struct mic_vdev *mvdev, | ||
617 | void __user *argp) | ||
618 | { | ||
619 | struct mic_device *mdev = mvdev->mdev; | ||
620 | struct mic_device_desc *dd = NULL; | ||
621 | struct mic_vqconfig *vqconfig; | ||
622 | int vr_size, i, j, ret; | ||
623 | u8 type = 0; | ||
624 | s8 db; | ||
625 | char irqname[10]; | ||
626 | struct mic_bootparam *bootparam = mdev->dp; | ||
627 | u16 num; | ||
628 | dma_addr_t vr_addr; | ||
629 | |||
630 | mutex_lock(&mdev->mic_mutex); | ||
631 | |||
632 | ret = mic_copy_dp_entry(mvdev, argp, &type, &dd); | ||
633 | if (ret) { | ||
634 | mutex_unlock(&mdev->mic_mutex); | ||
635 | return ret; | ||
636 | } | ||
637 | |||
638 | mic_init_device_ctrl(mvdev, dd); | ||
639 | |||
640 | mvdev->dd = dd; | ||
641 | mvdev->virtio_id = type; | ||
642 | vqconfig = mic_vq_config(dd); | ||
643 | INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler); | ||
644 | |||
645 | for (i = 0; i < dd->num_vq; i++) { | ||
646 | struct mic_vringh *mvr = &mvdev->mvr[i]; | ||
647 | struct mic_vring *vr = &mvdev->mvr[i].vring; | ||
648 | num = le16_to_cpu(vqconfig[i].num); | ||
649 | mutex_init(&mvr->vr_mutex); | ||
650 | vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) + | ||
651 | sizeof(struct _mic_vring_info)); | ||
652 | vr->va = (void *) | ||
653 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
654 | get_order(vr_size)); | ||
655 | if (!vr->va) { | ||
656 | ret = -ENOMEM; | ||
657 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
658 | __func__, __LINE__, ret); | ||
659 | goto err; | ||
660 | } | ||
661 | vr->len = vr_size; | ||
662 | vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); | ||
663 | vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i); | ||
664 | vr_addr = mic_map_single(mdev, vr->va, vr_size); | ||
665 | if (mic_map_error(vr_addr)) { | ||
666 | free_pages((unsigned long)vr->va, get_order(vr_size)); | ||
667 | ret = -ENOMEM; | ||
668 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
669 | __func__, __LINE__, ret); | ||
670 | goto err; | ||
671 | } | ||
672 | vqconfig[i].address = cpu_to_le64(vr_addr); | ||
673 | |||
674 | vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); | ||
675 | ret = vringh_init_kern(&mvr->vrh, | ||
676 | *(u32 *)mic_vq_features(mvdev->dd), num, false, | ||
677 | vr->vr.desc, vr->vr.avail, vr->vr.used); | ||
678 | if (ret) { | ||
679 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
680 | __func__, __LINE__, ret); | ||
681 | goto err; | ||
682 | } | ||
683 | vringh_kiov_init(&mvr->riov, NULL, 0); | ||
684 | vringh_kiov_init(&mvr->wiov, NULL, 0); | ||
685 | mvr->head = USHRT_MAX; | ||
686 | mvr->mvdev = mvdev; | ||
687 | mvr->vrh.notify = mic_notify; | ||
688 | dev_dbg(&mdev->pdev->dev, | ||
689 | "%s %d index %d va %p info %p vr_size 0x%x\n", | ||
690 | __func__, __LINE__, i, vr->va, vr->info, vr_size); | ||
691 | mvr->buf = (void *)__get_free_pages(GFP_KERNEL, | ||
692 | get_order(MIC_INT_DMA_BUF_SIZE)); | ||
693 | mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf, | ||
694 | MIC_INT_DMA_BUF_SIZE); | ||
695 | } | ||
696 | |||
697 | snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id, | ||
698 | mvdev->virtio_id); | ||
699 | mvdev->virtio_db = mic_next_db(mdev); | ||
700 | mvdev->virtio_cookie = mic_request_threaded_irq(mdev, | ||
701 | mic_virtio_intr_handler, | ||
702 | NULL, irqname, mvdev, | ||
703 | mvdev->virtio_db, MIC_INTR_DB); | ||
704 | if (IS_ERR(mvdev->virtio_cookie)) { | ||
705 | ret = PTR_ERR(mvdev->virtio_cookie); | ||
706 | dev_dbg(&mdev->pdev->dev, "request irq failed\n"); | ||
707 | goto err; | ||
708 | } | ||
709 | |||
710 | mvdev->dc->c2h_vdev_db = mvdev->virtio_db; | ||
711 | |||
712 | list_add_tail(&mvdev->list, &mdev->vdev_list); | ||
713 | /* | ||
714 | * Order the type update with previous stores. This write barrier | ||
715 | * is paired with the corresponding read barrier before the uncached | ||
716 | * system memory read of the type, on the card while scanning the | ||
717 | * device page. | ||
718 | */ | ||
719 | smp_wmb(); | ||
720 | dd->type = type; | ||
721 | |||
722 | dev_dbg(&mdev->pdev->dev, "Added virtio device id %d\n", dd->type); | ||
723 | |||
724 | db = bootparam->h2c_config_db; | ||
725 | if (db != -1) | ||
726 | mdev->ops->send_intr(mdev, db); | ||
727 | mutex_unlock(&mdev->mic_mutex); | ||
728 | return 0; | ||
729 | err: | ||
730 | vqconfig = mic_vq_config(dd); | ||
731 | for (j = 0; j < i; j++) { | ||
732 | struct mic_vringh *mvr = &mvdev->mvr[j]; | ||
733 | mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address), | ||
734 | mvr->vring.len); | ||
735 | free_pages((unsigned long)mvr->vring.va, | ||
736 | get_order(mvr->vring.len)); | ||
737 | } | ||
738 | mutex_unlock(&mdev->mic_mutex); | ||
739 | return ret; | ||
740 | } | ||
741 | |||
742 | void mic_virtio_del_device(struct mic_vdev *mvdev) | ||
743 | { | ||
744 | struct list_head *pos, *tmp; | ||
745 | struct mic_vdev *tmp_mvdev; | ||
746 | struct mic_device *mdev = mvdev->mdev; | ||
747 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | ||
748 | int i, ret, retry; | ||
749 | struct mic_vqconfig *vqconfig; | ||
750 | struct mic_bootparam *bootparam = mdev->dp; | ||
751 | s8 db; | ||
752 | |||
753 | mutex_lock(&mdev->mic_mutex); | ||
754 | db = bootparam->h2c_config_db; | ||
755 | if (db == -1) | ||
756 | goto skip_hot_remove; | ||
757 | dev_dbg(&mdev->pdev->dev, | ||
758 | "Requesting hot remove id %d\n", mvdev->virtio_id); | ||
759 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; | ||
760 | mdev->ops->send_intr(mdev, db); | ||
761 | for (retry = 100; retry--;) { | ||
762 | ret = wait_event_timeout(wake, | ||
763 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | ||
764 | if (ret) | ||
765 | break; | ||
766 | } | ||
767 | dev_dbg(&mdev->pdev->dev, | ||
768 | "Device id %d config_change %d guest_ack %d retry %d\n", | ||
769 | mvdev->virtio_id, mvdev->dc->config_change, | ||
770 | mvdev->dc->guest_ack, retry); | ||
771 | mvdev->dc->config_change = 0; | ||
772 | mvdev->dc->guest_ack = 0; | ||
773 | skip_hot_remove: | ||
774 | mic_free_irq(mdev, mvdev->virtio_cookie, mvdev); | ||
775 | flush_work(&mvdev->virtio_bh_work); | ||
776 | vqconfig = mic_vq_config(mvdev->dd); | ||
777 | for (i = 0; i < mvdev->dd->num_vq; i++) { | ||
778 | struct mic_vringh *mvr = &mvdev->mvr[i]; | ||
779 | |||
780 | mic_unmap_single(mvdev->mdev, mvr->buf_da, | ||
781 | MIC_INT_DMA_BUF_SIZE); | ||
782 | free_pages((unsigned long)mvr->buf, | ||
783 | get_order(MIC_INT_DMA_BUF_SIZE)); | ||
784 | vringh_kiov_cleanup(&mvr->riov); | ||
785 | vringh_kiov_cleanup(&mvr->wiov); | ||
786 | mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address), | ||
787 | mvr->vring.len); | ||
788 | free_pages((unsigned long)mvr->vring.va, | ||
789 | get_order(mvr->vring.len)); | ||
790 | } | ||
791 | |||
792 | list_for_each_safe(pos, tmp, &mdev->vdev_list) { | ||
793 | tmp_mvdev = list_entry(pos, struct mic_vdev, list); | ||
794 | if (tmp_mvdev == mvdev) { | ||
795 | list_del(pos); | ||
796 | dev_dbg(&mdev->pdev->dev, | ||
797 | "Removing virtio device id %d\n", | ||
798 | mvdev->virtio_id); | ||
799 | break; | ||
800 | } | ||
801 | } | ||
802 | /* | ||
803 | * Order the type update with previous stores. This write barrier | ||
804 | * is paired with the corresponding read barrier before the uncached | ||
805 | * system memory read of the type, on the card while scanning the | ||
806 | * device page. | ||
807 | */ | ||
808 | smp_wmb(); | ||
809 | mvdev->dd->type = -1; | ||
810 | mutex_unlock(&mdev->mic_mutex); | ||
811 | } | ||
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c index 8118ac48c764..82a973c85b5d 100644 --- a/drivers/misc/mic/host/mic_x100.c +++ b/drivers/misc/mic/host/mic_x100.c | |||
@@ -450,26 +450,29 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf) | |||
450 | 450 | ||
451 | rc = mic_x100_get_boot_addr(mdev); | 451 | rc = mic_x100_get_boot_addr(mdev); |
452 | if (rc) | 452 | if (rc) |
453 | goto error; | 453 | return rc; |
454 | /* load OS */ | 454 | /* load OS */ |
455 | rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev); | 455 | rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev); |
456 | if (rc < 0) { | 456 | if (rc < 0) { |
457 | dev_err(&mdev->pdev->dev, | 457 | dev_err(&mdev->pdev->dev, |
458 | "ramdisk request_firmware failed: %d %s\n", | 458 | "ramdisk request_firmware failed: %d %s\n", |
459 | rc, mdev->cosm_dev->firmware); | 459 | rc, mdev->cosm_dev->firmware); |
460 | goto error; | 460 | return rc; |
461 | } | 461 | } |
462 | if (mdev->bootaddr > mdev->aper.len - fw->size) { | 462 | if (mdev->bootaddr > mdev->aper.len - fw->size) { |
463 | rc = -EINVAL; | 463 | rc = -EINVAL; |
464 | dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n", | 464 | dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n", |
465 | __func__, __LINE__, rc, mdev->bootaddr); | 465 | __func__, __LINE__, rc, mdev->bootaddr); |
466 | release_firmware(fw); | ||
467 | goto error; | 466 | goto error; |
468 | } | 467 | } |
469 | memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size); | 468 | memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size); |
470 | mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size); | 469 | mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size); |
471 | if (!strcmp(mdev->cosm_dev->bootmode, "flash")) | 470 | if (!strcmp(mdev->cosm_dev->bootmode, "flash")) { |
472 | goto done; | 471 | rc = -EINVAL; |
472 | dev_err(&mdev->pdev->dev, "%s %d rc %d\n", | ||
473 | __func__, __LINE__, rc); | ||
474 | goto error; | ||
475 | } | ||
473 | /* load command line */ | 476 | /* load command line */ |
474 | rc = mic_x100_load_command_line(mdev, fw); | 477 | rc = mic_x100_load_command_line(mdev, fw); |
475 | if (rc) { | 478 | if (rc) { |
@@ -481,9 +484,11 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf) | |||
481 | /* load ramdisk */ | 484 | /* load ramdisk */ |
482 | if (mdev->cosm_dev->ramdisk) | 485 | if (mdev->cosm_dev->ramdisk) |
483 | rc = mic_x100_load_ramdisk(mdev); | 486 | rc = mic_x100_load_ramdisk(mdev); |
487 | |||
488 | return rc; | ||
489 | |||
484 | error: | 490 | error: |
485 | dev_dbg(&mdev->pdev->dev, "%s %d rc %d\n", __func__, __LINE__, rc); | 491 | release_firmware(fw); |
486 | done: | ||
487 | return rc; | 492 | return rc; |
488 | } | 493 | } |
489 | 494 | ||
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c index 95a13c629a8e..cd01a0efda6b 100644 --- a/drivers/misc/mic/scif/scif_dma.c +++ b/drivers/misc/mic/scif/scif_dma.c | |||
@@ -74,11 +74,6 @@ struct scif_copy_work { | |||
74 | bool ordered; | 74 | bool ordered; |
75 | }; | 75 | }; |
76 | 76 | ||
77 | #ifndef list_entry_next | ||
78 | #define list_entry_next(pos, member) \ | ||
79 | list_entry(pos->member.next, typeof(*pos), member) | ||
80 | #endif | ||
81 | |||
82 | /** | 77 | /** |
83 | * scif_reserve_dma_chan: | 78 | * scif_reserve_dma_chan: |
84 | * @ep: Endpoint Descriptor. | 79 | * @ep: Endpoint Descriptor. |
@@ -276,13 +271,10 @@ static struct scif_mmu_notif * | |||
276 | scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma) | 271 | scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma) |
277 | { | 272 | { |
278 | struct scif_mmu_notif *mmn; | 273 | struct scif_mmu_notif *mmn; |
279 | struct list_head *item; | ||
280 | 274 | ||
281 | list_for_each(item, &rma->mmn_list) { | 275 | list_for_each_entry(mmn, &rma->mmn_list, list) |
282 | mmn = list_entry(item, struct scif_mmu_notif, list); | ||
283 | if (mmn->mm == mm) | 276 | if (mmn->mm == mm) |
284 | return mmn; | 277 | return mmn; |
285 | } | ||
286 | return NULL; | 278 | return NULL; |
287 | } | 279 | } |
288 | 280 | ||
@@ -293,13 +285,12 @@ scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep) | |||
293 | = kzalloc(sizeof(*mmn), GFP_KERNEL); | 285 | = kzalloc(sizeof(*mmn), GFP_KERNEL); |
294 | 286 | ||
295 | if (!mmn) | 287 | if (!mmn) |
296 | return ERR_PTR(ENOMEM); | 288 | return ERR_PTR(-ENOMEM); |
297 | 289 | ||
298 | scif_init_mmu_notifier(mmn, current->mm, ep); | 290 | scif_init_mmu_notifier(mmn, current->mm, ep); |
299 | if (mmu_notifier_register(&mmn->ep_mmu_notifier, | 291 | if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) { |
300 | current->mm)) { | ||
301 | kfree(mmn); | 292 | kfree(mmn); |
302 | return ERR_PTR(EBUSY); | 293 | return ERR_PTR(-EBUSY); |
303 | } | 294 | } |
304 | list_add(&mmn->list, &ep->rma_info.mmn_list); | 295 | list_add(&mmn->list, &ep->rma_info.mmn_list); |
305 | return mmn; | 296 | return mmn; |
@@ -851,7 +842,7 @@ static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window, | |||
851 | (window->nr_pages << PAGE_SHIFT); | 842 | (window->nr_pages << PAGE_SHIFT); |
852 | while (rem_len) { | 843 | while (rem_len) { |
853 | if (offset == end_offset) { | 844 | if (offset == end_offset) { |
854 | window = list_entry_next(window, list); | 845 | window = list_next_entry(window, list); |
855 | end_offset = window->offset + | 846 | end_offset = window->offset + |
856 | (window->nr_pages << PAGE_SHIFT); | 847 | (window->nr_pages << PAGE_SHIFT); |
857 | } | 848 | } |
@@ -957,7 +948,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work, | |||
957 | remaining_len -= tail_len; | 948 | remaining_len -= tail_len; |
958 | while (remaining_len) { | 949 | while (remaining_len) { |
959 | if (offset == end_offset) { | 950 | if (offset == end_offset) { |
960 | window = list_entry_next(window, list); | 951 | window = list_next_entry(window, list); |
961 | end_offset = window->offset + | 952 | end_offset = window->offset + |
962 | (window->nr_pages << PAGE_SHIFT); | 953 | (window->nr_pages << PAGE_SHIFT); |
963 | } | 954 | } |
@@ -1064,7 +1055,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work, | |||
1064 | } | 1055 | } |
1065 | if (tail_len) { | 1056 | if (tail_len) { |
1066 | if (offset == end_offset) { | 1057 | if (offset == end_offset) { |
1067 | window = list_entry_next(window, list); | 1058 | window = list_next_entry(window, list); |
1068 | end_offset = window->offset + | 1059 | end_offset = window->offset + |
1069 | (window->nr_pages << PAGE_SHIFT); | 1060 | (window->nr_pages << PAGE_SHIFT); |
1070 | } | 1061 | } |
@@ -1147,13 +1138,13 @@ static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, | |||
1147 | (dst_window->nr_pages << PAGE_SHIFT); | 1138 | (dst_window->nr_pages << PAGE_SHIFT); |
1148 | while (remaining_len) { | 1139 | while (remaining_len) { |
1149 | if (src_offset == end_src_offset) { | 1140 | if (src_offset == end_src_offset) { |
1150 | src_window = list_entry_next(src_window, list); | 1141 | src_window = list_next_entry(src_window, list); |
1151 | end_src_offset = src_window->offset + | 1142 | end_src_offset = src_window->offset + |
1152 | (src_window->nr_pages << PAGE_SHIFT); | 1143 | (src_window->nr_pages << PAGE_SHIFT); |
1153 | scif_init_window_iter(src_window, &src_win_iter); | 1144 | scif_init_window_iter(src_window, &src_win_iter); |
1154 | } | 1145 | } |
1155 | if (dst_offset == end_dst_offset) { | 1146 | if (dst_offset == end_dst_offset) { |
1156 | dst_window = list_entry_next(dst_window, list); | 1147 | dst_window = list_next_entry(dst_window, list); |
1157 | end_dst_offset = dst_window->offset + | 1148 | end_dst_offset = dst_window->offset + |
1158 | (dst_window->nr_pages << PAGE_SHIFT); | 1149 | (dst_window->nr_pages << PAGE_SHIFT); |
1159 | scif_init_window_iter(dst_window, &dst_win_iter); | 1150 | scif_init_window_iter(dst_window, &dst_win_iter); |
@@ -1314,13 +1305,13 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, | |||
1314 | remaining_len -= tail_len; | 1305 | remaining_len -= tail_len; |
1315 | while (remaining_len) { | 1306 | while (remaining_len) { |
1316 | if (src_offset == end_src_offset) { | 1307 | if (src_offset == end_src_offset) { |
1317 | src_window = list_entry_next(src_window, list); | 1308 | src_window = list_next_entry(src_window, list); |
1318 | end_src_offset = src_window->offset + | 1309 | end_src_offset = src_window->offset + |
1319 | (src_window->nr_pages << PAGE_SHIFT); | 1310 | (src_window->nr_pages << PAGE_SHIFT); |
1320 | scif_init_window_iter(src_window, &src_win_iter); | 1311 | scif_init_window_iter(src_window, &src_win_iter); |
1321 | } | 1312 | } |
1322 | if (dst_offset == end_dst_offset) { | 1313 | if (dst_offset == end_dst_offset) { |
1323 | dst_window = list_entry_next(dst_window, list); | 1314 | dst_window = list_next_entry(dst_window, list); |
1324 | end_dst_offset = dst_window->offset + | 1315 | end_dst_offset = dst_window->offset + |
1325 | (dst_window->nr_pages << PAGE_SHIFT); | 1316 | (dst_window->nr_pages << PAGE_SHIFT); |
1326 | scif_init_window_iter(dst_window, &dst_win_iter); | 1317 | scif_init_window_iter(dst_window, &dst_win_iter); |
@@ -1405,9 +1396,9 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, | |||
1405 | if (remaining_len) { | 1396 | if (remaining_len) { |
1406 | loop_len = remaining_len; | 1397 | loop_len = remaining_len; |
1407 | if (src_offset == end_src_offset) | 1398 | if (src_offset == end_src_offset) |
1408 | src_window = list_entry_next(src_window, list); | 1399 | src_window = list_next_entry(src_window, list); |
1409 | if (dst_offset == end_dst_offset) | 1400 | if (dst_offset == end_dst_offset) |
1410 | dst_window = list_entry_next(dst_window, list); | 1401 | dst_window = list_next_entry(dst_window, list); |
1411 | 1402 | ||
1412 | src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset); | 1403 | src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset); |
1413 | dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset); | 1404 | dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset); |
@@ -1550,12 +1541,12 @@ static int scif_rma_list_cpu_copy(struct scif_copy_work *work) | |||
1550 | end_dst_offset = dst_window->offset + | 1541 | end_dst_offset = dst_window->offset + |
1551 | (dst_window->nr_pages << PAGE_SHIFT); | 1542 | (dst_window->nr_pages << PAGE_SHIFT); |
1552 | if (src_offset == end_src_offset) { | 1543 | if (src_offset == end_src_offset) { |
1553 | src_window = list_entry_next(src_window, list); | 1544 | src_window = list_next_entry(src_window, list); |
1554 | scif_init_window_iter(src_window, | 1545 | scif_init_window_iter(src_window, |
1555 | &src_win_iter); | 1546 | &src_win_iter); |
1556 | } | 1547 | } |
1557 | if (dst_offset == end_dst_offset) { | 1548 | if (dst_offset == end_dst_offset) { |
1558 | dst_window = list_entry_next(dst_window, list); | 1549 | dst_window = list_next_entry(dst_window, list); |
1559 | scif_init_window_iter(dst_window, | 1550 | scif_init_window_iter(dst_window, |
1560 | &dst_win_iter); | 1551 | &dst_win_iter); |
1561 | } | 1552 | } |
@@ -1730,7 +1721,7 @@ static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr, | |||
1730 | mutex_lock(&ep->rma_info.mmn_lock); | 1721 | mutex_lock(&ep->rma_info.mmn_lock); |
1731 | mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info); | 1722 | mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info); |
1732 | if (!mmn) | 1723 | if (!mmn) |
1733 | scif_add_mmu_notifier(current->mm, ep); | 1724 | mmn = scif_add_mmu_notifier(current->mm, ep); |
1734 | mutex_unlock(&ep->rma_info.mmn_lock); | 1725 | mutex_unlock(&ep->rma_info.mmn_lock); |
1735 | if (IS_ERR(mmn)) { | 1726 | if (IS_ERR(mmn)) { |
1736 | scif_put_peer_dev(spdev); | 1727 | scif_put_peer_dev(spdev); |
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index 8310b4dbff06..6a451bd65bf3 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c | |||
@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd, | |||
1511 | if ((map_flags & SCIF_MAP_FIXED) && | 1511 | if ((map_flags & SCIF_MAP_FIXED) && |
1512 | ((ALIGN(offset, PAGE_SIZE) != offset) || | 1512 | ((ALIGN(offset, PAGE_SIZE) != offset) || |
1513 | (offset < 0) || | 1513 | (offset < 0) || |
1514 | (offset + (off_t)len < offset))) | 1514 | (len > LONG_MAX - offset))) |
1515 | return -EINVAL; | 1515 | return -EINVAL; |
1516 | 1516 | ||
1517 | might_sleep(); | 1517 | might_sleep(); |
@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset, | |||
1614 | if ((map_flags & SCIF_MAP_FIXED) && | 1614 | if ((map_flags & SCIF_MAP_FIXED) && |
1615 | ((ALIGN(offset, PAGE_SIZE) != offset) || | 1615 | ((ALIGN(offset, PAGE_SIZE) != offset) || |
1616 | (offset < 0) || | 1616 | (offset < 0) || |
1617 | (offset + (off_t)len < offset))) | 1617 | (len > LONG_MAX - offset))) |
1618 | return -EINVAL; | 1618 | return -EINVAL; |
1619 | 1619 | ||
1620 | /* Unsupported protection requested */ | 1620 | /* Unsupported protection requested */ |
@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len) | |||
1732 | 1732 | ||
1733 | /* Offset is not page aligned or offset+len wraps around */ | 1733 | /* Offset is not page aligned or offset+len wraps around */ |
1734 | if ((ALIGN(offset, PAGE_SIZE) != offset) || | 1734 | if ((ALIGN(offset, PAGE_SIZE) != offset) || |
1735 | (offset + (off_t)len < offset)) | 1735 | (offset < 0) || |
1736 | (len > LONG_MAX - offset)) | ||
1736 | return -EINVAL; | 1737 | return -EINVAL; |
1737 | 1738 | ||
1738 | err = scif_verify_epd(ep); | 1739 | err = scif_verify_epd(ep); |
diff --git a/drivers/misc/mic/vop/Makefile b/drivers/misc/mic/vop/Makefile new file mode 100644 index 000000000000..78819c8999f1 --- /dev/null +++ b/drivers/misc/mic/vop/Makefile | |||
@@ -0,0 +1,9 @@ | |||
1 | # | ||
2 | # Makefile - Intel MIC Linux driver. | ||
3 | # Copyright(c) 2016, Intel Corporation. | ||
4 | # | ||
5 | obj-m := vop.o | ||
6 | |||
7 | vop-objs += vop_main.o | ||
8 | vop-objs += vop_debugfs.o | ||
9 | vop-objs += vop_vringh.o | ||
diff --git a/drivers/misc/mic/vop/vop_debugfs.c b/drivers/misc/mic/vop/vop_debugfs.c new file mode 100644 index 000000000000..ab43884e5cd7 --- /dev/null +++ b/drivers/misc/mic/vop/vop_debugfs.c | |||
@@ -0,0 +1,232 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2016 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Intel Virtio Over PCIe (VOP) driver. | ||
19 | * | ||
20 | */ | ||
21 | #include <linux/debugfs.h> | ||
22 | #include <linux/seq_file.h> | ||
23 | |||
24 | #include "vop_main.h" | ||
25 | |||
26 | static int vop_dp_show(struct seq_file *s, void *pos) | ||
27 | { | ||
28 | struct mic_device_desc *d; | ||
29 | struct mic_device_ctrl *dc; | ||
30 | struct mic_vqconfig *vqconfig; | ||
31 | __u32 *features; | ||
32 | __u8 *config; | ||
33 | struct vop_info *vi = s->private; | ||
34 | struct vop_device *vpdev = vi->vpdev; | ||
35 | struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev); | ||
36 | int j, k; | ||
37 | |||
38 | seq_printf(s, "Bootparam: magic 0x%x\n", | ||
39 | bootparam->magic); | ||
40 | seq_printf(s, "Bootparam: h2c_config_db %d\n", | ||
41 | bootparam->h2c_config_db); | ||
42 | seq_printf(s, "Bootparam: node_id %d\n", | ||
43 | bootparam->node_id); | ||
44 | seq_printf(s, "Bootparam: c2h_scif_db %d\n", | ||
45 | bootparam->c2h_scif_db); | ||
46 | seq_printf(s, "Bootparam: h2c_scif_db %d\n", | ||
47 | bootparam->h2c_scif_db); | ||
48 | seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n", | ||
49 | bootparam->scif_host_dma_addr); | ||
50 | seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n", | ||
51 | bootparam->scif_card_dma_addr); | ||
52 | |||
53 | for (j = sizeof(*bootparam); | ||
54 | j < MIC_DP_SIZE; j += mic_total_desc_size(d)) { | ||
55 | d = (void *)bootparam + j; | ||
56 | dc = (void *)d + mic_aligned_desc_size(d); | ||
57 | |||
58 | /* end of list */ | ||
59 | if (d->type == 0) | ||
60 | break; | ||
61 | |||
62 | if (d->type == -1) | ||
63 | continue; | ||
64 | |||
65 | seq_printf(s, "Type %d ", d->type); | ||
66 | seq_printf(s, "Num VQ %d ", d->num_vq); | ||
67 | seq_printf(s, "Feature Len %d\n", d->feature_len); | ||
68 | seq_printf(s, "Config Len %d ", d->config_len); | ||
69 | seq_printf(s, "Shutdown Status %d\n", d->status); | ||
70 | |||
71 | for (k = 0; k < d->num_vq; k++) { | ||
72 | vqconfig = mic_vq_config(d) + k; | ||
73 | seq_printf(s, "vqconfig[%d]: ", k); | ||
74 | seq_printf(s, "address 0x%llx ", | ||
75 | vqconfig->address); | ||
76 | seq_printf(s, "num %d ", vqconfig->num); | ||
77 | seq_printf(s, "used address 0x%llx\n", | ||
78 | vqconfig->used_address); | ||
79 | } | ||
80 | |||
81 | features = (__u32 *)mic_vq_features(d); | ||
82 | seq_printf(s, "Features: Host 0x%x ", features[0]); | ||
83 | seq_printf(s, "Guest 0x%x\n", features[1]); | ||
84 | |||
85 | config = mic_vq_configspace(d); | ||
86 | for (k = 0; k < d->config_len; k++) | ||
87 | seq_printf(s, "config[%d]=%d\n", k, config[k]); | ||
88 | |||
89 | seq_puts(s, "Device control:\n"); | ||
90 | seq_printf(s, "Config Change %d ", dc->config_change); | ||
91 | seq_printf(s, "Vdev reset %d\n", dc->vdev_reset); | ||
92 | seq_printf(s, "Guest Ack %d ", dc->guest_ack); | ||
93 | seq_printf(s, "Host ack %d\n", dc->host_ack); | ||
94 | seq_printf(s, "Used address updated %d ", | ||
95 | dc->used_address_updated); | ||
96 | seq_printf(s, "Vdev 0x%llx\n", dc->vdev); | ||
97 | seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db); | ||
98 | seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db); | ||
99 | } | ||
100 | schedule_work(&vi->hotplug_work); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int vop_dp_debug_open(struct inode *inode, struct file *file) | ||
105 | { | ||
106 | return single_open(file, vop_dp_show, inode->i_private); | ||
107 | } | ||
108 | |||
109 | static int vop_dp_debug_release(struct inode *inode, struct file *file) | ||
110 | { | ||
111 | return single_release(inode, file); | ||
112 | } | ||
113 | |||
114 | static const struct file_operations dp_ops = { | ||
115 | .owner = THIS_MODULE, | ||
116 | .open = vop_dp_debug_open, | ||
117 | .read = seq_read, | ||
118 | .llseek = seq_lseek, | ||
119 | .release = vop_dp_debug_release | ||
120 | }; | ||
121 | |||
122 | static int vop_vdev_info_show(struct seq_file *s, void *unused) | ||
123 | { | ||
124 | struct vop_info *vi = s->private; | ||
125 | struct list_head *pos, *tmp; | ||
126 | struct vop_vdev *vdev; | ||
127 | int i, j; | ||
128 | |||
129 | mutex_lock(&vi->vop_mutex); | ||
130 | list_for_each_safe(pos, tmp, &vi->vdev_list) { | ||
131 | vdev = list_entry(pos, struct vop_vdev, list); | ||
132 | seq_printf(s, "VDEV type %d state %s in %ld out %ld in_dma %ld out_dma %ld\n", | ||
133 | vdev->virtio_id, | ||
134 | vop_vdevup(vdev) ? "UP" : "DOWN", | ||
135 | vdev->in_bytes, | ||
136 | vdev->out_bytes, | ||
137 | vdev->in_bytes_dma, | ||
138 | vdev->out_bytes_dma); | ||
139 | for (i = 0; i < MIC_MAX_VRINGS; i++) { | ||
140 | struct vring_desc *desc; | ||
141 | struct vring_avail *avail; | ||
142 | struct vring_used *used; | ||
143 | struct vop_vringh *vvr = &vdev->vvr[i]; | ||
144 | struct vringh *vrh = &vvr->vrh; | ||
145 | int num = vrh->vring.num; | ||
146 | |||
147 | if (!num) | ||
148 | continue; | ||
149 | desc = vrh->vring.desc; | ||
150 | seq_printf(s, "vring i %d avail_idx %d", | ||
151 | i, vvr->vring.info->avail_idx & (num - 1)); | ||
152 | seq_printf(s, " vring i %d avail_idx %d\n", | ||
153 | i, vvr->vring.info->avail_idx); | ||
154 | seq_printf(s, "vrh i %d weak_barriers %d", | ||
155 | i, vrh->weak_barriers); | ||
156 | seq_printf(s, " last_avail_idx %d last_used_idx %d", | ||
157 | vrh->last_avail_idx, vrh->last_used_idx); | ||
158 | seq_printf(s, " completed %d\n", vrh->completed); | ||
159 | for (j = 0; j < num; j++) { | ||
160 | seq_printf(s, "desc[%d] addr 0x%llx len %d", | ||
161 | j, desc->addr, desc->len); | ||
162 | seq_printf(s, " flags 0x%x next %d\n", | ||
163 | desc->flags, desc->next); | ||
164 | desc++; | ||
165 | } | ||
166 | avail = vrh->vring.avail; | ||
167 | seq_printf(s, "avail flags 0x%x idx %d\n", | ||
168 | vringh16_to_cpu(vrh, avail->flags), | ||
169 | vringh16_to_cpu(vrh, | ||
170 | avail->idx) & (num - 1)); | ||
171 | seq_printf(s, "avail flags 0x%x idx %d\n", | ||
172 | vringh16_to_cpu(vrh, avail->flags), | ||
173 | vringh16_to_cpu(vrh, avail->idx)); | ||
174 | for (j = 0; j < num; j++) | ||
175 | seq_printf(s, "avail ring[%d] %d\n", | ||
176 | j, avail->ring[j]); | ||
177 | used = vrh->vring.used; | ||
178 | seq_printf(s, "used flags 0x%x idx %d\n", | ||
179 | vringh16_to_cpu(vrh, used->flags), | ||
180 | vringh16_to_cpu(vrh, used->idx) & (num - 1)); | ||
181 | seq_printf(s, "used flags 0x%x idx %d\n", | ||
182 | vringh16_to_cpu(vrh, used->flags), | ||
183 | vringh16_to_cpu(vrh, used->idx)); | ||
184 | for (j = 0; j < num; j++) | ||
185 | seq_printf(s, "used ring[%d] id %d len %d\n", | ||
186 | j, vringh32_to_cpu(vrh, | ||
187 | used->ring[j].id), | ||
188 | vringh32_to_cpu(vrh, | ||
189 | used->ring[j].len)); | ||
190 | } | ||
191 | } | ||
192 | mutex_unlock(&vi->vop_mutex); | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int vop_vdev_info_debug_open(struct inode *inode, struct file *file) | ||
198 | { | ||
199 | return single_open(file, vop_vdev_info_show, inode->i_private); | ||
200 | } | ||
201 | |||
202 | static int vop_vdev_info_debug_release(struct inode *inode, struct file *file) | ||
203 | { | ||
204 | return single_release(inode, file); | ||
205 | } | ||
206 | |||
207 | static const struct file_operations vdev_info_ops = { | ||
208 | .owner = THIS_MODULE, | ||
209 | .open = vop_vdev_info_debug_open, | ||
210 | .read = seq_read, | ||
211 | .llseek = seq_lseek, | ||
212 | .release = vop_vdev_info_debug_release | ||
213 | }; | ||
214 | |||
215 | void vop_init_debugfs(struct vop_info *vi) | ||
216 | { | ||
217 | char name[16]; | ||
218 | |||
219 | snprintf(name, sizeof(name), "%s%d", KBUILD_MODNAME, vi->vpdev->dnode); | ||
220 | vi->dbg = debugfs_create_dir(name, NULL); | ||
221 | if (!vi->dbg) { | ||
222 | pr_err("can't create debugfs dir vop\n"); | ||
223 | return; | ||
224 | } | ||
225 | debugfs_create_file("dp", 0444, vi->dbg, vi, &dp_ops); | ||
226 | debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vdev_info_ops); | ||
227 | } | ||
228 | |||
229 | void vop_exit_debugfs(struct vop_info *vi) | ||
230 | { | ||
231 | debugfs_remove_recursive(vi->dbg); | ||
232 | } | ||
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c new file mode 100644 index 000000000000..1a2b67f3183d --- /dev/null +++ b/drivers/misc/mic/vop/vop_main.c | |||
@@ -0,0 +1,755 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2016 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Adapted from: | ||
19 | * | ||
20 | * virtio for kvm on s390 | ||
21 | * | ||
22 | * Copyright IBM Corp. 2008 | ||
23 | * | ||
24 | * This program is free software; you can redistribute it and/or modify | ||
25 | * it under the terms of the GNU General Public License (version 2 only) | ||
26 | * as published by the Free Software Foundation. | ||
27 | * | ||
28 | * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> | ||
29 | * | ||
30 | * Intel Virtio Over PCIe (VOP) driver. | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/module.h> | ||
35 | #include <linux/sched.h> | ||
36 | #include <linux/dma-mapping.h> | ||
37 | |||
38 | #include "vop_main.h" | ||
39 | |||
40 | #define VOP_MAX_VRINGS 4 | ||
41 | |||
/*
 * _vop_vdev - Allocated per virtio device instance injected by the peer.
 *
 * @vdev: Virtio device
 * @desc: Virtio device page descriptor (iomem, lives in the peer's pages)
 * @dc: Virtio device control structure, placed right after the
 *      8-byte-aligned descriptor
 * @vpdev: VOP device which is the parent for this virtio device
 * @vr: ioremap'ed mapping of each remote VRING
 * @used: DMA address of each locally allocated used ring
 * @used_size: Size of each locally allocated used ring
 * @reset_done: Track whether VOP reset is complete
 * @virtio_cookie: Cookie returned upon requesting a interrupt
 * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
 * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
 * @dnode: The destination node
 */
struct _vop_vdev {
	struct virtio_device vdev;
	struct mic_device_desc __iomem *desc;
	struct mic_device_ctrl __iomem *dc;
	struct vop_device *vpdev;
	void __iomem *vr[VOP_MAX_VRINGS];
	dma_addr_t used[VOP_MAX_VRINGS];
	int used_size[VOP_MAX_VRINGS];
	struct completion reset_done;
	struct mic_irq *virtio_cookie;
	int c2h_vdev_db;
	int h2c_vdev_db;
	int dnode;
};
72 | |||
73 | #define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev) | ||
74 | |||
75 | #define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8) | ||
76 | |||
77 | /* Helper API to obtain the parent of the virtio device */ | ||
/*
 * Helper API to obtain the parent of the virtio device; used as the
 * struct device for dev_dbg()/dev_err() logging throughout this file.
 */
static inline struct device *_vop_dev(struct _vop_vdev *vdev)
{
	return vdev->vdev.dev.parent;
}
82 | |||
/*
 * Raw size of a device descriptor: fixed header, one mic_vqconfig per
 * virtqueue, two feature bitmaps (offered + acked halves, feature_len
 * bytes each) and the device config space.
 */
static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
{
	return sizeof(*desc)
		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
		+ ioread8(&desc->feature_len) * 2
		+ ioread8(&desc->config_len);
}
90 | |||
/* The vqconfig array starts immediately after the fixed descriptor header. */
static inline struct mic_vqconfig __iomem *
_vop_vq_config(struct mic_device_desc __iomem *desc)
{
	return (struct mic_vqconfig __iomem *)(desc + 1);
}
96 | |||
/* The feature bitmaps follow the num_vq vqconfig entries. */
static inline u8 __iomem *
_vop_vq_features(struct mic_device_desc __iomem *desc)
{
	return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
}
102 | |||
/* Config space follows both halves (offered + acked) of the feature bitmap. */
static inline u8 __iomem *
_vop_vq_configspace(struct mic_device_desc __iomem *desc)
{
	return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
}
108 | |||
/*
 * Total footprint of one device slot in the device page: the descriptor
 * rounded up to 8 bytes plus its trailing control structure. Used as the
 * stride when walking the device page in _vop_scan_devices().
 */
static inline unsigned
_vop_total_desc_size(struct mic_device_desc __iomem *desc)
{
	return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
}
114 | |||
115 | /* This gets the device's feature bits. */ | ||
116 | static u64 vop_get_features(struct virtio_device *vdev) | ||
117 | { | ||
118 | unsigned int i, bits; | ||
119 | u32 features = 0; | ||
120 | struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc; | ||
121 | u8 __iomem *in_features = _vop_vq_features(desc); | ||
122 | int feature_len = ioread8(&desc->feature_len); | ||
123 | |||
124 | bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8; | ||
125 | for (i = 0; i < bits; i++) | ||
126 | if (ioread8(&in_features[i / 8]) & (BIT(i % 8))) | ||
127 | features |= BIT(i); | ||
128 | |||
129 | return features; | ||
130 | } | ||
131 | |||
/*
 * Write the features the guest accepted into the second (acked) half of
 * the descriptor's feature bitmap, after letting the virtio ring layer
 * strip transport features it does not support.
 */
static int vop_finalize_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 feature_len = ioread8(&desc->feature_len);
	/* Second half of bitmap is features we accept. */
	u8 __iomem *out_features =
		_vop_vq_features(desc) + feature_len;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Clear the acked half, then set each bit the driver accepted. */
	memset_io(out_features, 0, feature_len);
	bits = min_t(unsigned, feature_len,
		     sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++) {
		if (__virtio_test_bit(vdev, i))
			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
				 &out_features[i / 8]);
	}
	return 0;
}
154 | |||
155 | /* | ||
156 | * Reading and writing elements in config space | ||
157 | */ | ||
158 | static void vop_get(struct virtio_device *vdev, unsigned int offset, | ||
159 | void *buf, unsigned len) | ||
160 | { | ||
161 | struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc; | ||
162 | |||
163 | if (offset + len > ioread8(&desc->config_len)) | ||
164 | return; | ||
165 | memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len); | ||
166 | } | ||
167 | |||
168 | static void vop_set(struct virtio_device *vdev, unsigned int offset, | ||
169 | const void *buf, unsigned len) | ||
170 | { | ||
171 | struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc; | ||
172 | |||
173 | if (offset + len > ioread8(&desc->config_len)) | ||
174 | return; | ||
175 | memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len); | ||
176 | } | ||
177 | |||
/*
 * The operations to get and set the status word just access the status
 * field of the device descriptor. set_status also interrupts the host
 * to tell about status changes.
 */
static u8 vop_get_status(struct virtio_device *vdev)
{
	return ioread8(&to_vopvdev(vdev)->desc->status);
}
187 | |||
/*
 * Write the status byte and ring the card-to-host doorbell so the peer
 * notices. A zero status means reset, which is handled by vop_reset()
 * instead, hence the early return.
 */
static void vop_set_status(struct virtio_device *dev, u8 status)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;

	if (!status)
		return;
	iowrite8(status, &vdev->desc->status);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
}
198 | |||
199 | /* Inform host on a virtio device reset and wait for ack from host */ | ||
200 | static void vop_reset_inform_host(struct virtio_device *dev) | ||
201 | { | ||
202 | struct _vop_vdev *vdev = to_vopvdev(dev); | ||
203 | struct mic_device_ctrl __iomem *dc = vdev->dc; | ||
204 | struct vop_device *vpdev = vdev->vpdev; | ||
205 | int retry; | ||
206 | |||
207 | iowrite8(0, &dc->host_ack); | ||
208 | iowrite8(1, &dc->vdev_reset); | ||
209 | vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db); | ||
210 | |||
211 | /* Wait till host completes all card accesses and acks the reset */ | ||
212 | for (retry = 100; retry--;) { | ||
213 | if (ioread8(&dc->host_ack)) | ||
214 | break; | ||
215 | msleep(100); | ||
216 | }; | ||
217 | |||
218 | dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry); | ||
219 | |||
220 | /* Reset status to 0 in case we timed out */ | ||
221 | iowrite8(0, &vdev->desc->status); | ||
222 | } | ||
223 | |||
/*
 * virtio config op: reset the device. Performs the host handshake and
 * then completes reset_done so _vop_remove_device() can stop waiting.
 */
static void vop_reset(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);

	dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
		__func__, dev->id.device);

	vop_reset_inform_host(dev);
	complete_all(&vdev->reset_done);
}
234 | |||
/*
 * The virtio_ring code calls this API when it wants to notify the Host.
 * Notification is a doorbell interrupt; always reports success.
 */
static bool vop_notify(struct virtqueue *vq)
{
	struct _vop_vdev *vdev = vq->priv;
	struct vop_device *vpdev = vdev->vpdev;

	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	return true;
}
246 | |||
/*
 * Tear down virtqueue @n: unmap and free the locally allocated used
 * ring, delete the virtqueue, then iounmap the remote vring.
 *
 * NOTE: the (vq + 1) cast relies on struct vring being placed directly
 * after struct virtqueue inside virtio_ring.c's private
 * struct vring_virtqueue — the same layout assumption documented in
 * vop_find_vq().
 */
static void vop_del_vq(struct virtqueue *vq, int n)
{
	struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
	struct vring *vr = (struct vring *)(vq + 1);
	struct vop_device *vpdev = vdev->vpdev;

	dma_unmap_single(&vpdev->dev, vdev->used[n],
			 vdev->used_size[n], DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
	vring_del_virtqueue(vq);
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
	vdev->vr[n] = NULL;
}
260 | |||
/*
 * virtio config op: delete all virtqueues. Walks dev->vqs with the
 * _safe iterator since vop_del_vq() removes each entry from the list.
 */
static void vop_del_vqs(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct virtqueue *vq, *n;
	int idx = 0;

	dev_dbg(_vop_dev(vdev), "%s\n", __func__);

	list_for_each_entry_safe(vq, n, &dev->vqs, list)
		vop_del_vq(vq, idx++);
}
272 | |||
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 *
 * Steps: map the remote vring described by the device page, create a
 * virtqueue on top of it, validate the magic cookie stored after the
 * ring, then replace the used ring with a locally allocated,
 * DMA-mapped copy and publish its bus address back to the host.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	struct vring *vr;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vring's allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	/* The mapping covers the ring plus the trailing _mic_vring_info. */
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
			vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	/* Zero only the ring proper; the info struct holds the magic. */
	memset_io(va, 0x0, _vr_size);
	vq = vring_new_virtqueue(
				index,
				le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
				dev,
				false,
				(void __force *)va, vop_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}
	info = va + _vr_size;
	magic = ioread32(&info->magic);

	/* Sanity-check that host and guest agree on this ring's identity. */
	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	/* Allocate and reassign used ring now */
	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					     sizeof(struct vring_used_elem) *
					     le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					    vdev->used_size[index],
					    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto free_used;
	}
	/* Tell the host where our used ring lives (bus address). */
	writeq(vdev->used[index], &vqconfig->used_address);
	/*
	 * To reassign the used ring here we are directly accessing
	 * struct vring_virtqueue which is a private data structure
	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
	 * vring_new_virtqueue() would ensure that
	 * (&vq->vring == (struct vring *) (&vq->vq + 1));
	 */
	vr = (struct vring *)(vq + 1);
	vr->used = used;

	vq->priv = vdev;
	return vq;
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
del_vq:
	vring_del_virtqueue(vq);
unmap:
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}
373 | |||
374 | static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, | ||
375 | struct virtqueue *vqs[], | ||
376 | vq_callback_t *callbacks[], | ||
377 | const char * const names[]) | ||
378 | { | ||
379 | struct _vop_vdev *vdev = to_vopvdev(dev); | ||
380 | struct vop_device *vpdev = vdev->vpdev; | ||
381 | struct mic_device_ctrl __iomem *dc = vdev->dc; | ||
382 | int i, err, retry; | ||
383 | |||
384 | /* We must have this many virtqueues. */ | ||
385 | if (nvqs > ioread8(&vdev->desc->num_vq)) | ||
386 | return -ENOENT; | ||
387 | |||
388 | for (i = 0; i < nvqs; ++i) { | ||
389 | dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", | ||
390 | __func__, i, names[i]); | ||
391 | vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]); | ||
392 | if (IS_ERR(vqs[i])) { | ||
393 | err = PTR_ERR(vqs[i]); | ||
394 | goto error; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | iowrite8(1, &dc->used_address_updated); | ||
399 | /* | ||
400 | * Send an interrupt to the host to inform it that used | ||
401 | * rings have been re-assigned. | ||
402 | */ | ||
403 | vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db); | ||
404 | for (retry = 100; --retry;) { | ||
405 | if (!ioread8(&dc->used_address_updated)) | ||
406 | break; | ||
407 | msleep(100); | ||
408 | }; | ||
409 | |||
410 | dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry); | ||
411 | if (!retry) { | ||
412 | err = -ENODEV; | ||
413 | goto error; | ||
414 | } | ||
415 | |||
416 | return 0; | ||
417 | error: | ||
418 | vop_del_vqs(dev); | ||
419 | return err; | ||
420 | } | ||
421 | |||
/*
 * The config ops structure as defined by virtio config; connects the
 * generic virtio core to the VOP device-page accessors above.
 */
static struct virtio_config_ops vop_vq_config_ops = {
	.get_features = vop_get_features,
	.finalize_features = vop_finalize_features,
	.get = vop_get,
	.set = vop_set,
	.get_status = vop_get_status,
	.set_status = vop_set_status,
	.reset = vop_reset,
	.find_vqs = vop_find_vqs,
	.del_vqs = vop_del_vqs,
};
436 | |||
/*
 * Doorbell interrupt handler for a single virtio device: ack the
 * doorbell and kick vring_interrupt() for every virtqueue, since the
 * doorbell does not identify which ring has work.
 */
static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
{
	struct _vop_vdev *vdev = data;
	struct vop_device *vpdev = vdev->vpdev;
	struct virtqueue *vq;

	vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
	list_for_each_entry(vq, &vdev->vdev.vqs, list)
		vring_interrupt(0, vq);

	return IRQ_HANDLED;
}
449 | |||
/* Intentionally empty device-release callback. */
static void vop_virtio_release_dev(struct device *_d)
{
	/*
	 * No need for a release method similar to virtio PCI.
	 * Provide an empty one to avoid getting a warning from core.
	 * The containing _vop_vdev is freed in _vop_remove_device().
	 */
}
457 | |||
/*
 * adds a new device and register it with virtio
 * appropriate drivers are loaded by the device model
 *
 * Sequence: allocate and fill a _vop_vdev for the descriptor at
 * @offset, request a doorbell IRQ, exchange doorbell numbers with the
 * host via the control structure, register with the virtio core and
 * finally publish the _vop_vdev pointer in dc->vdev so later config
 * changes/removals can find it.
 */
static int _vop_add_device(struct mic_device_desc __iomem *d,
			   unsigned int offset, struct vop_device *vpdev,
			   int dnode)
{
	struct _vop_vdev *vdev;
	int ret;
	u8 type = ioread8(&d->type);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->vpdev = vpdev;
	vdev->vdev.dev.parent = &vpdev->dev;
	vdev->vdev.dev.release = vop_virtio_release_dev;
	vdev->vdev.id.device = type;
	vdev->vdev.config = &vop_vq_config_ops;
	vdev->desc = d;
	vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
	vdev->dnode = dnode;
	vdev->vdev.priv = (void *)(u64)dnode;
	init_completion(&vdev->reset_done);

	/* Obtain a doorbell and hook the per-device interrupt handler. */
	vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
			vop_virtio_intr_handler, "virtio intr",
			vdev, vdev->h2c_vdev_db);
	if (IS_ERR(vdev->virtio_cookie)) {
		ret = PTR_ERR(vdev->virtio_cookie);
		goto kfree;
	}
	/* Exchange doorbell numbers with the host. */
	iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
	vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);

	ret = register_virtio_device(&vdev->vdev);
	if (ret) {
		dev_err(_vop_dev(vdev),
			"Failed to register vop device %u type %u\n",
			offset, type);
		goto free_irq;
	}
	/* Publish our handle so the host-side control path can find us. */
	writeq((u64)vdev, &vdev->dc->vdev);
	dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
		__func__, offset, type, vdev);

	return 0;

free_irq:
	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
	kfree(vdev);
	return ret;
}
515 | |||
/*
 * match for a vop device with a specific desc pointer; used as the
 * device_find_child() predicate in _vop_scan_devices().
 */
static int vop_match_desc(struct device *dev, void *data)
{
	struct virtio_device *_dev = dev_to_virtio(dev);
	struct _vop_vdev *vdev = to_vopvdev(_dev);

	return vdev->desc == (void __iomem *)data;
}
526 | |||
/*
 * If the host flagged a config change for this descriptor, forward it
 * to the virtio core and ack via guest_ack.
 */
static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
				      unsigned int offset,
				      struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	/* dc->vdev holds the pointer published by _vop_add_device(). */
	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);

	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
		return;

	dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
	virtio_config_changed(&vdev->vdev);
	iowrite8(1, &dc->guest_ack);
}
542 | |||
/*
 * removes a virtio device if a hot remove event has been
 * requested by the host.
 *
 * Returns 0 when a removal was performed, -1 otherwise. If a driver
 * was bound (DRIVER_OK set) the teardown waits for vop_reset() to
 * complete reset_done before freeing the vdev, then acks the host and
 * retires the descriptor by writing type = -1.
 */
static int _vop_remove_device(struct mic_device_desc __iomem *d,
			      unsigned int offset, struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
	u8 status;
	int ret = -1;

	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
		dev_dbg(&vpdev->dev,
			"%s %d config_change %d type %d vdev %p\n",
			__func__, __LINE__,
			ioread8(&dc->config_change), ioread8(&d->type), vdev);
		status = ioread8(&d->status);
		reinit_completion(&vdev->reset_done);
		unregister_virtio_device(&vdev->vdev);
		vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
		iowrite8(-1, &dc->h2c_vdev_db);
		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
			wait_for_completion(&vdev->reset_done);
		kfree(vdev);
		iowrite8(1, &dc->guest_ack);
		dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
			__func__, __LINE__, ioread8(&dc->guest_ack));
		iowrite8(-1, &d->type);
		ret = 0;
	}
	return ret;
}
577 | |||
578 | #define REMOVE_DEVICES true | ||
579 | |||
580 | static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev, | ||
581 | bool remove, int dnode) | ||
582 | { | ||
583 | s8 type; | ||
584 | unsigned int i; | ||
585 | struct mic_device_desc __iomem *d; | ||
586 | struct mic_device_ctrl __iomem *dc; | ||
587 | struct device *dev; | ||
588 | int ret; | ||
589 | |||
590 | for (i = sizeof(struct mic_bootparam); | ||
591 | i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) { | ||
592 | d = dp + i; | ||
593 | dc = (void __iomem *)d + _vop_aligned_desc_size(d); | ||
594 | /* | ||
595 | * This read barrier is paired with the corresponding write | ||
596 | * barrier on the host which is inserted before adding or | ||
597 | * removing a virtio device descriptor, by updating the type. | ||
598 | */ | ||
599 | rmb(); | ||
600 | type = ioread8(&d->type); | ||
601 | |||
602 | /* end of list */ | ||
603 | if (type == 0) | ||
604 | break; | ||
605 | |||
606 | if (type == -1) | ||
607 | continue; | ||
608 | |||
609 | /* device already exists */ | ||
610 | dev = device_find_child(&vpdev->dev, (void __force *)d, | ||
611 | vop_match_desc); | ||
612 | if (dev) { | ||
613 | if (remove) | ||
614 | iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, | ||
615 | &dc->config_change); | ||
616 | put_device(dev); | ||
617 | _vop_handle_config_change(d, i, vpdev); | ||
618 | ret = _vop_remove_device(d, i, vpdev); | ||
619 | if (remove) { | ||
620 | iowrite8(0, &dc->config_change); | ||
621 | iowrite8(0, &dc->guest_ack); | ||
622 | } | ||
623 | continue; | ||
624 | } | ||
625 | |||
626 | /* new device */ | ||
627 | dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n", | ||
628 | __func__, __LINE__, d); | ||
629 | if (!remove) | ||
630 | _vop_add_device(d, i, vpdev, dnode); | ||
631 | } | ||
632 | } | ||
633 | |||
/*
 * Serialized wrapper around _vop_scan_devices(): takes vop_mutex so
 * device-page scans never race with each other. No-op if the remote
 * device page is not available.
 */
static void vop_scan_devices(struct vop_info *vi,
			     struct vop_device *vpdev, bool remove)
{
	void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);

	if (!dp)
		return;
	mutex_lock(&vi->vop_mutex);
	_vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
	mutex_unlock(&vi->vop_mutex);
}
645 | |||
/*
 * vop_hotplug_device tries to find changes in the device page.
 * Runs from the hotplug workqueue scheduled by vop_extint_handler().
 */
static void vop_hotplug_devices(struct work_struct *work)
{
	struct vop_info *vi = container_of(work, struct vop_info,
					   hotplug_work);

	vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
}
656 | |||
/*
 * Interrupt handler for hot plug/config changes etc.
 * Acks the config doorbell and defers the (sleeping) device-page scan
 * to the hotplug work item.
 */
static irqreturn_t vop_extint_handler(int irq, void *data)
{
	struct vop_info *vi = data;
	struct mic_bootparam __iomem *bp;
	struct vop_device *vpdev = vi->vpdev;

	bp = vpdev->hw_ops->get_remote_dp(vpdev);
	dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
		__func__, __LINE__);
	vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
	schedule_work(&vi->hotplug_work);
	return IRQ_HANDLED;
}
673 | |||
/*
 * VOP bus probe. dnode != 0 means we are on the host side and only the
 * host transport needs initializing; dnode == 0 is the card side, which
 * scans the device page for injected devices and hooks the config-change
 * doorbell interrupt. Debugfs setup is best effort either way.
 */
static int vop_driver_probe(struct vop_device *vpdev)
{
	struct vop_info *vi;
	int rc;

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi) {
		rc = -ENOMEM;
		goto exit;
	}
	dev_set_drvdata(&vpdev->dev, vi);
	vi->vpdev = vpdev;

	mutex_init(&vi->vop_mutex);
	INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
	if (vpdev->dnode) {
		rc = vop_host_init(vi);
		if (rc < 0)
			goto free;
	} else {
		struct mic_bootparam __iomem *bootparam;

		vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);

		/* Register for config-change doorbell interrupts. */
		vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
		vi->cookie = vpdev->hw_ops->request_irq(vpdev,
							vop_extint_handler,
							"virtio_config_intr",
							vi, vi->h2c_config_db);
		if (IS_ERR(vi->cookie)) {
			rc = PTR_ERR(vi->cookie);
			goto free;
		}
		/* Tell the peer which doorbell to ring for config changes. */
		bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
		iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
	}
	vop_init_debugfs(vi);
	return 0;
free:
	kfree(vi);
exit:
	return rc;
}
717 | |||
/*
 * VOP bus remove: mirror of vop_driver_probe(). On the card side,
 * disable the config doorbell, free its IRQ, flush any in-flight
 * hotplug work and then force-remove all injected virtio devices.
 */
static void vop_driver_remove(struct vop_device *vpdev)
{
	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);

	if (vpdev->dnode) {
		vop_host_uninit(vi);
	} else {
		struct mic_bootparam __iomem *bootparam =
			vpdev->hw_ops->get_remote_dp(vpdev);
		/* Invalidate the doorbell so the peer stops ringing it. */
		if (bootparam)
			iowrite8(-1, &bootparam->h2c_config_db);
		vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
		flush_work(&vi->hotplug_work);
		vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
	}
	vop_exit_debugfs(vi);
	kfree(vi);
}
736 | |||
/* Devices this driver binds to on the VOP bus; zero entry terminates. */
static struct vop_device_id id_table[] = {
	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
	{ 0 },
};
741 | |||
/* VOP bus driver glue; registered via module_vop_driver() below. */
static struct vop_driver vop_driver = {
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table = id_table,
	.probe = vop_driver_probe,
	.remove = vop_driver_remove,
};
749 | |||
750 | module_vop_driver(vop_driver); | ||
751 | |||
752 | MODULE_DEVICE_TABLE(mbus, id_table); | ||
753 | MODULE_AUTHOR("Intel Corporation"); | ||
754 | MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver"); | ||
755 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/vop/vop_main.h index a80631f2790d..ba47ec7a6386 100644 --- a/drivers/misc/mic/host/mic_virtio.h +++ b/drivers/misc/mic/vop/vop_main.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Intel MIC Platform Software Stack (MPSS) | 2 | * Intel MIC Platform Software Stack (MPSS) |
3 | * | 3 | * |
4 | * Copyright(c) 2013 Intel Corporation. | 4 | * Copyright(c) 2016 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License, version 2, as | 7 | * it under the terms of the GNU General Public License, version 2, as |
@@ -15,14 +15,21 @@ | |||
15 | * The full GNU General Public License is included in this distribution in | 15 | * The full GNU General Public License is included in this distribution in |
16 | * the file called "COPYING". | 16 | * the file called "COPYING". |
17 | * | 17 | * |
18 | * Intel MIC Host driver. | 18 | * Intel Virtio Over PCIe (VOP) driver. |
19 | * | 19 | * |
20 | */ | 20 | */ |
21 | #ifndef MIC_VIRTIO_H | 21 | #ifndef _VOP_MAIN_H_ |
22 | #define MIC_VIRTIO_H | 22 | #define _VOP_MAIN_H_ |
23 | 23 | ||
24 | #include <linux/vringh.h> | ||
24 | #include <linux/virtio_config.h> | 25 | #include <linux/virtio_config.h> |
25 | #include <linux/mic_ioctl.h> | 26 | #include <linux/virtio.h> |
27 | #include <linux/miscdevice.h> | ||
28 | |||
29 | #include <linux/mic_common.h> | ||
30 | #include "../common/mic_dev.h" | ||
31 | |||
32 | #include "../bus/vop_bus.h" | ||
26 | 33 | ||
27 | /* | 34 | /* |
28 | * Note on endianness. | 35 | * Note on endianness. |
@@ -39,38 +46,68 @@ | |||
39 | * in guest endianness. | 46 | * in guest endianness. |
40 | */ | 47 | */ |
41 | 48 | ||
49 | /* | ||
50 | * vop_info - Allocated per invocation of VOP probe | ||
51 | * | ||
52 | * @vpdev: VOP device | ||
53 | * @hotplug_work: Handle virtio device creation, deletion and configuration | ||
54 | * @cookie: Cookie received upon requesting a virtio configuration interrupt | ||
55 | * @h2c_config_db: The doorbell used by the peer to indicate a config change | ||
56 | * @vdev_list: List of "active" virtio devices injected in the peer node | ||
57 | * @vop_mutex: Synchronize access to the device page as well as serialize | ||
58 | * creation/deletion of virtio devices on the peer node | ||
59 | * @dp: Peer device page information | ||
60 | * @dbg: Debugfs entry | ||
61 | * @dma_ch: The DMA channel used by this transport for data transfers. | ||
62 | * @name: Name for this transport used in misc device creation. | ||
63 | * @miscdev: The misc device registered. | ||
64 | */ | ||
65 | struct vop_info { | ||
66 | struct vop_device *vpdev; | ||
67 | struct work_struct hotplug_work; | ||
68 | struct mic_irq *cookie; | ||
69 | int h2c_config_db; | ||
70 | struct list_head vdev_list; | ||
71 | struct mutex vop_mutex; | ||
72 | void __iomem *dp; | ||
73 | struct dentry *dbg; | ||
74 | struct dma_chan *dma_ch; | ||
75 | char name[16]; | ||
76 | struct miscdevice miscdev; | ||
77 | }; | ||
78 | |||
42 | /** | 79 | /** |
43 | * struct mic_vringh - Virtio ring host information. | 80 | * struct vop_vringh - Virtio ring host information. |
44 | * | 81 | * |
45 | * @vring: The MIC vring used for setting up user space mappings. | 82 | * @vring: The VOP vring used for setting up user space mappings. |
46 | * @vrh: The host VRINGH used for accessing the card vrings. | 83 | * @vrh: The host VRINGH used for accessing the card vrings. |
47 | * @riov: The VRINGH read kernel IOV. | 84 | * @riov: The VRINGH read kernel IOV. |
48 | * @wiov: The VRINGH write kernel IOV. | 85 | * @wiov: The VRINGH write kernel IOV. |
86 | * @head: The VRINGH head index address passed to vringh_getdesc_kern(..). | ||
49 | * @vr_mutex: Mutex for synchronizing access to the VRING. | 87 | * @vr_mutex: Mutex for synchronizing access to the VRING. |
50 | * @buf: Temporary kernel buffer used to copy in/out data | 88 | * @buf: Temporary kernel buffer used to copy in/out data |
51 | * from/to the card via DMA. | 89 | * from/to the card via DMA. |
52 | * @buf_da: dma address of buf. | 90 | * @buf_da: dma address of buf. |
53 | * @mvdev: Back pointer to MIC virtio device for vringh_notify(..). | 91 | * @vdev: Back pointer to VOP virtio device for vringh_notify(..). |
54 | * @head: The VRINGH head index address passed to vringh_getdesc_kern(..). | ||
55 | */ | 92 | */ |
56 | struct mic_vringh { | 93 | struct vop_vringh { |
57 | struct mic_vring vring; | 94 | struct mic_vring vring; |
58 | struct vringh vrh; | 95 | struct vringh vrh; |
59 | struct vringh_kiov riov; | 96 | struct vringh_kiov riov; |
60 | struct vringh_kiov wiov; | 97 | struct vringh_kiov wiov; |
98 | u16 head; | ||
61 | struct mutex vr_mutex; | 99 | struct mutex vr_mutex; |
62 | void *buf; | 100 | void *buf; |
63 | dma_addr_t buf_da; | 101 | dma_addr_t buf_da; |
64 | struct mic_vdev *mvdev; | 102 | struct vop_vdev *vdev; |
65 | u16 head; | ||
66 | }; | 103 | }; |
67 | 104 | ||
68 | /** | 105 | /** |
69 | * struct mic_vdev - Host information for a card Virtio device. | 106 | * struct vop_vdev - Host information for a card Virtio device. |
70 | * | 107 | * |
71 | * @virtio_id - Virtio device id. | 108 | * @virtio_id - Virtio device id. |
72 | * @waitq - Waitqueue to allow ring3 apps to poll. | 109 | * @waitq - Waitqueue to allow ring3 apps to poll. |
73 | * @mdev - Back pointer to host MIC device. | 110 | * @vpdev - pointer to VOP bus device. |
74 | * @poll_wake - Used for waking up threads blocked in poll. | 111 | * @poll_wake - Used for waking up threads blocked in poll. |
75 | * @out_bytes - Debug stats for number of bytes copied from host to card. | 112 | * @out_bytes - Debug stats for number of bytes copied from host to card. |
76 | * @in_bytes - Debug stats for number of bytes copied from card to host. | 113 | * @in_bytes - Debug stats for number of bytes copied from card to host. |
@@ -82,18 +119,23 @@ struct mic_vringh { | |||
82 | * the transfer length did not have the required DMA alignment. | 119 | * the transfer length did not have the required DMA alignment. |
83 | * @tx_dst_unaligned - Debug stats for number of bytes copied where the | 120 | * @tx_dst_unaligned - Debug stats for number of bytes copied where the |
84 | * destination address on the card did not have the required DMA alignment. | 121 | * destination address on the card did not have the required DMA alignment. |
85 | * @mvr - Store per VRING data structures. | 122 | * @vvr - Store per VRING data structures. |
86 | * @virtio_bh_work - Work struct used to schedule virtio bottom half handling. | 123 | * @virtio_bh_work - Work struct used to schedule virtio bottom half handling. |
87 | * @dd - Virtio device descriptor. | 124 | * @dd - Virtio device descriptor. |
88 | * @dc - Virtio device control fields. | 125 | * @dc - Virtio device control fields. |
89 | * @list - List of Virtio devices. | 126 | * @list - List of Virtio devices. |
90 | * @virtio_db - The doorbell used by the card to interrupt the host. | 127 | * @virtio_db - The doorbell used by the card to interrupt the host. |
91 | * @virtio_cookie - The cookie returned while requesting interrupts. | 128 | * @virtio_cookie - The cookie returned while requesting interrupts. |
129 | * @vi: Transport information. | ||
130 | * @vdev_mutex: Mutex synchronizing virtio device injection, | ||
131 | * removal and data transfers. | ||
132 | * @destroy: Track if a virtio device is being destroyed. | ||
133 | * @deleted: The virtio device has been deleted. | ||
92 | */ | 134 | */ |
93 | struct mic_vdev { | 135 | struct vop_vdev { |
94 | int virtio_id; | 136 | int virtio_id; |
95 | wait_queue_head_t waitq; | 137 | wait_queue_head_t waitq; |
96 | struct mic_device *mdev; | 138 | struct vop_device *vpdev; |
97 | int poll_wake; | 139 | int poll_wake; |
98 | unsigned long out_bytes; | 140 | unsigned long out_bytes; |
99 | unsigned long in_bytes; | 141 | unsigned long in_bytes; |
@@ -101,55 +143,28 @@ struct mic_vdev { | |||
101 | unsigned long in_bytes_dma; | 143 | unsigned long in_bytes_dma; |
102 | unsigned long tx_len_unaligned; | 144 | unsigned long tx_len_unaligned; |
103 | unsigned long tx_dst_unaligned; | 145 | unsigned long tx_dst_unaligned; |
104 | struct mic_vringh mvr[MIC_MAX_VRINGS]; | 146 | unsigned long rx_dst_unaligned; |
147 | struct vop_vringh vvr[MIC_MAX_VRINGS]; | ||
105 | struct work_struct virtio_bh_work; | 148 | struct work_struct virtio_bh_work; |
106 | struct mic_device_desc *dd; | 149 | struct mic_device_desc *dd; |
107 | struct mic_device_ctrl *dc; | 150 | struct mic_device_ctrl *dc; |
108 | struct list_head list; | 151 | struct list_head list; |
109 | int virtio_db; | 152 | int virtio_db; |
110 | struct mic_irq *virtio_cookie; | 153 | struct mic_irq *virtio_cookie; |
154 | struct vop_info *vi; | ||
155 | struct mutex vdev_mutex; | ||
156 | struct completion destroy; | ||
157 | bool deleted; | ||
111 | }; | 158 | }; |
112 | 159 | ||
113 | void mic_virtio_uninit(struct mic_device *mdev); | ||
114 | int mic_virtio_add_device(struct mic_vdev *mvdev, | ||
115 | void __user *argp); | ||
116 | void mic_virtio_del_device(struct mic_vdev *mvdev); | ||
117 | int mic_virtio_config_change(struct mic_vdev *mvdev, | ||
118 | void __user *argp); | ||
119 | int mic_virtio_copy_desc(struct mic_vdev *mvdev, | ||
120 | struct mic_copy_desc *request); | ||
121 | void mic_virtio_reset_devices(struct mic_device *mdev); | ||
122 | void mic_bh_handler(struct work_struct *work); | ||
123 | |||
124 | /* Helper API to obtain the MIC PCIe device */ | ||
125 | static inline struct device *mic_dev(struct mic_vdev *mvdev) | ||
126 | { | ||
127 | return &mvdev->mdev->pdev->dev; | ||
128 | } | ||
129 | |||
130 | /* Helper API to check if a virtio device is initialized */ | ||
131 | static inline int mic_vdev_inited(struct mic_vdev *mvdev) | ||
132 | { | ||
133 | /* Device has not been created yet */ | ||
134 | if (!mvdev->dd || !mvdev->dd->type) { | ||
135 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
136 | __func__, __LINE__, -EINVAL); | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | |||
140 | /* Device has been removed/deleted */ | ||
141 | if (mvdev->dd->type == -1) { | ||
142 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
143 | __func__, __LINE__, -ENODEV); | ||
144 | return -ENODEV; | ||
145 | } | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | /* Helper API to check if a virtio device is running */ | 160 | /* Helper API to check if a virtio device is running */ |
151 | static inline bool mic_vdevup(struct mic_vdev *mvdev) | 161 | static inline bool vop_vdevup(struct vop_vdev *vdev) |
152 | { | 162 | { |
153 | return !!mvdev->dd->status; | 163 | return !!vdev->dd->status; |
154 | } | 164 | } |
165 | |||
166 | void vop_init_debugfs(struct vop_info *vi); | ||
167 | void vop_exit_debugfs(struct vop_info *vi); | ||
168 | int vop_host_init(struct vop_info *vi); | ||
169 | void vop_host_uninit(struct vop_info *vi); | ||
155 | #endif | 170 | #endif |
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c new file mode 100644 index 000000000000..e94c7fb6712a --- /dev/null +++ b/drivers/misc/mic/vop/vop_vringh.c | |||
@@ -0,0 +1,1165 @@ | |||
1 | /* | ||
2 | * Intel MIC Platform Software Stack (MPSS) | ||
3 | * | ||
4 | * Copyright(c) 2016 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * The full GNU General Public License is included in this distribution in | ||
16 | * the file called "COPYING". | ||
17 | * | ||
18 | * Intel Virtio Over PCIe (VOP) driver. | ||
19 | * | ||
20 | */ | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/poll.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | |||
25 | #include <linux/mic_common.h> | ||
26 | #include "../common/mic_dev.h" | ||
27 | |||
28 | #include <linux/mic_ioctl.h> | ||
29 | #include "vop_main.h" | ||
30 | |||
31 | /* Helper API to obtain the VOP PCIe device */ | ||
32 | static inline struct device *vop_dev(struct vop_vdev *vdev) | ||
33 | { | ||
34 | return vdev->vpdev->dev.parent; | ||
35 | } | ||
36 | |||
37 | /* Helper API to check if a virtio device is initialized */ | ||
38 | static inline int vop_vdev_inited(struct vop_vdev *vdev) | ||
39 | { | ||
40 | if (!vdev) | ||
41 | return -EINVAL; | ||
42 | /* Device has not been created yet */ | ||
43 | if (!vdev->dd || !vdev->dd->type) { | ||
44 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
45 | __func__, __LINE__, -EINVAL); | ||
46 | return -EINVAL; | ||
47 | } | ||
48 | /* Device has been removed/deleted */ | ||
49 | if (vdev->dd->type == -1) { | ||
50 | dev_dbg(vop_dev(vdev), "%s %d err %d\n", | ||
51 | __func__, __LINE__, -ENODEV); | ||
52 | return -ENODEV; | ||
53 | } | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static void _vop_notify(struct vringh *vrh) | ||
58 | { | ||
59 | struct vop_vringh *vvrh = container_of(vrh, struct vop_vringh, vrh); | ||
60 | struct vop_vdev *vdev = vvrh->vdev; | ||
61 | struct vop_device *vpdev = vdev->vpdev; | ||
62 | s8 db = vdev->dc->h2c_vdev_db; | ||
63 | |||
64 | if (db != -1) | ||
65 | vpdev->hw_ops->send_intr(vpdev, db); | ||
66 | } | ||
67 | |||
68 | static void vop_virtio_init_post(struct vop_vdev *vdev) | ||
69 | { | ||
70 | struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd); | ||
71 | struct vop_device *vpdev = vdev->vpdev; | ||
72 | int i, used_size; | ||
73 | |||
74 | for (i = 0; i < vdev->dd->num_vq; i++) { | ||
75 | used_size = PAGE_ALIGN(sizeof(u16) * 3 + | ||
76 | sizeof(struct vring_used_elem) * | ||
77 | le16_to_cpu(vqconfig->num)); | ||
78 | if (!le64_to_cpu(vqconfig[i].used_address)) { | ||
79 | dev_warn(vop_dev(vdev), "used_address zero??\n"); | ||
80 | continue; | ||
81 | } | ||
82 | vdev->vvr[i].vrh.vring.used = | ||
83 | (void __force *)vpdev->hw_ops->ioremap( | ||
84 | vpdev, | ||
85 | le64_to_cpu(vqconfig[i].used_address), | ||
86 | used_size); | ||
87 | } | ||
88 | |||
89 | vdev->dc->used_address_updated = 0; | ||
90 | |||
91 | dev_info(vop_dev(vdev), "%s: device type %d LINKUP\n", | ||
92 | __func__, vdev->virtio_id); | ||
93 | } | ||
94 | |||
95 | static inline void vop_virtio_device_reset(struct vop_vdev *vdev) | ||
96 | { | ||
97 | int i; | ||
98 | |||
99 | dev_dbg(vop_dev(vdev), "%s: status %d device type %d RESET\n", | ||
100 | __func__, vdev->dd->status, vdev->virtio_id); | ||
101 | |||
102 | for (i = 0; i < vdev->dd->num_vq; i++) | ||
103 | /* | ||
104 | * Avoid lockdep false positive. The + 1 is for the vop | ||
105 | * mutex which is held in the reset devices code path. | ||
106 | */ | ||
107 | mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1); | ||
108 | |||
109 | /* 0 status means "reset" */ | ||
110 | vdev->dd->status = 0; | ||
111 | vdev->dc->vdev_reset = 0; | ||
112 | vdev->dc->host_ack = 1; | ||
113 | |||
114 | for (i = 0; i < vdev->dd->num_vq; i++) { | ||
115 | struct vringh *vrh = &vdev->vvr[i].vrh; | ||
116 | |||
117 | vdev->vvr[i].vring.info->avail_idx = 0; | ||
118 | vrh->completed = 0; | ||
119 | vrh->last_avail_idx = 0; | ||
120 | vrh->last_used_idx = 0; | ||
121 | } | ||
122 | |||
123 | for (i = 0; i < vdev->dd->num_vq; i++) | ||
124 | mutex_unlock(&vdev->vvr[i].vr_mutex); | ||
125 | } | ||
126 | |||
127 | static void vop_virtio_reset_devices(struct vop_info *vi) | ||
128 | { | ||
129 | struct list_head *pos, *tmp; | ||
130 | struct vop_vdev *vdev; | ||
131 | |||
132 | list_for_each_safe(pos, tmp, &vi->vdev_list) { | ||
133 | vdev = list_entry(pos, struct vop_vdev, list); | ||
134 | vop_virtio_device_reset(vdev); | ||
135 | vdev->poll_wake = 1; | ||
136 | wake_up(&vdev->waitq); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | static void vop_bh_handler(struct work_struct *work) | ||
141 | { | ||
142 | struct vop_vdev *vdev = container_of(work, struct vop_vdev, | ||
143 | virtio_bh_work); | ||
144 | |||
145 | if (vdev->dc->used_address_updated) | ||
146 | vop_virtio_init_post(vdev); | ||
147 | |||
148 | if (vdev->dc->vdev_reset) | ||
149 | vop_virtio_device_reset(vdev); | ||
150 | |||
151 | vdev->poll_wake = 1; | ||
152 | wake_up(&vdev->waitq); | ||
153 | } | ||
154 | |||
155 | static irqreturn_t _vop_virtio_intr_handler(int irq, void *data) | ||
156 | { | ||
157 | struct vop_vdev *vdev = data; | ||
158 | struct vop_device *vpdev = vdev->vpdev; | ||
159 | |||
160 | vpdev->hw_ops->ack_interrupt(vpdev, vdev->virtio_db); | ||
161 | schedule_work(&vdev->virtio_bh_work); | ||
162 | return IRQ_HANDLED; | ||
163 | } | ||
164 | |||
165 | static int vop_virtio_config_change(struct vop_vdev *vdev, void *argp) | ||
166 | { | ||
167 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | ||
168 | int ret = 0, retry, i; | ||
169 | struct vop_device *vpdev = vdev->vpdev; | ||
170 | struct vop_info *vi = dev_get_drvdata(&vpdev->dev); | ||
171 | struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev); | ||
172 | s8 db = bootparam->h2c_config_db; | ||
173 | |||
174 | mutex_lock(&vi->vop_mutex); | ||
175 | for (i = 0; i < vdev->dd->num_vq; i++) | ||
176 | mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1); | ||
177 | |||
178 | if (db == -1 || vdev->dd->type == -1) { | ||
179 | ret = -EIO; | ||
180 | goto exit; | ||
181 | } | ||
182 | |||
183 | memcpy(mic_vq_configspace(vdev->dd), argp, vdev->dd->config_len); | ||
184 | vdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; | ||
185 | vpdev->hw_ops->send_intr(vpdev, db); | ||
186 | |||
187 | for (retry = 100; retry--;) { | ||
188 | ret = wait_event_timeout(wake, vdev->dc->guest_ack, | ||
189 | msecs_to_jiffies(100)); | ||
190 | if (ret) | ||
191 | break; | ||
192 | } | ||
193 | |||
194 | dev_dbg(vop_dev(vdev), | ||
195 | "%s %d retry: %d\n", __func__, __LINE__, retry); | ||
196 | vdev->dc->config_change = 0; | ||
197 | vdev->dc->guest_ack = 0; | ||
198 | exit: | ||
199 | for (i = 0; i < vdev->dd->num_vq; i++) | ||
200 | mutex_unlock(&vdev->vvr[i].vr_mutex); | ||
201 | mutex_unlock(&vi->vop_mutex); | ||
202 | return ret; | ||
203 | } | ||
204 | |||
205 | static int vop_copy_dp_entry(struct vop_vdev *vdev, | ||
206 | struct mic_device_desc *argp, __u8 *type, | ||
207 | struct mic_device_desc **devpage) | ||
208 | { | ||
209 | struct vop_device *vpdev = vdev->vpdev; | ||
210 | struct mic_device_desc *devp; | ||
211 | struct mic_vqconfig *vqconfig; | ||
212 | int ret = 0, i; | ||
213 | bool slot_found = false; | ||
214 | |||
215 | vqconfig = mic_vq_config(argp); | ||
216 | for (i = 0; i < argp->num_vq; i++) { | ||
217 | if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) { | ||
218 | ret = -EINVAL; | ||
219 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
220 | __func__, __LINE__, ret); | ||
221 | goto exit; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | /* Find the first free device page entry */ | ||
226 | for (i = sizeof(struct mic_bootparam); | ||
227 | i < MIC_DP_SIZE - mic_total_desc_size(argp); | ||
228 | i += mic_total_desc_size(devp)) { | ||
229 | devp = vpdev->hw_ops->get_dp(vpdev) + i; | ||
230 | if (devp->type == 0 || devp->type == -1) { | ||
231 | slot_found = true; | ||
232 | break; | ||
233 | } | ||
234 | } | ||
235 | if (!slot_found) { | ||
236 | ret = -EINVAL; | ||
237 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
238 | __func__, __LINE__, ret); | ||
239 | goto exit; | ||
240 | } | ||
241 | /* | ||
242 | * Save off the type before doing the memcpy. Type will be set in the | ||
243 | * end after completing all initialization for the new device. | ||
244 | */ | ||
245 | *type = argp->type; | ||
246 | argp->type = 0; | ||
247 | memcpy(devp, argp, mic_desc_size(argp)); | ||
248 | |||
249 | *devpage = devp; | ||
250 | exit: | ||
251 | return ret; | ||
252 | } | ||
253 | |||
254 | static void vop_init_device_ctrl(struct vop_vdev *vdev, | ||
255 | struct mic_device_desc *devpage) | ||
256 | { | ||
257 | struct mic_device_ctrl *dc; | ||
258 | |||
259 | dc = (void *)devpage + mic_aligned_desc_size(devpage); | ||
260 | |||
261 | dc->config_change = 0; | ||
262 | dc->guest_ack = 0; | ||
263 | dc->vdev_reset = 0; | ||
264 | dc->host_ack = 0; | ||
265 | dc->used_address_updated = 0; | ||
266 | dc->c2h_vdev_db = -1; | ||
267 | dc->h2c_vdev_db = -1; | ||
268 | vdev->dc = dc; | ||
269 | } | ||
270 | |||
271 | static int vop_virtio_add_device(struct vop_vdev *vdev, | ||
272 | struct mic_device_desc *argp) | ||
273 | { | ||
274 | struct vop_info *vi = vdev->vi; | ||
275 | struct vop_device *vpdev = vi->vpdev; | ||
276 | struct mic_device_desc *dd = NULL; | ||
277 | struct mic_vqconfig *vqconfig; | ||
278 | int vr_size, i, j, ret; | ||
279 | u8 type = 0; | ||
280 | s8 db = -1; | ||
281 | char irqname[16]; | ||
282 | struct mic_bootparam *bootparam; | ||
283 | u16 num; | ||
284 | dma_addr_t vr_addr; | ||
285 | |||
286 | bootparam = vpdev->hw_ops->get_dp(vpdev); | ||
287 | init_waitqueue_head(&vdev->waitq); | ||
288 | INIT_LIST_HEAD(&vdev->list); | ||
289 | vdev->vpdev = vpdev; | ||
290 | |||
291 | ret = vop_copy_dp_entry(vdev, argp, &type, &dd); | ||
292 | if (ret) { | ||
293 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
294 | __func__, __LINE__, ret); | ||
295 | kfree(vdev); | ||
296 | return ret; | ||
297 | } | ||
298 | |||
299 | vop_init_device_ctrl(vdev, dd); | ||
300 | |||
301 | vdev->dd = dd; | ||
302 | vdev->virtio_id = type; | ||
303 | vqconfig = mic_vq_config(dd); | ||
304 | INIT_WORK(&vdev->virtio_bh_work, vop_bh_handler); | ||
305 | |||
306 | for (i = 0; i < dd->num_vq; i++) { | ||
307 | struct vop_vringh *vvr = &vdev->vvr[i]; | ||
308 | struct mic_vring *vr = &vdev->vvr[i].vring; | ||
309 | |||
310 | num = le16_to_cpu(vqconfig[i].num); | ||
311 | mutex_init(&vvr->vr_mutex); | ||
312 | vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) + | ||
313 | sizeof(struct _mic_vring_info)); | ||
314 | vr->va = (void *) | ||
315 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
316 | get_order(vr_size)); | ||
317 | if (!vr->va) { | ||
318 | ret = -ENOMEM; | ||
319 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
320 | __func__, __LINE__, ret); | ||
321 | goto err; | ||
322 | } | ||
323 | vr->len = vr_size; | ||
324 | vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); | ||
325 | vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i); | ||
326 | vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size, | ||
327 | DMA_BIDIRECTIONAL); | ||
328 | if (dma_mapping_error(&vpdev->dev, vr_addr)) { | ||
329 | free_pages((unsigned long)vr->va, get_order(vr_size)); | ||
330 | ret = -ENOMEM; | ||
331 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
332 | __func__, __LINE__, ret); | ||
333 | goto err; | ||
334 | } | ||
335 | vqconfig[i].address = cpu_to_le64(vr_addr); | ||
336 | |||
337 | vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); | ||
338 | ret = vringh_init_kern(&vvr->vrh, | ||
339 | *(u32 *)mic_vq_features(vdev->dd), | ||
340 | num, false, vr->vr.desc, vr->vr.avail, | ||
341 | vr->vr.used); | ||
342 | if (ret) { | ||
343 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
344 | __func__, __LINE__, ret); | ||
345 | goto err; | ||
346 | } | ||
347 | vringh_kiov_init(&vvr->riov, NULL, 0); | ||
348 | vringh_kiov_init(&vvr->wiov, NULL, 0); | ||
349 | vvr->head = USHRT_MAX; | ||
350 | vvr->vdev = vdev; | ||
351 | vvr->vrh.notify = _vop_notify; | ||
352 | dev_dbg(&vpdev->dev, | ||
353 | "%s %d index %d va %p info %p vr_size 0x%x\n", | ||
354 | __func__, __LINE__, i, vr->va, vr->info, vr_size); | ||
355 | vvr->buf = (void *)__get_free_pages(GFP_KERNEL, | ||
356 | get_order(VOP_INT_DMA_BUF_SIZE)); | ||
357 | vvr->buf_da = dma_map_single(&vpdev->dev, | ||
358 | vvr->buf, VOP_INT_DMA_BUF_SIZE, | ||
359 | DMA_BIDIRECTIONAL); | ||
360 | } | ||
361 | |||
362 | snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index, | ||
363 | vdev->virtio_id); | ||
364 | vdev->virtio_db = vpdev->hw_ops->next_db(vpdev); | ||
365 | vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev, | ||
366 | _vop_virtio_intr_handler, irqname, vdev, | ||
367 | vdev->virtio_db); | ||
368 | if (IS_ERR(vdev->virtio_cookie)) { | ||
369 | ret = PTR_ERR(vdev->virtio_cookie); | ||
370 | dev_dbg(&vpdev->dev, "request irq failed\n"); | ||
371 | goto err; | ||
372 | } | ||
373 | |||
374 | vdev->dc->c2h_vdev_db = vdev->virtio_db; | ||
375 | |||
376 | /* | ||
377 | * Order the type update with previous stores. This write barrier | ||
378 | * is paired with the corresponding read barrier before the uncached | ||
379 | * system memory read of the type, on the card while scanning the | ||
380 | * device page. | ||
381 | */ | ||
382 | smp_wmb(); | ||
383 | dd->type = type; | ||
384 | argp->type = type; | ||
385 | |||
386 | if (bootparam) { | ||
387 | db = bootparam->h2c_config_db; | ||
388 | if (db != -1) | ||
389 | vpdev->hw_ops->send_intr(vpdev, db); | ||
390 | } | ||
391 | dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db); | ||
392 | return 0; | ||
393 | err: | ||
394 | vqconfig = mic_vq_config(dd); | ||
395 | for (j = 0; j < i; j++) { | ||
396 | struct vop_vringh *vvr = &vdev->vvr[j]; | ||
397 | |||
398 | dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address), | ||
399 | vvr->vring.len, DMA_BIDIRECTIONAL); | ||
400 | free_pages((unsigned long)vvr->vring.va, | ||
401 | get_order(vvr->vring.len)); | ||
402 | } | ||
403 | return ret; | ||
404 | } | ||
405 | |||
406 | static void vop_dev_remove(struct vop_info *pvi, struct mic_device_ctrl *devp, | ||
407 | struct vop_device *vpdev) | ||
408 | { | ||
409 | struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev); | ||
410 | s8 db; | ||
411 | int ret, retry; | ||
412 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | ||
413 | |||
414 | devp->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; | ||
415 | db = bootparam->h2c_config_db; | ||
416 | if (db != -1) | ||
417 | vpdev->hw_ops->send_intr(vpdev, db); | ||
418 | else | ||
419 | goto done; | ||
420 | for (retry = 15; retry--;) { | ||
421 | ret = wait_event_timeout(wake, devp->guest_ack, | ||
422 | msecs_to_jiffies(1000)); | ||
423 | if (ret) | ||
424 | break; | ||
425 | } | ||
426 | done: | ||
427 | devp->config_change = 0; | ||
428 | devp->guest_ack = 0; | ||
429 | } | ||
430 | |||
431 | static void vop_virtio_del_device(struct vop_vdev *vdev) | ||
432 | { | ||
433 | struct vop_info *vi = vdev->vi; | ||
434 | struct vop_device *vpdev = vdev->vpdev; | ||
435 | int i; | ||
436 | struct mic_vqconfig *vqconfig; | ||
437 | struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev); | ||
438 | |||
439 | if (!bootparam) | ||
440 | goto skip_hot_remove; | ||
441 | vop_dev_remove(vi, vdev->dc, vpdev); | ||
442 | skip_hot_remove: | ||
443 | vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev); | ||
444 | flush_work(&vdev->virtio_bh_work); | ||
445 | vqconfig = mic_vq_config(vdev->dd); | ||
446 | for (i = 0; i < vdev->dd->num_vq; i++) { | ||
447 | struct vop_vringh *vvr = &vdev->vvr[i]; | ||
448 | |||
449 | dma_unmap_single(&vpdev->dev, | ||
450 | vvr->buf_da, VOP_INT_DMA_BUF_SIZE, | ||
451 | DMA_BIDIRECTIONAL); | ||
452 | free_pages((unsigned long)vvr->buf, | ||
453 | get_order(VOP_INT_DMA_BUF_SIZE)); | ||
454 | vringh_kiov_cleanup(&vvr->riov); | ||
455 | vringh_kiov_cleanup(&vvr->wiov); | ||
456 | dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address), | ||
457 | vvr->vring.len, DMA_BIDIRECTIONAL); | ||
458 | free_pages((unsigned long)vvr->vring.va, | ||
459 | get_order(vvr->vring.len)); | ||
460 | } | ||
461 | /* | ||
462 | * Order the type update with previous stores. This write barrier | ||
463 | * is paired with the corresponding read barrier before the uncached | ||
464 | * system memory read of the type, on the card while scanning the | ||
465 | * device page. | ||
466 | */ | ||
467 | smp_wmb(); | ||
468 | vdev->dd->type = -1; | ||
469 | } | ||
470 | |||
471 | /* | ||
472 | * vop_sync_dma - Wrapper for synchronous DMAs. | ||
473 | * | ||
474 | * @dev - The address of the pointer to the device instance used | ||
475 | * for DMA registration. | ||
476 | * @dst - destination DMA address. | ||
477 | * @src - source DMA address. | ||
478 | * @len - size of the transfer. | ||
479 | * | ||
480 | * Return DMA_SUCCESS on success | ||
481 | */ | ||
482 | static int vop_sync_dma(struct vop_vdev *vdev, dma_addr_t dst, dma_addr_t src, | ||
483 | size_t len) | ||
484 | { | ||
485 | int err = 0; | ||
486 | struct dma_device *ddev; | ||
487 | struct dma_async_tx_descriptor *tx; | ||
488 | struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev); | ||
489 | struct dma_chan *vop_ch = vi->dma_ch; | ||
490 | |||
491 | if (!vop_ch) { | ||
492 | err = -EBUSY; | ||
493 | goto error; | ||
494 | } | ||
495 | ddev = vop_ch->device; | ||
496 | tx = ddev->device_prep_dma_memcpy(vop_ch, dst, src, len, | ||
497 | DMA_PREP_FENCE); | ||
498 | if (!tx) { | ||
499 | err = -ENOMEM; | ||
500 | goto error; | ||
501 | } else { | ||
502 | dma_cookie_t cookie; | ||
503 | |||
504 | cookie = tx->tx_submit(tx); | ||
505 | if (dma_submit_error(cookie)) { | ||
506 | err = -ENOMEM; | ||
507 | goto error; | ||
508 | } | ||
509 | dma_async_issue_pending(vop_ch); | ||
510 | err = dma_sync_wait(vop_ch, cookie); | ||
511 | } | ||
512 | error: | ||
513 | if (err) | ||
514 | dev_err(&vi->vpdev->dev, "%s %d err %d\n", | ||
515 | __func__, __LINE__, err); | ||
516 | return err; | ||
517 | } | ||
518 | |||
519 | #define VOP_USE_DMA true | ||
520 | |||
521 | /* | ||
522 | * Initiates the copies across the PCIe bus from card memory to a user | ||
523 | * space buffer. When transfers are done using DMA, source/destination | ||
524 | * addresses and transfer length must follow the alignment requirements of | ||
525 | * the MIC DMA engine. | ||
526 | */ | ||
527 | static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf, | ||
528 | size_t len, u64 daddr, size_t dlen, | ||
529 | int vr_idx) | ||
530 | { | ||
531 | struct vop_device *vpdev = vdev->vpdev; | ||
532 | void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len); | ||
533 | struct vop_vringh *vvr = &vdev->vvr[vr_idx]; | ||
534 | struct vop_info *vi = dev_get_drvdata(&vpdev->dev); | ||
535 | size_t dma_alignment = 1 << vi->dma_ch->device->copy_align; | ||
536 | bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1); | ||
537 | size_t dma_offset, partlen; | ||
538 | int err; | ||
539 | |||
540 | if (!VOP_USE_DMA) { | ||
541 | if (copy_to_user(ubuf, (void __force *)dbuf, len)) { | ||
542 | err = -EFAULT; | ||
543 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
544 | __func__, __LINE__, err); | ||
545 | goto err; | ||
546 | } | ||
547 | vdev->in_bytes += len; | ||
548 | err = 0; | ||
549 | goto err; | ||
550 | } | ||
551 | |||
552 | dma_offset = daddr - round_down(daddr, dma_alignment); | ||
553 | daddr -= dma_offset; | ||
554 | len += dma_offset; | ||
555 | /* | ||
556 | * X100 uses DMA addresses as seen by the card so adding | ||
557 | * the aperture base is not required for DMA. However x200 | ||
558 | * requires DMA addresses to be an offset into the bar so | ||
559 | * add the aperture base for x200. | ||
560 | */ | ||
561 | if (x200) | ||
562 | daddr += vpdev->aper->pa; | ||
563 | while (len) { | ||
564 | partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE); | ||
565 | err = vop_sync_dma(vdev, vvr->buf_da, daddr, | ||
566 | ALIGN(partlen, dma_alignment)); | ||
567 | if (err) { | ||
568 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
569 | __func__, __LINE__, err); | ||
570 | goto err; | ||
571 | } | ||
572 | if (copy_to_user(ubuf, vvr->buf + dma_offset, | ||
573 | partlen - dma_offset)) { | ||
574 | err = -EFAULT; | ||
575 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
576 | __func__, __LINE__, err); | ||
577 | goto err; | ||
578 | } | ||
579 | daddr += partlen; | ||
580 | ubuf += partlen; | ||
581 | dbuf += partlen; | ||
582 | vdev->in_bytes_dma += partlen; | ||
583 | vdev->in_bytes += partlen; | ||
584 | len -= partlen; | ||
585 | dma_offset = 0; | ||
586 | } | ||
587 | err = 0; | ||
588 | err: | ||
589 | vpdev->hw_ops->iounmap(vpdev, dbuf); | ||
590 | dev_dbg(vop_dev(vdev), | ||
591 | "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n", | ||
592 | __func__, ubuf, dbuf, len, vr_idx); | ||
593 | return err; | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | * Initiates copies across the PCIe bus from a user space buffer to card | ||
598 | * memory. When transfers are done using DMA, source/destination addresses | ||
599 | * and transfer length must follow the alignment requirements of the MIC | ||
600 | * DMA engine. | ||
601 | */ | ||
602 | static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf, | ||
603 | size_t len, u64 daddr, size_t dlen, | ||
604 | int vr_idx) | ||
605 | { | ||
606 | struct vop_device *vpdev = vdev->vpdev; | ||
607 | void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len); | ||
608 | struct vop_vringh *vvr = &vdev->vvr[vr_idx]; | ||
609 | struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev); | ||
610 | size_t dma_alignment = 1 << vi->dma_ch->device->copy_align; | ||
611 | bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1); | ||
612 | size_t partlen; | ||
613 | bool dma = VOP_USE_DMA; | ||
614 | int err = 0; | ||
615 | |||
616 | if (daddr & (dma_alignment - 1)) { | ||
617 | vdev->tx_dst_unaligned += len; | ||
618 | dma = false; | ||
619 | } else if (ALIGN(len, dma_alignment) > dlen) { | ||
620 | vdev->tx_len_unaligned += len; | ||
621 | dma = false; | ||
622 | } | ||
623 | |||
624 | if (!dma) | ||
625 | goto memcpy; | ||
626 | |||
627 | /* | ||
628 | * X100 uses DMA addresses as seen by the card so adding | ||
629 | * the aperture base is not required for DMA. However x200 | ||
630 | * requires DMA addresses to be an offset into the bar so | ||
631 | * add the aperture base for x200. | ||
632 | */ | ||
633 | if (x200) | ||
634 | daddr += vpdev->aper->pa; | ||
635 | while (len) { | ||
636 | partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE); | ||
637 | |||
638 | if (copy_from_user(vvr->buf, ubuf, partlen)) { | ||
639 | err = -EFAULT; | ||
640 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
641 | __func__, __LINE__, err); | ||
642 | goto err; | ||
643 | } | ||
644 | err = vop_sync_dma(vdev, daddr, vvr->buf_da, | ||
645 | ALIGN(partlen, dma_alignment)); | ||
646 | if (err) { | ||
647 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
648 | __func__, __LINE__, err); | ||
649 | goto err; | ||
650 | } | ||
651 | daddr += partlen; | ||
652 | ubuf += partlen; | ||
653 | dbuf += partlen; | ||
654 | vdev->out_bytes_dma += partlen; | ||
655 | vdev->out_bytes += partlen; | ||
656 | len -= partlen; | ||
657 | } | ||
658 | memcpy: | ||
659 | /* | ||
660 | * We are copying to IO below and should ideally use something | ||
661 | * like copy_from_user_toio(..) if it existed. | ||
662 | */ | ||
663 | if (copy_from_user((void __force *)dbuf, ubuf, len)) { | ||
664 | err = -EFAULT; | ||
665 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
666 | __func__, __LINE__, err); | ||
667 | goto err; | ||
668 | } | ||
669 | vdev->out_bytes += len; | ||
670 | err = 0; | ||
671 | err: | ||
672 | vpdev->hw_ops->iounmap(vpdev, dbuf); | ||
673 | dev_dbg(vop_dev(vdev), | ||
674 | "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n", | ||
675 | __func__, ubuf, dbuf, len, vr_idx); | ||
676 | return err; | ||
677 | } | ||
678 | |||
679 | #define MIC_VRINGH_READ true | ||
680 | |||
681 | /* Determine the total number of bytes consumed in a VRINGH KIOV */ | ||
682 | static inline u32 vop_vringh_iov_consumed(struct vringh_kiov *iov) | ||
683 | { | ||
684 | int i; | ||
685 | u32 total = iov->consumed; | ||
686 | |||
687 | for (i = 0; i < iov->i; i++) | ||
688 | total += iov->iov[i].iov_len; | ||
689 | return total; | ||
690 | } | ||
691 | |||
692 | /* | ||
693 | * Traverse the VRINGH KIOV and issue the APIs to trigger the copies. | ||
694 | * This API is heavily based on the vringh_iov_xfer(..) implementation | ||
695 | * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..) | ||
696 | * and vringh_iov_push_kern(..) directly is because there is no | ||
697 | * way to override the VRINGH xfer(..) routines as of v3.10. | ||
698 | */ | ||
699 | static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov, | ||
700 | void __user *ubuf, size_t len, bool read, int vr_idx, | ||
701 | size_t *out_len) | ||
702 | { | ||
703 | int ret = 0; | ||
704 | size_t partlen, tot_len = 0; | ||
705 | |||
706 | while (len && iov->i < iov->used) { | ||
707 | struct kvec *kiov = &iov->iov[iov->i]; | ||
708 | |||
709 | partlen = min(kiov->iov_len, len); | ||
710 | if (read) | ||
711 | ret = vop_virtio_copy_to_user(vdev, ubuf, partlen, | ||
712 | (u64)kiov->iov_base, | ||
713 | kiov->iov_len, | ||
714 | vr_idx); | ||
715 | else | ||
716 | ret = vop_virtio_copy_from_user(vdev, ubuf, partlen, | ||
717 | (u64)kiov->iov_base, | ||
718 | kiov->iov_len, | ||
719 | vr_idx); | ||
720 | if (ret) { | ||
721 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
722 | __func__, __LINE__, ret); | ||
723 | break; | ||
724 | } | ||
725 | len -= partlen; | ||
726 | ubuf += partlen; | ||
727 | tot_len += partlen; | ||
728 | iov->consumed += partlen; | ||
729 | kiov->iov_len -= partlen; | ||
730 | kiov->iov_base += partlen; | ||
731 | if (!kiov->iov_len) { | ||
732 | /* Fix up old iov element then increment. */ | ||
733 | kiov->iov_len = iov->consumed; | ||
734 | kiov->iov_base -= iov->consumed; | ||
735 | |||
736 | iov->consumed = 0; | ||
737 | iov->i++; | ||
738 | } | ||
739 | } | ||
740 | *out_len = tot_len; | ||
741 | return ret; | ||
742 | } | ||
743 | |||
744 | /* | ||
745 | * Use the standard VRINGH infrastructure in the kernel to fetch new | ||
746 | * descriptors, initiate the copies and update the used ring. | ||
747 | */ | ||
748 | static int _vop_virtio_copy(struct vop_vdev *vdev, struct mic_copy_desc *copy) | ||
749 | { | ||
750 | int ret = 0; | ||
751 | u32 iovcnt = copy->iovcnt; | ||
752 | struct iovec iov; | ||
753 | struct iovec __user *u_iov = copy->iov; | ||
754 | void __user *ubuf = NULL; | ||
755 | struct vop_vringh *vvr = &vdev->vvr[copy->vr_idx]; | ||
756 | struct vringh_kiov *riov = &vvr->riov; | ||
757 | struct vringh_kiov *wiov = &vvr->wiov; | ||
758 | struct vringh *vrh = &vvr->vrh; | ||
759 | u16 *head = &vvr->head; | ||
760 | struct mic_vring *vr = &vvr->vring; | ||
761 | size_t len = 0, out_len; | ||
762 | |||
763 | copy->out_len = 0; | ||
764 | /* Fetch a new IOVEC if all previous elements have been processed */ | ||
765 | if (riov->i == riov->used && wiov->i == wiov->used) { | ||
766 | ret = vringh_getdesc_kern(vrh, riov, wiov, | ||
767 | head, GFP_KERNEL); | ||
768 | /* Check if there are available descriptors */ | ||
769 | if (ret <= 0) | ||
770 | return ret; | ||
771 | } | ||
772 | while (iovcnt) { | ||
773 | if (!len) { | ||
774 | /* Copy over a new iovec from user space. */ | ||
775 | ret = copy_from_user(&iov, u_iov, sizeof(*u_iov)); | ||
776 | if (ret) { | ||
777 | ret = -EINVAL; | ||
778 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
779 | __func__, __LINE__, ret); | ||
780 | break; | ||
781 | } | ||
782 | len = iov.iov_len; | ||
783 | ubuf = iov.iov_base; | ||
784 | } | ||
785 | /* Issue all the read descriptors first */ | ||
786 | ret = vop_vringh_copy(vdev, riov, ubuf, len, | ||
787 | MIC_VRINGH_READ, copy->vr_idx, &out_len); | ||
788 | if (ret) { | ||
789 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
790 | __func__, __LINE__, ret); | ||
791 | break; | ||
792 | } | ||
793 | len -= out_len; | ||
794 | ubuf += out_len; | ||
795 | copy->out_len += out_len; | ||
796 | /* Issue the write descriptors next */ | ||
797 | ret = vop_vringh_copy(vdev, wiov, ubuf, len, | ||
798 | !MIC_VRINGH_READ, copy->vr_idx, &out_len); | ||
799 | if (ret) { | ||
800 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
801 | __func__, __LINE__, ret); | ||
802 | break; | ||
803 | } | ||
804 | len -= out_len; | ||
805 | ubuf += out_len; | ||
806 | copy->out_len += out_len; | ||
807 | if (!len) { | ||
808 | /* One user space iovec is now completed */ | ||
809 | iovcnt--; | ||
810 | u_iov++; | ||
811 | } | ||
812 | /* Exit loop if all elements in KIOVs have been processed. */ | ||
813 | if (riov->i == riov->used && wiov->i == wiov->used) | ||
814 | break; | ||
815 | } | ||
816 | /* | ||
817 | * Update the used ring if a descriptor was available and some data was | ||
818 | * copied in/out and the user asked for a used ring update. | ||
819 | */ | ||
820 | if (*head != USHRT_MAX && copy->out_len && copy->update_used) { | ||
821 | u32 total = 0; | ||
822 | |||
823 | /* Determine the total data consumed */ | ||
824 | total += vop_vringh_iov_consumed(riov); | ||
825 | total += vop_vringh_iov_consumed(wiov); | ||
826 | vringh_complete_kern(vrh, *head, total); | ||
827 | *head = USHRT_MAX; | ||
828 | if (vringh_need_notify_kern(vrh) > 0) | ||
829 | vringh_notify(vrh); | ||
830 | vringh_kiov_cleanup(riov); | ||
831 | vringh_kiov_cleanup(wiov); | ||
832 | /* Update avail idx for user space */ | ||
833 | vr->info->avail_idx = vrh->last_avail_idx; | ||
834 | } | ||
835 | return ret; | ||
836 | } | ||
837 | |||
838 | static inline int vop_verify_copy_args(struct vop_vdev *vdev, | ||
839 | struct mic_copy_desc *copy) | ||
840 | { | ||
841 | if (!vdev || copy->vr_idx >= vdev->dd->num_vq) | ||
842 | return -EINVAL; | ||
843 | return 0; | ||
844 | } | ||
845 | |||
846 | /* Copy a specified number of virtio descriptors in a chain */ | ||
847 | static int vop_virtio_copy_desc(struct vop_vdev *vdev, | ||
848 | struct mic_copy_desc *copy) | ||
849 | { | ||
850 | int err; | ||
851 | struct vop_vringh *vvr; | ||
852 | |||
853 | err = vop_verify_copy_args(vdev, copy); | ||
854 | if (err) | ||
855 | return err; | ||
856 | |||
857 | vvr = &vdev->vvr[copy->vr_idx]; | ||
858 | mutex_lock(&vvr->vr_mutex); | ||
859 | if (!vop_vdevup(vdev)) { | ||
860 | err = -ENODEV; | ||
861 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
862 | __func__, __LINE__, err); | ||
863 | goto err; | ||
864 | } | ||
865 | err = _vop_virtio_copy(vdev, copy); | ||
866 | if (err) { | ||
867 | dev_err(vop_dev(vdev), "%s %d err %d\n", | ||
868 | __func__, __LINE__, err); | ||
869 | } | ||
870 | err: | ||
871 | mutex_unlock(&vvr->vr_mutex); | ||
872 | return err; | ||
873 | } | ||
874 | |||
875 | static int vop_open(struct inode *inode, struct file *f) | ||
876 | { | ||
877 | struct vop_vdev *vdev; | ||
878 | struct vop_info *vi = container_of(f->private_data, | ||
879 | struct vop_info, miscdev); | ||
880 | |||
881 | vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); | ||
882 | if (!vdev) | ||
883 | return -ENOMEM; | ||
884 | vdev->vi = vi; | ||
885 | mutex_init(&vdev->vdev_mutex); | ||
886 | f->private_data = vdev; | ||
887 | init_completion(&vdev->destroy); | ||
888 | complete(&vdev->destroy); | ||
889 | return 0; | ||
890 | } | ||
891 | |||
892 | static int vop_release(struct inode *inode, struct file *f) | ||
893 | { | ||
894 | struct vop_vdev *vdev = f->private_data, *vdev_tmp; | ||
895 | struct vop_info *vi = vdev->vi; | ||
896 | struct list_head *pos, *tmp; | ||
897 | bool found = false; | ||
898 | |||
899 | mutex_lock(&vdev->vdev_mutex); | ||
900 | if (vdev->deleted) | ||
901 | goto unlock; | ||
902 | mutex_lock(&vi->vop_mutex); | ||
903 | list_for_each_safe(pos, tmp, &vi->vdev_list) { | ||
904 | vdev_tmp = list_entry(pos, struct vop_vdev, list); | ||
905 | if (vdev == vdev_tmp) { | ||
906 | vop_virtio_del_device(vdev); | ||
907 | list_del(pos); | ||
908 | found = true; | ||
909 | break; | ||
910 | } | ||
911 | } | ||
912 | mutex_unlock(&vi->vop_mutex); | ||
913 | unlock: | ||
914 | mutex_unlock(&vdev->vdev_mutex); | ||
915 | if (!found) | ||
916 | wait_for_completion(&vdev->destroy); | ||
917 | f->private_data = NULL; | ||
918 | kfree(vdev); | ||
919 | return 0; | ||
920 | } | ||
921 | |||
922 | static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | ||
923 | { | ||
924 | struct vop_vdev *vdev = f->private_data; | ||
925 | struct vop_info *vi = vdev->vi; | ||
926 | void __user *argp = (void __user *)arg; | ||
927 | int ret; | ||
928 | |||
929 | switch (cmd) { | ||
930 | case MIC_VIRTIO_ADD_DEVICE: | ||
931 | { | ||
932 | struct mic_device_desc dd, *dd_config; | ||
933 | |||
934 | if (copy_from_user(&dd, argp, sizeof(dd))) | ||
935 | return -EFAULT; | ||
936 | |||
937 | if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE || | ||
938 | dd.num_vq > MIC_MAX_VRINGS) | ||
939 | return -EINVAL; | ||
940 | |||
941 | dd_config = kzalloc(mic_desc_size(&dd), GFP_KERNEL); | ||
942 | if (!dd_config) | ||
943 | return -ENOMEM; | ||
944 | if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) { | ||
945 | ret = -EFAULT; | ||
946 | goto free_ret; | ||
947 | } | ||
948 | mutex_lock(&vdev->vdev_mutex); | ||
949 | mutex_lock(&vi->vop_mutex); | ||
950 | ret = vop_virtio_add_device(vdev, dd_config); | ||
951 | if (ret) | ||
952 | goto unlock_ret; | ||
953 | list_add_tail(&vdev->list, &vi->vdev_list); | ||
954 | unlock_ret: | ||
955 | mutex_unlock(&vi->vop_mutex); | ||
956 | mutex_unlock(&vdev->vdev_mutex); | ||
957 | free_ret: | ||
958 | kfree(dd_config); | ||
959 | return ret; | ||
960 | } | ||
961 | case MIC_VIRTIO_COPY_DESC: | ||
962 | { | ||
963 | struct mic_copy_desc copy; | ||
964 | |||
965 | mutex_lock(&vdev->vdev_mutex); | ||
966 | ret = vop_vdev_inited(vdev); | ||
967 | if (ret) | ||
968 | goto _unlock_ret; | ||
969 | |||
970 | if (copy_from_user(©, argp, sizeof(copy))) { | ||
971 | ret = -EFAULT; | ||
972 | goto _unlock_ret; | ||
973 | } | ||
974 | |||
975 | ret = vop_virtio_copy_desc(vdev, ©); | ||
976 | if (ret < 0) | ||
977 | goto _unlock_ret; | ||
978 | if (copy_to_user( | ||
979 | &((struct mic_copy_desc __user *)argp)->out_len, | ||
980 | ©.out_len, sizeof(copy.out_len))) | ||
981 | ret = -EFAULT; | ||
982 | _unlock_ret: | ||
983 | mutex_unlock(&vdev->vdev_mutex); | ||
984 | return ret; | ||
985 | } | ||
986 | case MIC_VIRTIO_CONFIG_CHANGE: | ||
987 | { | ||
988 | void *buf; | ||
989 | |||
990 | mutex_lock(&vdev->vdev_mutex); | ||
991 | ret = vop_vdev_inited(vdev); | ||
992 | if (ret) | ||
993 | goto __unlock_ret; | ||
994 | buf = kzalloc(vdev->dd->config_len, GFP_KERNEL); | ||
995 | if (!buf) { | ||
996 | ret = -ENOMEM; | ||
997 | goto __unlock_ret; | ||
998 | } | ||
999 | if (copy_from_user(buf, argp, vdev->dd->config_len)) { | ||
1000 | ret = -EFAULT; | ||
1001 | goto done; | ||
1002 | } | ||
1003 | ret = vop_virtio_config_change(vdev, buf); | ||
1004 | done: | ||
1005 | kfree(buf); | ||
1006 | __unlock_ret: | ||
1007 | mutex_unlock(&vdev->vdev_mutex); | ||
1008 | return ret; | ||
1009 | } | ||
1010 | default: | ||
1011 | return -ENOIOCTLCMD; | ||
1012 | }; | ||
1013 | return 0; | ||
1014 | } | ||
1015 | |||
1016 | /* | ||
1017 | * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and | ||
1018 | * not when previously enqueued buffers may be available. This means that | ||
1019 | * in the card->host (TX) path, when userspace is unblocked by poll it | ||
1020 | * must drain all available descriptors or it can stall. | ||
1021 | */ | ||
1022 | static unsigned int vop_poll(struct file *f, poll_table *wait) | ||
1023 | { | ||
1024 | struct vop_vdev *vdev = f->private_data; | ||
1025 | int mask = 0; | ||
1026 | |||
1027 | mutex_lock(&vdev->vdev_mutex); | ||
1028 | if (vop_vdev_inited(vdev)) { | ||
1029 | mask = POLLERR; | ||
1030 | goto done; | ||
1031 | } | ||
1032 | poll_wait(f, &vdev->waitq, wait); | ||
1033 | if (vop_vdev_inited(vdev)) { | ||
1034 | mask = POLLERR; | ||
1035 | } else if (vdev->poll_wake) { | ||
1036 | vdev->poll_wake = 0; | ||
1037 | mask = POLLIN | POLLOUT; | ||
1038 | } | ||
1039 | done: | ||
1040 | mutex_unlock(&vdev->vdev_mutex); | ||
1041 | return mask; | ||
1042 | } | ||
1043 | |||
1044 | static inline int | ||
1045 | vop_query_offset(struct vop_vdev *vdev, unsigned long offset, | ||
1046 | unsigned long *size, unsigned long *pa) | ||
1047 | { | ||
1048 | struct vop_device *vpdev = vdev->vpdev; | ||
1049 | unsigned long start = MIC_DP_SIZE; | ||
1050 | int i; | ||
1051 | |||
1052 | /* | ||
1053 | * MMAP interface is as follows: | ||
1054 | * offset region | ||
1055 | * 0x0 virtio device_page | ||
1056 | * 0x1000 first vring | ||
1057 | * 0x1000 + size of 1st vring second vring | ||
1058 | * .... | ||
1059 | */ | ||
1060 | if (!offset) { | ||
1061 | *pa = virt_to_phys(vpdev->hw_ops->get_dp(vpdev)); | ||
1062 | *size = MIC_DP_SIZE; | ||
1063 | return 0; | ||
1064 | } | ||
1065 | |||
1066 | for (i = 0; i < vdev->dd->num_vq; i++) { | ||
1067 | struct vop_vringh *vvr = &vdev->vvr[i]; | ||
1068 | |||
1069 | if (offset == start) { | ||
1070 | *pa = virt_to_phys(vvr->vring.va); | ||
1071 | *size = vvr->vring.len; | ||
1072 | return 0; | ||
1073 | } | ||
1074 | start += vvr->vring.len; | ||
1075 | } | ||
1076 | return -1; | ||
1077 | } | ||
1078 | |||
1079 | /* | ||
1080 | * Maps the device page and virtio rings to user space for readonly access. | ||
1081 | */ | ||
1082 | static int vop_mmap(struct file *f, struct vm_area_struct *vma) | ||
1083 | { | ||
1084 | struct vop_vdev *vdev = f->private_data; | ||
1085 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | ||
1086 | unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; | ||
1087 | int i, err; | ||
1088 | |||
1089 | err = vop_vdev_inited(vdev); | ||
1090 | if (err) | ||
1091 | goto ret; | ||
1092 | if (vma->vm_flags & VM_WRITE) { | ||
1093 | err = -EACCES; | ||
1094 | goto ret; | ||
1095 | } | ||
1096 | while (size_rem) { | ||
1097 | i = vop_query_offset(vdev, offset, &size, &pa); | ||
1098 | if (i < 0) { | ||
1099 | err = -EINVAL; | ||
1100 | goto ret; | ||
1101 | } | ||
1102 | err = remap_pfn_range(vma, vma->vm_start + offset, | ||
1103 | pa >> PAGE_SHIFT, size, | ||
1104 | vma->vm_page_prot); | ||
1105 | if (err) | ||
1106 | goto ret; | ||
1107 | size_rem -= size; | ||
1108 | offset += size; | ||
1109 | } | ||
1110 | ret: | ||
1111 | return err; | ||
1112 | } | ||
1113 | |||
1114 | static const struct file_operations vop_fops = { | ||
1115 | .open = vop_open, | ||
1116 | .release = vop_release, | ||
1117 | .unlocked_ioctl = vop_ioctl, | ||
1118 | .poll = vop_poll, | ||
1119 | .mmap = vop_mmap, | ||
1120 | .owner = THIS_MODULE, | ||
1121 | }; | ||
1122 | |||
1123 | int vop_host_init(struct vop_info *vi) | ||
1124 | { | ||
1125 | int rc; | ||
1126 | struct miscdevice *mdev; | ||
1127 | struct vop_device *vpdev = vi->vpdev; | ||
1128 | |||
1129 | INIT_LIST_HEAD(&vi->vdev_list); | ||
1130 | vi->dma_ch = vpdev->dma_ch; | ||
1131 | mdev = &vi->miscdev; | ||
1132 | mdev->minor = MISC_DYNAMIC_MINOR; | ||
1133 | snprintf(vi->name, sizeof(vi->name), "vop_virtio%d", vpdev->index); | ||
1134 | mdev->name = vi->name; | ||
1135 | mdev->fops = &vop_fops; | ||
1136 | mdev->parent = &vpdev->dev; | ||
1137 | |||
1138 | rc = misc_register(mdev); | ||
1139 | if (rc) | ||
1140 | dev_err(&vpdev->dev, "%s failed rc %d\n", __func__, rc); | ||
1141 | return rc; | ||
1142 | } | ||
1143 | |||
1144 | void vop_host_uninit(struct vop_info *vi) | ||
1145 | { | ||
1146 | struct list_head *pos, *tmp; | ||
1147 | struct vop_vdev *vdev; | ||
1148 | |||
1149 | mutex_lock(&vi->vop_mutex); | ||
1150 | vop_virtio_reset_devices(vi); | ||
1151 | list_for_each_safe(pos, tmp, &vi->vdev_list) { | ||
1152 | vdev = list_entry(pos, struct vop_vdev, list); | ||
1153 | list_del(pos); | ||
1154 | reinit_completion(&vdev->destroy); | ||
1155 | mutex_unlock(&vi->vop_mutex); | ||
1156 | mutex_lock(&vdev->vdev_mutex); | ||
1157 | vop_virtio_del_device(vdev); | ||
1158 | vdev->deleted = true; | ||
1159 | mutex_unlock(&vdev->vdev_mutex); | ||
1160 | complete(&vdev->destroy); | ||
1161 | mutex_lock(&vi->vop_mutex); | ||
1162 | } | ||
1163 | mutex_unlock(&vi->vop_mutex); | ||
1164 | misc_deregister(&vi->miscdev); | ||
1165 | } | ||
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c index 9a17a9bab8d6..4810e039bbec 100644 --- a/drivers/misc/pch_phub.c +++ b/drivers/misc/pch_phub.c | |||
@@ -503,8 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, | |||
503 | int err; | 503 | int err; |
504 | ssize_t rom_size; | 504 | ssize_t rom_size; |
505 | 505 | ||
506 | struct pch_phub_reg *chip = | 506 | struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj)); |
507 | dev_get_drvdata(container_of(kobj, struct device, kobj)); | ||
508 | 507 | ||
509 | ret = mutex_lock_interruptible(&pch_phub_mutex); | 508 | ret = mutex_lock_interruptible(&pch_phub_mutex); |
510 | if (ret) { | 509 | if (ret) { |
@@ -514,8 +513,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, | |||
514 | 513 | ||
515 | /* Get Rom signature */ | 514 | /* Get Rom signature */ |
516 | chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); | 515 | chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); |
517 | if (!chip->pch_phub_extrom_base_address) | 516 | if (!chip->pch_phub_extrom_base_address) { |
517 | err = -ENODATA; | ||
518 | goto exrom_map_err; | 518 | goto exrom_map_err; |
519 | } | ||
519 | 520 | ||
520 | pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, | 521 | pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, |
521 | (unsigned char *)&rom_signature); | 522 | (unsigned char *)&rom_signature); |
@@ -567,8 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, | |||
567 | unsigned int addr_offset; | 568 | unsigned int addr_offset; |
568 | int ret; | 569 | int ret; |
569 | ssize_t rom_size; | 570 | ssize_t rom_size; |
570 | struct pch_phub_reg *chip = | 571 | struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj)); |
571 | dev_get_drvdata(container_of(kobj, struct device, kobj)); | ||
572 | 572 | ||
573 | ret = mutex_lock_interruptible(&pch_phub_mutex); | 573 | ret = mutex_lock_interruptible(&pch_phub_mutex); |
574 | if (ret) | 574 | if (ret) |
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 6e3af8b42cdd..dcdbd58672cc 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c | |||
@@ -632,7 +632,6 @@ long st_register(struct st_proto_s *new_proto) | |||
632 | spin_unlock_irqrestore(&st_gdata->lock, flags); | 632 | spin_unlock_irqrestore(&st_gdata->lock, flags); |
633 | return err; | 633 | return err; |
634 | } | 634 | } |
635 | pr_debug("done %s(%d) ", __func__, new_proto->chnl_id); | ||
636 | } | 635 | } |
637 | EXPORT_SYMBOL_GPL(st_register); | 636 | EXPORT_SYMBOL_GPL(st_register); |
638 | 637 | ||
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c index b823f9a6e464..896be150e28f 100644 --- a/drivers/misc/vmw_vmci/vmci_driver.c +++ b/drivers/misc/vmw_vmci/vmci_driver.c | |||
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit); | |||
113 | 113 | ||
114 | MODULE_AUTHOR("VMware, Inc."); | 114 | MODULE_AUTHOR("VMware, Inc."); |
115 | MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); | 115 | MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); |
116 | MODULE_VERSION("1.1.3.0-k"); | 116 | MODULE_VERSION("1.1.4.0-k"); |
117 | MODULE_LICENSE("GPL v2"); | 117 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index bc4ea585b42e..ca52952d850f 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig | |||
@@ -25,9 +25,19 @@ config NVMEM_IMX_OCOTP | |||
25 | This driver can also be built as a module. If so, the module | 25 | This driver can also be built as a module. If so, the module |
26 | will be called nvmem-imx-ocotp. | 26 | will be called nvmem-imx-ocotp. |
27 | 27 | ||
28 | config NVMEM_LPC18XX_EEPROM | ||
29 | tristate "NXP LPC18XX EEPROM Memory Support" | ||
30 | depends on ARCH_LPC18XX || COMPILE_TEST | ||
31 | help | ||
32 | Say Y here to include support for NXP LPC18xx EEPROM memory found in | ||
33 | NXP LPC185x/3x and LPC435x/3x/2x/1x devices. | ||
34 | To compile this driver as a module, choose M here: the module | ||
35 | will be called nvmem_lpc18xx_eeprom. | ||
36 | |||
28 | config NVMEM_MXS_OCOTP | 37 | config NVMEM_MXS_OCOTP |
29 | tristate "Freescale MXS On-Chip OTP Memory Support" | 38 | tristate "Freescale MXS On-Chip OTP Memory Support" |
30 | depends on ARCH_MXS || COMPILE_TEST | 39 | depends on ARCH_MXS || COMPILE_TEST |
40 | depends on HAS_IOMEM | ||
31 | help | 41 | help |
32 | If you say Y here, you will get readonly access to the | 42 | If you say Y here, you will get readonly access to the |
33 | One Time Programmable memory pages that are stored | 43 | One Time Programmable memory pages that are stored |
@@ -36,9 +46,21 @@ config NVMEM_MXS_OCOTP | |||
36 | This driver can also be built as a module. If so, the module | 46 | This driver can also be built as a module. If so, the module |
37 | will be called nvmem-mxs-ocotp. | 47 | will be called nvmem-mxs-ocotp. |
38 | 48 | ||
49 | config MTK_EFUSE | ||
50 | tristate "Mediatek SoCs EFUSE support" | ||
51 | depends on ARCH_MEDIATEK || COMPILE_TEST | ||
52 | select REGMAP_MMIO | ||
53 | help | ||
54 | This is a driver to access hardware related data like sensor | ||
55 | calibration, HDMI impedance etc. | ||
56 | |||
57 | This driver can also be built as a module. If so, the module | ||
58 | will be called efuse-mtk. | ||
59 | |||
39 | config QCOM_QFPROM | 60 | config QCOM_QFPROM |
40 | tristate "QCOM QFPROM Support" | 61 | tristate "QCOM QFPROM Support" |
41 | depends on ARCH_QCOM || COMPILE_TEST | 62 | depends on ARCH_QCOM || COMPILE_TEST |
63 | depends on HAS_IOMEM | ||
42 | select REGMAP_MMIO | 64 | select REGMAP_MMIO |
43 | help | 65 | help |
44 | Say y here to enable QFPROM support. The QFPROM provides access | 66 | Say y here to enable QFPROM support. The QFPROM provides access |
@@ -50,6 +72,7 @@ config QCOM_QFPROM | |||
50 | config ROCKCHIP_EFUSE | 72 | config ROCKCHIP_EFUSE |
51 | tristate "Rockchip eFuse Support" | 73 | tristate "Rockchip eFuse Support" |
52 | depends on ARCH_ROCKCHIP || COMPILE_TEST | 74 | depends on ARCH_ROCKCHIP || COMPILE_TEST |
75 | depends on HAS_IOMEM | ||
53 | help | 76 | help |
54 | This is a simple drive to dump specified values of Rockchip SoC | 77 | This is a simple drive to dump specified values of Rockchip SoC |
55 | from eFuse, such as cpu-leakage. | 78 | from eFuse, such as cpu-leakage. |
@@ -71,6 +94,7 @@ config NVMEM_SUNXI_SID | |||
71 | config NVMEM_VF610_OCOTP | 94 | config NVMEM_VF610_OCOTP |
72 | tristate "VF610 SoC OCOTP support" | 95 | tristate "VF610 SoC OCOTP support" |
73 | depends on SOC_VF610 || COMPILE_TEST | 96 | depends on SOC_VF610 || COMPILE_TEST |
97 | depends on HAS_IOMEM | ||
74 | help | 98 | help |
75 | This is a driver for the 'OCOTP' peripheral available on Vybrid | 99 | This is a driver for the 'OCOTP' peripheral available on Vybrid |
76 | devices like VF5xx and VF6xx. | 100 | devices like VF5xx and VF6xx. |
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index 95dde3f8f085..45ab1ae08fa9 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile | |||
@@ -8,8 +8,12 @@ nvmem_core-y := core.o | |||
8 | # Devices | 8 | # Devices |
9 | obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o | 9 | obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o |
10 | nvmem-imx-ocotp-y := imx-ocotp.o | 10 | nvmem-imx-ocotp-y := imx-ocotp.o |
11 | obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o | ||
12 | nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o | ||
11 | obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o | 13 | obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o |
12 | nvmem-mxs-ocotp-y := mxs-ocotp.o | 14 | nvmem-mxs-ocotp-y := mxs-ocotp.o |
15 | obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o | ||
16 | nvmem_mtk-efuse-y := mtk-efuse.o | ||
13 | obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o | 17 | obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o |
14 | nvmem_qfprom-y := qfprom.o | 18 | nvmem_qfprom-y := qfprom.o |
15 | obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o | 19 | obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o |
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 9d11d9837312..0de3d878c439 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c | |||
@@ -38,8 +38,13 @@ struct nvmem_device { | |||
38 | int users; | 38 | int users; |
39 | size_t size; | 39 | size_t size; |
40 | bool read_only; | 40 | bool read_only; |
41 | int flags; | ||
42 | struct bin_attribute eeprom; | ||
43 | struct device *base_dev; | ||
41 | }; | 44 | }; |
42 | 45 | ||
46 | #define FLAG_COMPAT BIT(0) | ||
47 | |||
43 | struct nvmem_cell { | 48 | struct nvmem_cell { |
44 | const char *name; | 49 | const char *name; |
45 | int offset; | 50 | int offset; |
@@ -56,16 +61,26 @@ static DEFINE_IDA(nvmem_ida); | |||
56 | static LIST_HEAD(nvmem_cells); | 61 | static LIST_HEAD(nvmem_cells); |
57 | static DEFINE_MUTEX(nvmem_cells_mutex); | 62 | static DEFINE_MUTEX(nvmem_cells_mutex); |
58 | 63 | ||
64 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
65 | static struct lock_class_key eeprom_lock_key; | ||
66 | #endif | ||
67 | |||
59 | #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) | 68 | #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) |
60 | 69 | ||
61 | static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, | 70 | static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, |
62 | struct bin_attribute *attr, | 71 | struct bin_attribute *attr, |
63 | char *buf, loff_t pos, size_t count) | 72 | char *buf, loff_t pos, size_t count) |
64 | { | 73 | { |
65 | struct device *dev = container_of(kobj, struct device, kobj); | 74 | struct device *dev; |
66 | struct nvmem_device *nvmem = to_nvmem_device(dev); | 75 | struct nvmem_device *nvmem; |
67 | int rc; | 76 | int rc; |
68 | 77 | ||
78 | if (attr->private) | ||
79 | dev = attr->private; | ||
80 | else | ||
81 | dev = container_of(kobj, struct device, kobj); | ||
82 | nvmem = to_nvmem_device(dev); | ||
83 | |||
69 | /* Stop the user from reading */ | 84 | /* Stop the user from reading */ |
70 | if (pos >= nvmem->size) | 85 | if (pos >= nvmem->size) |
71 | return 0; | 86 | return 0; |
@@ -90,10 +105,16 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, | |||
90 | struct bin_attribute *attr, | 105 | struct bin_attribute *attr, |
91 | char *buf, loff_t pos, size_t count) | 106 | char *buf, loff_t pos, size_t count) |
92 | { | 107 | { |
93 | struct device *dev = container_of(kobj, struct device, kobj); | 108 | struct device *dev; |
94 | struct nvmem_device *nvmem = to_nvmem_device(dev); | 109 | struct nvmem_device *nvmem; |
95 | int rc; | 110 | int rc; |
96 | 111 | ||
112 | if (attr->private) | ||
113 | dev = attr->private; | ||
114 | else | ||
115 | dev = container_of(kobj, struct device, kobj); | ||
116 | nvmem = to_nvmem_device(dev); | ||
117 | |||
97 | /* Stop the user from writing */ | 118 | /* Stop the user from writing */ |
98 | if (pos >= nvmem->size) | 119 | if (pos >= nvmem->size) |
99 | return 0; | 120 | return 0; |
@@ -161,6 +182,53 @@ static const struct attribute_group *nvmem_ro_dev_groups[] = { | |||
161 | NULL, | 182 | NULL, |
162 | }; | 183 | }; |
163 | 184 | ||
185 | /* default read/write permissions, root only */ | ||
186 | static struct bin_attribute bin_attr_rw_root_nvmem = { | ||
187 | .attr = { | ||
188 | .name = "nvmem", | ||
189 | .mode = S_IWUSR | S_IRUSR, | ||
190 | }, | ||
191 | .read = bin_attr_nvmem_read, | ||
192 | .write = bin_attr_nvmem_write, | ||
193 | }; | ||
194 | |||
195 | static struct bin_attribute *nvmem_bin_rw_root_attributes[] = { | ||
196 | &bin_attr_rw_root_nvmem, | ||
197 | NULL, | ||
198 | }; | ||
199 | |||
200 | static const struct attribute_group nvmem_bin_rw_root_group = { | ||
201 | .bin_attrs = nvmem_bin_rw_root_attributes, | ||
202 | }; | ||
203 | |||
204 | static const struct attribute_group *nvmem_rw_root_dev_groups[] = { | ||
205 | &nvmem_bin_rw_root_group, | ||
206 | NULL, | ||
207 | }; | ||
208 | |||
209 | /* read only permission, root only */ | ||
210 | static struct bin_attribute bin_attr_ro_root_nvmem = { | ||
211 | .attr = { | ||
212 | .name = "nvmem", | ||
213 | .mode = S_IRUSR, | ||
214 | }, | ||
215 | .read = bin_attr_nvmem_read, | ||
216 | }; | ||
217 | |||
218 | static struct bin_attribute *nvmem_bin_ro_root_attributes[] = { | ||
219 | &bin_attr_ro_root_nvmem, | ||
220 | NULL, | ||
221 | }; | ||
222 | |||
223 | static const struct attribute_group nvmem_bin_ro_root_group = { | ||
224 | .bin_attrs = nvmem_bin_ro_root_attributes, | ||
225 | }; | ||
226 | |||
227 | static const struct attribute_group *nvmem_ro_root_dev_groups[] = { | ||
228 | &nvmem_bin_ro_root_group, | ||
229 | NULL, | ||
230 | }; | ||
231 | |||
164 | static void nvmem_release(struct device *dev) | 232 | static void nvmem_release(struct device *dev) |
165 | { | 233 | { |
166 | struct nvmem_device *nvmem = to_nvmem_device(dev); | 234 | struct nvmem_device *nvmem = to_nvmem_device(dev); |
@@ -294,12 +362,51 @@ static int nvmem_add_cells(struct nvmem_device *nvmem, | |||
294 | 362 | ||
295 | return 0; | 363 | return 0; |
296 | err: | 364 | err: |
297 | while (--i) | 365 | while (i--) |
298 | nvmem_cell_drop(cells[i]); | 366 | nvmem_cell_drop(cells[i]); |
299 | 367 | ||
368 | kfree(cells); | ||
369 | |||
300 | return rval; | 370 | return rval; |
301 | } | 371 | } |
302 | 372 | ||
373 | /* | ||
374 | * nvmem_setup_compat() - Create an additional binary entry in | ||
375 | * drivers sys directory, to be backwards compatible with the older | ||
376 | * drivers/misc/eeprom drivers. | ||
377 | */ | ||
378 | static int nvmem_setup_compat(struct nvmem_device *nvmem, | ||
379 | const struct nvmem_config *config) | ||
380 | { | ||
381 | int rval; | ||
382 | |||
383 | if (!config->base_dev) | ||
384 | return -EINVAL; | ||
385 | |||
386 | if (nvmem->read_only) | ||
387 | nvmem->eeprom = bin_attr_ro_root_nvmem; | ||
388 | else | ||
389 | nvmem->eeprom = bin_attr_rw_root_nvmem; | ||
390 | nvmem->eeprom.attr.name = "eeprom"; | ||
391 | nvmem->eeprom.size = nvmem->size; | ||
392 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
393 | nvmem->eeprom.attr.key = &eeprom_lock_key; | ||
394 | #endif | ||
395 | nvmem->eeprom.private = &nvmem->dev; | ||
396 | nvmem->base_dev = config->base_dev; | ||
397 | |||
398 | rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom); | ||
399 | if (rval) { | ||
400 | dev_err(&nvmem->dev, | ||
401 | "Failed to create eeprom binary file %d\n", rval); | ||
402 | return rval; | ||
403 | } | ||
404 | |||
405 | nvmem->flags |= FLAG_COMPAT; | ||
406 | |||
407 | return 0; | ||
408 | } | ||
409 | |||
303 | /** | 410 | /** |
304 | * nvmem_register() - Register a nvmem device for given nvmem_config. | 411 | * nvmem_register() - Register a nvmem device for given nvmem_config. |
305 | * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem | 412 | * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem |
@@ -353,24 +460,37 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) | |||
353 | nvmem->read_only = of_property_read_bool(np, "read-only") | | 460 | nvmem->read_only = of_property_read_bool(np, "read-only") | |
354 | config->read_only; | 461 | config->read_only; |
355 | 462 | ||
356 | nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups : | 463 | if (config->root_only) |
357 | nvmem_rw_dev_groups; | 464 | nvmem->dev.groups = nvmem->read_only ? |
465 | nvmem_ro_root_dev_groups : | ||
466 | nvmem_rw_root_dev_groups; | ||
467 | else | ||
468 | nvmem->dev.groups = nvmem->read_only ? | ||
469 | nvmem_ro_dev_groups : | ||
470 | nvmem_rw_dev_groups; | ||
358 | 471 | ||
359 | device_initialize(&nvmem->dev); | 472 | device_initialize(&nvmem->dev); |
360 | 473 | ||
361 | dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); | 474 | dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); |
362 | 475 | ||
363 | rval = device_add(&nvmem->dev); | 476 | rval = device_add(&nvmem->dev); |
364 | if (rval) { | 477 | if (rval) |
365 | ida_simple_remove(&nvmem_ida, nvmem->id); | 478 | goto out; |
366 | kfree(nvmem); | 479 | |
367 | return ERR_PTR(rval); | 480 | if (config->compat) { |
481 | rval = nvmem_setup_compat(nvmem, config); | ||
482 | if (rval) | ||
483 | goto out; | ||
368 | } | 484 | } |
369 | 485 | ||
370 | if (config->cells) | 486 | if (config->cells) |
371 | nvmem_add_cells(nvmem, config); | 487 | nvmem_add_cells(nvmem, config); |
372 | 488 | ||
373 | return nvmem; | 489 | return nvmem; |
490 | out: | ||
491 | ida_simple_remove(&nvmem_ida, nvmem->id); | ||
492 | kfree(nvmem); | ||
493 | return ERR_PTR(rval); | ||
374 | } | 494 | } |
375 | EXPORT_SYMBOL_GPL(nvmem_register); | 495 | EXPORT_SYMBOL_GPL(nvmem_register); |
376 | 496 | ||
@@ -390,6 +510,9 @@ int nvmem_unregister(struct nvmem_device *nvmem) | |||
390 | } | 510 | } |
391 | mutex_unlock(&nvmem_mutex); | 511 | mutex_unlock(&nvmem_mutex); |
392 | 512 | ||
513 | if (nvmem->flags & FLAG_COMPAT) | ||
514 | device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); | ||
515 | |||
393 | nvmem_device_remove_all_cells(nvmem); | 516 | nvmem_device_remove_all_cells(nvmem); |
394 | device_del(&nvmem->dev); | 517 | device_del(&nvmem->dev); |
395 | 518 | ||
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index b7971d410b60..d7796eb5421f 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c | |||
@@ -51,7 +51,7 @@ static int imx_ocotp_read(void *context, const void *reg, size_t reg_size, | |||
51 | val += 4; | 51 | val += 4; |
52 | } | 52 | } |
53 | 53 | ||
54 | return (i - index) * 4; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
57 | static int imx_ocotp_write(void *context, const void *data, size_t count) | 57 | static int imx_ocotp_write(void *context, const void *data, size_t count) |
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c new file mode 100644 index 000000000000..878fce789341 --- /dev/null +++ b/drivers/nvmem/lpc18xx_eeprom.c | |||
@@ -0,0 +1,330 @@ | |||
1 | /* | ||
2 | * NXP LPC18xx/LPC43xx EEPROM memory NVMEM driver | ||
3 | * | ||
4 | * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com.ar> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published by | ||
8 | * the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/clk.h> | ||
12 | #include <linux/device.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/nvmem-provider.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/regmap.h> | ||
20 | #include <linux/reset.h> | ||
21 | |||
22 | /* Registers */ | ||
23 | #define LPC18XX_EEPROM_AUTOPROG 0x00c | ||
24 | #define LPC18XX_EEPROM_AUTOPROG_WORD 0x1 | ||
25 | |||
26 | #define LPC18XX_EEPROM_CLKDIV 0x014 | ||
27 | |||
28 | #define LPC18XX_EEPROM_PWRDWN 0x018 | ||
29 | #define LPC18XX_EEPROM_PWRDWN_NO 0x0 | ||
30 | #define LPC18XX_EEPROM_PWRDWN_YES 0x1 | ||
31 | |||
32 | #define LPC18XX_EEPROM_INTSTAT 0xfe0 | ||
33 | #define LPC18XX_EEPROM_INTSTAT_END_OF_PROG BIT(2) | ||
34 | |||
35 | #define LPC18XX_EEPROM_INTSTATCLR 0xfe8 | ||
36 | #define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST BIT(2) | ||
37 | |||
38 | /* Fixed page size (bytes) */ | ||
39 | #define LPC18XX_EEPROM_PAGE_SIZE 0x80 | ||
40 | |||
41 | /* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */ | ||
42 | #define LPC18XX_EEPROM_CLOCK_HZ 1500000 | ||
43 | |||
44 | /* EEPROM requires 3 ms of erase/program time after each write */ | ||
45 | #define LPC18XX_EEPROM_PROGRAM_TIME 3 | ||
46 | |||
47 | struct lpc18xx_eeprom_dev { | ||
48 | struct clk *clk; | ||
49 | void __iomem *reg_base; | ||
50 | void __iomem *mem_base; | ||
51 | struct nvmem_device *nvmem; | ||
52 | unsigned reg_bytes; | ||
53 | unsigned val_bytes; | ||
54 | }; | ||
55 | |||
56 | static struct regmap_config lpc18xx_regmap_config = { | ||
57 | .reg_bits = 32, | ||
58 | .reg_stride = 4, | ||
59 | .val_bits = 32, | ||
60 | }; | ||
61 | |||
62 | static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom, | ||
63 | u32 reg, u32 val) | ||
64 | { | ||
65 | writel(val, eeprom->reg_base + reg); | ||
66 | } | ||
67 | |||
68 | static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom, | ||
69 | u32 reg) | ||
70 | { | ||
71 | return readl(eeprom->reg_base + reg); | ||
72 | } | ||
73 | |||
74 | static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom) | ||
75 | { | ||
76 | unsigned long end; | ||
77 | u32 val; | ||
78 | |||
79 | /* Wait until EEPROM program operation has finished */ | ||
80 | end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10); | ||
81 | |||
82 | while (time_is_after_jiffies(end)) { | ||
83 | val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT); | ||
84 | |||
85 | if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) { | ||
86 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR, | ||
87 | LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST); | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC, | ||
92 | (LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC); | ||
93 | } | ||
94 | |||
95 | return -ETIMEDOUT; | ||
96 | } | ||
97 | |||
98 | static int lpc18xx_eeprom_gather_write(void *context, const void *reg, | ||
99 | size_t reg_size, const void *val, | ||
100 | size_t val_size) | ||
101 | { | ||
102 | struct lpc18xx_eeprom_dev *eeprom = context; | ||
103 | unsigned int offset = *(u32 *)reg; | ||
104 | int ret; | ||
105 | |||
106 | if (offset % lpc18xx_regmap_config.reg_stride) | ||
107 | return -EINVAL; | ||
108 | |||
109 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
110 | LPC18XX_EEPROM_PWRDWN_NO); | ||
111 | |||
112 | /* Wait 100 us while the EEPROM wakes up */ | ||
113 | usleep_range(100, 200); | ||
114 | |||
115 | while (val_size) { | ||
116 | writel(*(u32 *)val, eeprom->mem_base + offset); | ||
117 | ret = lpc18xx_eeprom_busywait_until_prog(eeprom); | ||
118 | if (ret < 0) | ||
119 | return ret; | ||
120 | |||
121 | val_size -= eeprom->val_bytes; | ||
122 | val += eeprom->val_bytes; | ||
123 | offset += eeprom->val_bytes; | ||
124 | } | ||
125 | |||
126 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
127 | LPC18XX_EEPROM_PWRDWN_YES); | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static int lpc18xx_eeprom_write(void *context, const void *data, size_t count) | ||
133 | { | ||
134 | struct lpc18xx_eeprom_dev *eeprom = context; | ||
135 | unsigned int offset = eeprom->reg_bytes; | ||
136 | |||
137 | if (count <= offset) | ||
138 | return -EINVAL; | ||
139 | |||
140 | return lpc18xx_eeprom_gather_write(context, data, eeprom->reg_bytes, | ||
141 | data + offset, count - offset); | ||
142 | } | ||
143 | |||
144 | static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size, | ||
145 | void *val, size_t val_size) | ||
146 | { | ||
147 | struct lpc18xx_eeprom_dev *eeprom = context; | ||
148 | unsigned int offset = *(u32 *)reg; | ||
149 | |||
150 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
151 | LPC18XX_EEPROM_PWRDWN_NO); | ||
152 | |||
153 | /* Wait 100 us while the EEPROM wakes up */ | ||
154 | usleep_range(100, 200); | ||
155 | |||
156 | while (val_size) { | ||
157 | *(u32 *)val = readl(eeprom->mem_base + offset); | ||
158 | val_size -= eeprom->val_bytes; | ||
159 | val += eeprom->val_bytes; | ||
160 | offset += eeprom->val_bytes; | ||
161 | } | ||
162 | |||
163 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
164 | LPC18XX_EEPROM_PWRDWN_YES); | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static struct regmap_bus lpc18xx_eeprom_bus = { | ||
170 | .write = lpc18xx_eeprom_write, | ||
171 | .gather_write = lpc18xx_eeprom_gather_write, | ||
172 | .read = lpc18xx_eeprom_read, | ||
173 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
174 | .val_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
175 | }; | ||
176 | |||
177 | static bool lpc18xx_eeprom_writeable_reg(struct device *dev, unsigned int reg) | ||
178 | { | ||
179 | /* | ||
180 | * The last page contains the EEPROM initialization data and is not | ||
181 | * writable. | ||
182 | */ | ||
183 | return reg <= lpc18xx_regmap_config.max_register - | ||
184 | LPC18XX_EEPROM_PAGE_SIZE; | ||
185 | } | ||
186 | |||
187 | static bool lpc18xx_eeprom_readable_reg(struct device *dev, unsigned int reg) | ||
188 | { | ||
189 | return reg <= lpc18xx_regmap_config.max_register; | ||
190 | } | ||
191 | |||
192 | static struct nvmem_config lpc18xx_nvmem_config = { | ||
193 | .name = "lpc18xx-eeprom", | ||
194 | .owner = THIS_MODULE, | ||
195 | }; | ||
196 | |||
197 | static int lpc18xx_eeprom_probe(struct platform_device *pdev) | ||
198 | { | ||
199 | struct lpc18xx_eeprom_dev *eeprom; | ||
200 | struct device *dev = &pdev->dev; | ||
201 | struct reset_control *rst; | ||
202 | unsigned long clk_rate; | ||
203 | struct regmap *regmap; | ||
204 | struct resource *res; | ||
205 | int ret; | ||
206 | |||
207 | eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL); | ||
208 | if (!eeprom) | ||
209 | return -ENOMEM; | ||
210 | |||
211 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); | ||
212 | eeprom->reg_base = devm_ioremap_resource(dev, res); | ||
213 | if (IS_ERR(eeprom->reg_base)) | ||
214 | return PTR_ERR(eeprom->reg_base); | ||
215 | |||
216 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); | ||
217 | eeprom->mem_base = devm_ioremap_resource(dev, res); | ||
218 | if (IS_ERR(eeprom->mem_base)) | ||
219 | return PTR_ERR(eeprom->mem_base); | ||
220 | |||
221 | eeprom->clk = devm_clk_get(&pdev->dev, "eeprom"); | ||
222 | if (IS_ERR(eeprom->clk)) { | ||
223 | dev_err(&pdev->dev, "failed to get eeprom clock\n"); | ||
224 | return PTR_ERR(eeprom->clk); | ||
225 | } | ||
226 | |||
227 | ret = clk_prepare_enable(eeprom->clk); | ||
228 | if (ret < 0) { | ||
229 | dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret); | ||
230 | return ret; | ||
231 | } | ||
232 | |||
233 | rst = devm_reset_control_get(dev, NULL); | ||
234 | if (IS_ERR(rst)) { | ||
235 | dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst)); | ||
236 | ret = PTR_ERR(rst); | ||
237 | goto err_clk; | ||
238 | } | ||
239 | |||
240 | ret = reset_control_assert(rst); | ||
241 | if (ret < 0) { | ||
242 | dev_err(dev, "failed to assert reset: %d\n", ret); | ||
243 | goto err_clk; | ||
244 | } | ||
245 | |||
246 | eeprom->val_bytes = lpc18xx_regmap_config.val_bits / BITS_PER_BYTE; | ||
247 | eeprom->reg_bytes = lpc18xx_regmap_config.reg_bits / BITS_PER_BYTE; | ||
248 | |||
249 | /* | ||
250 | * Clock rate is generated by dividing the system bus clock by the | ||
251 | * division factor, contained in the divider register (minus 1 encoded). | ||
252 | */ | ||
253 | clk_rate = clk_get_rate(eeprom->clk); | ||
254 | clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1; | ||
255 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate); | ||
256 | |||
257 | /* | ||
258 | * Writing a single word to the page will start the erase/program cycle | ||
259 | * automatically | ||
260 | */ | ||
261 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG, | ||
262 | LPC18XX_EEPROM_AUTOPROG_WORD); | ||
263 | |||
264 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
265 | LPC18XX_EEPROM_PWRDWN_YES); | ||
266 | |||
267 | lpc18xx_regmap_config.max_register = resource_size(res) - 1; | ||
268 | lpc18xx_regmap_config.writeable_reg = lpc18xx_eeprom_writeable_reg; | ||
269 | lpc18xx_regmap_config.readable_reg = lpc18xx_eeprom_readable_reg; | ||
270 | |||
271 | regmap = devm_regmap_init(dev, &lpc18xx_eeprom_bus, eeprom, | ||
272 | &lpc18xx_regmap_config); | ||
273 | if (IS_ERR(regmap)) { | ||
274 | dev_err(dev, "regmap init failed: %ld\n", PTR_ERR(regmap)); | ||
275 | ret = PTR_ERR(regmap); | ||
276 | goto err_clk; | ||
277 | } | ||
278 | |||
279 | lpc18xx_nvmem_config.dev = dev; | ||
280 | |||
281 | eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config); | ||
282 | if (IS_ERR(eeprom->nvmem)) { | ||
283 | ret = PTR_ERR(eeprom->nvmem); | ||
284 | goto err_clk; | ||
285 | } | ||
286 | |||
287 | platform_set_drvdata(pdev, eeprom); | ||
288 | |||
289 | return 0; | ||
290 | |||
291 | err_clk: | ||
292 | clk_disable_unprepare(eeprom->clk); | ||
293 | |||
294 | return ret; | ||
295 | } | ||
296 | |||
297 | static int lpc18xx_eeprom_remove(struct platform_device *pdev) | ||
298 | { | ||
299 | struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev); | ||
300 | int ret; | ||
301 | |||
302 | ret = nvmem_unregister(eeprom->nvmem); | ||
303 | if (ret < 0) | ||
304 | return ret; | ||
305 | |||
306 | clk_disable_unprepare(eeprom->clk); | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static const struct of_device_id lpc18xx_eeprom_of_match[] = { | ||
312 | { .compatible = "nxp,lpc1857-eeprom" }, | ||
313 | { }, | ||
314 | }; | ||
315 | MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match); | ||
316 | |||
317 | static struct platform_driver lpc18xx_eeprom_driver = { | ||
318 | .probe = lpc18xx_eeprom_probe, | ||
319 | .remove = lpc18xx_eeprom_remove, | ||
320 | .driver = { | ||
321 | .name = "lpc18xx-eeprom", | ||
322 | .of_match_table = lpc18xx_eeprom_of_match, | ||
323 | }, | ||
324 | }; | ||
325 | |||
326 | module_platform_driver(lpc18xx_eeprom_driver); | ||
327 | |||
328 | MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>"); | ||
329 | MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver"); | ||
330 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c new file mode 100644 index 000000000000..9c49369beea5 --- /dev/null +++ b/drivers/nvmem/mtk-efuse.c | |||
@@ -0,0 +1,110 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2015 MediaTek Inc. | ||
3 | * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/nvmem-provider.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/regmap.h> | ||
20 | |||
21 | static struct regmap_config mtk_regmap_config = { | ||
22 | .reg_bits = 32, | ||
23 | .val_bits = 32, | ||
24 | .reg_stride = 4, | ||
25 | }; | ||
26 | |||
27 | static int mtk_efuse_probe(struct platform_device *pdev) | ||
28 | { | ||
29 | struct device *dev = &pdev->dev; | ||
30 | struct resource *res; | ||
31 | struct nvmem_device *nvmem; | ||
32 | struct nvmem_config *econfig; | ||
33 | struct regmap *regmap; | ||
34 | void __iomem *base; | ||
35 | |||
36 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
37 | base = devm_ioremap_resource(dev, res); | ||
38 | if (IS_ERR(base)) | ||
39 | return PTR_ERR(base); | ||
40 | |||
41 | econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL); | ||
42 | if (!econfig) | ||
43 | return -ENOMEM; | ||
44 | |||
45 | mtk_regmap_config.max_register = resource_size(res) - 1; | ||
46 | |||
47 | regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config); | ||
48 | if (IS_ERR(regmap)) { | ||
49 | dev_err(dev, "regmap init failed\n"); | ||
50 | return PTR_ERR(regmap); | ||
51 | } | ||
52 | |||
53 | econfig->dev = dev; | ||
54 | econfig->owner = THIS_MODULE; | ||
55 | nvmem = nvmem_register(econfig); | ||
56 | if (IS_ERR(nvmem)) | ||
57 | return PTR_ERR(nvmem); | ||
58 | |||
59 | platform_set_drvdata(pdev, nvmem); | ||
60 | |||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static int mtk_efuse_remove(struct platform_device *pdev) | ||
65 | { | ||
66 | struct nvmem_device *nvmem = platform_get_drvdata(pdev); | ||
67 | |||
68 | return nvmem_unregister(nvmem); | ||
69 | } | ||
70 | |||
71 | static const struct of_device_id mtk_efuse_of_match[] = { | ||
72 | { .compatible = "mediatek,mt8173-efuse",}, | ||
73 | { .compatible = "mediatek,efuse",}, | ||
74 | {/* sentinel */}, | ||
75 | }; | ||
76 | MODULE_DEVICE_TABLE(of, mtk_efuse_of_match); | ||
77 | |||
78 | static struct platform_driver mtk_efuse_driver = { | ||
79 | .probe = mtk_efuse_probe, | ||
80 | .remove = mtk_efuse_remove, | ||
81 | .driver = { | ||
82 | .name = "mediatek,efuse", | ||
83 | .of_match_table = mtk_efuse_of_match, | ||
84 | }, | ||
85 | }; | ||
86 | |||
87 | static int __init mtk_efuse_init(void) | ||
88 | { | ||
89 | int ret; | ||
90 | |||
91 | ret = platform_driver_register(&mtk_efuse_driver); | ||
92 | if (ret) { | ||
93 | pr_err("Failed to register efuse driver\n"); | ||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static void __exit mtk_efuse_exit(void) | ||
101 | { | ||
102 | return platform_driver_unregister(&mtk_efuse_driver); | ||
103 | } | ||
104 | |||
105 | subsys_initcall(mtk_efuse_init); | ||
106 | module_exit(mtk_efuse_exit); | ||
107 | |||
108 | MODULE_AUTHOR("Andrew-CT Chen <andrew-ct.chen@mediatek.com>"); | ||
109 | MODULE_DESCRIPTION("Mediatek EFUSE driver"); | ||
110 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c index f55213424222..a009795111e9 100644 --- a/drivers/nvmem/rockchip-efuse.c +++ b/drivers/nvmem/rockchip-efuse.c | |||
@@ -14,16 +14,16 @@ | |||
14 | * more details. | 14 | * more details. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/platform_device.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/nvmem-provider.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/slab.h> | ||
20 | #include <linux/regmap.h> | ||
21 | #include <linux/device.h> | 19 | #include <linux/device.h> |
22 | #include <linux/io.h> | 20 | #include <linux/io.h> |
23 | #include <linux/module.h> | 21 | #include <linux/module.h> |
24 | #include <linux/delay.h> | 22 | #include <linux/nvmem-provider.h> |
23 | #include <linux/slab.h> | ||
25 | #include <linux/of.h> | 24 | #include <linux/of.h> |
26 | #include <linux/clk.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/regmap.h> | ||
27 | 27 | ||
28 | #define EFUSE_A_SHIFT 6 | 28 | #define EFUSE_A_SHIFT 6 |
29 | #define EFUSE_A_MASK 0x3ff | 29 | #define EFUSE_A_MASK 0x3ff |
@@ -35,10 +35,10 @@ | |||
35 | #define REG_EFUSE_CTRL 0x0000 | 35 | #define REG_EFUSE_CTRL 0x0000 |
36 | #define REG_EFUSE_DOUT 0x0004 | 36 | #define REG_EFUSE_DOUT 0x0004 |
37 | 37 | ||
38 | struct rockchip_efuse_context { | 38 | struct rockchip_efuse_chip { |
39 | struct device *dev; | 39 | struct device *dev; |
40 | void __iomem *base; | 40 | void __iomem *base; |
41 | struct clk *efuse_clk; | 41 | struct clk *clk; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static int rockchip_efuse_write(void *context, const void *data, size_t count) | 44 | static int rockchip_efuse_write(void *context, const void *data, size_t count) |
@@ -52,34 +52,32 @@ static int rockchip_efuse_read(void *context, | |||
52 | void *val, size_t val_size) | 52 | void *val, size_t val_size) |
53 | { | 53 | { |
54 | unsigned int offset = *(u32 *)reg; | 54 | unsigned int offset = *(u32 *)reg; |
55 | struct rockchip_efuse_context *_context = context; | 55 | struct rockchip_efuse_chip *efuse = context; |
56 | void __iomem *base = _context->base; | ||
57 | struct clk *clk = _context->efuse_clk; | ||
58 | u8 *buf = val; | 56 | u8 *buf = val; |
59 | int ret; | 57 | int ret; |
60 | 58 | ||
61 | ret = clk_prepare_enable(clk); | 59 | ret = clk_prepare_enable(efuse->clk); |
62 | if (ret < 0) { | 60 | if (ret < 0) { |
63 | dev_err(_context->dev, "failed to prepare/enable efuse clk\n"); | 61 | dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); |
64 | return ret; | 62 | return ret; |
65 | } | 63 | } |
66 | 64 | ||
67 | writel(EFUSE_LOAD | EFUSE_PGENB, base + REG_EFUSE_CTRL); | 65 | writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL); |
68 | udelay(1); | 66 | udelay(1); |
69 | while (val_size) { | 67 | while (val_size) { |
70 | writel(readl(base + REG_EFUSE_CTRL) & | 68 | writel(readl(efuse->base + REG_EFUSE_CTRL) & |
71 | (~(EFUSE_A_MASK << EFUSE_A_SHIFT)), | 69 | (~(EFUSE_A_MASK << EFUSE_A_SHIFT)), |
72 | base + REG_EFUSE_CTRL); | 70 | efuse->base + REG_EFUSE_CTRL); |
73 | writel(readl(base + REG_EFUSE_CTRL) | | 71 | writel(readl(efuse->base + REG_EFUSE_CTRL) | |
74 | ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT), | 72 | ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT), |
75 | base + REG_EFUSE_CTRL); | 73 | efuse->base + REG_EFUSE_CTRL); |
76 | udelay(1); | 74 | udelay(1); |
77 | writel(readl(base + REG_EFUSE_CTRL) | | 75 | writel(readl(efuse->base + REG_EFUSE_CTRL) | |
78 | EFUSE_STROBE, base + REG_EFUSE_CTRL); | 76 | EFUSE_STROBE, efuse->base + REG_EFUSE_CTRL); |
79 | udelay(1); | 77 | udelay(1); |
80 | *buf++ = readb(base + REG_EFUSE_DOUT); | 78 | *buf++ = readb(efuse->base + REG_EFUSE_DOUT); |
81 | writel(readl(base + REG_EFUSE_CTRL) & | 79 | writel(readl(efuse->base + REG_EFUSE_CTRL) & |
82 | (~EFUSE_STROBE), base + REG_EFUSE_CTRL); | 80 | (~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL); |
83 | udelay(1); | 81 | udelay(1); |
84 | 82 | ||
85 | val_size -= 1; | 83 | val_size -= 1; |
@@ -87,9 +85,9 @@ static int rockchip_efuse_read(void *context, | |||
87 | } | 85 | } |
88 | 86 | ||
89 | /* Switch to standby mode */ | 87 | /* Switch to standby mode */ |
90 | writel(EFUSE_PGENB | EFUSE_CSB, base + REG_EFUSE_CTRL); | 88 | writel(EFUSE_PGENB | EFUSE_CSB, efuse->base + REG_EFUSE_CTRL); |
91 | 89 | ||
92 | clk_disable_unprepare(clk); | 90 | clk_disable_unprepare(efuse->clk); |
93 | 91 | ||
94 | return 0; | 92 | return 0; |
95 | } | 93 | } |
@@ -114,48 +112,44 @@ static struct nvmem_config econfig = { | |||
114 | }; | 112 | }; |
115 | 113 | ||
116 | static const struct of_device_id rockchip_efuse_match[] = { | 114 | static const struct of_device_id rockchip_efuse_match[] = { |
117 | { .compatible = "rockchip,rockchip-efuse",}, | 115 | { .compatible = "rockchip,rockchip-efuse", }, |
118 | { /* sentinel */}, | 116 | { /* sentinel */}, |
119 | }; | 117 | }; |
120 | MODULE_DEVICE_TABLE(of, rockchip_efuse_match); | 118 | MODULE_DEVICE_TABLE(of, rockchip_efuse_match); |
121 | 119 | ||
122 | static int rockchip_efuse_probe(struct platform_device *pdev) | 120 | static int rockchip_efuse_probe(struct platform_device *pdev) |
123 | { | 121 | { |
124 | struct device *dev = &pdev->dev; | ||
125 | struct resource *res; | 122 | struct resource *res; |
126 | struct nvmem_device *nvmem; | 123 | struct nvmem_device *nvmem; |
127 | struct regmap *regmap; | 124 | struct regmap *regmap; |
128 | void __iomem *base; | 125 | struct rockchip_efuse_chip *efuse; |
129 | struct clk *clk; | ||
130 | struct rockchip_efuse_context *context; | ||
131 | 126 | ||
132 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 127 | efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip), |
133 | base = devm_ioremap_resource(dev, res); | 128 | GFP_KERNEL); |
134 | if (IS_ERR(base)) | 129 | if (!efuse) |
135 | return PTR_ERR(base); | 130 | return -ENOMEM; |
136 | 131 | ||
137 | context = devm_kzalloc(dev, sizeof(struct rockchip_efuse_context), | 132 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
138 | GFP_KERNEL); | 133 | efuse->base = devm_ioremap_resource(&pdev->dev, res); |
139 | if (IS_ERR(context)) | 134 | if (IS_ERR(efuse->base)) |
140 | return PTR_ERR(context); | 135 | return PTR_ERR(efuse->base); |
141 | 136 | ||
142 | clk = devm_clk_get(dev, "pclk_efuse"); | 137 | efuse->clk = devm_clk_get(&pdev->dev, "pclk_efuse"); |
143 | if (IS_ERR(clk)) | 138 | if (IS_ERR(efuse->clk)) |
144 | return PTR_ERR(clk); | 139 | return PTR_ERR(efuse->clk); |
145 | 140 | ||
146 | context->dev = dev; | 141 | efuse->dev = &pdev->dev; |
147 | context->base = base; | ||
148 | context->efuse_clk = clk; | ||
149 | 142 | ||
150 | rockchip_efuse_regmap_config.max_register = resource_size(res) - 1; | 143 | rockchip_efuse_regmap_config.max_register = resource_size(res) - 1; |
151 | 144 | ||
152 | regmap = devm_regmap_init(dev, &rockchip_efuse_bus, | 145 | regmap = devm_regmap_init(efuse->dev, &rockchip_efuse_bus, |
153 | context, &rockchip_efuse_regmap_config); | 146 | efuse, &rockchip_efuse_regmap_config); |
154 | if (IS_ERR(regmap)) { | 147 | if (IS_ERR(regmap)) { |
155 | dev_err(dev, "regmap init failed\n"); | 148 | dev_err(efuse->dev, "regmap init failed\n"); |
156 | return PTR_ERR(regmap); | 149 | return PTR_ERR(regmap); |
157 | } | 150 | } |
158 | econfig.dev = dev; | 151 | |
152 | econfig.dev = efuse->dev; | ||
159 | nvmem = nvmem_register(&econfig); | 153 | nvmem = nvmem_register(&econfig); |
160 | if (IS_ERR(nvmem)) | 154 | if (IS_ERR(nvmem)) |
161 | return PTR_ERR(nvmem); | 155 | return PTR_ERR(nvmem); |
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index cfa3b85064dd..bc88b4084055 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c | |||
@@ -13,10 +13,8 @@ | |||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
16 | * | ||
17 | */ | 16 | */ |
18 | 17 | ||
19 | |||
20 | #include <linux/device.h> | 18 | #include <linux/device.h> |
21 | #include <linux/io.h> | 19 | #include <linux/io.h> |
22 | #include <linux/module.h> | 20 | #include <linux/module.h> |
@@ -27,7 +25,6 @@ | |||
27 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
28 | #include <linux/random.h> | 26 | #include <linux/random.h> |
29 | 27 | ||
30 | |||
31 | static struct nvmem_config econfig = { | 28 | static struct nvmem_config econfig = { |
32 | .name = "sunxi-sid", | 29 | .name = "sunxi-sid", |
33 | .read_only = true, | 30 | .read_only = true, |
@@ -55,8 +52,8 @@ static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid, | |||
55 | } | 52 | } |
56 | 53 | ||
57 | static int sunxi_sid_read(void *context, | 54 | static int sunxi_sid_read(void *context, |
58 | const void *reg, size_t reg_size, | 55 | const void *reg, size_t reg_size, |
59 | void *val, size_t val_size) | 56 | void *val, size_t val_size) |
60 | { | 57 | { |
61 | struct sunxi_sid *sid = context; | 58 | struct sunxi_sid *sid = context; |
62 | unsigned int offset = *(u32 *)reg; | 59 | unsigned int offset = *(u32 *)reg; |
@@ -130,7 +127,7 @@ static int sunxi_sid_probe(struct platform_device *pdev) | |||
130 | if (IS_ERR(nvmem)) | 127 | if (IS_ERR(nvmem)) |
131 | return PTR_ERR(nvmem); | 128 | return PTR_ERR(nvmem); |
132 | 129 | ||
133 | randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); | 130 | randomness = kzalloc(sizeof(u8) * (size), GFP_KERNEL); |
134 | if (!randomness) { | 131 | if (!randomness) { |
135 | ret = -EINVAL; | 132 | ret = -EINVAL; |
136 | goto err_unreg_nvmem; | 133 | goto err_unreg_nvmem; |
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 0adccbf5c83f..c11db8bceea1 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig | |||
@@ -4,8 +4,7 @@ endif | |||
4 | if MIPS | 4 | if MIPS |
5 | source "drivers/platform/mips/Kconfig" | 5 | source "drivers/platform/mips/Kconfig" |
6 | endif | 6 | endif |
7 | if GOLDFISH | 7 | |
8 | source "drivers/platform/goldfish/Kconfig" | 8 | source "drivers/platform/goldfish/Kconfig" |
9 | endif | ||
10 | 9 | ||
11 | source "drivers/platform/chrome/Kconfig" | 10 | source "drivers/platform/chrome/Kconfig" |
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig index 635ef25cc722..fefbb8370da0 100644 --- a/drivers/platform/goldfish/Kconfig +++ b/drivers/platform/goldfish/Kconfig | |||
@@ -1,5 +1,24 @@ | |||
1 | menuconfig GOLDFISH | ||
2 | bool "Platform support for Goldfish virtual devices" | ||
3 | depends on X86_32 || X86_64 || ARM || ARM64 || MIPS | ||
4 | depends on HAS_IOMEM | ||
5 | ---help--- | ||
6 | Say Y here to get to see options for the Goldfish virtual platform. | ||
7 | This option alone does not add any kernel code. | ||
8 | |||
9 | Unless you are building for the Android Goldfish emulator, say N here. | ||
10 | |||
11 | if GOLDFISH | ||
12 | |||
13 | config GOLDFISH_BUS | ||
14 | bool "Goldfish platform bus" | ||
15 | ---help--- | ||
16 | This is a virtual bus to host Goldfish Android Virtual Devices. | ||
17 | |||
1 | config GOLDFISH_PIPE | 18 | config GOLDFISH_PIPE |
2 | tristate "Goldfish virtual device for QEMU pipes" | 19 | tristate "Goldfish virtual device for QEMU pipes" |
3 | ---help--- | 20 | ---help--- |
4 | This is a virtual device to drive the QEMU pipe interface used by | 21 | This is a virtual device to drive the QEMU pipe interface used by |
5 | the Goldfish Android Virtual Device. | 22 | the Goldfish Android Virtual Device. |
23 | |||
24 | endif # GOLDFISH | ||
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile index a0022395eee9..d3487125838c 100644 --- a/drivers/platform/goldfish/Makefile +++ b/drivers/platform/goldfish/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # Makefile for Goldfish platform specific drivers | 2 | # Makefile for Goldfish platform specific drivers |
3 | # | 3 | # |
4 | obj-$(CONFIG_GOLDFISH) += pdev_bus.o | 4 | obj-$(CONFIG_GOLDFISH_BUS) += pdev_bus.o |
5 | obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o | 5 | obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o |
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index e7a29e2750c6..839df4aace76 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * Copyright (C) 2011 Google, Inc. | 2 | * Copyright (C) 2011 Google, Inc. |
3 | * Copyright (C) 2012 Intel, Inc. | 3 | * Copyright (C) 2012 Intel, Inc. |
4 | * Copyright (C) 2013 Intel, Inc. | 4 | * Copyright (C) 2013 Intel, Inc. |
5 | * Copyright (C) 2014 Linaro Limited | ||
5 | * | 6 | * |
6 | * This software is licensed under the terms of the GNU General Public | 7 | * This software is licensed under the terms of the GNU General Public |
7 | * License version 2, as published by the Free Software Foundation, and | 8 | * License version 2, as published by the Free Software Foundation, and |
@@ -57,6 +58,8 @@ | |||
57 | #include <linux/slab.h> | 58 | #include <linux/slab.h> |
58 | #include <linux/io.h> | 59 | #include <linux/io.h> |
59 | #include <linux/goldfish.h> | 60 | #include <linux/goldfish.h> |
61 | #include <linux/mm.h> | ||
62 | #include <linux/acpi.h> | ||
60 | 63 | ||
61 | /* | 64 | /* |
62 | * IMPORTANT: The following constants must match the ones used and defined | 65 | * IMPORTANT: The following constants must match the ones used and defined |
@@ -75,6 +78,7 @@ | |||
75 | #define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */ | 78 | #define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */ |
76 | #define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */ | 79 | #define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */ |
77 | #define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */ | 80 | #define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */ |
81 | #define PIPE_REG_VERSION 0x24 /* read: device version */ | ||
78 | 82 | ||
79 | /* list of commands for PIPE_REG_COMMAND */ | 83 | /* list of commands for PIPE_REG_COMMAND */ |
80 | #define CMD_OPEN 1 /* open new channel */ | 84 | #define CMD_OPEN 1 /* open new channel */ |
@@ -90,12 +94,6 @@ | |||
90 | #define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */ | 94 | #define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */ |
91 | #define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing | 95 | #define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing |
92 | is possible */ | 96 | is possible */ |
93 | |||
94 | /* The following commands are related to read operations, they must be | ||
95 | * listed in the same order than the corresponding write ones, since we | ||
96 | * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset | ||
97 | * in goldfish_pipe_read_write() below. | ||
98 | */ | ||
99 | #define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */ | 97 | #define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */ |
100 | #define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading | 98 | #define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading |
101 | * is possible */ | 99 | * is possible */ |
@@ -130,6 +128,7 @@ struct goldfish_pipe_dev { | |||
130 | unsigned char __iomem *base; | 128 | unsigned char __iomem *base; |
131 | struct access_params *aps; | 129 | struct access_params *aps; |
132 | int irq; | 130 | int irq; |
131 | u32 version; | ||
133 | }; | 132 | }; |
134 | 133 | ||
135 | static struct goldfish_pipe_dev pipe_dev[1]; | 134 | static struct goldfish_pipe_dev pipe_dev[1]; |
@@ -263,19 +262,14 @@ static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd, | |||
263 | return 0; | 262 | return 0; |
264 | } | 263 | } |
265 | 264 | ||
266 | /* This function is used for both reading from and writing to a given | ||
267 | * pipe. | ||
268 | */ | ||
269 | static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, | 265 | static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, |
270 | size_t bufflen, int is_write) | 266 | size_t bufflen, int is_write) |
271 | { | 267 | { |
272 | unsigned long irq_flags; | 268 | unsigned long irq_flags; |
273 | struct goldfish_pipe *pipe = filp->private_data; | 269 | struct goldfish_pipe *pipe = filp->private_data; |
274 | struct goldfish_pipe_dev *dev = pipe->dev; | 270 | struct goldfish_pipe_dev *dev = pipe->dev; |
275 | const int cmd_offset = is_write ? 0 | ||
276 | : (CMD_READ_BUFFER - CMD_WRITE_BUFFER); | ||
277 | unsigned long address, address_end; | 271 | unsigned long address, address_end; |
278 | int ret = 0; | 272 | int count = 0, ret = -EINVAL; |
279 | 273 | ||
280 | /* If the emulator already closed the pipe, no need to go further */ | 274 | /* If the emulator already closed the pipe, no need to go further */ |
281 | if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) | 275 | if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) |
@@ -298,79 +292,107 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, | |||
298 | address_end = address + bufflen; | 292 | address_end = address + bufflen; |
299 | 293 | ||
300 | while (address < address_end) { | 294 | while (address < address_end) { |
301 | unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE; | 295 | unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE; |
302 | unsigned long next = page_end < address_end ? page_end | 296 | unsigned long next = page_end < address_end ? page_end |
303 | : address_end; | 297 | : address_end; |
304 | unsigned long avail = next - address; | 298 | unsigned long avail = next - address; |
305 | int status, wakeBit; | 299 | int status, wakeBit; |
300 | struct page *page; | ||
301 | |||
302 | /* Either vaddr or paddr depending on the device version */ | ||
303 | unsigned long xaddr; | ||
304 | |||
305 | /* | ||
306 | * We grab the pages on a page-by-page basis in case user | ||
307 | * space gives us a potentially huge buffer but the read only | ||
308 | * returns a small amount, then there's no need to pin that | ||
309 | * much memory to the process. | ||
310 | */ | ||
311 | down_read(¤t->mm->mmap_sem); | ||
312 | ret = get_user_pages(current, current->mm, address, 1, | ||
313 | !is_write, 0, &page, NULL); | ||
314 | up_read(¤t->mm->mmap_sem); | ||
315 | if (ret < 0) | ||
316 | break; | ||
306 | 317 | ||
307 | /* Ensure that the corresponding page is properly mapped */ | 318 | if (dev->version) { |
308 | /* FIXME: this isn't safe or sufficient - use get_user_pages */ | 319 | /* Device version 1 or newer (qemu-android) expects the |
309 | if (is_write) { | 320 | * physical address. |
310 | char c; | 321 | */ |
311 | /* Ensure that the page is mapped and readable */ | 322 | xaddr = page_to_phys(page) | (address & ~PAGE_MASK); |
312 | if (__get_user(c, (char __user *)address)) { | ||
313 | if (!ret) | ||
314 | ret = -EFAULT; | ||
315 | break; | ||
316 | } | ||
317 | } else { | 323 | } else { |
318 | /* Ensure that the page is mapped and writable */ | 324 | /* Device version 0 (classic emulator) expects the |
319 | if (__put_user(0, (char __user *)address)) { | 325 | * virtual address. |
320 | if (!ret) | 326 | */ |
321 | ret = -EFAULT; | 327 | xaddr = address; |
322 | break; | ||
323 | } | ||
324 | } | 328 | } |
325 | 329 | ||
326 | /* Now, try to transfer the bytes in the current page */ | 330 | /* Now, try to transfer the bytes in the current page */ |
327 | spin_lock_irqsave(&dev->lock, irq_flags); | 331 | spin_lock_irqsave(&dev->lock, irq_flags); |
328 | if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset, | 332 | if (access_with_param(dev, |
329 | address, avail, pipe, &status)) { | 333 | is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER, |
334 | xaddr, avail, pipe, &status)) { | ||
330 | gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL, | 335 | gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL, |
331 | dev->base + PIPE_REG_CHANNEL_HIGH); | 336 | dev->base + PIPE_REG_CHANNEL_HIGH); |
332 | writel(avail, dev->base + PIPE_REG_SIZE); | 337 | writel(avail, dev->base + PIPE_REG_SIZE); |
333 | gf_write_ptr((void *)address, | 338 | gf_write_ptr((void *)xaddr, |
334 | dev->base + PIPE_REG_ADDRESS, | 339 | dev->base + PIPE_REG_ADDRESS, |
335 | dev->base + PIPE_REG_ADDRESS_HIGH); | 340 | dev->base + PIPE_REG_ADDRESS_HIGH); |
336 | writel(CMD_WRITE_BUFFER + cmd_offset, | 341 | writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER, |
337 | dev->base + PIPE_REG_COMMAND); | 342 | dev->base + PIPE_REG_COMMAND); |
338 | status = readl(dev->base + PIPE_REG_STATUS); | 343 | status = readl(dev->base + PIPE_REG_STATUS); |
339 | } | 344 | } |
340 | spin_unlock_irqrestore(&dev->lock, irq_flags); | 345 | spin_unlock_irqrestore(&dev->lock, irq_flags); |
341 | 346 | ||
347 | if (status > 0 && !is_write) | ||
348 | set_page_dirty(page); | ||
349 | put_page(page); | ||
350 | |||
342 | if (status > 0) { /* Correct transfer */ | 351 | if (status > 0) { /* Correct transfer */ |
343 | ret += status; | 352 | count += status; |
344 | address += status; | 353 | address += status; |
345 | continue; | 354 | continue; |
346 | } | 355 | } else if (status == 0) { /* EOF */ |
347 | 356 | ret = 0; | |
348 | if (status == 0) /* EOF */ | ||
349 | break; | 357 | break; |
350 | 358 | } else if (status < 0 && count > 0) { | |
351 | /* An error occured. If we already transfered stuff, just | 359 | /* |
352 | * return with its count. We expect the next call to return | 360 | * An error occurred and we already transferred |
353 | * an error code */ | 361 | * something on one of the previous pages. |
354 | if (ret > 0) | 362 | * Just return what we already copied and log this |
363 | * err. | ||
364 | * | ||
365 | * Note: This seems like an incorrect approach but | ||
366 | * cannot change it until we check if any user space | ||
367 | * ABI relies on this behavior. | ||
368 | */ | ||
369 | if (status != PIPE_ERROR_AGAIN) | ||
370 | pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n", | ||
371 | status, is_write ? "write" : "read"); | ||
372 | ret = 0; | ||
355 | break; | 373 | break; |
374 | } | ||
356 | 375 | ||
357 | /* If the error is not PIPE_ERROR_AGAIN, or if we are not in | 376 | /* |
358 | * non-blocking mode, just return the error code. | 377 | * If the error is not PIPE_ERROR_AGAIN, or if we are not in |
359 | */ | 378 | * non-blocking mode, just return the error code. |
379 | */ | ||
360 | if (status != PIPE_ERROR_AGAIN || | 380 | if (status != PIPE_ERROR_AGAIN || |
361 | (filp->f_flags & O_NONBLOCK) != 0) { | 381 | (filp->f_flags & O_NONBLOCK) != 0) { |
362 | ret = goldfish_pipe_error_convert(status); | 382 | ret = goldfish_pipe_error_convert(status); |
363 | break; | 383 | break; |
364 | } | 384 | } |
365 | 385 | ||
366 | /* We will have to wait until more data/space is available. | 386 | /* |
367 | * First, mark the pipe as waiting for a specific wake signal. | 387 | * The backend blocked the read/write, wait until the backend |
368 | */ | 388 | * tells us it's ready to process more data. |
389 | */ | ||
369 | wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ; | 390 | wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ; |
370 | set_bit(wakeBit, &pipe->flags); | 391 | set_bit(wakeBit, &pipe->flags); |
371 | 392 | ||
372 | /* Tell the emulator we're going to wait for a wake event */ | 393 | /* Tell the emulator we're going to wait for a wake event */ |
373 | goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset); | 394 | goldfish_cmd(pipe, |
395 | is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ); | ||
374 | 396 | ||
375 | /* Unlock the pipe, then wait for the wake signal */ | 397 | /* Unlock the pipe, then wait for the wake signal */ |
376 | mutex_unlock(&pipe->lock); | 398 | mutex_unlock(&pipe->lock); |
@@ -388,12 +410,13 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, | |||
388 | /* Try to re-acquire the lock */ | 410 | /* Try to re-acquire the lock */ |
389 | if (mutex_lock_interruptible(&pipe->lock)) | 411 | if (mutex_lock_interruptible(&pipe->lock)) |
390 | return -ERESTARTSYS; | 412 | return -ERESTARTSYS; |
391 | |||
392 | /* Try the transfer again */ | ||
393 | continue; | ||
394 | } | 413 | } |
395 | mutex_unlock(&pipe->lock); | 414 | mutex_unlock(&pipe->lock); |
396 | return ret; | 415 | |
416 | if (ret < 0) | ||
417 | return ret; | ||
418 | else | ||
419 | return count; | ||
397 | } | 420 | } |
398 | 421 | ||
399 | static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer, | 422 | static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer, |
@@ -446,10 +469,11 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id) | |||
446 | unsigned long irq_flags; | 469 | unsigned long irq_flags; |
447 | int count = 0; | 470 | int count = 0; |
448 | 471 | ||
449 | /* We're going to read from the emulator a list of (channel,flags) | 472 | /* |
450 | * pairs corresponding to the wake events that occured on each | 473 | * We're going to read from the emulator a list of (channel,flags) |
451 | * blocked pipe (i.e. channel). | 474 | * pairs corresponding to the wake events that occurred on each |
452 | */ | 475 | * blocked pipe (i.e. channel). |
476 | */ | ||
453 | spin_lock_irqsave(&dev->lock, irq_flags); | 477 | spin_lock_irqsave(&dev->lock, irq_flags); |
454 | for (;;) { | 478 | for (;;) { |
455 | /* First read the channel, 0 means the end of the list */ | 479 | /* First read the channel, 0 means the end of the list */ |
@@ -600,6 +624,12 @@ static int goldfish_pipe_probe(struct platform_device *pdev) | |||
600 | goto error; | 624 | goto error; |
601 | } | 625 | } |
602 | setup_access_params_addr(pdev, dev); | 626 | setup_access_params_addr(pdev, dev); |
627 | |||
628 | /* Although the pipe device in the classic Android emulator does not | ||
629 | * recognize the 'version' register, it won't treat this as an error | ||
630 | * either and will simply return 0, which is fine. | ||
631 | */ | ||
632 | dev->version = readl(dev->base + PIPE_REG_VERSION); | ||
603 | return 0; | 633 | return 0; |
604 | 634 | ||
605 | error: | 635 | error: |
@@ -615,11 +645,26 @@ static int goldfish_pipe_remove(struct platform_device *pdev) | |||
615 | return 0; | 645 | return 0; |
616 | } | 646 | } |
617 | 647 | ||
648 | static const struct acpi_device_id goldfish_pipe_acpi_match[] = { | ||
649 | { "GFSH0003", 0 }, | ||
650 | { }, | ||
651 | }; | ||
652 | MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match); | ||
653 | |||
654 | static const struct of_device_id goldfish_pipe_of_match[] = { | ||
655 | { .compatible = "google,android-pipe", }, | ||
656 | {}, | ||
657 | }; | ||
658 | MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match); | ||
659 | |||
618 | static struct platform_driver goldfish_pipe = { | 660 | static struct platform_driver goldfish_pipe = { |
619 | .probe = goldfish_pipe_probe, | 661 | .probe = goldfish_pipe_probe, |
620 | .remove = goldfish_pipe_remove, | 662 | .remove = goldfish_pipe_remove, |
621 | .driver = { | 663 | .driver = { |
622 | .name = "goldfish_pipe" | 664 | .name = "goldfish_pipe", |
665 | .owner = THIS_MODULE, | ||
666 | .of_match_table = goldfish_pipe_of_match, | ||
667 | .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match), | ||
623 | } | 668 | } |
624 | }; | 669 | }; |
625 | 670 | ||
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index be822f7a9ce6..aca282d45421 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
12 | */ | 12 | */ |
13 | #include <linux/bitmap.h> | ||
13 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
14 | #include <linux/err.h> | 15 | #include <linux/err.h> |
15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
@@ -47,9 +48,9 @@ | |||
47 | #define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1) | 48 | #define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1) |
48 | #define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF) | 49 | #define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF) |
49 | 50 | ||
50 | #define SPMI_MAPPING_TABLE_LEN 255 | ||
51 | #define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */ | 51 | #define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */ |
52 | #define PPID_TO_CHAN_TABLE_SZ BIT(12) /* PPID is 12bit chan is 1byte*/ | 52 | #define PMIC_ARB_MAX_PPID BIT(12) /* PPID is 12bit */ |
53 | #define PMIC_ARB_CHAN_VALID BIT(15) | ||
53 | 54 | ||
54 | /* Ownership Table */ | 55 | /* Ownership Table */ |
55 | #define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N))) | 56 | #define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N))) |
@@ -85,9 +86,7 @@ enum pmic_arb_cmd_op_code { | |||
85 | }; | 86 | }; |
86 | 87 | ||
87 | /* Maximum number of support PMIC peripherals */ | 88 | /* Maximum number of support PMIC peripherals */ |
88 | #define PMIC_ARB_MAX_PERIPHS 256 | 89 | #define PMIC_ARB_MAX_PERIPHS 512 |
89 | #define PMIC_ARB_MAX_CHNL 128 | ||
90 | #define PMIC_ARB_PERIPH_ID_VALID (1 << 15) | ||
91 | #define PMIC_ARB_TIMEOUT_US 100 | 90 | #define PMIC_ARB_TIMEOUT_US 100 |
92 | #define PMIC_ARB_MAX_TRANS_BYTES (8) | 91 | #define PMIC_ARB_MAX_TRANS_BYTES (8) |
93 | 92 | ||
@@ -125,18 +124,22 @@ struct spmi_pmic_arb_dev { | |||
125 | void __iomem *wr_base; | 124 | void __iomem *wr_base; |
126 | void __iomem *intr; | 125 | void __iomem *intr; |
127 | void __iomem *cnfg; | 126 | void __iomem *cnfg; |
127 | void __iomem *core; | ||
128 | resource_size_t core_size; | ||
128 | raw_spinlock_t lock; | 129 | raw_spinlock_t lock; |
129 | u8 channel; | 130 | u8 channel; |
130 | int irq; | 131 | int irq; |
131 | u8 ee; | 132 | u8 ee; |
132 | u8 min_apid; | 133 | u16 min_apid; |
133 | u8 max_apid; | 134 | u16 max_apid; |
134 | u32 mapping_table[SPMI_MAPPING_TABLE_LEN]; | 135 | u32 *mapping_table; |
136 | DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS); | ||
135 | struct irq_domain *domain; | 137 | struct irq_domain *domain; |
136 | struct spmi_controller *spmic; | 138 | struct spmi_controller *spmic; |
137 | u16 apid_to_ppid[256]; | 139 | u16 *apid_to_ppid; |
138 | const struct pmic_arb_ver_ops *ver_ops; | 140 | const struct pmic_arb_ver_ops *ver_ops; |
139 | u8 *ppid_to_chan; | 141 | u16 *ppid_to_chan; |
142 | u16 last_channel; | ||
140 | }; | 143 | }; |
141 | 144 | ||
142 | /** | 145 | /** |
@@ -158,7 +161,8 @@ struct spmi_pmic_arb_dev { | |||
158 | */ | 161 | */ |
159 | struct pmic_arb_ver_ops { | 162 | struct pmic_arb_ver_ops { |
160 | /* spmi commands (read_cmd, write_cmd, cmd) functionality */ | 163 | /* spmi commands (read_cmd, write_cmd, cmd) functionality */ |
161 | u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr); | 164 | int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr, |
165 | u32 *offset); | ||
162 | u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc); | 166 | u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc); |
163 | int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid); | 167 | int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid); |
164 | /* Interrupts controller functionality (offset of PIC registers) */ | 168 | /* Interrupts controller functionality (offset of PIC registers) */ |
@@ -212,7 +216,14 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl, | |||
212 | struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl); | 216 | struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl); |
213 | u32 status = 0; | 217 | u32 status = 0; |
214 | u32 timeout = PMIC_ARB_TIMEOUT_US; | 218 | u32 timeout = PMIC_ARB_TIMEOUT_US; |
215 | u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS; | 219 | u32 offset; |
220 | int rc; | ||
221 | |||
222 | rc = dev->ver_ops->offset(dev, sid, addr, &offset); | ||
223 | if (rc) | ||
224 | return rc; | ||
225 | |||
226 | offset += PMIC_ARB_STATUS; | ||
216 | 227 | ||
217 | while (timeout--) { | 228 | while (timeout--) { |
218 | status = readl_relaxed(base + offset); | 229 | status = readl_relaxed(base + offset); |
@@ -257,7 +268,11 @@ pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid) | |||
257 | unsigned long flags; | 268 | unsigned long flags; |
258 | u32 cmd; | 269 | u32 cmd; |
259 | int rc; | 270 | int rc; |
260 | u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0); | 271 | u32 offset; |
272 | |||
273 | rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset); | ||
274 | if (rc) | ||
275 | return rc; | ||
261 | 276 | ||
262 | cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20); | 277 | cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20); |
263 | 278 | ||
@@ -297,7 +312,11 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, | |||
297 | u8 bc = len - 1; | 312 | u8 bc = len - 1; |
298 | u32 cmd; | 313 | u32 cmd; |
299 | int rc; | 314 | int rc; |
300 | u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr); | 315 | u32 offset; |
316 | |||
317 | rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset); | ||
318 | if (rc) | ||
319 | return rc; | ||
301 | 320 | ||
302 | if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { | 321 | if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { |
303 | dev_err(&ctrl->dev, | 322 | dev_err(&ctrl->dev, |
@@ -344,7 +363,11 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, | |||
344 | u8 bc = len - 1; | 363 | u8 bc = len - 1; |
345 | u32 cmd; | 364 | u32 cmd; |
346 | int rc; | 365 | int rc; |
347 | u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr); | 366 | u32 offset; |
367 | |||
368 | rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset); | ||
369 | if (rc) | ||
370 | return rc; | ||
348 | 371 | ||
349 | if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { | 372 | if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { |
350 | dev_err(&ctrl->dev, | 373 | dev_err(&ctrl->dev, |
@@ -614,6 +637,10 @@ static int search_mapping_table(struct spmi_pmic_arb_dev *pa, | |||
614 | u32 data; | 637 | u32 data; |
615 | 638 | ||
616 | for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) { | 639 | for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) { |
640 | if (!test_and_set_bit(index, pa->mapping_table_valid)) | ||
641 | mapping_table[index] = readl_relaxed(pa->cnfg + | ||
642 | SPMI_MAPPING_TABLE_REG(index)); | ||
643 | |||
617 | data = mapping_table[index]; | 644 | data = mapping_table[index]; |
618 | 645 | ||
619 | if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) { | 646 | if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) { |
@@ -701,18 +728,61 @@ static int qpnpint_irq_domain_map(struct irq_domain *d, | |||
701 | } | 728 | } |
702 | 729 | ||
703 | /* v1 offset per ee */ | 730 | /* v1 offset per ee */ |
704 | static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr) | 731 | static int |
732 | pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset) | ||
705 | { | 733 | { |
706 | return 0x800 + 0x80 * pa->channel; | 734 | *offset = 0x800 + 0x80 * pa->channel; |
735 | return 0; | ||
707 | } | 736 | } |
708 | 737 | ||
738 | static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid) | ||
739 | { | ||
740 | u32 regval, offset; | ||
741 | u16 chan; | ||
742 | u16 id; | ||
743 | |||
744 | /* | ||
745 | * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid. | ||
746 | * ppid_to_chan is an in-memory invert of that table. | ||
747 | */ | ||
748 | for (chan = pa->last_channel; ; chan++) { | ||
749 | offset = PMIC_ARB_REG_CHNL(chan); | ||
750 | if (offset >= pa->core_size) | ||
751 | break; | ||
752 | |||
753 | regval = readl_relaxed(pa->core + offset); | ||
754 | if (!regval) | ||
755 | continue; | ||
756 | |||
757 | id = (regval >> 8) & PMIC_ARB_PPID_MASK; | ||
758 | pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID; | ||
759 | if (id == ppid) { | ||
760 | chan |= PMIC_ARB_CHAN_VALID; | ||
761 | break; | ||
762 | } | ||
763 | } | ||
764 | pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID; | ||
765 | |||
766 | return chan; | ||
767 | } | ||
768 | |||
769 | |||
709 | /* v2 offset per ppid (chan) and per ee */ | 770 | /* v2 offset per ppid (chan) and per ee */ |
710 | static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr) | 771 | static int |
772 | pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset) | ||
711 | { | 773 | { |
712 | u16 ppid = (sid << 8) | (addr >> 8); | 774 | u16 ppid = (sid << 8) | (addr >> 8); |
713 | u8 chan = pa->ppid_to_chan[ppid]; | 775 | u16 chan; |
714 | 776 | ||
715 | return 0x1000 * pa->ee + 0x8000 * chan; | 777 | chan = pa->ppid_to_chan[ppid]; |
778 | if (!(chan & PMIC_ARB_CHAN_VALID)) | ||
779 | chan = pmic_arb_find_chan(pa, ppid); | ||
780 | if (!(chan & PMIC_ARB_CHAN_VALID)) | ||
781 | return -ENODEV; | ||
782 | chan &= ~PMIC_ARB_CHAN_VALID; | ||
783 | |||
784 | *offset = 0x1000 * pa->ee + 0x8000 * chan; | ||
785 | return 0; | ||
716 | } | 786 | } |
717 | 787 | ||
718 | static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc) | 788 | static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc) |
@@ -797,7 +867,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
797 | struct resource *res; | 867 | struct resource *res; |
798 | void __iomem *core; | 868 | void __iomem *core; |
799 | u32 channel, ee, hw_ver; | 869 | u32 channel, ee, hw_ver; |
800 | int err, i; | 870 | int err; |
801 | bool is_v1; | 871 | bool is_v1; |
802 | 872 | ||
803 | ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa)); | 873 | ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa)); |
@@ -808,6 +878,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
808 | pa->spmic = ctrl; | 878 | pa->spmic = ctrl; |
809 | 879 | ||
810 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); | 880 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); |
881 | pa->core_size = resource_size(res); | ||
811 | core = devm_ioremap_resource(&ctrl->dev, res); | 882 | core = devm_ioremap_resource(&ctrl->dev, res); |
812 | if (IS_ERR(core)) { | 883 | if (IS_ERR(core)) { |
813 | err = PTR_ERR(core); | 884 | err = PTR_ERR(core); |
@@ -825,10 +896,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
825 | pa->wr_base = core; | 896 | pa->wr_base = core; |
826 | pa->rd_base = core; | 897 | pa->rd_base = core; |
827 | } else { | 898 | } else { |
828 | u8 chan; | 899 | pa->core = core; |
829 | u16 ppid; | ||
830 | u32 regval; | ||
831 | |||
832 | pa->ver_ops = &pmic_arb_v2; | 900 | pa->ver_ops = &pmic_arb_v2; |
833 | 901 | ||
834 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 902 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
@@ -847,24 +915,14 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
847 | goto err_put_ctrl; | 915 | goto err_put_ctrl; |
848 | } | 916 | } |
849 | 917 | ||
850 | pa->ppid_to_chan = devm_kzalloc(&ctrl->dev, | 918 | pa->ppid_to_chan = devm_kcalloc(&ctrl->dev, |
851 | PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL); | 919 | PMIC_ARB_MAX_PPID, |
920 | sizeof(*pa->ppid_to_chan), | ||
921 | GFP_KERNEL); | ||
852 | if (!pa->ppid_to_chan) { | 922 | if (!pa->ppid_to_chan) { |
853 | err = -ENOMEM; | 923 | err = -ENOMEM; |
854 | goto err_put_ctrl; | 924 | goto err_put_ctrl; |
855 | } | 925 | } |
856 | /* | ||
857 | * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid. | ||
858 | * ppid_to_chan is an in-memory invert of that table. | ||
859 | */ | ||
860 | for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) { | ||
861 | regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan)); | ||
862 | if (!regval) | ||
863 | continue; | ||
864 | |||
865 | ppid = (regval >> 8) & 0xFFF; | ||
866 | pa->ppid_to_chan[ppid] = chan; | ||
867 | } | ||
868 | } | 926 | } |
869 | 927 | ||
870 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr"); | 928 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr"); |
@@ -915,9 +973,20 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
915 | 973 | ||
916 | pa->ee = ee; | 974 | pa->ee = ee; |
917 | 975 | ||
918 | for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i) | 976 | pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS, |
919 | pa->mapping_table[i] = readl_relaxed( | 977 | sizeof(*pa->apid_to_ppid), |
920 | pa->cnfg + SPMI_MAPPING_TABLE_REG(i)); | 978 | GFP_KERNEL); |
979 | if (!pa->apid_to_ppid) { | ||
980 | err = -ENOMEM; | ||
981 | goto err_put_ctrl; | ||
982 | } | ||
983 | |||
984 | pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1, | ||
985 | sizeof(*pa->mapping_table), GFP_KERNEL); | ||
986 | if (!pa->mapping_table) { | ||
987 | err = -ENOMEM; | ||
988 | goto err_put_ctrl; | ||
989 | } | ||
921 | 990 | ||
922 | /* Initialize max_apid/min_apid to the opposite bounds, during | 991 | /* Initialize max_apid/min_apid to the opposite bounds, during |
923 | * the irq domain translation, we are sure to update these */ | 992 | * the irq domain translation, we are sure to update these */ |
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c index b0927e49d0a8..364fdcdd3a06 100644 --- a/drivers/staging/goldfish/goldfish_audio.c +++ b/drivers/staging/goldfish/goldfish_audio.c | |||
@@ -63,7 +63,7 @@ struct goldfish_audio { | |||
63 | #define AUDIO_READ(data, addr) (readl(data->reg_base + addr)) | 63 | #define AUDIO_READ(data, addr) (readl(data->reg_base + addr)) |
64 | #define AUDIO_WRITE(data, addr, x) (writel(x, data->reg_base + addr)) | 64 | #define AUDIO_WRITE(data, addr, x) (writel(x, data->reg_base + addr)) |
65 | #define AUDIO_WRITE64(data, addr, addr2, x) \ | 65 | #define AUDIO_WRITE64(data, addr, addr2, x) \ |
66 | (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base+addr2)) | 66 | (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base + addr2)) |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * temporary variable used between goldfish_audio_probe() and | 69 | * temporary variable used between goldfish_audio_probe() and |
@@ -280,12 +280,12 @@ static int goldfish_audio_probe(struct platform_device *pdev) | |||
280 | platform_set_drvdata(pdev, data); | 280 | platform_set_drvdata(pdev, data); |
281 | 281 | ||
282 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 282 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
283 | if (r == NULL) { | 283 | if (!r) { |
284 | dev_err(&pdev->dev, "platform_get_resource failed\n"); | 284 | dev_err(&pdev->dev, "platform_get_resource failed\n"); |
285 | return -ENODEV; | 285 | return -ENODEV; |
286 | } | 286 | } |
287 | data->reg_base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); | 287 | data->reg_base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); |
288 | if (data->reg_base == NULL) | 288 | if (!data->reg_base) |
289 | return -ENOMEM; | 289 | return -ENOMEM; |
290 | 290 | ||
291 | data->irq = platform_get_irq(pdev, 0); | 291 | data->irq = platform_get_irq(pdev, 0); |
@@ -295,7 +295,7 @@ static int goldfish_audio_probe(struct platform_device *pdev) | |||
295 | } | 295 | } |
296 | data->buffer_virt = dmam_alloc_coherent(&pdev->dev, | 296 | data->buffer_virt = dmam_alloc_coherent(&pdev->dev, |
297 | COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL); | 297 | COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL); |
298 | if (data->buffer_virt == NULL) { | 298 | if (!data->buffer_virt) { |
299 | dev_err(&pdev->dev, "allocate buffer failed\n"); | 299 | dev_err(&pdev->dev, "allocate buffer failed\n"); |
300 | return -ENOMEM; | 300 | return -ENOMEM; |
301 | } | 301 | } |
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c index 623353db5a08..76d60eed1490 100644 --- a/drivers/staging/goldfish/goldfish_nand.c +++ b/drivers/staging/goldfish/goldfish_nand.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
28 | #include <linux/goldfish.h> | 28 | #include <linux/goldfish.h> |
29 | #include <asm/div64.h> | 29 | #include <asm/div64.h> |
30 | #include <linux/dma-mapping.h> | ||
30 | 31 | ||
31 | #include "goldfish_nand_reg.h" | 32 | #include "goldfish_nand_reg.h" |
32 | 33 | ||
@@ -99,11 +100,11 @@ static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
99 | { | 100 | { |
100 | loff_t ofs = instr->addr; | 101 | loff_t ofs = instr->addr; |
101 | u32 len = instr->len; | 102 | u32 len = instr->len; |
102 | u32 rem; | 103 | s32 rem; |
103 | 104 | ||
104 | if (ofs + len > mtd->size) | 105 | if (ofs + len > mtd->size) |
105 | goto invalid_arg; | 106 | goto invalid_arg; |
106 | rem = do_div(ofs, mtd->writesize); | 107 | ofs = div_s64_rem(ofs, mtd->writesize, &rem); |
107 | if (rem) | 108 | if (rem) |
108 | goto invalid_arg; | 109 | goto invalid_arg; |
109 | ofs *= (mtd->writesize + mtd->oobsize); | 110 | ofs *= (mtd->writesize + mtd->oobsize); |
@@ -132,7 +133,7 @@ invalid_arg: | |||
132 | static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, | 133 | static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, |
133 | struct mtd_oob_ops *ops) | 134 | struct mtd_oob_ops *ops) |
134 | { | 135 | { |
135 | u32 rem; | 136 | s32 rem; |
136 | 137 | ||
137 | if (ofs + ops->len > mtd->size) | 138 | if (ofs + ops->len > mtd->size) |
138 | goto invalid_arg; | 139 | goto invalid_arg; |
@@ -141,7 +142,7 @@ static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, | |||
141 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) | 142 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) |
142 | goto invalid_arg; | 143 | goto invalid_arg; |
143 | 144 | ||
144 | rem = do_div(ofs, mtd->writesize); | 145 | ofs = div_s64_rem(ofs, mtd->writesize, &rem); |
145 | if (rem) | 146 | if (rem) |
146 | goto invalid_arg; | 147 | goto invalid_arg; |
147 | ofs *= (mtd->writesize + mtd->oobsize); | 148 | ofs *= (mtd->writesize + mtd->oobsize); |
@@ -164,7 +165,7 @@ invalid_arg: | |||
164 | static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, | 165 | static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, |
165 | struct mtd_oob_ops *ops) | 166 | struct mtd_oob_ops *ops) |
166 | { | 167 | { |
167 | u32 rem; | 168 | s32 rem; |
168 | 169 | ||
169 | if (ofs + ops->len > mtd->size) | 170 | if (ofs + ops->len > mtd->size) |
170 | goto invalid_arg; | 171 | goto invalid_arg; |
@@ -173,7 +174,7 @@ static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, | |||
173 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) | 174 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) |
174 | goto invalid_arg; | 175 | goto invalid_arg; |
175 | 176 | ||
176 | rem = do_div(ofs, mtd->writesize); | 177 | ofs = div_s64_rem(ofs, mtd->writesize, &rem); |
177 | if (rem) | 178 | if (rem) |
178 | goto invalid_arg; | 179 | goto invalid_arg; |
179 | ofs *= (mtd->writesize + mtd->oobsize); | 180 | ofs *= (mtd->writesize + mtd->oobsize); |
@@ -196,12 +197,12 @@ invalid_arg: | |||
196 | static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len, | 197 | static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len, |
197 | size_t *retlen, u_char *buf) | 198 | size_t *retlen, u_char *buf) |
198 | { | 199 | { |
199 | u32 rem; | 200 | s32 rem; |
200 | 201 | ||
201 | if (from + len > mtd->size) | 202 | if (from + len > mtd->size) |
202 | goto invalid_arg; | 203 | goto invalid_arg; |
203 | 204 | ||
204 | rem = do_div(from, mtd->writesize); | 205 | from = div_s64_rem(from, mtd->writesize, &rem); |
205 | if (rem) | 206 | if (rem) |
206 | goto invalid_arg; | 207 | goto invalid_arg; |
207 | from *= (mtd->writesize + mtd->oobsize); | 208 | from *= (mtd->writesize + mtd->oobsize); |
@@ -218,12 +219,12 @@ invalid_arg: | |||
218 | static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len, | 219 | static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len, |
219 | size_t *retlen, const u_char *buf) | 220 | size_t *retlen, const u_char *buf) |
220 | { | 221 | { |
221 | u32 rem; | 222 | s32 rem; |
222 | 223 | ||
223 | if (to + len > mtd->size) | 224 | if (to + len > mtd->size) |
224 | goto invalid_arg; | 225 | goto invalid_arg; |
225 | 226 | ||
226 | rem = do_div(to, mtd->writesize); | 227 | to = div_s64_rem(to, mtd->writesize, &rem); |
227 | if (rem) | 228 | if (rem) |
228 | goto invalid_arg; | 229 | goto invalid_arg; |
229 | to *= (mtd->writesize + mtd->oobsize); | 230 | to *= (mtd->writesize + mtd->oobsize); |
@@ -239,12 +240,12 @@ invalid_arg: | |||
239 | 240 | ||
240 | static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs) | 241 | static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs) |
241 | { | 242 | { |
242 | u32 rem; | 243 | s32 rem; |
243 | 244 | ||
244 | if (ofs >= mtd->size) | 245 | if (ofs >= mtd->size) |
245 | goto invalid_arg; | 246 | goto invalid_arg; |
246 | 247 | ||
247 | rem = do_div(ofs, mtd->erasesize); | 248 | ofs = div_s64_rem(ofs, mtd->erasesize, &rem); |
248 | if (rem) | 249 | if (rem) |
249 | goto invalid_arg; | 250 | goto invalid_arg; |
250 | ofs *= mtd->erasesize / mtd->writesize; | 251 | ofs *= mtd->erasesize / mtd->writesize; |
@@ -260,12 +261,12 @@ invalid_arg: | |||
260 | 261 | ||
261 | static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) | 262 | static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) |
262 | { | 263 | { |
263 | u32 rem; | 264 | s32 rem; |
264 | 265 | ||
265 | if (ofs >= mtd->size) | 266 | if (ofs >= mtd->size) |
266 | goto invalid_arg; | 267 | goto invalid_arg; |
267 | 268 | ||
268 | rem = do_div(ofs, mtd->erasesize); | 269 | ofs = div_s64_rem(ofs, mtd->writesize, &rem); |
269 | if (rem) | 270 | if (rem) |
270 | goto invalid_arg; | 271 | goto invalid_arg; |
271 | ofs *= mtd->erasesize / mtd->writesize; | 272 | ofs *= mtd->erasesize / mtd->writesize; |
@@ -284,17 +285,18 @@ invalid_arg: | |||
284 | static int nand_setup_cmd_params(struct platform_device *pdev, | 285 | static int nand_setup_cmd_params(struct platform_device *pdev, |
285 | struct goldfish_nand *nand) | 286 | struct goldfish_nand *nand) |
286 | { | 287 | { |
287 | u64 paddr; | 288 | dma_addr_t dma_handle; |
288 | unsigned char __iomem *base = nand->base; | 289 | unsigned char __iomem *base = nand->base; |
289 | 290 | ||
290 | nand->cmd_params = devm_kzalloc(&pdev->dev, | 291 | nand->cmd_params = dmam_alloc_coherent(&pdev->dev, |
291 | sizeof(struct cmd_params), GFP_KERNEL); | 292 | sizeof(struct cmd_params), |
292 | if (!nand->cmd_params) | 293 | &dma_handle, GFP_KERNEL); |
293 | return -1; | 294 | if (!nand->cmd_params) { |
294 | 295 | dev_err(&pdev->dev, "allocate buffer failed\n"); | |
295 | paddr = __pa(nand->cmd_params); | 296 | return -ENOMEM; |
296 | writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH); | 297 | } |
297 | writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW); | 298 | writel((u32)((u64)dma_handle >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH); |
299 | writel((u32)dma_handle, base + NAND_CMD_PARAMS_ADDR_LOW); | ||
298 | return 0; | 300 | return 0; |
299 | } | 301 | } |
300 | 302 | ||
@@ -319,7 +321,7 @@ static int goldfish_nand_init_device(struct platform_device *pdev, | |||
319 | mtd->oobavail = mtd->oobsize; | 321 | mtd->oobavail = mtd->oobsize; |
320 | mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) / | 322 | mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) / |
321 | (mtd->writesize + mtd->oobsize) * mtd->writesize; | 323 | (mtd->writesize + mtd->oobsize) * mtd->writesize; |
322 | do_div(mtd->size, mtd->writesize + mtd->oobsize); | 324 | mtd->size = div_s64(mtd->size, mtd->writesize + mtd->oobsize); |
323 | mtd->size *= mtd->writesize; | 325 | mtd->size *= mtd->writesize; |
324 | dev_dbg(&pdev->dev, | 326 | dev_dbg(&pdev->dev, |
325 | "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n", | 327 | "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n", |
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index b79a74a98a23..5fbeab38889e 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c | |||
@@ -202,7 +202,7 @@ static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge) | |||
202 | bridge = ca91cx42_bridge->driver_priv; | 202 | bridge = ca91cx42_bridge->driver_priv; |
203 | 203 | ||
204 | /* Need pdev */ | 204 | /* Need pdev */ |
205 | pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev); | 205 | pdev = to_pci_dev(ca91cx42_bridge->parent); |
206 | 206 | ||
207 | INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers); | 207 | INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers); |
208 | 208 | ||
@@ -293,8 +293,7 @@ static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, | |||
293 | iowrite32(tmp, bridge->base + LINT_EN); | 293 | iowrite32(tmp, bridge->base + LINT_EN); |
294 | 294 | ||
295 | if ((state == 0) && (sync != 0)) { | 295 | if ((state == 0) && (sync != 0)) { |
296 | pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, | 296 | pdev = to_pci_dev(ca91cx42_bridge->parent); |
297 | dev); | ||
298 | 297 | ||
299 | synchronize_irq(pdev->irq); | 298 | synchronize_irq(pdev->irq); |
300 | } | 299 | } |
@@ -518,7 +517,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image, | |||
518 | dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n"); | 517 | dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n"); |
519 | return -EINVAL; | 518 | return -EINVAL; |
520 | } | 519 | } |
521 | pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev); | 520 | pdev = to_pci_dev(ca91cx42_bridge->parent); |
522 | 521 | ||
523 | existing_size = (unsigned long long)(image->bus_resource.end - | 522 | existing_size = (unsigned long long)(image->bus_resource.end - |
524 | image->bus_resource.start); | 523 | image->bus_resource.start); |
@@ -1519,7 +1518,7 @@ static void *ca91cx42_alloc_consistent(struct device *parent, size_t size, | |||
1519 | struct pci_dev *pdev; | 1518 | struct pci_dev *pdev; |
1520 | 1519 | ||
1521 | /* Find pci_dev container of dev */ | 1520 | /* Find pci_dev container of dev */ |
1522 | pdev = container_of(parent, struct pci_dev, dev); | 1521 | pdev = to_pci_dev(parent); |
1523 | 1522 | ||
1524 | return pci_alloc_consistent(pdev, size, dma); | 1523 | return pci_alloc_consistent(pdev, size, dma); |
1525 | } | 1524 | } |
@@ -1530,7 +1529,7 @@ static void ca91cx42_free_consistent(struct device *parent, size_t size, | |||
1530 | struct pci_dev *pdev; | 1529 | struct pci_dev *pdev; |
1531 | 1530 | ||
1532 | /* Find pci_dev container of dev */ | 1531 | /* Find pci_dev container of dev */ |
1533 | pdev = container_of(parent, struct pci_dev, dev); | 1532 | pdev = to_pci_dev(parent); |
1534 | 1533 | ||
1535 | pci_free_consistent(pdev, size, vaddr, dma); | 1534 | pci_free_consistent(pdev, size, vaddr, dma); |
1536 | } | 1535 | } |
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index 0e2f43bccf1f..a2eec97d5064 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c | |||
@@ -618,7 +618,6 @@ static u8 omap_w1_read_byte(void *_hdq) | |||
618 | 618 | ||
619 | hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS, | 619 | hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS, |
620 | ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); | 620 | ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); |
621 | hdq_data->hdq_usecount = 0; | ||
622 | 621 | ||
623 | /* Write followed by a read, release the module */ | 622 | /* Write followed by a read, release the module */ |
624 | if (hdq_data->init_trans) { | 623 | if (hdq_data->init_trans) { |
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index c9a7ff67d395..89a784751738 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c | |||
@@ -1147,7 +1147,6 @@ int w1_process(void *data) | |||
1147 | jremain = 1; | 1147 | jremain = 1; |
1148 | } | 1148 | } |
1149 | 1149 | ||
1150 | try_to_freeze(); | ||
1151 | __set_current_state(TASK_INTERRUPTIBLE); | 1150 | __set_current_state(TASK_INTERRUPTIBLE); |
1152 | 1151 | ||
1153 | /* hold list_mutex until after interruptible to prevent loosing | 1152 | /* hold list_mutex until after interruptible to prevent loosing |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 80825a7e8e48..9289da313d98 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -1214,6 +1214,21 @@ config SBC_EPX_C3_WATCHDOG | |||
1214 | To compile this driver as a module, choose M here: the | 1214 | To compile this driver as a module, choose M here: the |
1215 | module will be called sbc_epx_c3. | 1215 | module will be called sbc_epx_c3. |
1216 | 1216 | ||
1217 | config INTEL_MEI_WDT | ||
1218 | tristate "Intel MEI iAMT Watchdog" | ||
1219 | depends on INTEL_MEI && X86 | ||
1220 | select WATCHDOG_CORE | ||
1221 | ---help--- | ||
1222 | A device driver for the Intel MEI iAMT watchdog. | ||
1223 | |||
1224 | The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog. | ||
1225 | Whenever the OS hangs or crashes, iAMT will send an event | ||
1226 | to any subscriber to this event. The watchdog doesn't reset | ||
1227 | the platform. | ||
1228 | |||
1229 | To compile this driver as a module, choose M here: | ||
1230 | the module will be called mei_wdt. | ||
1231 | |||
1217 | # M32R Architecture | 1232 | # M32R Architecture |
1218 | 1233 | ||
1219 | # M68K Architecture | 1234 | # M68K Architecture |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index f6a6a387c6c7..14bd772d3e66 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
@@ -126,6 +126,7 @@ obj-$(CONFIG_MACHZ_WDT) += machzwd.o | |||
126 | obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o | 126 | obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o |
127 | obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o | 127 | obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o |
128 | obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o | 128 | obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o |
129 | obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o | ||
129 | 130 | ||
130 | # M32R Architecture | 131 | # M32R Architecture |
131 | 132 | ||
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c new file mode 100644 index 000000000000..630bd189f167 --- /dev/null +++ b/drivers/watchdog/mei_wdt.c | |||
@@ -0,0 +1,724 @@ | |||
1 | /* | ||
2 | * Intel Management Engine Interface (Intel MEI) Linux driver | ||
3 | * Copyright (c) 2015, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/debugfs.h> | ||
19 | #include <linux/completion.h> | ||
20 | #include <linux/watchdog.h> | ||
21 | |||
22 | #include <linux/uuid.h> | ||
23 | #include <linux/mei_cl_bus.h> | ||
24 | |||
25 | /* | ||
26 | * iAMT Watchdog Device | ||
27 | */ | ||
28 | #define INTEL_AMT_WATCHDOG_ID "iamt_wdt" | ||
29 | |||
30 | #define MEI_WDT_DEFAULT_TIMEOUT 120 /* seconds */ | ||
31 | #define MEI_WDT_MIN_TIMEOUT 120 /* seconds */ | ||
32 | #define MEI_WDT_MAX_TIMEOUT 65535 /* seconds */ | ||
33 | |||
34 | /* Commands */ | ||
35 | #define MEI_MANAGEMENT_CONTROL 0x02 | ||
36 | |||
37 | /* MEI Management Control version number */ | ||
38 | #define MEI_MC_VERSION_NUMBER 0x10 | ||
39 | |||
40 | /* Sub Commands */ | ||
41 | #define MEI_MC_START_WD_TIMER_REQ 0x13 | ||
42 | #define MEI_MC_START_WD_TIMER_RES 0x83 | ||
43 | #define MEI_WDT_STATUS_SUCCESS 0 | ||
44 | #define MEI_WDT_WDSTATE_NOT_REQUIRED 0x1 | ||
45 | #define MEI_MC_STOP_WD_TIMER_REQ 0x14 | ||
46 | |||
47 | /** | ||
48 | * enum mei_wdt_state - internal watchdog state | ||
49 | * | ||
50 | * @MEI_WDT_PROBE: wd in probing stage | ||
51 | * @MEI_WDT_IDLE: wd is idle and not opened | ||
52 | * @MEI_WDT_START: wd was opened, start was called | ||
53 | * @MEI_WDT_RUNNING: wd is expecting keep alive pings | ||
54 | * @MEI_WDT_STOPPING: wd is stopping and will move to IDLE | ||
55 | * @MEI_WDT_NOT_REQUIRED: wd device is not required | ||
56 | */ | ||
57 | enum mei_wdt_state { | ||
58 | MEI_WDT_PROBE, | ||
59 | MEI_WDT_IDLE, | ||
60 | MEI_WDT_START, | ||
61 | MEI_WDT_RUNNING, | ||
62 | MEI_WDT_STOPPING, | ||
63 | MEI_WDT_NOT_REQUIRED, | ||
64 | }; | ||
65 | |||
66 | static const char *mei_wdt_state_str(enum mei_wdt_state state) | ||
67 | { | ||
68 | switch (state) { | ||
69 | case MEI_WDT_PROBE: | ||
70 | return "PROBE"; | ||
71 | case MEI_WDT_IDLE: | ||
72 | return "IDLE"; | ||
73 | case MEI_WDT_START: | ||
74 | return "START"; | ||
75 | case MEI_WDT_RUNNING: | ||
76 | return "RUNNING"; | ||
77 | case MEI_WDT_STOPPING: | ||
78 | return "STOPPING"; | ||
79 | case MEI_WDT_NOT_REQUIRED: | ||
80 | return "NOT_REQUIRED"; | ||
81 | default: | ||
82 | return "unknown"; | ||
83 | } | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * struct mei_wdt - mei watchdog driver | ||
88 | * @wdd: watchdog device | ||
89 | * | ||
90 | * @cldev: mei watchdog client device | ||
91 | * @state: watchdog internal state | ||
92 | * @resp_required: ping required response | ||
93 | * @response: ping response completion | ||
94 | * @unregister: unregister worker | ||
95 | * @reg_lock: watchdog device registration lock | ||
96 | * @timeout: watchdog current timeout | ||
97 | * | ||
98 | * @dbgfs_dir: debugfs dir entry | ||
99 | */ | ||
100 | struct mei_wdt { | ||
101 | struct watchdog_device wdd; | ||
102 | |||
103 | struct mei_cl_device *cldev; | ||
104 | enum mei_wdt_state state; | ||
105 | bool resp_required; | ||
106 | struct completion response; | ||
107 | struct work_struct unregister; | ||
108 | struct mutex reg_lock; | ||
109 | u16 timeout; | ||
110 | |||
111 | #if IS_ENABLED(CONFIG_DEBUG_FS) | ||
112 | struct dentry *dbgfs_dir; | ||
113 | #endif /* CONFIG_DEBUG_FS */ | ||
114 | }; | ||
115 | |||
116 | /* | ||
117 | * struct mei_mc_hdr - Management Control Command Header | ||
118 | * | ||
119 | * @command: Management Control (0x2) | ||
120 | * @bytecount: Number of bytes in the message beyond this byte | ||
121 | * @subcommand: Management Control Subcommand | ||
122 | * @versionnumber: Management Control Version (0x10) | ||
123 | */ | ||
124 | struct mei_mc_hdr { | ||
125 | u8 command; | ||
126 | u8 bytecount; | ||
127 | u8 subcommand; | ||
128 | u8 versionnumber; | ||
129 | }; | ||
130 | |||
131 | /** | ||
132 | * struct mei_wdt_start_request - watchdog start/ping | ||
133 | * | ||
134 | * @hdr: Management Control Command Header | ||
135 | * @timeout: timeout value | ||
136 | * @reserved: reserved (legacy) | ||
137 | */ | ||
138 | struct mei_wdt_start_request { | ||
139 | struct mei_mc_hdr hdr; | ||
140 | u16 timeout; | ||
141 | u8 reserved[17]; | ||
142 | } __packed; | ||
143 | |||
144 | /** | ||
145 | * struct mei_wdt_start_response - watchdog start/ping response | ||
146 | * | ||
147 | * @hdr: Management Control Command Header | ||
148 | * @status: operation status | ||
149 | * @wdstate: watchdog status bit mask | ||
150 | */ | ||
151 | struct mei_wdt_start_response { | ||
152 | struct mei_mc_hdr hdr; | ||
153 | u8 status; | ||
154 | u8 wdstate; | ||
155 | } __packed; | ||
156 | |||
157 | /** | ||
158 | * struct mei_wdt_stop_request - watchdog stop | ||
159 | * | ||
160 | * @hdr: Management Control Command Header | ||
161 | */ | ||
162 | struct mei_wdt_stop_request { | ||
163 | struct mei_mc_hdr hdr; | ||
164 | } __packed; | ||
165 | |||
166 | /** | ||
167 | * mei_wdt_ping - send wd start/ping command | ||
168 | * | ||
169 | * @wdt: mei watchdog device | ||
170 | * | ||
171 | * Return: 0 on success, | ||
172 | * negative errno code on failure | ||
173 | */ | ||
174 | static int mei_wdt_ping(struct mei_wdt *wdt) | ||
175 | { | ||
176 | struct mei_wdt_start_request req; | ||
177 | const size_t req_len = sizeof(req); | ||
178 | int ret; | ||
179 | |||
180 | memset(&req, 0, req_len); | ||
181 | req.hdr.command = MEI_MANAGEMENT_CONTROL; | ||
182 | req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand); | ||
183 | req.hdr.subcommand = MEI_MC_START_WD_TIMER_REQ; | ||
184 | req.hdr.versionnumber = MEI_MC_VERSION_NUMBER; | ||
185 | req.timeout = wdt->timeout; | ||
186 | |||
187 | ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len); | ||
188 | if (ret < 0) | ||
189 | return ret; | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * mei_wdt_stop - send wd stop command | ||
196 | * | ||
197 | * @wdt: mei watchdog device | ||
198 | * | ||
199 | * Return: 0 on success, | ||
200 | * negative errno code on failure | ||
201 | */ | ||
202 | static int mei_wdt_stop(struct mei_wdt *wdt) | ||
203 | { | ||
204 | struct mei_wdt_stop_request req; | ||
205 | const size_t req_len = sizeof(req); | ||
206 | int ret; | ||
207 | |||
208 | memset(&req, 0, req_len); | ||
209 | req.hdr.command = MEI_MANAGEMENT_CONTROL; | ||
210 | req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand); | ||
211 | req.hdr.subcommand = MEI_MC_STOP_WD_TIMER_REQ; | ||
212 | req.hdr.versionnumber = MEI_MC_VERSION_NUMBER; | ||
213 | |||
214 | ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len); | ||
215 | if (ret < 0) | ||
216 | return ret; | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * mei_wdt_ops_start - wd start command from the watchdog core. | ||
223 | * | ||
224 | * @wdd: watchdog device | ||
225 | * | ||
226 | * Return: 0 on success or -ENODEV; | ||
227 | */ | ||
228 | static int mei_wdt_ops_start(struct watchdog_device *wdd) | ||
229 | { | ||
230 | struct mei_wdt *wdt = watchdog_get_drvdata(wdd); | ||
231 | |||
232 | wdt->state = MEI_WDT_START; | ||
233 | wdd->timeout = wdt->timeout; | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | /** | ||
238 | * mei_wdt_ops_stop - wd stop command from the watchdog core. | ||
239 | * | ||
240 | * @wdd: watchdog device | ||
241 | * | ||
242 | * Return: 0 if success, negative errno code for failure | ||
243 | */ | ||
244 | static int mei_wdt_ops_stop(struct watchdog_device *wdd) | ||
245 | { | ||
246 | struct mei_wdt *wdt = watchdog_get_drvdata(wdd); | ||
247 | int ret; | ||
248 | |||
249 | if (wdt->state != MEI_WDT_RUNNING) | ||
250 | return 0; | ||
251 | |||
252 | wdt->state = MEI_WDT_STOPPING; | ||
253 | |||
254 | ret = mei_wdt_stop(wdt); | ||
255 | if (ret) | ||
256 | return ret; | ||
257 | |||
258 | wdt->state = MEI_WDT_IDLE; | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | /** | ||
264 | * mei_wdt_ops_ping - wd ping command from the watchdog core. | ||
265 | * | ||
266 | * @wdd: watchdog device | ||
267 | * | ||
268 | * Return: 0 if success, negative errno code on failure | ||
269 | */ | ||
270 | static int mei_wdt_ops_ping(struct watchdog_device *wdd) | ||
271 | { | ||
272 | struct mei_wdt *wdt = watchdog_get_drvdata(wdd); | ||
273 | int ret; | ||
274 | |||
275 | if (wdt->state != MEI_WDT_START && wdt->state != MEI_WDT_RUNNING) | ||
276 | return 0; | ||
277 | |||
278 | if (wdt->resp_required) | ||
279 | init_completion(&wdt->response); | ||
280 | |||
281 | wdt->state = MEI_WDT_RUNNING; | ||
282 | ret = mei_wdt_ping(wdt); | ||
283 | if (ret) | ||
284 | return ret; | ||
285 | |||
286 | if (wdt->resp_required) | ||
287 | ret = wait_for_completion_killable(&wdt->response); | ||
288 | |||
289 | return ret; | ||
290 | } | ||
291 | |||
292 | /** | ||
293 | * mei_wdt_ops_set_timeout - wd set timeout command from the watchdog core. | ||
294 | * | ||
295 | * @wdd: watchdog device | ||
296 | * @timeout: timeout value to set | ||
297 | * | ||
298 | * Return: 0 if success, negative errno code for failure | ||
299 | */ | ||
300 | static int mei_wdt_ops_set_timeout(struct watchdog_device *wdd, | ||
301 | unsigned int timeout) | ||
302 | { | ||
303 | |||
304 | struct mei_wdt *wdt = watchdog_get_drvdata(wdd); | ||
305 | |||
306 | /* valid value is already checked by the caller */ | ||
307 | wdt->timeout = timeout; | ||
308 | wdd->timeout = timeout; | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | static const struct watchdog_ops wd_ops = { | ||
314 | .owner = THIS_MODULE, | ||
315 | .start = mei_wdt_ops_start, | ||
316 | .stop = mei_wdt_ops_stop, | ||
317 | .ping = mei_wdt_ops_ping, | ||
318 | .set_timeout = mei_wdt_ops_set_timeout, | ||
319 | }; | ||
320 | |||
321 | /* not const as the firmware_version field need to be retrieved */ | ||
322 | static struct watchdog_info wd_info = { | ||
323 | .identity = INTEL_AMT_WATCHDOG_ID, | ||
324 | .options = WDIOF_KEEPALIVEPING | | ||
325 | WDIOF_SETTIMEOUT | | ||
326 | WDIOF_ALARMONLY, | ||
327 | }; | ||
328 | |||
329 | /** | ||
330 | * __mei_wdt_is_registered - check if wdt is registered | ||
331 | * | ||
332 | * @wdt: mei watchdog device | ||
333 | * | ||
334 | * Return: true if the wdt is registered with the watchdog subsystem | ||
335 | * Locking: should be called under wdt->reg_lock | ||
336 | */ | ||
337 | static inline bool __mei_wdt_is_registered(struct mei_wdt *wdt) | ||
338 | { | ||
339 | return !!watchdog_get_drvdata(&wdt->wdd); | ||
340 | } | ||
341 | |||
342 | /** | ||
343 | * mei_wdt_unregister - unregister from the watchdog subsystem | ||
344 | * | ||
345 | * @wdt: mei watchdog device | ||
346 | */ | ||
347 | static void mei_wdt_unregister(struct mei_wdt *wdt) | ||
348 | { | ||
349 | mutex_lock(&wdt->reg_lock); | ||
350 | |||
351 | if (__mei_wdt_is_registered(wdt)) { | ||
352 | watchdog_unregister_device(&wdt->wdd); | ||
353 | watchdog_set_drvdata(&wdt->wdd, NULL); | ||
354 | memset(&wdt->wdd, 0, sizeof(wdt->wdd)); | ||
355 | } | ||
356 | |||
357 | mutex_unlock(&wdt->reg_lock); | ||
358 | } | ||
359 | |||
360 | /** | ||
361 | * mei_wdt_register - register with the watchdog subsystem | ||
362 | * | ||
363 | * @wdt: mei watchdog device | ||
364 | * | ||
365 | * Return: 0 if success, negative errno code for failure | ||
366 | */ | ||
367 | static int mei_wdt_register(struct mei_wdt *wdt) | ||
368 | { | ||
369 | struct device *dev; | ||
370 | int ret; | ||
371 | |||
372 | if (!wdt || !wdt->cldev) | ||
373 | return -EINVAL; | ||
374 | |||
375 | dev = &wdt->cldev->dev; | ||
376 | |||
377 | mutex_lock(&wdt->reg_lock); | ||
378 | |||
379 | if (__mei_wdt_is_registered(wdt)) { | ||
380 | ret = 0; | ||
381 | goto out; | ||
382 | } | ||
383 | |||
384 | wdt->wdd.info = &wd_info; | ||
385 | wdt->wdd.ops = &wd_ops; | ||
386 | wdt->wdd.parent = dev; | ||
387 | wdt->wdd.timeout = MEI_WDT_DEFAULT_TIMEOUT; | ||
388 | wdt->wdd.min_timeout = MEI_WDT_MIN_TIMEOUT; | ||
389 | wdt->wdd.max_timeout = MEI_WDT_MAX_TIMEOUT; | ||
390 | |||
391 | watchdog_set_drvdata(&wdt->wdd, wdt); | ||
392 | ret = watchdog_register_device(&wdt->wdd); | ||
393 | if (ret) { | ||
394 | dev_err(dev, "unable to register watchdog device = %d.\n", ret); | ||
395 | watchdog_set_drvdata(&wdt->wdd, NULL); | ||
396 | } | ||
397 | |||
398 | wdt->state = MEI_WDT_IDLE; | ||
399 | |||
400 | out: | ||
401 | mutex_unlock(&wdt->reg_lock); | ||
402 | return ret; | ||
403 | } | ||
404 | |||
405 | static void mei_wdt_unregister_work(struct work_struct *work) | ||
406 | { | ||
407 | struct mei_wdt *wdt = container_of(work, struct mei_wdt, unregister); | ||
408 | |||
409 | mei_wdt_unregister(wdt); | ||
410 | } | ||
411 | |||
412 | /** | ||
413 | * mei_wdt_event_rx - callback for data receive | ||
414 | * | ||
415 | * @cldev: bus device | ||
416 | */ | ||
417 | static void mei_wdt_event_rx(struct mei_cl_device *cldev) | ||
418 | { | ||
419 | struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev); | ||
420 | struct mei_wdt_start_response res; | ||
421 | const size_t res_len = sizeof(res); | ||
422 | int ret; | ||
423 | |||
424 | ret = mei_cldev_recv(wdt->cldev, (u8 *)&res, res_len); | ||
425 | if (ret < 0) { | ||
426 | dev_err(&cldev->dev, "failure in recv %d\n", ret); | ||
427 | return; | ||
428 | } | ||
429 | |||
430 | /* Empty response can be sent on stop */ | ||
431 | if (ret == 0) | ||
432 | return; | ||
433 | |||
434 | if (ret < sizeof(struct mei_mc_hdr)) { | ||
435 | dev_err(&cldev->dev, "recv small data %d\n", ret); | ||
436 | return; | ||
437 | } | ||
438 | |||
439 | if (res.hdr.command != MEI_MANAGEMENT_CONTROL || | ||
440 | res.hdr.versionnumber != MEI_MC_VERSION_NUMBER) { | ||
441 | dev_err(&cldev->dev, "wrong command received\n"); | ||
442 | return; | ||
443 | } | ||
444 | |||
445 | if (res.hdr.subcommand != MEI_MC_START_WD_TIMER_RES) { | ||
446 | dev_warn(&cldev->dev, "unsupported command %d :%s[%d]\n", | ||
447 | res.hdr.subcommand, | ||
448 | mei_wdt_state_str(wdt->state), | ||
449 | wdt->state); | ||
450 | return; | ||
451 | } | ||
452 | |||
453 | /* Run the unregistration in a worker as this can be | ||
454 | * run only after ping completion, otherwise the flow will | ||
455 | * deadlock on watchdog core mutex. | ||
456 | */ | ||
457 | if (wdt->state == MEI_WDT_RUNNING) { | ||
458 | if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) { | ||
459 | wdt->state = MEI_WDT_NOT_REQUIRED; | ||
460 | schedule_work(&wdt->unregister); | ||
461 | } | ||
462 | goto out; | ||
463 | } | ||
464 | |||
465 | if (wdt->state == MEI_WDT_PROBE) { | ||
466 | if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) { | ||
467 | wdt->state = MEI_WDT_NOT_REQUIRED; | ||
468 | } else { | ||
469 | /* stop the watchdog and register watchdog device */ | ||
470 | mei_wdt_stop(wdt); | ||
471 | mei_wdt_register(wdt); | ||
472 | } | ||
473 | return; | ||
474 | } | ||
475 | |||
476 | dev_warn(&cldev->dev, "not in correct state %s[%d]\n", | ||
477 | mei_wdt_state_str(wdt->state), wdt->state); | ||
478 | |||
479 | out: | ||
480 | if (!completion_done(&wdt->response)) | ||
481 | complete(&wdt->response); | ||
482 | } | ||
483 | |||
484 | /* | ||
485 | * mei_wdt_notify_event - callback for event notification | ||
486 | * | ||
487 | * @cldev: bus device | ||
488 | */ | ||
489 | static void mei_wdt_notify_event(struct mei_cl_device *cldev) | ||
490 | { | ||
491 | struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev); | ||
492 | |||
493 | if (wdt->state != MEI_WDT_NOT_REQUIRED) | ||
494 | return; | ||
495 | |||
496 | mei_wdt_register(wdt); | ||
497 | } | ||
498 | |||
499 | /** | ||
500 | * mei_wdt_event - callback for event receive | ||
501 | * | ||
502 | * @cldev: bus device | ||
503 | * @events: event mask | ||
504 | * @context: callback context | ||
505 | */ | ||
506 | static void mei_wdt_event(struct mei_cl_device *cldev, | ||
507 | u32 events, void *context) | ||
508 | { | ||
509 | if (events & BIT(MEI_CL_EVENT_RX)) | ||
510 | mei_wdt_event_rx(cldev); | ||
511 | |||
512 | if (events & BIT(MEI_CL_EVENT_NOTIF)) | ||
513 | mei_wdt_notify_event(cldev); | ||
514 | } | ||
515 | |||
516 | #if IS_ENABLED(CONFIG_DEBUG_FS) | ||
517 | |||
518 | static ssize_t mei_dbgfs_read_activation(struct file *file, char __user *ubuf, | ||
519 | size_t cnt, loff_t *ppos) | ||
520 | { | ||
521 | struct mei_wdt *wdt = file->private_data; | ||
522 | const size_t bufsz = 32; | ||
523 | char buf[32]; | ||
524 | ssize_t pos; | ||
525 | |||
526 | mutex_lock(&wdt->reg_lock); | ||
527 | pos = scnprintf(buf, bufsz, "%s\n", | ||
528 | __mei_wdt_is_registered(wdt) ? "activated" : "deactivated"); | ||
529 | mutex_unlock(&wdt->reg_lock); | ||
530 | |||
531 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); | ||
532 | } | ||
533 | |||
534 | static const struct file_operations dbgfs_fops_activation = { | ||
535 | .open = simple_open, | ||
536 | .read = mei_dbgfs_read_activation, | ||
537 | .llseek = generic_file_llseek, | ||
538 | }; | ||
539 | |||
540 | static ssize_t mei_dbgfs_read_state(struct file *file, char __user *ubuf, | ||
541 | size_t cnt, loff_t *ppos) | ||
542 | { | ||
543 | struct mei_wdt *wdt = file->private_data; | ||
544 | const size_t bufsz = 32; | ||
545 | char buf[bufsz]; | ||
546 | ssize_t pos; | ||
547 | |||
548 | pos = scnprintf(buf, bufsz, "state: %s\n", | ||
549 | mei_wdt_state_str(wdt->state)); | ||
550 | |||
551 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); | ||
552 | } | ||
553 | |||
554 | static const struct file_operations dbgfs_fops_state = { | ||
555 | .open = simple_open, | ||
556 | .read = mei_dbgfs_read_state, | ||
557 | .llseek = generic_file_llseek, | ||
558 | }; | ||
559 | |||
560 | static void dbgfs_unregister(struct mei_wdt *wdt) | ||
561 | { | ||
562 | debugfs_remove_recursive(wdt->dbgfs_dir); | ||
563 | wdt->dbgfs_dir = NULL; | ||
564 | } | ||
565 | |||
566 | static int dbgfs_register(struct mei_wdt *wdt) | ||
567 | { | ||
568 | struct dentry *dir, *f; | ||
569 | |||
570 | dir = debugfs_create_dir(KBUILD_MODNAME, NULL); | ||
571 | if (!dir) | ||
572 | return -ENOMEM; | ||
573 | |||
574 | wdt->dbgfs_dir = dir; | ||
575 | f = debugfs_create_file("state", S_IRUSR, dir, wdt, &dbgfs_fops_state); | ||
576 | if (!f) | ||
577 | goto err; | ||
578 | |||
579 | f = debugfs_create_file("activation", S_IRUSR, | ||
580 | dir, wdt, &dbgfs_fops_activation); | ||
581 | if (!f) | ||
582 | goto err; | ||
583 | |||
584 | return 0; | ||
585 | err: | ||
586 | dbgfs_unregister(wdt); | ||
587 | return -ENODEV; | ||
588 | } | ||
589 | |||
590 | #else | ||
591 | |||
592 | static inline void dbgfs_unregister(struct mei_wdt *wdt) {} | ||
593 | |||
594 | static inline int dbgfs_register(struct mei_wdt *wdt) | ||
595 | { | ||
596 | return 0; | ||
597 | } | ||
598 | #endif /* CONFIG_DEBUG_FS */ | ||
599 | |||
600 | static int mei_wdt_probe(struct mei_cl_device *cldev, | ||
601 | const struct mei_cl_device_id *id) | ||
602 | { | ||
603 | struct mei_wdt *wdt; | ||
604 | int ret; | ||
605 | |||
606 | wdt = kzalloc(sizeof(struct mei_wdt), GFP_KERNEL); | ||
607 | if (!wdt) | ||
608 | return -ENOMEM; | ||
609 | |||
610 | wdt->timeout = MEI_WDT_DEFAULT_TIMEOUT; | ||
611 | wdt->state = MEI_WDT_PROBE; | ||
612 | wdt->cldev = cldev; | ||
613 | wdt->resp_required = mei_cldev_ver(cldev) > 0x1; | ||
614 | mutex_init(&wdt->reg_lock); | ||
615 | init_completion(&wdt->response); | ||
616 | INIT_WORK(&wdt->unregister, mei_wdt_unregister_work); | ||
617 | |||
618 | mei_cldev_set_drvdata(cldev, wdt); | ||
619 | |||
620 | ret = mei_cldev_enable(cldev); | ||
621 | if (ret < 0) { | ||
622 | dev_err(&cldev->dev, "Could not enable cl device\n"); | ||
623 | goto err_out; | ||
624 | } | ||
625 | |||
626 | ret = mei_cldev_register_event_cb(wdt->cldev, | ||
627 | BIT(MEI_CL_EVENT_RX) | | ||
628 | BIT(MEI_CL_EVENT_NOTIF), | ||
629 | mei_wdt_event, NULL); | ||
630 | |||
631 | /* on legacy devices notification is not supported | ||
632 | * this doesn't fail the registration for RX event | ||
633 | */ | ||
634 | if (ret && ret != -EOPNOTSUPP) { | ||
635 | dev_err(&cldev->dev, "Could not register event ret=%d\n", ret); | ||
636 | goto err_disable; | ||
637 | } | ||
638 | |||
639 | wd_info.firmware_version = mei_cldev_ver(cldev); | ||
640 | |||
641 | if (wdt->resp_required) | ||
642 | ret = mei_wdt_ping(wdt); | ||
643 | else | ||
644 | ret = mei_wdt_register(wdt); | ||
645 | |||
646 | if (ret) | ||
647 | goto err_disable; | ||
648 | |||
649 | if (dbgfs_register(wdt)) | ||
650 | dev_warn(&cldev->dev, "cannot register debugfs\n"); | ||
651 | |||
652 | return 0; | ||
653 | |||
654 | err_disable: | ||
655 | mei_cldev_disable(cldev); | ||
656 | |||
657 | err_out: | ||
658 | kfree(wdt); | ||
659 | |||
660 | return ret; | ||
661 | } | ||
662 | |||
663 | static int mei_wdt_remove(struct mei_cl_device *cldev) | ||
664 | { | ||
665 | struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev); | ||
666 | |||
667 | /* Free the caller in case of fw initiated or unexpected reset */ | ||
668 | if (!completion_done(&wdt->response)) | ||
669 | complete(&wdt->response); | ||
670 | |||
671 | cancel_work_sync(&wdt->unregister); | ||
672 | |||
673 | mei_wdt_unregister(wdt); | ||
674 | |||
675 | mei_cldev_disable(cldev); | ||
676 | |||
677 | dbgfs_unregister(wdt); | ||
678 | |||
679 | kfree(wdt); | ||
680 | |||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | #define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \ | ||
685 | 0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB) | ||
686 | |||
687 | static struct mei_cl_device_id mei_wdt_tbl[] = { | ||
688 | { .uuid = MEI_UUID_WD, .version = MEI_CL_VERSION_ANY }, | ||
689 | /* required last entry */ | ||
690 | { } | ||
691 | }; | ||
692 | MODULE_DEVICE_TABLE(mei, mei_wdt_tbl); | ||
693 | |||
694 | static struct mei_cl_driver mei_wdt_driver = { | ||
695 | .id_table = mei_wdt_tbl, | ||
696 | .name = KBUILD_MODNAME, | ||
697 | |||
698 | .probe = mei_wdt_probe, | ||
699 | .remove = mei_wdt_remove, | ||
700 | }; | ||
701 | |||
702 | static int __init mei_wdt_init(void) | ||
703 | { | ||
704 | int ret; | ||
705 | |||
706 | ret = mei_cldev_driver_register(&mei_wdt_driver); | ||
707 | if (ret) { | ||
708 | pr_err(KBUILD_MODNAME ": module registration failed\n"); | ||
709 | return ret; | ||
710 | } | ||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | static void __exit mei_wdt_exit(void) | ||
715 | { | ||
716 | mei_cldev_driver_unregister(&mei_wdt_driver); | ||
717 | } | ||
718 | |||
719 | module_init(mei_wdt_init); | ||
720 | module_exit(mei_wdt_exit); | ||
721 | |||
722 | MODULE_AUTHOR("Intel Corporation"); | ||
723 | MODULE_LICENSE("GPL"); | ||
724 | MODULE_DESCRIPTION("Device driver for Intel MEI iAMT watchdog"); | ||
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 6402eaf8ab95..bd01b92aad98 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c | |||
@@ -1040,28 +1040,6 @@ COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS) | |||
1040 | /* PPPOX */ | 1040 | /* PPPOX */ |
1041 | COMPATIBLE_IOCTL(PPPOEIOCSFWD) | 1041 | COMPATIBLE_IOCTL(PPPOEIOCSFWD) |
1042 | COMPATIBLE_IOCTL(PPPOEIOCDFWD) | 1042 | COMPATIBLE_IOCTL(PPPOEIOCDFWD) |
1043 | /* ppdev */ | ||
1044 | COMPATIBLE_IOCTL(PPSETMODE) | ||
1045 | COMPATIBLE_IOCTL(PPRSTATUS) | ||
1046 | COMPATIBLE_IOCTL(PPRCONTROL) | ||
1047 | COMPATIBLE_IOCTL(PPWCONTROL) | ||
1048 | COMPATIBLE_IOCTL(PPFCONTROL) | ||
1049 | COMPATIBLE_IOCTL(PPRDATA) | ||
1050 | COMPATIBLE_IOCTL(PPWDATA) | ||
1051 | COMPATIBLE_IOCTL(PPCLAIM) | ||
1052 | COMPATIBLE_IOCTL(PPRELEASE) | ||
1053 | COMPATIBLE_IOCTL(PPYIELD) | ||
1054 | COMPATIBLE_IOCTL(PPEXCL) | ||
1055 | COMPATIBLE_IOCTL(PPDATADIR) | ||
1056 | COMPATIBLE_IOCTL(PPNEGOT) | ||
1057 | COMPATIBLE_IOCTL(PPWCTLONIRQ) | ||
1058 | COMPATIBLE_IOCTL(PPCLRIRQ) | ||
1059 | COMPATIBLE_IOCTL(PPSETPHASE) | ||
1060 | COMPATIBLE_IOCTL(PPGETMODES) | ||
1061 | COMPATIBLE_IOCTL(PPGETMODE) | ||
1062 | COMPATIBLE_IOCTL(PPGETPHASE) | ||
1063 | COMPATIBLE_IOCTL(PPGETFLAGS) | ||
1064 | COMPATIBLE_IOCTL(PPSETFLAGS) | ||
1065 | /* Big A */ | 1043 | /* Big A */ |
1066 | /* sparc only */ | 1044 | /* sparc only */ |
1067 | /* Big Q for sound/OSS */ | 1045 | /* Big Q for sound/OSS */ |
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 9006c4e75cf7..3d8dcdd1aeae 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h | |||
@@ -163,4 +163,13 @@ struct amba_device name##_device = { \ | |||
163 | #define module_amba_driver(__amba_drv) \ | 163 | #define module_amba_driver(__amba_drv) \ |
164 | module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) | 164 | module_driver(__amba_drv, amba_driver_register, amba_driver_unregister) |
165 | 165 | ||
166 | /* | ||
167 | * builtin_amba_driver() - Helper macro for drivers that don't do anything | ||
168 | * special in driver initcall. This eliminates a lot of boilerplate. Each | ||
169 | * driver may only use this macro once, and calling it replaces the instance | ||
170 | * device_initcall(). | ||
171 | */ | ||
172 | #define builtin_amba_driver(__amba_drv) \ | ||
173 | builtin_driver(__amba_drv, amba_driver_register) | ||
174 | |||
166 | #endif | 175 | #endif |
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h new file mode 100644 index 000000000000..7d410260661b --- /dev/null +++ b/include/linux/coresight-pmu.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | ||
3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License version 2 as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef _LINUX_CORESIGHT_PMU_H | ||
19 | #define _LINUX_CORESIGHT_PMU_H | ||
20 | |||
21 | #define CORESIGHT_ETM_PMU_NAME "cs_etm" | ||
22 | #define CORESIGHT_ETM_PMU_SEED 0x10 | ||
23 | |||
24 | /* ETMv3.5/PTM's ETMCR config bit */ | ||
25 | #define ETM_OPT_CYCACC 12 | ||
26 | #define ETM_OPT_TS 28 | ||
27 | |||
28 | static inline int coresight_get_trace_id(int cpu) | ||
29 | { | ||
30 | /* | ||
31 | * A trace ID of value 0 is invalid, so let's start at some | ||
32 | * random value that fits in 7 bits and go from there. Since | ||
33 | * the common convention is to have data trace IDs be I(N) + 1, | ||
34 | * set instruction trace IDs as a function of the CPU number. | ||
35 | */ | ||
36 | return (CORESIGHT_ETM_PMU_SEED + (cpu * 2)); | ||
37 | } | ||
38 | |||
39 | #endif | ||
diff --git a/include/linux/coresight.h b/include/linux/coresight.h index a7cabfa23b55..385d62e64abb 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #define _LINUX_CORESIGHT_H | 14 | #define _LINUX_CORESIGHT_H |
15 | 15 | ||
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/perf_event.h> | ||
17 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
18 | 19 | ||
19 | /* Peripheral id registers (0xFD0-0xFEC) */ | 20 | /* Peripheral id registers (0xFD0-0xFEC) */ |
@@ -152,7 +153,6 @@ struct coresight_connection { | |||
152 | by @coresight_ops. | 153 | by @coresight_ops. |
153 | * @dev: The device entity associated to this component. | 154 | * @dev: The device entity associated to this component. |
154 | * @refcnt: keep track of what is in use. | 155 | * @refcnt: keep track of what is in use. |
155 | * @path_link: link of current component into the path being enabled. | ||
156 | * @orphan: true if the component has connections that haven't been linked. | 156 | * @orphan: true if the component has connections that haven't been linked. |
157 | * @enable: 'true' if component is currently part of an active path. | 157 | * @enable: 'true' if component is currently part of an active path. |
158 | * @activated: 'true' only if a _sink_ has been activated. A sink can be | 158 | * @activated: 'true' only if a _sink_ has been activated. A sink can be |
@@ -168,7 +168,6 @@ struct coresight_device { | |||
168 | const struct coresight_ops *ops; | 168 | const struct coresight_ops *ops; |
169 | struct device dev; | 169 | struct device dev; |
170 | atomic_t *refcnt; | 170 | atomic_t *refcnt; |
171 | struct list_head path_link; | ||
172 | bool orphan; | 171 | bool orphan; |
173 | bool enable; /* true only if configured as part of a path */ | 172 | bool enable; /* true only if configured as part of a path */ |
174 | bool activated; /* true only if a sink is part of a path */ | 173 | bool activated; /* true only if a sink is part of a path */ |
@@ -183,12 +182,29 @@ struct coresight_device { | |||
183 | /** | 182 | /** |
184 | * struct coresight_ops_sink - basic operations for a sink | 183 | * struct coresight_ops_sink - basic operations for a sink |
185 | * Operations available for sinks | 184 | * Operations available for sinks |
186 | * @enable: enables the sink. | 185 | * @enable: enables the sink. |
187 | * @disable: disables the sink. | 186 | * @disable: disables the sink. |
187 | * @alloc_buffer: initialises perf's ring buffer for trace collection. | ||
188 | * @free_buffer: release memory allocated in @alloc_buffer. | ||
189 | * @set_buffer: initialises buffer mechanic before a trace session. | ||
190 | * @reset_buffer: finalises buffer mechanic after a trace session. | ||
191 | * @update_buffer: update buffer pointers after a trace session. | ||
188 | */ | 192 | */ |
189 | struct coresight_ops_sink { | 193 | struct coresight_ops_sink { |
190 | int (*enable)(struct coresight_device *csdev); | 194 | int (*enable)(struct coresight_device *csdev, u32 mode); |
191 | void (*disable)(struct coresight_device *csdev); | 195 | void (*disable)(struct coresight_device *csdev); |
196 | void *(*alloc_buffer)(struct coresight_device *csdev, int cpu, | ||
197 | void **pages, int nr_pages, bool overwrite); | ||
198 | void (*free_buffer)(void *config); | ||
199 | int (*set_buffer)(struct coresight_device *csdev, | ||
200 | struct perf_output_handle *handle, | ||
201 | void *sink_config); | ||
202 | unsigned long (*reset_buffer)(struct coresight_device *csdev, | ||
203 | struct perf_output_handle *handle, | ||
204 | void *sink_config, bool *lost); | ||
205 | void (*update_buffer)(struct coresight_device *csdev, | ||
206 | struct perf_output_handle *handle, | ||
207 | void *sink_config); | ||
192 | }; | 208 | }; |
193 | 209 | ||
194 | /** | 210 | /** |
@@ -205,14 +221,18 @@ struct coresight_ops_link { | |||
205 | /** | 221 | /** |
206 | * struct coresight_ops_source - basic operations for a source | 222 | * struct coresight_ops_source - basic operations for a source |
207 | * Operations available for sources. | 223 | * Operations available for sources. |
224 | * @cpu_id: returns the value of the CPU number this component | ||
225 | * is associated to. | ||
208 | * @trace_id: returns the value of the component's trace ID as known | 226 | * @trace_id: returns the value of the component's trace ID as known |
209 | to the HW. | 227 | * to the HW. |
210 | * @enable: enables tracing for a source. | 228 | * @enable: enables tracing for a source. |
211 | * @disable: disables tracing for a source. | 229 | * @disable: disables tracing for a source. |
212 | */ | 230 | */ |
213 | struct coresight_ops_source { | 231 | struct coresight_ops_source { |
232 | int (*cpu_id)(struct coresight_device *csdev); | ||
214 | int (*trace_id)(struct coresight_device *csdev); | 233 | int (*trace_id)(struct coresight_device *csdev); |
215 | int (*enable)(struct coresight_device *csdev); | 234 | int (*enable)(struct coresight_device *csdev, |
235 | struct perf_event_attr *attr, u32 mode); | ||
216 | void (*disable)(struct coresight_device *csdev); | 236 | void (*disable)(struct coresight_device *csdev); |
217 | }; | 237 | }; |
218 | 238 | ||
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h index 542888504994..05b97144d342 100644 --- a/include/linux/davinci_emac.h +++ b/include/linux/davinci_emac.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #define _LINUX_DAVINCI_EMAC_H | 12 | #define _LINUX_DAVINCI_EMAC_H |
13 | 13 | ||
14 | #include <linux/if_ether.h> | 14 | #include <linux/if_ether.h> |
15 | #include <linux/memory.h> | 15 | #include <linux/nvmem-consumer.h> |
16 | 16 | ||
17 | struct mdio_platform_data { | 17 | struct mdio_platform_data { |
18 | unsigned long bus_freq; | 18 | unsigned long bus_freq; |
@@ -46,5 +46,5 @@ enum { | |||
46 | EMAC_VERSION_2, /* DM646x */ | 46 | EMAC_VERSION_2, /* DM646x */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context); | 49 | void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context); |
50 | #endif | 50 | #endif |
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h index 06791811e49d..885f587a3555 100644 --- a/include/linux/eeprom_93xx46.h +++ b/include/linux/eeprom_93xx46.h | |||
@@ -3,16 +3,25 @@ | |||
3 | * platform description for 93xx46 EEPROMs. | 3 | * platform description for 93xx46 EEPROMs. |
4 | */ | 4 | */ |
5 | 5 | ||
6 | struct gpio_desc; | ||
7 | |||
6 | struct eeprom_93xx46_platform_data { | 8 | struct eeprom_93xx46_platform_data { |
7 | unsigned char flags; | 9 | unsigned char flags; |
8 | #define EE_ADDR8 0x01 /* 8 bit addr. cfg */ | 10 | #define EE_ADDR8 0x01 /* 8 bit addr. cfg */ |
9 | #define EE_ADDR16 0x02 /* 16 bit addr. cfg */ | 11 | #define EE_ADDR16 0x02 /* 16 bit addr. cfg */ |
10 | #define EE_READONLY 0x08 /* forbid writing */ | 12 | #define EE_READONLY 0x08 /* forbid writing */ |
11 | 13 | ||
14 | unsigned int quirks; | ||
15 | /* Single word read transfers only; no sequential read. */ | ||
16 | #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) | ||
17 | /* Instructions such as EWEN are (addrlen + 2) in length. */ | ||
18 | #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) | ||
19 | |||
12 | /* | 20 | /* |
13 | * optional hooks to control additional logic | 21 | * optional hooks to control additional logic |
14 | * before and after spi transfer. | 22 | * before and after spi transfer. |
15 | */ | 23 | */ |
16 | void (*prepare)(void *); | 24 | void (*prepare)(void *); |
17 | void (*finish)(void *); | 25 | void (*finish)(void *); |
26 | struct gpio_desc *select; | ||
18 | }; | 27 | }; |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 753dbad0bf94..aa0fadce9308 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
@@ -235,6 +235,7 @@ struct vmbus_channel_offer { | |||
235 | #define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100 | 235 | #define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100 |
236 | #define VMBUS_CHANNEL_PARENT_OFFER 0x200 | 236 | #define VMBUS_CHANNEL_PARENT_OFFER 0x200 |
237 | #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400 | 237 | #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400 |
238 | #define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000 | ||
238 | 239 | ||
239 | struct vmpacket_descriptor { | 240 | struct vmpacket_descriptor { |
240 | u16 type; | 241 | u16 type; |
@@ -391,6 +392,10 @@ enum vmbus_channel_message_type { | |||
391 | CHANNELMSG_VERSION_RESPONSE = 15, | 392 | CHANNELMSG_VERSION_RESPONSE = 15, |
392 | CHANNELMSG_UNLOAD = 16, | 393 | CHANNELMSG_UNLOAD = 16, |
393 | CHANNELMSG_UNLOAD_RESPONSE = 17, | 394 | CHANNELMSG_UNLOAD_RESPONSE = 17, |
395 | CHANNELMSG_18 = 18, | ||
396 | CHANNELMSG_19 = 19, | ||
397 | CHANNELMSG_20 = 20, | ||
398 | CHANNELMSG_TL_CONNECT_REQUEST = 21, | ||
394 | CHANNELMSG_COUNT | 399 | CHANNELMSG_COUNT |
395 | }; | 400 | }; |
396 | 401 | ||
@@ -561,6 +566,13 @@ struct vmbus_channel_initiate_contact { | |||
561 | u64 monitor_page2; | 566 | u64 monitor_page2; |
562 | } __packed; | 567 | } __packed; |
563 | 568 | ||
569 | /* Hyper-V socket: guest's connect()-ing to host */ | ||
570 | struct vmbus_channel_tl_connect_request { | ||
571 | struct vmbus_channel_message_header header; | ||
572 | uuid_le guest_endpoint_id; | ||
573 | uuid_le host_service_id; | ||
574 | } __packed; | ||
575 | |||
564 | struct vmbus_channel_version_response { | 576 | struct vmbus_channel_version_response { |
565 | struct vmbus_channel_message_header header; | 577 | struct vmbus_channel_message_header header; |
566 | u8 version_supported; | 578 | u8 version_supported; |
@@ -633,6 +645,32 @@ enum hv_signal_policy { | |||
633 | HV_SIGNAL_POLICY_EXPLICIT, | 645 | HV_SIGNAL_POLICY_EXPLICIT, |
634 | }; | 646 | }; |
635 | 647 | ||
648 | enum vmbus_device_type { | ||
649 | HV_IDE = 0, | ||
650 | HV_SCSI, | ||
651 | HV_FC, | ||
652 | HV_NIC, | ||
653 | HV_ND, | ||
654 | HV_PCIE, | ||
655 | HV_FB, | ||
656 | HV_KBD, | ||
657 | HV_MOUSE, | ||
658 | HV_KVP, | ||
659 | HV_TS, | ||
660 | HV_HB, | ||
661 | HV_SHUTDOWN, | ||
662 | HV_FCOPY, | ||
663 | HV_BACKUP, | ||
664 | HV_DM, | ||
665 | HV_UNKOWN, | ||
666 | }; | ||
667 | |||
668 | struct vmbus_device { | ||
669 | u16 dev_type; | ||
670 | uuid_le guid; | ||
671 | bool perf_device; | ||
672 | }; | ||
673 | |||
636 | struct vmbus_channel { | 674 | struct vmbus_channel { |
637 | /* Unique channel id */ | 675 | /* Unique channel id */ |
638 | int id; | 676 | int id; |
@@ -728,6 +766,12 @@ struct vmbus_channel { | |||
728 | void (*sc_creation_callback)(struct vmbus_channel *new_sc); | 766 | void (*sc_creation_callback)(struct vmbus_channel *new_sc); |
729 | 767 | ||
730 | /* | 768 | /* |
769 | * Channel rescind callback. Some channels (the hvsock ones), need to | ||
770 | * register a callback which is invoked in vmbus_onoffer_rescind(). | ||
771 | */ | ||
772 | void (*chn_rescind_callback)(struct vmbus_channel *channel); | ||
773 | |||
774 | /* | ||
731 | * The spinlock to protect the structure. It is being used to protect | 775 | * The spinlock to protect the structure. It is being used to protect |
732 | * test-and-set access to various attributes of the structure as well | 776 | * test-and-set access to various attributes of the structure as well |
733 | * as all sc_list operations. | 777 | * as all sc_list operations. |
@@ -767,8 +811,30 @@ struct vmbus_channel { | |||
767 | * signaling control. | 811 | * signaling control. |
768 | */ | 812 | */ |
769 | enum hv_signal_policy signal_policy; | 813 | enum hv_signal_policy signal_policy; |
814 | /* | ||
815 | * On the channel send side, many of the VMBUS | ||
816 | * device drivers explicitly serialize access to the | ||
817 | * outgoing ring buffer. Give more control to the | ||
818 | * VMBUS device drivers in terms how to serialize | ||
819 | * access to the outgoing ring buffer. | ||
820 | * The default behavior will be to acquire the | ||
821 | * ring lock to preserve the current behavior. | ||
822 | */ | ||
823 | bool acquire_ring_lock; | ||
824 | |||
770 | }; | 825 | }; |
771 | 826 | ||
827 | static inline void set_channel_lock_state(struct vmbus_channel *c, bool state) | ||
828 | { | ||
829 | c->acquire_ring_lock = state; | ||
830 | } | ||
831 | |||
832 | static inline bool is_hvsock_channel(const struct vmbus_channel *c) | ||
833 | { | ||
834 | return !!(c->offermsg.offer.chn_flags & | ||
835 | VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); | ||
836 | } | ||
837 | |||
772 | static inline void set_channel_signal_state(struct vmbus_channel *c, | 838 | static inline void set_channel_signal_state(struct vmbus_channel *c, |
773 | enum hv_signal_policy policy) | 839 | enum hv_signal_policy policy) |
774 | { | 840 | { |
@@ -790,6 +856,12 @@ static inline void *get_per_channel_state(struct vmbus_channel *c) | |||
790 | return c->per_channel_state; | 856 | return c->per_channel_state; |
791 | } | 857 | } |
792 | 858 | ||
859 | static inline void set_channel_pending_send_size(struct vmbus_channel *c, | ||
860 | u32 size) | ||
861 | { | ||
862 | c->outbound.ring_buffer->pending_send_sz = size; | ||
863 | } | ||
864 | |||
793 | void vmbus_onmessage(void *context); | 865 | void vmbus_onmessage(void *context); |
794 | 866 | ||
795 | int vmbus_request_offers(void); | 867 | int vmbus_request_offers(void); |
@@ -801,6 +873,9 @@ int vmbus_request_offers(void); | |||
801 | void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, | 873 | void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, |
802 | void (*sc_cr_cb)(struct vmbus_channel *new_sc)); | 874 | void (*sc_cr_cb)(struct vmbus_channel *new_sc)); |
803 | 875 | ||
876 | void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, | ||
877 | void (*chn_rescind_cb)(struct vmbus_channel *)); | ||
878 | |||
804 | /* | 879 | /* |
805 | * Retrieve the (sub) channel on which to send an outgoing request. | 880 | * Retrieve the (sub) channel on which to send an outgoing request. |
806 | * When a primary channel has multiple sub-channels, we choose a | 881 | * When a primary channel has multiple sub-channels, we choose a |
@@ -940,6 +1015,20 @@ extern void vmbus_ontimer(unsigned long data); | |||
940 | struct hv_driver { | 1015 | struct hv_driver { |
941 | const char *name; | 1016 | const char *name; |
942 | 1017 | ||
1018 | /* | ||
1019 | * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER | ||
1020 | * channel flag, actually doesn't mean a synthetic device because the | ||
1021 | * offer's if_type/if_instance can change for every new hvsock | ||
1022 | * connection. | ||
1023 | * | ||
1024 | * However, to facilitate the notification of new-offer/rescind-offer | ||
1025 | * from vmbus driver to hvsock driver, we can handle hvsock offer as | ||
1026 | * a special vmbus device, and hence we need the below flag to | ||
1027 | * indicate if the driver is the hvsock driver or not: we need to | ||
1028 | * specially treat the hvsock offer & driver in vmbus_match(). | ||
1029 | */ | ||
1030 | bool hvsock; | ||
1031 | |||
943 | /* the device type supported by this driver */ | 1032 | /* the device type supported by this driver */ |
944 | uuid_le dev_type; | 1033 | uuid_le dev_type; |
945 | const struct hv_vmbus_device_id *id_table; | 1034 | const struct hv_vmbus_device_id *id_table; |
@@ -959,6 +1048,8 @@ struct hv_device { | |||
959 | 1048 | ||
960 | /* the device instance id of this device */ | 1049 | /* the device instance id of this device */ |
961 | uuid_le dev_instance; | 1050 | uuid_le dev_instance; |
1051 | u16 vendor_id; | ||
1052 | u16 device_id; | ||
962 | 1053 | ||
963 | struct device device; | 1054 | struct device device; |
964 | 1055 | ||
@@ -994,6 +1085,8 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, | |||
994 | const char *mod_name); | 1085 | const char *mod_name); |
995 | void vmbus_driver_unregister(struct hv_driver *hv_driver); | 1086 | void vmbus_driver_unregister(struct hv_driver *hv_driver); |
996 | 1087 | ||
1088 | void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); | ||
1089 | |||
997 | int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, | 1090 | int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, |
998 | resource_size_t min, resource_size_t max, | 1091 | resource_size_t min, resource_size_t max, |
999 | resource_size_t size, resource_size_t align, | 1092 | resource_size_t size, resource_size_t align, |
@@ -1158,6 +1251,7 @@ u64 hv_do_hypercall(u64 control, void *input, void *output); | |||
1158 | 1251 | ||
1159 | struct hv_util_service { | 1252 | struct hv_util_service { |
1160 | u8 *recv_buffer; | 1253 | u8 *recv_buffer; |
1254 | void *channel; | ||
1161 | void (*util_cb)(void *); | 1255 | void (*util_cb)(void *); |
1162 | int (*util_init)(struct hv_util_service *); | 1256 | int (*util_init)(struct hv_util_service *); |
1163 | void (*util_deinit)(void); | 1257 | void (*util_deinit)(void); |
@@ -1242,4 +1336,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); | |||
1242 | 1336 | ||
1243 | extern __u32 vmbus_proto_version; | 1337 | extern __u32 vmbus_proto_version; |
1244 | 1338 | ||
1339 | int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, | ||
1340 | const uuid_le *shv_host_servie_id); | ||
1245 | #endif /* _HYPERV_H */ | 1341 | #endif /* _HYPERV_H */ |
diff --git a/include/linux/memory.h b/include/linux/memory.h index 82730adba950..093607f90b91 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
@@ -140,17 +140,6 @@ extern struct memory_block *find_memory_block(struct mem_section *); | |||
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | /* | 142 | /* |
143 | * 'struct memory_accessor' is a generic interface to provide | ||
144 | * in-kernel access to persistent memory such as i2c or SPI EEPROMs | ||
145 | */ | ||
146 | struct memory_accessor { | ||
147 | ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset, | ||
148 | size_t count); | ||
149 | ssize_t (*write)(struct memory_accessor *, const char *buf, | ||
150 | off_t offset, size_t count); | ||
151 | }; | ||
152 | |||
153 | /* | ||
154 | * Kernel text modification mutex, used for code patching. Users of this lock | 143 | * Kernel text modification mutex, used for code patching. Users of this lock |
155 | * can sleep. | 144 | * can sleep. |
156 | */ | 145 | */ |
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index c800dbc42079..5c9a1d44c125 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h | |||
@@ -580,7 +580,9 @@ struct palmas_usb { | |||
580 | int vbus_irq; | 580 | int vbus_irq; |
581 | 581 | ||
582 | int gpio_id_irq; | 582 | int gpio_id_irq; |
583 | int gpio_vbus_irq; | ||
583 | struct gpio_desc *id_gpiod; | 584 | struct gpio_desc *id_gpiod; |
585 | struct gpio_desc *vbus_gpiod; | ||
584 | unsigned long sw_debounce_jiffies; | 586 | unsigned long sw_debounce_jiffies; |
585 | struct delayed_work wq_detectid; | 587 | struct delayed_work wq_detectid; |
586 | 588 | ||
@@ -589,6 +591,7 @@ struct palmas_usb { | |||
589 | bool enable_vbus_detection; | 591 | bool enable_vbus_detection; |
590 | bool enable_id_detection; | 592 | bool enable_id_detection; |
591 | bool enable_gpio_id_detection; | 593 | bool enable_gpio_id_detection; |
594 | bool enable_gpio_vbus_detection; | ||
592 | }; | 595 | }; |
593 | 596 | ||
594 | #define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator) | 597 | #define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator) |
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 0b68caff1b3c..a4fcc90b0f20 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h | |||
@@ -23,6 +23,10 @@ struct nvmem_config { | |||
23 | const struct nvmem_cell_info *cells; | 23 | const struct nvmem_cell_info *cells; |
24 | int ncells; | 24 | int ncells; |
25 | bool read_only; | 25 | bool read_only; |
26 | bool root_only; | ||
27 | /* To be only used by old driver/misc/eeprom drivers */ | ||
28 | bool compat; | ||
29 | struct device *base_dev; | ||
26 | }; | 30 | }; |
27 | 31 | ||
28 | #if IS_ENABLED(CONFIG_NVMEM) | 32 | #if IS_ENABLED(CONFIG_NVMEM) |
@@ -43,5 +47,4 @@ static inline int nvmem_unregister(struct nvmem_device *nvmem) | |||
43 | } | 47 | } |
44 | 48 | ||
45 | #endif /* CONFIG_NVMEM */ | 49 | #endif /* CONFIG_NVMEM */ |
46 | |||
47 | #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ | 50 | #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ |
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h index c42aa89d34ee..dc9a13e5acda 100644 --- a/include/linux/platform_data/at24.h +++ b/include/linux/platform_data/at24.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #define _LINUX_AT24_H | 9 | #define _LINUX_AT24_H |
10 | 10 | ||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/memory.h> | 12 | #include <linux/nvmem-consumer.h> |
13 | 13 | ||
14 | /** | 14 | /** |
15 | * struct at24_platform_data - data to set up at24 (generic eeprom) driver | 15 | * struct at24_platform_data - data to set up at24 (generic eeprom) driver |
@@ -17,7 +17,7 @@ | |||
17 | * @page_size: number of byte which can be written in one go | 17 | * @page_size: number of byte which can be written in one go |
18 | * @flags: tunable options, check AT24_FLAG_* defines | 18 | * @flags: tunable options, check AT24_FLAG_* defines |
19 | * @setup: an optional callback invoked after eeprom is probed; enables kernel | 19 | * @setup: an optional callback invoked after eeprom is probed; enables kernel |
20 | code to access eeprom via memory_accessor, see example | 20 | code to access eeprom via nvmem, see example |
21 | * @context: optional parameter passed to setup() | 21 | * @context: optional parameter passed to setup() |
22 | * | 22 | * |
23 | * If you set up a custom eeprom type, please double-check the parameters. | 23 | * If you set up a custom eeprom type, please double-check the parameters. |
@@ -26,13 +26,13 @@ | |||
26 | * | 26 | * |
27 | * An example in pseudo code for a setup() callback: | 27 | * An example in pseudo code for a setup() callback: |
28 | * | 28 | * |
29 | * void get_mac_addr(struct memory_accessor *mem_acc, void *context) | 29 | * void get_mac_addr(struct nvmem_device *nvmem, void *context)
30 | * { | 30 | * { |
31 | * u8 *mac_addr = ethernet_pdata->mac_addr; | 31 | * u8 *mac_addr = ethernet_pdata->mac_addr; |
32 | * off_t offset = context; | 32 | * off_t offset = context; |
33 | * | 33 | * |
34 | * // Read MAC addr from EEPROM | 34 | * // Read MAC addr from EEPROM |
35 | * if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN) | 35 | * if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) |
36 | * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); | 36 | * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); |
37 | * } | 37 | * } |
38 | * | 38 | * |
@@ -48,7 +48,7 @@ struct at24_platform_data { | |||
48 | #define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ | 48 | #define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ |
49 | #define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ | 49 | #define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ |
50 | 50 | ||
51 | void (*setup)(struct memory_accessor *, void *context); | 51 | void (*setup)(struct nvmem_device *nvmem, void *context); |
52 | void *context; | 52 | void *context; |
53 | }; | 53 | }; |
54 | 54 | ||
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h index 403e007aef68..e34e169f9dcb 100644 --- a/include/linux/spi/eeprom.h +++ b/include/linux/spi/eeprom.h | |||
@@ -30,8 +30,6 @@ struct spi_eeprom { | |||
30 | */ | 30 | */ |
31 | #define EE_INSTR_BIT3_IS_ADDR 0x0010 | 31 | #define EE_INSTR_BIT3_IS_ADDR 0x0010 |
32 | 32 | ||
33 | /* for exporting this chip's data to other kernel code */ | ||
34 | void (*setup)(struct memory_accessor *mem, void *context); | ||
35 | void *context; | 33 | void *context; |
36 | }; | 34 | }; |
37 | 35 | ||
diff --git a/include/linux/stm.h b/include/linux/stm.h index 9d0083d364e6..1a79ed8e43da 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h | |||
@@ -67,6 +67,16 @@ struct stm_device; | |||
67 | * description. That is, the lowest master that can be allocated to software | 67 | * description. That is, the lowest master that can be allocated to software |
68 | * writers is @sw_start and data from this writer will appear is @sw_start | 68 | * writers is @sw_start and data from this writer will appear is @sw_start |
69 | * master in the STP stream. | 69 | * master in the STP stream. |
70 | * | ||
71 | * The @packet callback should adhere to the following rules: | ||
72 | * 1) it must return the number of bytes it consumed from the payload; | ||
73 | * 2) therefore, if it sent a packet that does not have payload (like FLAG), | ||
74 | * it must return zero; | ||
75 | * 3) if it does not support the requested packet type/flag combination, | ||
76 | * it must return -ENOTSUPP. | ||
77 | * | ||
78 | * The @unlink callback is called when there are no more active writers so | ||
79 | * that the master/channel can be quiesced. | ||
70 | */ | 80 | */ |
71 | struct stm_data { | 81 | struct stm_data { |
72 | const char *name; | 82 | const char *name; |
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 65ac54c61c18..1bd31a38c51e 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h | |||
@@ -734,6 +734,41 @@ static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data) | |||
734 | } | 734 | } |
735 | 735 | ||
736 | /* | 736 | /* |
737 | * Helper to read a value from a head or tail pointer. For X86_32, the | ||
738 | * pointer is treated as a 32bit value, since the pointer value | ||
739 | * never exceeds a 32bit value in this case. Also, doing an | ||
740 | * atomic64_read on X86_32 uniprocessor systems may be implemented | ||
741 | * as a non locked cmpxchg8b, that may end up overwriting updates done | ||
742 | * by the VMCI device to the memory location. On 32bit SMP, the lock | ||
743 | * prefix will be used, so correctness isn't an issue, but using a | ||
744 | * 64bit operation still adds unnecessary overhead. | ||
745 | */ | ||
746 | static inline u64 vmci_q_read_pointer(atomic64_t *var) | ||
747 | { | ||
748 | #if defined(CONFIG_X86_32) | ||
749 | return atomic_read((atomic_t *)var); | ||
750 | #else | ||
751 | return atomic64_read(var); | ||
752 | #endif | ||
753 | } | ||
754 | |||
755 | /* | ||
756 | * Helper to set the value of a head or tail pointer. For X86_32, the | ||
757 | * pointer is treated as a 32bit value, since the pointer value | ||
758 | * never exceeds a 32bit value in this case. On 32bit SMP, using a | ||
759 | * locked cmpxchg8b adds unnecessary overhead. | ||
760 | */ | ||
761 | static inline void vmci_q_set_pointer(atomic64_t *var, | ||
762 | u64 new_val) | ||
763 | { | ||
764 | #if defined(CONFIG_X86_32) | ||
765 | return atomic_set((atomic_t *)var, (u32)new_val); | ||
766 | #else | ||
767 | return atomic64_set(var, new_val); | ||
768 | #endif | ||
769 | } | ||
770 | |||
771 | /* | ||
737 | * Helper to add a given offset to a head or tail pointer. Wraps the | 772 | * Helper to add a given offset to a head or tail pointer. Wraps the |
738 | * value of the pointer around the max size of the queue. | 773 | * value of the pointer around the max size of the queue. |
739 | */ | 774 | */ |
@@ -741,14 +776,14 @@ static inline void vmci_qp_add_pointer(atomic64_t *var, | |||
741 | size_t add, | 776 | size_t add, |
742 | u64 size) | 777 | u64 size) |
743 | { | 778 | { |
744 | u64 new_val = atomic64_read(var); | 779 | u64 new_val = vmci_q_read_pointer(var); |
745 | 780 | ||
746 | if (new_val >= size - add) | 781 | if (new_val >= size - add) |
747 | new_val -= size; | 782 | new_val -= size; |
748 | 783 | ||
749 | new_val += add; | 784 | new_val += add; |
750 | 785 | ||
751 | atomic64_set(var, new_val); | 786 | vmci_q_set_pointer(var, new_val); |
752 | } | 787 | } |
753 | 788 | ||
754 | /* | 789 | /* |
@@ -758,7 +793,7 @@ static inline u64 | |||
758 | vmci_q_header_producer_tail(const struct vmci_queue_header *q_header) | 793 | vmci_q_header_producer_tail(const struct vmci_queue_header *q_header) |
759 | { | 794 | { |
760 | struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header; | 795 | struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header; |
761 | return atomic64_read(&qh->producer_tail); | 796 | return vmci_q_read_pointer(&qh->producer_tail); |
762 | } | 797 | } |
763 | 798 | ||
764 | /* | 799 | /* |
@@ -768,7 +803,7 @@ static inline u64 | |||
768 | vmci_q_header_consumer_head(const struct vmci_queue_header *q_header) | 803 | vmci_q_header_consumer_head(const struct vmci_queue_header *q_header) |
769 | { | 804 | { |
770 | struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header; | 805 | struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header; |
771 | return atomic64_read(&qh->consumer_head); | 806 | return vmci_q_read_pointer(&qh->consumer_head); |
772 | } | 807 | } |
773 | 808 | ||
774 | /* | 809 | /* |
diff --git a/lib/devres.c b/lib/devres.c index 8c85672639d3..cb1464c411a2 100644 --- a/lib/devres.c +++ b/lib/devres.c | |||
@@ -236,7 +236,7 @@ struct pcim_iomap_devres { | |||
236 | 236 | ||
237 | static void pcim_iomap_release(struct device *gendev, void *res) | 237 | static void pcim_iomap_release(struct device *gendev, void *res) |
238 | { | 238 | { |
239 | struct pci_dev *dev = container_of(gendev, struct pci_dev, dev); | 239 | struct pci_dev *dev = to_pci_dev(gendev); |
240 | struct pcim_iomap_devres *this = res; | 240 | struct pcim_iomap_devres *this = res; |
241 | int i; | 241 | int i; |
242 | 242 | ||
diff --git a/scripts/ver_linux b/scripts/ver_linux index 024a11ac8b97..0d8bd29b1bd6 100755 --- a/scripts/ver_linux +++ b/scripts/ver_linux | |||
@@ -1,6 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # Before running this script please ensure that your PATH is | 2 | # Before running this script please ensure that your PATH is |
3 | # typical as you use for compilation/istallation. I use | 3 | # typical as you use for compilation/installation. I use |
4 | # /bin /sbin /usr/bin /usr/sbin /usr/local/bin, but it may | 4 | # /bin /sbin /usr/bin /usr/sbin /usr/local/bin, but it may |
5 | # differ on your system. | 5 | # differ on your system. |
6 | # | 6 | # |
diff --git a/tools/hv/Makefile b/tools/hv/Makefile index a8ab79556926..a8c4644022a6 100644 --- a/tools/hv/Makefile +++ b/tools/hv/Makefile | |||
@@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread | |||
5 | WARNINGS = -Wall -Wextra | 5 | WARNINGS = -Wall -Wextra |
6 | CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS) | 6 | CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS) |
7 | 7 | ||
8 | CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include | ||
9 | |||
8 | all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon | 10 | all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon |
9 | %: %.c | 11 | %: %.c |
10 | $(CC) $(CFLAGS) -o $@ $^ | 12 | $(CC) $(CFLAGS) -o $@ $^ |